LCOV - code coverage report
Current view: top level - drivers/base/power - runtime.c
Test:         coverage.info
Date:         2023-04-06 08:38:28

                 Hit    Total    Coverage
Lines:           104      673      15.5 %
Functions:        13       50      26.0 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * drivers/base/power/runtime.c - Helper functions for device runtime PM
       4             :  *
       5             :  * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
       6             :  * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
       7             :  */
       8             : #include <linux/sched/mm.h>
       9             : #include <linux/ktime.h>
      10             : #include <linux/hrtimer.h>
      11             : #include <linux/export.h>
      12             : #include <linux/pm_runtime.h>
      13             : #include <linux/pm_wakeirq.h>
      14             : #include <trace/events/rpm.h>
      15             : 
      16             : #include "../base.h"
      17             : #include "power.h"
      18             : 
      19             : typedef int (*pm_callback_t)(struct device *);
      20             : 
      21           0 : static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
      22             : {
      23             :         pm_callback_t cb;
      24             :         const struct dev_pm_ops *ops;
      25             : 
      26           0 :         if (dev->pm_domain)
      27           0 :                 ops = &dev->pm_domain->ops;
      28           0 :         else if (dev->type && dev->type->pm)
      29             :                 ops = dev->type->pm;
      30           0 :         else if (dev->class && dev->class->pm)
      31             :                 ops = dev->class->pm;
      32           0 :         else if (dev->bus && dev->bus->pm)
      33           0 :                 ops = dev->bus->pm;
      34             :         else
      35             :                 ops = NULL;
      36             : 
      37           0 :         if (ops)
      38           0 :                 cb = *(pm_callback_t *)((void *)ops + cb_offset);
      39             :         else
      40             :                 cb = NULL;
      41             : 
      42           0 :         if (!cb && dev->driver && dev->driver->pm)
      43           0 :                 cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
      44             : 
      45           0 :         return cb;
      46             : }
      47             : 
      48             : #define RPM_GET_CALLBACK(dev, callback) \
      49             :                 __rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
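
For illustration, RPM_GET_CALLBACK(dev, runtime_suspend) expands to __rpm_get_callback(dev, offsetof(struct dev_pm_ops, runtime_suspend)), which searches the pm_domain, type, class and bus in that order and finally falls back to the driver's own dev_pm_ops. A minimal sketch of the driver side that such a lookup would find (the foo_* names are hypothetical):

    #include <linux/pm.h>
    #include <linux/pm_runtime.h>

    /* Hypothetical driver callbacks that __rpm_get_callback() would resolve. */
    static int foo_runtime_suspend(struct device *dev)
    {
            return 0;       /* Power the (imaginary) hardware down here. */
    }

    static int foo_runtime_resume(struct device *dev)
    {
            return 0;       /* Power the (imaginary) hardware back up here. */
    }

    static const struct dev_pm_ops foo_pm_ops = {
            SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
    };
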
      50             : 
      51             : static int rpm_resume(struct device *dev, int rpmflags);
      52             : static int rpm_suspend(struct device *dev, int rpmflags);
      53             : 
      54             : /**
      55             :  * update_pm_runtime_accounting - Update the time accounting of power states
      56             :  * @dev: Device to update the accounting for
      57             :  *
      58             :  * In order to be able to have time accounting of the various power states
      59             :  * (as used by programs such as PowerTOP to show the effectiveness of runtime
      60             :  * PM), we need to track the time spent in each state.
      61             :  * update_pm_runtime_accounting must be called each time before the
      62             :  * runtime_status field is updated, to account the time in the old state
      63             :  * correctly.
      64             :  */
      65           0 : static void update_pm_runtime_accounting(struct device *dev)
      66             : {
      67             :         u64 now, last, delta;
      68             : 
      69           0 :         if (dev->power.disable_depth > 0)
      70             :                 return;
      71             : 
      72           0 :         last = dev->power.accounting_timestamp;
      73             : 
      74           0 :         now = ktime_get_mono_fast_ns();
      75           0 :         dev->power.accounting_timestamp = now;
      76             : 
      77             :         /*
      78             :          * Because ktime_get_mono_fast_ns() is not monotonic during
      79             :          * timekeeping updates, ensure that 'now' is after the last saved
       80             :          * timestamp.
      81             :          */
      82           0 :         if (now < last)
      83             :                 return;
      84             : 
      85           0 :         delta = now - last;
      86             : 
      87           0 :         if (dev->power.runtime_status == RPM_SUSPENDED)
      88           0 :                 dev->power.suspended_time += delta;
      89             :         else
      90           0 :                 dev->power.active_time += delta;
      91             : }
      92             : 
      93             : static void __update_runtime_status(struct device *dev, enum rpm_status status)
      94             : {
      95           0 :         update_pm_runtime_accounting(dev);
      96           0 :         dev->power.runtime_status = status;
      97             : }
      98             : 
      99           0 : static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
     100             : {
     101             :         u64 time;
     102             :         unsigned long flags;
     103             : 
     104           0 :         spin_lock_irqsave(&dev->power.lock, flags);
     105             : 
     106           0 :         update_pm_runtime_accounting(dev);
     107           0 :         time = suspended ? dev->power.suspended_time : dev->power.active_time;
     108             : 
     109           0 :         spin_unlock_irqrestore(&dev->power.lock, flags);
     110             : 
     111           0 :         return time;
     112             : }
     113             : 
     114           0 : u64 pm_runtime_active_time(struct device *dev)
     115             : {
     116           0 :         return rpm_get_accounted_time(dev, false);
     117             : }
     118             : 
     119           0 : u64 pm_runtime_suspended_time(struct device *dev)
     120             : {
     121           0 :         return rpm_get_accounted_time(dev, true);
     122             : }
     123             : EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
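
A sketch of how a module might use the exported accounting helper (note that only pm_runtime_suspended_time() is exported here; pm_runtime_active_time() stays kernel-internal). The foo_report_residency() wrapper is hypothetical:

    #include <linux/device.h>
    #include <linux/ktime.h>
    #include <linux/math64.h>
    #include <linux/pm_runtime.h>

    static void foo_report_residency(struct device *dev)
    {
            /* Total time spent in RPM_SUSPENDED so far, in nanoseconds. */
            u64 suspended_ns = pm_runtime_suspended_time(dev);

            dev_info(dev, "suspended for %llu ms so far\n",
                     div_u64(suspended_ns, NSEC_PER_MSEC));
    }
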
     124             : 
     125             : /**
     126             :  * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
     127             :  * @dev: Device to handle.
     128             :  */
     129             : static void pm_runtime_deactivate_timer(struct device *dev)
     130             : {
     131         553 :         if (dev->power.timer_expires > 0) {
     132           0 :                 hrtimer_try_to_cancel(&dev->power.suspend_timer);
     133           0 :                 dev->power.timer_expires = 0;
     134             :         }
     135             : }
     136             : 
     137             : /**
     138             :  * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
     139             :  * @dev: Device to handle.
     140             :  */
     141             : static void pm_runtime_cancel_pending(struct device *dev)
     142             : {
     143           0 :         pm_runtime_deactivate_timer(dev);
     144             :         /*
     145             :          * In case there's a request pending, make sure its work function will
     146             :          * return without doing anything.
     147             :          */
     148           0 :         dev->power.request = RPM_REQ_NONE;
     149             : }
     150             : 
      151             : /**
     152             :  * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
     153             :  * @dev: Device to handle.
     154             :  *
     155             :  * Compute the autosuspend-delay expiration time based on the device's
     156             :  * power.last_busy time.  If the delay has already expired or is disabled
     157             :  * (negative) or the power.use_autosuspend flag isn't set, return 0.
     158             :  * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
     159             :  *
     160             :  * This function may be called either with or without dev->power.lock held.
     161             :  * Either way it can be racy, since power.last_busy may be updated at any time.
     162             :  */
     163           0 : u64 pm_runtime_autosuspend_expiration(struct device *dev)
     164             : {
     165             :         int autosuspend_delay;
     166             :         u64 expires;
     167             : 
     168           0 :         if (!dev->power.use_autosuspend)
     169             :                 return 0;
     170             : 
     171           0 :         autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
     172           0 :         if (autosuspend_delay < 0)
     173             :                 return 0;
     174             : 
     175           0 :         expires  = READ_ONCE(dev->power.last_busy);
     176           0 :         expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
     177           0 :         if (expires > ktime_get_mono_fast_ns())
     178             :                 return expires; /* Expires in the future */
     179             : 
     180           0 :         return 0;
     181             : }
     182             : EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
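
The expiration time computed here is derived from power.last_busy and power.autosuspend_delay, both of which a driver manages with a pattern like the sketch below (foo_probe_setup() and foo_after_io() are hypothetical helpers; the 2000 ms delay is arbitrary):

    #include <linux/pm_runtime.h>

    static void foo_probe_setup(struct device *dev)
    {
            /* Allow 2000 ms of idleness before an autosuspend attempt. */
            pm_runtime_set_autosuspend_delay(dev, 2000);
            pm_runtime_use_autosuspend(dev);
    }

    static void foo_after_io(struct device *dev)
    {
            /* Refresh power.last_busy, then drop the usage count. */
            pm_runtime_mark_last_busy(dev);
            pm_runtime_put_autosuspend(dev);
    }
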
     183             : 
     184           0 : static int dev_memalloc_noio(struct device *dev, void *data)
     185             : {
     186           0 :         return dev->power.memalloc_noio;
     187             : }
     188             : 
      189             : /**
     190             :  * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
     191             :  * @dev: Device to handle.
     192             :  * @enable: True for setting the flag and False for clearing the flag.
     193             :  *
     194             :  * Set the flag for all devices in the path from the device to the
     195             :  * root device in the device tree if @enable is true, otherwise clear
     196             :  * the flag for devices in the path whose siblings don't set the flag.
     197             :  *
      198             :  * This function should only be called by block device or network
      199             :  * device drivers to solve a deadlock problem that can occur during
      200             :  * runtime resume/suspend:
      201             :  *
      202             :  *     If a memory allocation with GFP_KERNEL takes place inside the
      203             :  *     runtime resume/suspend callback of any of the device's
      204             :  *     ancestors (or of the block device itself), a deadlock may be
      205             :  *     triggered inside the allocation, because it might not complete
      206             :  *     until the block device becomes active and the involved page
      207             :  *     I/O finishes.  This situation was first pointed out by Alan
      208             :  *     Stern.  Network devices can be affected in iSCSI-type setups.
     209             :  *
      210             :  * dev_hotplug_mutex is held throughout this function to handle the
      211             :  * hotplug race, because pm_runtime_set_memalloc_noio() may be called
      212             :  * from an asynchronous probe().
      213             :  *
      214             :  * The function should be called between device_add() and device_del()
      215             :  * on the affected (block or network) device.
     216             :  */
     217           0 : void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
     218             : {
     219             :         static DEFINE_MUTEX(dev_hotplug_mutex);
     220             : 
     221           0 :         mutex_lock(&dev_hotplug_mutex);
     222             :         for (;;) {
     223             :                 bool enabled;
     224             : 
     225             :                 /* hold power lock since bitfield is not SMP-safe. */
     226           0 :                 spin_lock_irq(&dev->power.lock);
     227           0 :                 enabled = dev->power.memalloc_noio;
     228           0 :                 dev->power.memalloc_noio = enable;
     229           0 :                 spin_unlock_irq(&dev->power.lock);
     230             : 
     231             :                 /*
      232             :                  * There is no need to enable the ancestors any more
      233             :                  * if the device itself has already been enabled.
     234             :                  */
     235           0 :                 if (enabled && enable)
     236             :                         break;
     237             : 
     238           0 :                 dev = dev->parent;
     239             : 
     240             :                 /*
      241             :                  * Clear the flag of the parent device only if none of
      242             :                  * its children has the flag set, because an ancestor's
      243             :                  * flag may have been set by any one of its descendants.
     244             :                  */
     245           0 :                 if (!dev || (!enable &&
     246           0 :                     device_for_each_child(dev, NULL, dev_memalloc_noio)))
     247             :                         break;
     248             :         }
     249           0 :         mutex_unlock(&dev_hotplug_mutex);
     250           0 : }
     251             : EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
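
For example, a block device driver might bracket the device's registered lifetime as in this sketch (foo_register() and foo_unregister() are hypothetical):

    #include <linux/device.h>
    #include <linux/pm_runtime.h>

    static int foo_register(struct device *dev)
    {
            int ret = device_add(dev);

            if (ret)
                    return ret;

            /* Opt the whole ancestor path into GFP_NOIO allocations. */
            pm_runtime_set_memalloc_noio(dev, true);
            return 0;
    }

    static void foo_unregister(struct device *dev)
    {
            pm_runtime_set_memalloc_noio(dev, false);
            device_del(dev);
    }
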
     252             : 
     253             : /**
     254             :  * rpm_check_suspend_allowed - Test whether a device may be suspended.
     255             :  * @dev: Device to test.
     256             :  */
     257          72 : static int rpm_check_suspend_allowed(struct device *dev)
     258             : {
     259          72 :         int retval = 0;
     260             : 
     261          72 :         if (dev->power.runtime_error)
     262             :                 retval = -EINVAL;
     263          72 :         else if (dev->power.disable_depth > 0)
     264             :                 retval = -EACCES;
     265           0 :         else if (atomic_read(&dev->power.usage_count))
     266             :                 retval = -EAGAIN;
     267           0 :         else if (!dev->power.ignore_children && atomic_read(&dev->power.child_count))
     268             :                 retval = -EBUSY;
     269             : 
     270             :         /* Pending resume requests take precedence over suspends. */
     271           0 :         else if ((dev->power.deferred_resume &&
     272           0 :             dev->power.runtime_status == RPM_SUSPENDING) ||
     273           0 :             (dev->power.request_pending && dev->power.request == RPM_REQ_RESUME))
     274             :                 retval = -EAGAIN;
     275           0 :         else if (__dev_pm_qos_resume_latency(dev) == 0)
     276             :                 retval = -EPERM;
     277           0 :         else if (dev->power.runtime_status == RPM_SUSPENDED)
     278           0 :                 retval = 1;
     279             : 
     280          72 :         return retval;
     281             : }
     282             : 
     283           0 : static int rpm_get_suppliers(struct device *dev)
     284             : {
     285             :         struct device_link *link;
     286             : 
     287           0 :         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
     288             :                                 device_links_read_lock_held()) {
     289             :                 int retval;
     290             : 
     291           0 :                 if (!(link->flags & DL_FLAG_PM_RUNTIME))
     292           0 :                         continue;
     293             : 
     294           0 :                 retval = pm_runtime_get_sync(link->supplier);
     295             :                 /* Ignore suppliers with disabled runtime PM. */
     296           0 :                 if (retval < 0 && retval != -EACCES) {
     297           0 :                         pm_runtime_put_noidle(link->supplier);
     298             :                         return retval;
     299             :                 }
     300           0 :                 refcount_inc(&link->rpm_active);
     301             :         }
     302             :         return 0;
     303             : }
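
rpm_get_suppliers() only acts on device links that were created with DL_FLAG_PM_RUNTIME. A sketch of establishing such a link (consumer and supplier stand for any two related devices; foo_link_devices() is hypothetical):

    #include <linux/device.h>
    #include <linux/errno.h>

    static int foo_link_devices(struct device *consumer, struct device *supplier)
    {
            /* Tie the consumer's runtime PM state to the supplier's. */
            struct device_link *link = device_link_add(consumer, supplier,
                                                       DL_FLAG_PM_RUNTIME |
                                                       DL_FLAG_RPM_ACTIVE);

            return link ? 0 : -ENODEV;
    }
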
     304             : 
     305             : /**
     306             :  * pm_runtime_release_supplier - Drop references to device link's supplier.
     307             :  * @link: Target device link.
     308             :  *
     309             :  * Drop all runtime PM references associated with @link to its supplier device.
     310             :  */
     311           0 : void pm_runtime_release_supplier(struct device_link *link)
     312             : {
     313           0 :         struct device *supplier = link->supplier;
     314             : 
     315             :         /*
     316             :          * The additional power.usage_count check is a safety net in case
     317             :          * the rpm_active refcount becomes saturated, in which case
     318             :          * refcount_dec_not_one() would return true forever, but it is not
     319             :          * strictly necessary.
     320             :          */
     321           0 :         while (refcount_dec_not_one(&link->rpm_active) &&
     322           0 :                atomic_read(&supplier->power.usage_count) > 0)
     323             :                 pm_runtime_put_noidle(supplier);
     324           0 : }
     325             : 
     326           0 : static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
     327             : {
     328             :         struct device_link *link;
     329             : 
     330           0 :         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
     331             :                                 device_links_read_lock_held()) {
     332           0 :                 pm_runtime_release_supplier(link);
     333           0 :                 if (try_to_suspend)
     334           0 :                         pm_request_idle(link->supplier);
     335             :         }
     336           0 : }
     337             : 
     338           0 : static void rpm_put_suppliers(struct device *dev)
     339             : {
     340           0 :         __rpm_put_suppliers(dev, true);
     341           0 : }
     342             : 
     343           0 : static void rpm_suspend_suppliers(struct device *dev)
     344             : {
     345             :         struct device_link *link;
     346           0 :         int idx = device_links_read_lock();
     347             : 
     348           0 :         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
     349             :                                 device_links_read_lock_held())
     350           0 :                 pm_request_idle(link->supplier);
     351             : 
     352           0 :         device_links_read_unlock(idx);
     353           0 : }
     354             : 
     355             : /**
     356             :  * __rpm_callback - Run a given runtime PM callback for a given device.
     357             :  * @cb: Runtime PM callback to run.
     358             :  * @dev: Device to run the callback for.
     359             :  */
     360           0 : static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
     361             :         __releases(&dev->power.lock) __acquires(&dev->power.lock)
     362             : {
     363           0 :         int retval = 0, idx;
     364           0 :         bool use_links = dev->power.links_count > 0;
     365             : 
     366           0 :         if (dev->power.irq_safe) {
     367           0 :                 spin_unlock(&dev->power.lock);
     368             :         } else {
     369           0 :                 spin_unlock_irq(&dev->power.lock);
     370             : 
     371             :                 /*
     372             :                  * Resume suppliers if necessary.
     373             :                  *
     374             :                  * The device's runtime PM status cannot change until this
     375             :                  * routine returns, so it is safe to read the status outside of
     376             :                  * the lock.
     377             :                  */
     378           0 :                 if (use_links && dev->power.runtime_status == RPM_RESUMING) {
     379           0 :                         idx = device_links_read_lock();
     380             : 
     381           0 :                         retval = rpm_get_suppliers(dev);
     382           0 :                         if (retval) {
     383           0 :                                 rpm_put_suppliers(dev);
     384           0 :                                 goto fail;
     385             :                         }
     386             : 
     387           0 :                         device_links_read_unlock(idx);
     388             :                 }
     389             :         }
     390             : 
     391           0 :         if (cb)
     392           0 :                 retval = cb(dev);
     393             : 
     394           0 :         if (dev->power.irq_safe) {
     395           0 :                 spin_lock(&dev->power.lock);
     396             :         } else {
     397             :                 /*
     398             :                  * If the device is suspending and the callback has returned
     399             :                  * success, drop the usage counters of the suppliers that have
     400             :                  * been reference counted on its resume.
     401             :                  *
     402             :                  * Do that if resume fails too.
     403             :                  */
     404           0 :                 if (use_links &&
     405           0 :                     ((dev->power.runtime_status == RPM_SUSPENDING && !retval) ||
     406           0 :                     (dev->power.runtime_status == RPM_RESUMING && retval))) {
     407           0 :                         idx = device_links_read_lock();
     408             : 
     409           0 :                         __rpm_put_suppliers(dev, false);
     410             : 
     411             : fail:
     412           0 :                         device_links_read_unlock(idx);
     413             :                 }
     414             : 
     415           0 :                 spin_lock_irq(&dev->power.lock);
     416             :         }
     417             : 
     418           0 :         return retval;
     419             : }
     420             : 
     421             : /**
     422             :  * rpm_callback - Run a given runtime PM callback for a given device.
     423             :  * @cb: Runtime PM callback to run.
     424             :  * @dev: Device to run the callback for.
     425             :  */
     426           0 : static int rpm_callback(int (*cb)(struct device *), struct device *dev)
     427             : {
     428             :         int retval;
     429             : 
     430           0 :         if (dev->power.memalloc_noio) {
     431             :                 unsigned int noio_flag;
     432             : 
     433             :                 /*
       434             :                  * A deadlock might occur if a memory allocation with
       435             :                  * GFP_KERNEL happens inside the runtime_suspend or
       436             :                  * runtime_resume callback of a block device's
       437             :                  * ancestor or of the block device itself.  A network
       438             :                  * device might be thought of as part of an iSCSI
       439             :                  * block device, so the network device and its
       440             :                  * ancestors should be marked as memalloc_noio too.
     441             :                  */
     442           0 :                 noio_flag = memalloc_noio_save();
     443           0 :                 retval = __rpm_callback(cb, dev);
     444             :                 memalloc_noio_restore(noio_flag);
     445             :         } else {
     446           0 :                 retval = __rpm_callback(cb, dev);
     447             :         }
     448             : 
     449           0 :         dev->power.runtime_error = retval;
     450           0 :         return retval != -EACCES ? retval : -EIO;
     451             : }
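
The memalloc_noio_save()/memalloc_noio_restore() pair used by rpm_callback() is the general mechanism for scoping GFP_NOIO behavior; in isolation the pattern looks like this sketch (foo_alloc_noio() is hypothetical):

    #include <linux/sched/mm.h>
    #include <linux/slab.h>

    static void *foo_alloc_noio(size_t size)
    {
            unsigned int noio_flag;
            void *buf;

            noio_flag = memalloc_noio_save();
            /* This allocation implicitly behaves as if GFP_NOIO were used. */
            buf = kmalloc(size, GFP_KERNEL);
            memalloc_noio_restore(noio_flag);

            return buf;
    }
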
     452             : 
     453             : /**
     454             :  * rpm_idle - Notify device bus type if the device can be suspended.
     455             :  * @dev: Device to notify the bus type about.
     456             :  * @rpmflags: Flag bits.
     457             :  *
     458             :  * Check if the device's runtime PM status allows it to be suspended.  If
     459             :  * another idle notification has been started earlier, return immediately.  If
     460             :  * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
     461             :  * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
     462             :  * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
     463             :  *
     464             :  * This function must be called under dev->power.lock with interrupts disabled.
     465             :  */
     466          72 : static int rpm_idle(struct device *dev, int rpmflags)
     467             : {
     468             :         int (*callback)(struct device *);
     469             :         int retval;
     470             : 
     471          72 :         trace_rpm_idle(dev, rpmflags);
     472          72 :         retval = rpm_check_suspend_allowed(dev);
     473          72 :         if (retval < 0)
     474             :                 ;       /* Conditions are wrong. */
     475             : 
     476             :         /* Idle notifications are allowed only in the RPM_ACTIVE state. */
     477           0 :         else if (dev->power.runtime_status != RPM_ACTIVE)
     478             :                 retval = -EAGAIN;
     479             : 
     480             :         /*
     481             :          * Any pending request other than an idle notification takes
     482             :          * precedence over us, except that the timer may be running.
     483             :          */
     484           0 :         else if (dev->power.request_pending &&
     485           0 :             dev->power.request > RPM_REQ_IDLE)
     486             :                 retval = -EAGAIN;
     487             : 
     488             :         /* Act as though RPM_NOWAIT is always set. */
     489           0 :         else if (dev->power.idle_notification)
     490           0 :                 retval = -EINPROGRESS;
     491             : 
     492          72 :         if (retval)
     493             :                 goto out;
     494             : 
     495             :         /* Pending requests need to be canceled. */
     496           0 :         dev->power.request = RPM_REQ_NONE;
     497             : 
     498           0 :         callback = RPM_GET_CALLBACK(dev, runtime_idle);
     499             : 
     500             :         /* If no callback assume success. */
     501           0 :         if (!callback || dev->power.no_callbacks)
     502             :                 goto out;
     503             : 
     504             :         /* Carry out an asynchronous or a synchronous idle notification. */
     505           0 :         if (rpmflags & RPM_ASYNC) {
     506           0 :                 dev->power.request = RPM_REQ_IDLE;
     507           0 :                 if (!dev->power.request_pending) {
     508           0 :                         dev->power.request_pending = true;
     509           0 :                         queue_work(pm_wq, &dev->power.work);
     510             :                 }
     511           0 :                 trace_rpm_return_int(dev, _THIS_IP_, 0);
     512           0 :                 return 0;
     513             :         }
     514             : 
     515           0 :         dev->power.idle_notification = true;
     516             : 
     517           0 :         if (dev->power.irq_safe)
     518           0 :                 spin_unlock(&dev->power.lock);
     519             :         else
     520           0 :                 spin_unlock_irq(&dev->power.lock);
     521             : 
     522           0 :         retval = callback(dev);
     523             : 
     524           0 :         if (dev->power.irq_safe)
     525           0 :                 spin_lock(&dev->power.lock);
     526             :         else
     527           0 :                 spin_lock_irq(&dev->power.lock);
     528             : 
     529           0 :         dev->power.idle_notification = false;
     530           0 :         wake_up_all(&dev->power.wait_queue);
     531             : 
     532             :  out:
     533          72 :         trace_rpm_return_int(dev, _THIS_IP_, retval);
     534          72 :         return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
     535             : }
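
Seen from a driver, the contract of rpm_idle() is simple: if the ->runtime_idle() callback is absent or returns 0, an autosuspend attempt follows; a nonzero return vetoes it. A sketch (foo_device_busy() is a hypothetical helper):

    static int foo_runtime_idle(struct device *dev)
    {
            if (foo_device_busy(dev))
                    return -EBUSY;  /* Veto: the device stays active. */

            return 0;               /* Leads to rpm_suspend(dev, RPM_AUTO). */
    }
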
     536             : 
     537             : /**
     538             :  * rpm_suspend - Carry out runtime suspend of given device.
     539             :  * @dev: Device to suspend.
     540             :  * @rpmflags: Flag bits.
     541             :  *
     542             :  * Check if the device's runtime PM status allows it to be suspended.
     543             :  * Cancel a pending idle notification, autosuspend or suspend. If
     544             :  * another suspend has been started earlier, either return immediately
     545             :  * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
     546             :  * flags. If the RPM_ASYNC flag is set then queue a suspend request;
      547             :  * otherwise run the ->runtime_suspend() callback directly.  If
      548             :  * ->runtime_suspend() succeeds and a deferred resume was requested while
      549             :  * the callback was running, carry that resume out; otherwise send an idle
      550             :  * notification to the device's parent (provided the suspend succeeded and
      551             :  * neither parent->power.ignore_children nor dev->power.irq_safe is set).
     552             :  * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
     553             :  * flag is set and the next autosuspend-delay expiration time is in the
     554             :  * future, schedule another autosuspend attempt.
     555             :  *
     556             :  * This function must be called under dev->power.lock with interrupts disabled.
     557             :  */
     558           0 : static int rpm_suspend(struct device *dev, int rpmflags)
     559             :         __releases(&dev->power.lock) __acquires(&dev->power.lock)
     560             : {
     561             :         int (*callback)(struct device *);
     562           0 :         struct device *parent = NULL;
     563             :         int retval;
     564             : 
     565           0 :         trace_rpm_suspend(dev, rpmflags);
     566             : 
     567             :  repeat:
     568           0 :         retval = rpm_check_suspend_allowed(dev);
     569           0 :         if (retval < 0)
     570             :                 goto out;       /* Conditions are wrong. */
     571             : 
     572             :         /* Synchronous suspends are not allowed in the RPM_RESUMING state. */
     573           0 :         if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC))
     574           0 :                 retval = -EAGAIN;
     575             : 
     576           0 :         if (retval)
     577             :                 goto out;
     578             : 
     579             :         /* If the autosuspend_delay time hasn't expired yet, reschedule. */
     580           0 :         if ((rpmflags & RPM_AUTO) && dev->power.runtime_status != RPM_SUSPENDING) {
     581           0 :                 u64 expires = pm_runtime_autosuspend_expiration(dev);
     582             : 
     583           0 :                 if (expires != 0) {
     584             :                         /* Pending requests need to be canceled. */
     585           0 :                         dev->power.request = RPM_REQ_NONE;
     586             : 
     587             :                         /*
     588             :                          * Optimization: If the timer is already running and is
     589             :                          * set to expire at or before the autosuspend delay,
     590             :                          * avoid the overhead of resetting it.  Just let it
     591             :                          * expire; pm_suspend_timer_fn() will take care of the
     592             :                          * rest.
     593             :                          */
     594           0 :                         if (!(dev->power.timer_expires &&
     595             :                             dev->power.timer_expires <= expires)) {
     596             :                                 /*
     597             :                                  * We add a slack of 25% to gather wakeups
     598             :                                  * without sacrificing the granularity.
     599             :                                  */
     600           0 :                                 u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
     601             :                                                     (NSEC_PER_MSEC >> 2);
     602             : 
     603           0 :                                 dev->power.timer_expires = expires;
     604           0 :                                 hrtimer_start_range_ns(&dev->power.suspend_timer,
     605             :                                                        ns_to_ktime(expires),
     606             :                                                        slack,
     607             :                                                        HRTIMER_MODE_ABS);
     608             :                         }
     609           0 :                         dev->power.timer_autosuspends = 1;
     610           0 :                         goto out;
     611             :                 }
     612             :         }
     613             : 
     614             :         /* Other scheduled or pending requests need to be canceled. */
     615           0 :         pm_runtime_cancel_pending(dev);
     616             : 
     617           0 :         if (dev->power.runtime_status == RPM_SUSPENDING) {
     618           0 :                 DEFINE_WAIT(wait);
     619             : 
     620           0 :                 if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
     621           0 :                         retval = -EINPROGRESS;
     622           0 :                         goto out;
     623             :                 }
     624             : 
     625           0 :                 if (dev->power.irq_safe) {
     626           0 :                         spin_unlock(&dev->power.lock);
     627             : 
     628             :                         cpu_relax();
     629             : 
     630           0 :                         spin_lock(&dev->power.lock);
     631           0 :                         goto repeat;
     632             :                 }
     633             : 
     634             :                 /* Wait for the other suspend running in parallel with us. */
     635             :                 for (;;) {
     636           0 :                         prepare_to_wait(&dev->power.wait_queue, &wait,
     637             :                                         TASK_UNINTERRUPTIBLE);
     638           0 :                         if (dev->power.runtime_status != RPM_SUSPENDING)
     639             :                                 break;
     640             : 
     641           0 :                         spin_unlock_irq(&dev->power.lock);
     642             : 
     643           0 :                         schedule();
     644             : 
     645           0 :                         spin_lock_irq(&dev->power.lock);
     646             :                 }
     647           0 :                 finish_wait(&dev->power.wait_queue, &wait);
     648           0 :                 goto repeat;
     649             :         }
     650             : 
     651           0 :         if (dev->power.no_callbacks)
     652             :                 goto no_callback;       /* Assume success. */
     653             : 
     654             :         /* Carry out an asynchronous or a synchronous suspend. */
     655           0 :         if (rpmflags & RPM_ASYNC) {
     656           0 :                 dev->power.request = (rpmflags & RPM_AUTO) ?
     657           0 :                     RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
     658           0 :                 if (!dev->power.request_pending) {
     659           0 :                         dev->power.request_pending = true;
     660           0 :                         queue_work(pm_wq, &dev->power.work);
     661             :                 }
     662             :                 goto out;
     663             :         }
     664             : 
     665           0 :         __update_runtime_status(dev, RPM_SUSPENDING);
     666             : 
     667           0 :         callback = RPM_GET_CALLBACK(dev, runtime_suspend);
     668             : 
     669           0 :         dev_pm_enable_wake_irq_check(dev, true);
     670           0 :         retval = rpm_callback(callback, dev);
     671           0 :         if (retval)
     672             :                 goto fail;
     673             : 
     674           0 :         dev_pm_enable_wake_irq_complete(dev);
     675             : 
     676             :  no_callback:
     677           0 :         __update_runtime_status(dev, RPM_SUSPENDED);
     678           0 :         pm_runtime_deactivate_timer(dev);
     679             : 
     680           0 :         if (dev->parent) {
     681           0 :                 parent = dev->parent;
     682           0 :                 atomic_add_unless(&parent->power.child_count, -1, 0);
     683             :         }
     684           0 :         wake_up_all(&dev->power.wait_queue);
     685             : 
     686           0 :         if (dev->power.deferred_resume) {
     687           0 :                 dev->power.deferred_resume = false;
     688           0 :                 rpm_resume(dev, 0);
     689           0 :                 retval = -EAGAIN;
     690           0 :                 goto out;
     691             :         }
     692             : 
     693           0 :         if (dev->power.irq_safe)
     694             :                 goto out;
     695             : 
     696             :         /* Maybe the parent is now able to suspend. */
     697           0 :         if (parent && !parent->power.ignore_children) {
     698           0 :                 spin_unlock(&dev->power.lock);
     699             : 
     700           0 :                 spin_lock(&parent->power.lock);
     701           0 :                 rpm_idle(parent, RPM_ASYNC);
     702           0 :                 spin_unlock(&parent->power.lock);
     703             : 
     704           0 :                 spin_lock(&dev->power.lock);
     705             :         }
     706             :         /* Maybe the suppliers are now able to suspend. */
     707           0 :         if (dev->power.links_count > 0) {
     708           0 :                 spin_unlock_irq(&dev->power.lock);
     709             : 
     710           0 :                 rpm_suspend_suppliers(dev);
     711             : 
     712           0 :                 spin_lock_irq(&dev->power.lock);
     713             :         }
     714             : 
     715             :  out:
     716           0 :         trace_rpm_return_int(dev, _THIS_IP_, retval);
     717             : 
     718           0 :         return retval;
     719             : 
     720             :  fail:
     721           0 :         dev_pm_disable_wake_irq_check(dev, true);
     722           0 :         __update_runtime_status(dev, RPM_ACTIVE);
     723           0 :         dev->power.deferred_resume = false;
     724           0 :         wake_up_all(&dev->power.wait_queue);
     725             : 
     726           0 :         if (retval == -EAGAIN || retval == -EBUSY) {
     727           0 :                 dev->power.runtime_error = 0;
     728             : 
     729             :                 /*
     730             :                  * If the callback routine failed an autosuspend, and
     731             :                  * if the last_busy time has been updated so that there
     732             :                  * is a new autosuspend expiration time, automatically
     733             :                  * reschedule another autosuspend.
     734             :                  */
     735           0 :                 if ((rpmflags & RPM_AUTO) &&
     736           0 :                     pm_runtime_autosuspend_expiration(dev) != 0)
     737             :                         goto repeat;
     738             :         } else {
     739             :                 pm_runtime_cancel_pending(dev);
     740             :         }
     741             :         goto out;
     742             : }
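
The fail: path above treats -EAGAIN and -EBUSY specially, so a driver can refuse an autosuspend attempt and have it transparently rescheduled, as in this sketch (the foo_* helpers are hypothetical):

    static int foo_runtime_suspend(struct device *dev)
    {
            if (foo_transfer_in_flight(dev)) {
                    /* Push last_busy forward so a fresh attempt is scheduled. */
                    pm_runtime_mark_last_busy(dev);
                    return -EBUSY;
            }

            foo_power_down(dev);
            return 0;
    }
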
     743             : 
     744             : /**
     745             :  * rpm_resume - Carry out runtime resume of given device.
     746             :  * @dev: Device to resume.
     747             :  * @rpmflags: Flag bits.
     748             :  *
     749             :  * Check if the device's runtime PM status allows it to be resumed.  Cancel
     750             :  * any scheduled or pending requests.  If another resume has been started
     751             :  * earlier, either return immediately or wait for it to finish, depending on the
     752             :  * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
     753             :  * parallel with this function, either tell the other process to resume after
     754             :  * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
     755             :  * flag is set then queue a resume request; otherwise run the
     756             :  * ->runtime_resume() callback directly.  Queue an idle notification for the
     757             :  * device if the resume succeeded.
     758             :  *
     759             :  * This function must be called under dev->power.lock with interrupts disabled.
     760             :  */
     761          51 : static int rpm_resume(struct device *dev, int rpmflags)
     762             :         __releases(&dev->power.lock) __acquires(&dev->power.lock)
     763             : {
     764             :         int (*callback)(struct device *);
     765          51 :         struct device *parent = NULL;
     766          51 :         int retval = 0;
     767             : 
     768          51 :         trace_rpm_resume(dev, rpmflags);
     769             : 
     770             :  repeat:
     771          51 :         if (dev->power.runtime_error) {
     772             :                 retval = -EINVAL;
     773          51 :         } else if (dev->power.disable_depth > 0) {
     774          51 :                 if (dev->power.runtime_status == RPM_ACTIVE &&
     775           0 :                     dev->power.last_status == RPM_ACTIVE)
     776             :                         retval = 1;
     777             :                 else
     778          51 :                         retval = -EACCES;
     779             :         }
     780          51 :         if (retval)
     781             :                 goto out;
     782             : 
     783             :         /*
     784             :          * Other scheduled or pending requests need to be canceled.  Small
     785             :          * optimization: If an autosuspend timer is running, leave it running
     786             :          * rather than cancelling it now only to restart it again in the near
     787             :          * future.
     788             :          */
     789           0 :         dev->power.request = RPM_REQ_NONE;
     790           0 :         if (!dev->power.timer_autosuspends)
     791             :                 pm_runtime_deactivate_timer(dev);
     792             : 
     793           0 :         if (dev->power.runtime_status == RPM_ACTIVE) {
     794             :                 retval = 1;
     795             :                 goto out;
     796             :         }
     797             : 
     798           0 :         if (dev->power.runtime_status == RPM_RESUMING ||
     799             :             dev->power.runtime_status == RPM_SUSPENDING) {
     800           0 :                 DEFINE_WAIT(wait);
     801             : 
     802           0 :                 if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
     803           0 :                         if (dev->power.runtime_status == RPM_SUSPENDING) {
     804           0 :                                 dev->power.deferred_resume = true;
     805           0 :                                 if (rpmflags & RPM_NOWAIT)
     806           0 :                                         retval = -EINPROGRESS;
     807             :                         } else {
     808             :                                 retval = -EINPROGRESS;
     809             :                         }
     810           0 :                         goto out;
     811             :                 }
     812             : 
     813           0 :                 if (dev->power.irq_safe) {
     814           0 :                         spin_unlock(&dev->power.lock);
     815             : 
     816             :                         cpu_relax();
     817             : 
     818           0 :                         spin_lock(&dev->power.lock);
     819           0 :                         goto repeat;
     820             :                 }
     821             : 
     822             :                 /* Wait for the operation carried out in parallel with us. */
     823             :                 for (;;) {
     824           0 :                         prepare_to_wait(&dev->power.wait_queue, &wait,
     825             :                                         TASK_UNINTERRUPTIBLE);
     826           0 :                         if (dev->power.runtime_status != RPM_RESUMING &&
     827             :                             dev->power.runtime_status != RPM_SUSPENDING)
     828             :                                 break;
     829             : 
     830           0 :                         spin_unlock_irq(&dev->power.lock);
     831             : 
     832           0 :                         schedule();
     833             : 
     834           0 :                         spin_lock_irq(&dev->power.lock);
     835             :                 }
     836           0 :                 finish_wait(&dev->power.wait_queue, &wait);
     837           0 :                 goto repeat;
     838             :         }
     839             : 
     840             :         /*
     841             :          * See if we can skip waking up the parent.  This is safe only if
     842             :          * power.no_callbacks is set, because otherwise we don't know whether
     843             :          * the resume will actually succeed.
     844             :          */
     845           0 :         if (dev->power.no_callbacks && !parent && dev->parent) {
     846           0 :                 spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
     847           0 :                 if (dev->parent->power.disable_depth > 0 ||
     848           0 :                     dev->parent->power.ignore_children ||
     849           0 :                     dev->parent->power.runtime_status == RPM_ACTIVE) {
     850           0 :                         atomic_inc(&dev->parent->power.child_count);
     851           0 :                         spin_unlock(&dev->parent->power.lock);
     852           0 :                         retval = 1;
     853           0 :                         goto no_callback;       /* Assume success. */
     854             :                 }
     855           0 :                 spin_unlock(&dev->parent->power.lock);
     856             :         }
     857             : 
     858             :         /* Carry out an asynchronous or a synchronous resume. */
     859           0 :         if (rpmflags & RPM_ASYNC) {
     860           0 :                 dev->power.request = RPM_REQ_RESUME;
     861           0 :                 if (!dev->power.request_pending) {
     862           0 :                         dev->power.request_pending = true;
     863           0 :                         queue_work(pm_wq, &dev->power.work);
     864             :                 }
     865             :                 retval = 0;
     866             :                 goto out;
     867             :         }
     868             : 
     869           0 :         if (!parent && dev->parent) {
     870             :                 /*
     871             :                  * Increment the parent's usage counter and resume it if
     872             :                  * necessary.  Not needed if dev is irq-safe; then the
     873             :                  * parent is permanently resumed.
     874             :                  */
     875           0 :                 parent = dev->parent;
     876           0 :                 if (dev->power.irq_safe)
     877             :                         goto skip_parent;
     878             : 
     879           0 :                 spin_unlock(&dev->power.lock);
     880             : 
     881           0 :                 pm_runtime_get_noresume(parent);
     882             : 
     883           0 :                 spin_lock(&parent->power.lock);
     884             :                 /*
       885             :                  * Resume the parent if it has runtime PM enabled and
       886             :                  * has not been set to ignore its children.
     887             :                  */
     888           0 :                 if (!parent->power.disable_depth &&
     889           0 :                     !parent->power.ignore_children) {
     890           0 :                         rpm_resume(parent, 0);
     891           0 :                         if (parent->power.runtime_status != RPM_ACTIVE)
     892           0 :                                 retval = -EBUSY;
     893             :                 }
     894           0 :                 spin_unlock(&parent->power.lock);
     895             : 
     896           0 :                 spin_lock(&dev->power.lock);
     897           0 :                 if (retval)
     898             :                         goto out;
     899             : 
     900             :                 goto repeat;
     901             :         }
     902             :  skip_parent:
     903             : 
     904           0 :         if (dev->power.no_callbacks)
     905             :                 goto no_callback;       /* Assume success. */
     906             : 
     907           0 :         __update_runtime_status(dev, RPM_RESUMING);
     908             : 
     909           0 :         callback = RPM_GET_CALLBACK(dev, runtime_resume);
     910             : 
     911           0 :         dev_pm_disable_wake_irq_check(dev, false);
     912           0 :         retval = rpm_callback(callback, dev);
     913           0 :         if (retval) {
     914           0 :                 __update_runtime_status(dev, RPM_SUSPENDED);
     915           0 :                 pm_runtime_cancel_pending(dev);
     916           0 :                 dev_pm_enable_wake_irq_check(dev, false);
     917             :         } else {
     918             :  no_callback:
     919           0 :                 __update_runtime_status(dev, RPM_ACTIVE);
     920           0 :                 pm_runtime_mark_last_busy(dev);
     921           0 :                 if (parent)
     922           0 :                         atomic_inc(&parent->power.child_count);
     923             :         }
     924           0 :         wake_up_all(&dev->power.wait_queue);
     925             : 
     926           0 :         if (retval >= 0)
     927           0 :                 rpm_idle(dev, RPM_ASYNC);
     928             : 
     929             :  out:
     930          51 :         if (parent && !dev->power.irq_safe) {
     931           0 :                 spin_unlock_irq(&dev->power.lock);
     932             : 
     933           0 :                 pm_runtime_put(parent);
     934             : 
     935           0 :                 spin_lock_irq(&dev->power.lock);
     936             :         }
     937             : 
     938          51 :         trace_rpm_return_int(dev, _THIS_IP_, retval);
     939             : 
     940          51 :         return retval;
     941             : }
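
rpm_resume() is what ultimately runs when a driver wraps hardware access in the usual get/put pattern, sketched below (foo_access_hw() and foo_do_io() are hypothetical):

    #include <linux/pm_runtime.h>

    static int foo_access_hw(struct device *dev)
    {
            /* Synchronously resume dev (and, if needed, its parent chain). */
            int ret = pm_runtime_get_sync(dev);

            if (ret < 0) {
                    pm_runtime_put_noidle(dev);
                    return ret;
            }

            foo_do_io(dev);             /* Hypothetical hardware access. */

            pm_runtime_mark_last_busy(dev);
            pm_runtime_put_autosuspend(dev);
            return 0;
    }
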
     942             : 
     943             : /**
     944             :  * pm_runtime_work - Universal runtime PM work function.
     945             :  * @work: Work structure used for scheduling the execution of this function.
     946             :  *
     947             :  * Use @work to get the device object the work is to be done for, determine what
     948             :  * is to be done and execute the appropriate runtime PM function.
     949             :  */
     950           0 : static void pm_runtime_work(struct work_struct *work)
     951             : {
     952           0 :         struct device *dev = container_of(work, struct device, power.work);
     953             :         enum rpm_request req;
     954             : 
     955           0 :         spin_lock_irq(&dev->power.lock);
     956             : 
     957           0 :         if (!dev->power.request_pending)
     958             :                 goto out;
     959             : 
     960           0 :         req = dev->power.request;
     961           0 :         dev->power.request = RPM_REQ_NONE;
     962           0 :         dev->power.request_pending = false;
     963             : 
     964           0 :         switch (req) {
     965             :         case RPM_REQ_NONE:
     966             :                 break;
     967             :         case RPM_REQ_IDLE:
     968           0 :                 rpm_idle(dev, RPM_NOWAIT);
     969           0 :                 break;
     970             :         case RPM_REQ_SUSPEND:
     971           0 :                 rpm_suspend(dev, RPM_NOWAIT);
     972           0 :                 break;
     973             :         case RPM_REQ_AUTOSUSPEND:
     974           0 :                 rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
     975           0 :                 break;
     976             :         case RPM_REQ_RESUME:
     977           0 :                 rpm_resume(dev, RPM_NOWAIT);
     978           0 :                 break;
     979             :         }
     980             : 
     981             :  out:
     982           0 :         spin_unlock_irq(&dev->power.lock);
     983           0 : }
     984             : 
     985             : /**
     986             :  * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
     987             :  * @timer: hrtimer used by pm_schedule_suspend().
     988             :  *
     989             :  * Check if the time is right and queue a suspend request.
     990             :  */
      991           0 : static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
     992             : {
     993           0 :         struct device *dev = container_of(timer, struct device, power.suspend_timer);
     994             :         unsigned long flags;
     995             :         u64 expires;
     996             : 
     997           0 :         spin_lock_irqsave(&dev->power.lock, flags);
     998             : 
     999           0 :         expires = dev->power.timer_expires;
    1000             :         /*
     1001             :          * If 'expires' is zero or still in the future, the timer has been
     1002             :          * canceled or rearmed and we've been called too early: do nothing.
    1003             :          */
    1004           0 :         if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
    1005           0 :                 dev->power.timer_expires = 0;
    1006           0 :                 rpm_suspend(dev, dev->power.timer_autosuspends ?
    1007             :                     (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
    1008             :         }
    1009             : 
    1010           0 :         spin_unlock_irqrestore(&dev->power.lock, flags);
    1011             : 
    1012           0 :         return HRTIMER_NORESTART;
    1013             : }
    1014             : 
    1015             : /**
     1016             :  * pm_schedule_suspend - Set up a timer to submit a suspend request in the future.
    1017             :  * @dev: Device to suspend.
    1018             :  * @delay: Time to wait before submitting a suspend request, in milliseconds.
    1019             :  */
    1020           0 : int pm_schedule_suspend(struct device *dev, unsigned int delay)
    1021             : {
    1022             :         unsigned long flags;
    1023             :         u64 expires;
    1024             :         int retval;
    1025             : 
    1026           0 :         spin_lock_irqsave(&dev->power.lock, flags);
    1027             : 
    1028           0 :         if (!delay) {
    1029           0 :                 retval = rpm_suspend(dev, RPM_ASYNC);
    1030           0 :                 goto out;
    1031             :         }
    1032             : 
    1033           0 :         retval = rpm_check_suspend_allowed(dev);
    1034           0 :         if (retval)
    1035             :                 goto out;
    1036             : 
    1037             :         /* Other scheduled or pending requests need to be canceled. */
    1038           0 :         pm_runtime_cancel_pending(dev);
    1039             : 
    1040           0 :         expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
    1041           0 :         dev->power.timer_expires = expires;
    1042           0 :         dev->power.timer_autosuspends = 0;
    1043           0 :         hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
    1044             : 
    1045             :  out:
    1046           0 :         spin_unlock_irqrestore(&dev->power.lock, flags);
    1047             : 
    1048           0 :         return retval;
    1049             : }
    1050             : EXPORT_SYMBOL_GPL(pm_schedule_suspend);
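
A usage sketch (hypothetical driver code, not part of this file): after
finishing a burst of I/O, a driver can arm the suspend timer instead of
suspending synchronously.  foo_io_done() and the 500 ms delay are
illustrative assumptions.

    #include <linux/device.h>
    #include <linux/pm_runtime.h>

    /* Ask the PM core to submit a suspend request 500 ms from now. */
    static void foo_io_done(struct device *dev)
    {
            int ret;

            /* Non-blocking: only arms dev->power.suspend_timer. */
            ret = pm_schedule_suspend(dev, 500);
            if (ret)
                    dev_dbg(dev, "suspend not scheduled: %d\n", ret);
    }
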
    1051             : 
    1052             : static int rpm_drop_usage_count(struct device *dev)
    1053             : {
    1054             :         int ret;
    1055             : 
    1056         102 :         ret = atomic_sub_return(1, &dev->power.usage_count);
    1057          51 :         if (ret >= 0)
    1058             :                 return ret;
    1059             : 
    1060             :         /*
    1061             :          * Because rpm_resume() does not check the usage counter, it will resume
    1062             :          * the device even if the usage counter is 0 or negative, so it is
    1063             :          * sufficient to increment the usage counter here to reverse the change
    1064             :          * made above.
    1065             :          */
    1066           0 :         atomic_inc(&dev->power.usage_count);
    1067           0 :         dev_warn(dev, "Runtime PM usage count underflow!\n");
    1068             :         return -EINVAL;
    1069             : }
    1070             : 
    1071             : /**
    1072             :  * __pm_runtime_idle - Entry point for runtime idle operations.
    1073             :  * @dev: Device to send idle notification for.
    1074             :  * @rpmflags: Flag bits.
    1075             :  *
    1076             :  * If the RPM_GET_PUT flag is set, decrement the device's usage count and
    1077             :  * return immediately if it is larger than zero (if it becomes negative, log a
    1078             :  * warning, increment it, and return an error).  Then carry out an idle
    1079             :  * notification, either synchronous or asynchronous.
    1080             :  *
    1081             :  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
    1082             :  * or if pm_runtime_irq_safe() has been called.
    1083             :  */
    1084          89 : int __pm_runtime_idle(struct device *dev, int rpmflags)
    1085             : {
    1086             :         unsigned long flags;
    1087             :         int retval;
    1088             : 
    1089          89 :         if (rpmflags & RPM_GET_PUT) {
    1090          51 :                 retval = rpm_drop_usage_count(dev);
    1091          51 :                 if (retval < 0) {
    1092             :                         return retval;
    1093          51 :                 } else if (retval > 0) {
    1094             :                         trace_rpm_usage(dev, rpmflags);
    1095             :                         return 0;
    1096             :                 }
    1097             :         }
    1098             : 
    1099             :         might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
    1100             : 
    1101          72 :         spin_lock_irqsave(&dev->power.lock, flags);
    1102          72 :         retval = rpm_idle(dev, rpmflags);
    1103         144 :         spin_unlock_irqrestore(&dev->power.lock, flags);
    1104             : 
    1105          72 :         return retval;
    1106             : }
    1107             : EXPORT_SYMBOL_GPL(__pm_runtime_idle);
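
For reference, the public wrappers in include/linux/pm_runtime.h funnel into
this entry point and differ only in the rpmflags they pass down; a paraphrased
sketch (see the header for the authoritative definitions):

    /* Paraphrased from include/linux/pm_runtime.h. */
    static inline int pm_runtime_idle(struct device *dev)
    {
            return __pm_runtime_idle(dev, 0);               /* synchronous */
    }

    static inline int pm_request_idle(struct device *dev)
    {
            return __pm_runtime_idle(dev, RPM_ASYNC);       /* queued on pm_wq */
    }

    static inline int pm_runtime_put(struct device *dev)
    {
            return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC);
    }
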
    1108             : 
    1109             : /**
    1110             :  * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
    1111             :  * @dev: Device to suspend.
    1112             :  * @rpmflags: Flag bits.
    1113             :  *
    1114             :  * If the RPM_GET_PUT flag is set, decrement the device's usage count and
    1115             :  * return immediately if it is larger than zero (if it becomes negative, log a
    1116             :  * warning, increment it, and return an error).  Then carry out a suspend,
    1117             :  * either synchronous or asynchronous.
    1118             :  *
    1119             :  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
    1120             :  * or if pm_runtime_irq_safe() has been called.
    1121             :  */
    1122           0 : int __pm_runtime_suspend(struct device *dev, int rpmflags)
    1123             : {
    1124             :         unsigned long flags;
    1125             :         int retval;
    1126             : 
    1127           0 :         if (rpmflags & RPM_GET_PUT) {
    1128           0 :                 retval = rpm_drop_usage_count(dev);
    1129           0 :                 if (retval < 0) {
    1130             :                         return retval;
    1131           0 :                 } else if (retval > 0) {
    1132             :                         trace_rpm_usage(dev, rpmflags);
    1133             :                         return 0;
    1134             :                 }
    1135             :         }
    1136             : 
    1137             :         might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
    1138             : 
    1139           0 :         spin_lock_irqsave(&dev->power.lock, flags);
    1140           0 :         retval = rpm_suspend(dev, rpmflags);
    1141           0 :         spin_unlock_irqrestore(&dev->power.lock, flags);
    1142             : 
    1143           0 :         return retval;
    1144             : }
    1145             : EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
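
As with the idle entry point, the public suspend helpers are thin wrappers
that only vary the flags; a paraphrased sketch of a few of them:

    /* Paraphrased from include/linux/pm_runtime.h. */
    static inline int pm_runtime_suspend(struct device *dev)
    {
            return __pm_runtime_suspend(dev, 0);            /* synchronous */
    }

    static inline int pm_runtime_autosuspend(struct device *dev)
    {
            return __pm_runtime_suspend(dev, RPM_AUTO);     /* honor the delay */
    }

    static inline int pm_runtime_put_sync(struct device *dev)
    {
            return __pm_runtime_suspend(dev, RPM_GET_PUT);  /* drop ref, suspend */
    }
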
    1146             : 
    1147             : /**
    1148             :  * __pm_runtime_resume - Entry point for runtime resume operations.
    1149             :  * @dev: Device to resume.
    1150             :  * @rpmflags: Flag bits.
    1151             :  *
    1152             :  * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
    1153             :  * carry out a resume, either synchronous or asynchronous.
    1154             :  *
    1155             :  * This routine may be called in atomic context if the RPM_ASYNC flag is set,
    1156             :  * or if pm_runtime_irq_safe() has been called.
    1157             :  */
    1158          51 : int __pm_runtime_resume(struct device *dev, int rpmflags)
    1159             : {
    1160             :         unsigned long flags;
    1161             :         int retval;
    1162             : 
    1163             :         might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
    1164             :                         dev->power.runtime_status != RPM_ACTIVE);
    1165             : 
    1166          51 :         if (rpmflags & RPM_GET_PUT)
    1167          51 :                 atomic_inc(&dev->power.usage_count);
    1168             : 
    1169          51 :         spin_lock_irqsave(&dev->power.lock, flags);
    1170          51 :         retval = rpm_resume(dev, rpmflags);
    1171         102 :         spin_unlock_irqrestore(&dev->power.lock, flags);
    1172             : 
    1173          51 :         return retval;
    1174             : }
    1175             : EXPORT_SYMBOL_GPL(__pm_runtime_resume);
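
A minimal sketch of the reference-counted pattern built on these entry points
(hypothetical driver code; foo_xfer() and the hardware access are assumptions):

    #include <linux/pm_runtime.h>

    static int foo_xfer(struct device *dev)
    {
            int ret;

            /*
             * Bump the usage count and resume synchronously; the helper
             * drops the reference again if the resume fails.
             */
            ret = pm_runtime_resume_and_get(dev);
            if (ret < 0)
                    return ret;

            /* ... access the now-active hardware ... */

            pm_runtime_mark_last_busy(dev);
            pm_runtime_put_autosuspend(dev);        /* drop ref, queue autosuspend */
            return 0;
    }
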
    1176             : 
    1177             : /**
    1178             :  * pm_runtime_get_if_active - Conditionally bump up device usage counter.
    1179             :  * @dev: Device to handle.
    1180             :  * @ign_usage_count: Whether or not to look at the current usage counter value.
    1181             :  *
    1182             :  * Return -EINVAL if runtime PM is disabled for @dev.
    1183             :  *
    1184             :  * Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either
    1185             :  * @ign_usage_count is %true or the runtime PM usage counter of @dev is not
    1186             :  * zero, increment the usage counter of @dev and return 1. Otherwise, return 0
    1187             :  * without changing the usage counter.
    1188             :  *
    1189             :  * If @ign_usage_count is %true, this function can be used to prevent suspending
    1190             :  * the device when its runtime PM status is %RPM_ACTIVE.
    1191             :  *
    1192             :  * If @ign_usage_count is %false, this function can be used to prevent
    1193             :  * suspending the device when both its runtime PM status is %RPM_ACTIVE and its
    1194             :  * runtime PM usage counter is not zero.
    1195             :  *
    1196             :  * The caller is responsible for decrementing the runtime PM usage counter of
    1197             :  * @dev after this function has returned a positive value for it.
    1198             :  */
    1199           0 : int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
    1200             : {
    1201             :         unsigned long flags;
    1202             :         int retval;
    1203             : 
    1204           0 :         spin_lock_irqsave(&dev->power.lock, flags);
    1205           0 :         if (dev->power.disable_depth > 0) {
    1206             :                 retval = -EINVAL;
    1207           0 :         } else if (dev->power.runtime_status != RPM_ACTIVE) {
    1208             :                 retval = 0;
    1209           0 :         } else if (ign_usage_count) {
    1210           0 :                 retval = 1;
    1211           0 :                 atomic_inc(&dev->power.usage_count);
    1212             :         } else {
    1213           0 :                 retval = atomic_inc_not_zero(&dev->power.usage_count);
    1214             :         }
    1215           0 :         trace_rpm_usage(dev, 0);
    1216           0 :         spin_unlock_irqrestore(&dev->power.lock, flags);
    1217             : 
    1218           0 :         return retval;
    1219             : }
    1220             : EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
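
A sketch of the intended use: an opportunistic path that must not trigger a
resume and only piggybacks on an already-active device.  foo_poll() is a
hypothetical name; the @ign_usage_count == %false flavor is also available
through the pm_runtime_get_if_in_use() wrapper.

    #include <linux/pm_runtime.h>

    static void foo_poll(struct device *dev)
    {
            /* ign_usage_count == true: take a reference whenever RPM_ACTIVE. */
            if (pm_runtime_get_if_active(dev, true) <= 0)
                    return;         /* suspended or PM disabled: skip this poll */

            /* ... sample hardware state without forcing a resume ... */

            pm_runtime_put(dev);    /* balance the conditional get */
    }
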
    1221             : 
    1222             : /**
    1223             :  * __pm_runtime_set_status - Set runtime PM status of a device.
    1224             :  * @dev: Device to handle.
    1225             :  * @status: New runtime PM status of the device.
    1226             :  *
    1227             :  * If runtime PM of the device is disabled or its power.runtime_error field is
    1228             :  * different from zero, the status may be changed either to RPM_ACTIVE, or to
    1229             :  * RPM_SUSPENDED, as long as that reflects the actual state of the device.
    1230             :  * However, if the device has a parent and the parent is not active, and the
    1231             :  * parent's power.ignore_children flag is unset, the device's status cannot be
    1232             :  * set to RPM_ACTIVE, so -EBUSY is returned in that case.
    1233             :  *
    1234             :  * If successful, __pm_runtime_set_status() clears the power.runtime_error field
    1235             :  * and the device parent's counter of unsuspended children is modified to
    1236             :  * reflect the new status.  If the new status is RPM_SUSPENDED, an idle
    1237             :  * notification request for the parent is submitted.
    1238             :  *
    1239             :  * If @dev has any suppliers (as reflected by device links to them), and @status
    1240             :  * is RPM_ACTIVE, they will be activated upfront and if the activation of one
    1241             :  * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
     1242             :  * of the @status value) and the suppliers will be deactivated on exit.  The
    1243             :  * error returned by the failing supplier activation will be returned in that
    1244             :  * case.
    1245             :  */
    1246           0 : int __pm_runtime_set_status(struct device *dev, unsigned int status)
    1247             : {
    1248           0 :         struct device *parent = dev->parent;
    1249           0 :         bool notify_parent = false;
    1250             :         unsigned long flags;
    1251           0 :         int error = 0;
    1252             : 
    1253           0 :         if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
    1254             :                 return -EINVAL;
    1255             : 
    1256           0 :         spin_lock_irqsave(&dev->power.lock, flags);
    1257             : 
    1258             :         /*
    1259             :          * Prevent PM-runtime from being enabled for the device or return an
    1260             :          * error if it is enabled already and working.
    1261             :          */
    1262           0 :         if (dev->power.runtime_error || dev->power.disable_depth)
    1263           0 :                 dev->power.disable_depth++;
    1264             :         else
    1265             :                 error = -EAGAIN;
    1266             : 
    1267           0 :         spin_unlock_irqrestore(&dev->power.lock, flags);
    1268             : 
    1269           0 :         if (error)
    1270             :                 return error;
    1271             : 
    1272             :         /*
    1273             :          * If the new status is RPM_ACTIVE, the suppliers can be activated
    1274             :          * upfront regardless of the current status, because next time
    1275             :          * rpm_put_suppliers() runs, the rpm_active refcounts of the links
    1276             :          * involved will be dropped down to one anyway.
    1277             :          */
    1278           0 :         if (status == RPM_ACTIVE) {
    1279           0 :                 int idx = device_links_read_lock();
    1280             : 
    1281           0 :                 error = rpm_get_suppliers(dev);
    1282           0 :                 if (error)
    1283           0 :                         status = RPM_SUSPENDED;
    1284             : 
    1285           0 :                 device_links_read_unlock(idx);
    1286             :         }
    1287             : 
    1288           0 :         spin_lock_irqsave(&dev->power.lock, flags);
    1289             : 
    1290           0 :         if (dev->power.runtime_status == status || !parent)
    1291             :                 goto out_set;
    1292             : 
    1293           0 :         if (status == RPM_SUSPENDED) {
    1294           0 :                 atomic_add_unless(&parent->power.child_count, -1, 0);
    1295           0 :                 notify_parent = !parent->power.ignore_children;
    1296             :         } else {
    1297           0 :                 spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
    1298             : 
    1299             :                 /*
    1300             :                  * It is invalid to put an active child under a parent that is
     1301             :                  * not active, has runtime PM enabled, and has the
    1302             :                  * 'power.ignore_children' flag unset.
    1303             :                  */
    1304           0 :                 if (!parent->power.disable_depth &&
    1305           0 :                     !parent->power.ignore_children &&
    1306           0 :                     parent->power.runtime_status != RPM_ACTIVE) {
    1307           0 :                         dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
    1308             :                                 dev_name(dev),
    1309             :                                 dev_name(parent));
    1310           0 :                         error = -EBUSY;
    1311           0 :                 } else if (dev->power.runtime_status == RPM_SUSPENDED) {
    1312           0 :                         atomic_inc(&parent->power.child_count);
    1313             :                 }
    1314             : 
    1315           0 :                 spin_unlock(&parent->power.lock);
    1316             : 
    1317           0 :                 if (error) {
    1318             :                         status = RPM_SUSPENDED;
    1319             :                         goto out;
    1320             :                 }
    1321             :         }
    1322             : 
    1323             :  out_set:
    1324           0 :         __update_runtime_status(dev, status);
    1325           0 :         if (!error)
    1326           0 :                 dev->power.runtime_error = 0;
    1327             : 
    1328             :  out:
    1329           0 :         spin_unlock_irqrestore(&dev->power.lock, flags);
    1330             : 
    1331           0 :         if (notify_parent)
    1332             :                 pm_request_idle(parent);
    1333             : 
    1334           0 :         if (status == RPM_SUSPENDED) {
    1335           0 :                 int idx = device_links_read_lock();
    1336             : 
    1337           0 :                 rpm_put_suppliers(dev);
    1338             : 
    1339           0 :                 device_links_read_unlock(idx);
    1340             :         }
    1341             : 
    1342           0 :         pm_runtime_enable(dev);
    1343             : 
    1344           0 :         return error;
    1345             : }
    1346             : EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
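
Driver code almost always reaches this through the pm_runtime_set_active()
and pm_runtime_set_suspended() wrappers.  A common probe-time sketch
(hypothetical; assumes firmware left the device powered on):

    #include <linux/pm_runtime.h>

    static int foo_probe(struct device *dev)
    {
            int ret;

            /*
             * Tell the PM core the hardware is already up before enabling
             * runtime PM, so that the first put can actually suspend it.
             */
            ret = pm_runtime_set_active(dev);
            if (ret)
                    return ret;

            pm_runtime_enable(dev);
            return 0;
    }
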
    1347             : 
    1348             : /**
    1349             :  * __pm_runtime_barrier - Cancel pending requests and wait for completions.
    1350             :  * @dev: Device to handle.
    1351             :  *
    1352             :  * Flush all pending requests for the device from pm_wq and wait for all
    1353             :  * runtime PM operations involving the device in progress to complete.
    1354             :  *
    1355             :  * Should be called under dev->power.lock with interrupts disabled.
    1356             :  */
    1357         553 : static void __pm_runtime_barrier(struct device *dev)
    1358             : {
    1359         553 :         pm_runtime_deactivate_timer(dev);
    1360             : 
    1361         553 :         if (dev->power.request_pending) {
    1362           0 :                 dev->power.request = RPM_REQ_NONE;
    1363           0 :                 spin_unlock_irq(&dev->power.lock);
    1364             : 
    1365           0 :                 cancel_work_sync(&dev->power.work);
    1366             : 
    1367           0 :                 spin_lock_irq(&dev->power.lock);
    1368           0 :                 dev->power.request_pending = false;
    1369             :         }
    1370             : 
    1371         553 :         if (dev->power.runtime_status == RPM_SUSPENDING ||
    1372         553 :             dev->power.runtime_status == RPM_RESUMING ||
    1373             :             dev->power.idle_notification) {
    1374           0 :                 DEFINE_WAIT(wait);
    1375             : 
    1376             :                 /* Suspend, wake-up or idle notification in progress. */
    1377             :                 for (;;) {
    1378           0 :                         prepare_to_wait(&dev->power.wait_queue, &wait,
    1379             :                                         TASK_UNINTERRUPTIBLE);
    1380           0 :                         if (dev->power.runtime_status != RPM_SUSPENDING
    1381           0 :                             && dev->power.runtime_status != RPM_RESUMING
    1382           0 :                             && !dev->power.idle_notification)
    1383             :                                 break;
    1384           0 :                         spin_unlock_irq(&dev->power.lock);
    1385             : 
    1386           0 :                         schedule();
    1387             : 
    1388           0 :                         spin_lock_irq(&dev->power.lock);
    1389             :                 }
    1390           0 :                 finish_wait(&dev->power.wait_queue, &wait);
    1391             :         }
    1392         553 : }
    1393             : 
    1394             : /**
    1395             :  * pm_runtime_barrier - Flush pending requests and wait for completions.
    1396             :  * @dev: Device to handle.
    1397             :  *
     1398             :  * Prevent the device from being suspended by incrementing its usage counter
     1399             :  * and, if there's a pending resume request for the device, wake the device up.
    1400             :  * Next, make sure that all pending requests for the device have been flushed
    1401             :  * from pm_wq and wait for all runtime PM operations involving the device in
    1402             :  * progress to complete.
    1403             :  *
    1404             :  * Return value:
    1405             :  * 1, if there was a resume request pending and the device had to be woken up,
    1406             :  * 0, otherwise
    1407             :  */
    1408         553 : int pm_runtime_barrier(struct device *dev)
    1409             : {
    1410         553 :         int retval = 0;
    1411             : 
    1412         553 :         pm_runtime_get_noresume(dev);
    1413        1106 :         spin_lock_irq(&dev->power.lock);
    1414             : 
    1415         553 :         if (dev->power.request_pending
    1416           0 :             && dev->power.request == RPM_REQ_RESUME) {
    1417           0 :                 rpm_resume(dev, 0);
    1418           0 :                 retval = 1;
    1419             :         }
    1420             : 
    1421         553 :         __pm_runtime_barrier(dev);
    1422             : 
    1423        1106 :         spin_unlock_irq(&dev->power.lock);
    1424         553 :         pm_runtime_put_noidle(dev);
    1425             : 
    1426         553 :         return retval;
    1427             : }
    1428             : EXPORT_SYMBOL_GPL(pm_runtime_barrier);
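
A sketch of a caller that wants the barrier semantics (hypothetical teardown
path): flush any queued asynchronous request and wait out an in-flight
transition before tearing device state down.

    #include <linux/device.h>
    #include <linux/pm_runtime.h>

    static void foo_teardown(struct device *dev)
    {
            /* Returns 1 if a pending resume request had to be carried out. */
            if (pm_runtime_barrier(dev))
                    dev_dbg(dev, "pending resume executed before teardown\n");
    }
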
    1429             : 
    1430             : /**
    1431             :  * __pm_runtime_disable - Disable runtime PM of a device.
    1432             :  * @dev: Device to handle.
    1433             :  * @check_resume: If set, check if there's a resume request for the device.
    1434             :  *
    1435             :  * Increment power.disable_depth for the device and if it was zero previously,
    1436             :  * cancel all pending runtime PM requests for the device and wait for all
    1437             :  * operations in progress to complete.  The device can be either active or
    1438             :  * suspended after its runtime PM has been disabled.
    1439             :  *
    1440             :  * If @check_resume is set and there's a resume request pending when
    1441             :  * __pm_runtime_disable() is called and power.disable_depth is zero, the
    1442             :  * function will wake up the device before disabling its runtime PM.
    1443             :  */
    1444          18 : void __pm_runtime_disable(struct device *dev, bool check_resume)
    1445             : {
    1446          36 :         spin_lock_irq(&dev->power.lock);
    1447             : 
    1448          18 :         if (dev->power.disable_depth > 0) {
    1449          18 :                 dev->power.disable_depth++;
    1450          18 :                 goto out;
    1451             :         }
    1452             : 
    1453             :         /*
    1454             :          * Wake up the device if there's a resume request pending, because that
    1455             :          * means there probably is some I/O to process and disabling runtime PM
    1456             :          * shouldn't prevent the device from processing the I/O.
    1457             :          */
    1458           0 :         if (check_resume && dev->power.request_pending &&
    1459           0 :             dev->power.request == RPM_REQ_RESUME) {
    1460             :                 /*
    1461             :                  * Prevent suspends and idle notifications from being carried
    1462             :                  * out after we have woken up the device.
    1463             :                  */
    1464           0 :                 pm_runtime_get_noresume(dev);
    1465             : 
    1466           0 :                 rpm_resume(dev, 0);
    1467             : 
    1468             :                 pm_runtime_put_noidle(dev);
    1469             :         }
    1470             : 
    1471             :         /* Update time accounting before disabling PM-runtime. */
    1472           0 :         update_pm_runtime_accounting(dev);
    1473             : 
    1474           0 :         if (!dev->power.disable_depth++) {
    1475           0 :                 __pm_runtime_barrier(dev);
    1476           0 :                 dev->power.last_status = dev->power.runtime_status;
    1477             :         }
    1478             : 
    1479             :  out:
    1480          36 :         spin_unlock_irq(&dev->power.lock);
    1481          18 : }
    1482             : EXPORT_SYMBOL_GPL(__pm_runtime_disable);
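
The exported pm_runtime_disable() wrapper is __pm_runtime_disable(dev, true).
A sketch of the balanced disable/enable pattern around a section that must
see a stable runtime PM status (hypothetical foo_reset()):

    #include <linux/pm_runtime.h>

    static int foo_reset(struct device *dev)
    {
            pm_runtime_disable(dev);  /* bump disable_depth, wait for transitions */

            /* ... reprogram the hardware; no suspend/resume can run here ... */

            pm_runtime_enable(dev);   /* drop disable_depth back down */
            return 0;
    }
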
    1483             : 
    1484             : /**
    1485             :  * pm_runtime_enable - Enable runtime PM of a device.
    1486             :  * @dev: Device to handle.
    1487             :  */
    1488           0 : void pm_runtime_enable(struct device *dev)
    1489             : {
    1490             :         unsigned long flags;
    1491             : 
    1492           0 :         spin_lock_irqsave(&dev->power.lock, flags);
    1493             : 
    1494           0 :         if (!dev->power.disable_depth) {
    1495           0 :                 dev_warn(dev, "Unbalanced %s!\n", __func__);
    1496           0 :                 goto out;
    1497             :         }
    1498             : 
    1499           0 :         if (--dev->power.disable_depth > 0)
    1500             :                 goto out;
    1501             : 
    1502           0 :         dev->power.last_status = RPM_INVALID;
    1503           0 :         dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
    1504             : 
    1505           0 :         if (dev->power.runtime_status == RPM_SUSPENDED &&
    1506           0 :             !dev->power.ignore_children &&
    1507           0 :             atomic_read(&dev->power.child_count) > 0)
    1508           0 :                 dev_warn(dev, "Enabling runtime PM for inactive device with active children\n");
    1509             : 
    1510             : out:
    1511           0 :         spin_unlock_irqrestore(&dev->power.lock, flags);
    1512           0 : }
    1513             : EXPORT_SYMBOL_GPL(pm_runtime_enable);
    1514             : 
    1515           0 : static void pm_runtime_disable_action(void *data)
    1516             : {
    1517           0 :         pm_runtime_dont_use_autosuspend(data);
    1518           0 :         pm_runtime_disable(data);
    1519           0 : }
    1520             : 
    1521             : /**
    1522             :  * devm_pm_runtime_enable - devres-enabled version of pm_runtime_enable.
     1523             :  * @dev: Device to handle.
     1524             :  *
     1525             :  * NOTE: this will also handle calling pm_runtime_dont_use_autosuspend() for
     1526             :  * you at driver exit time if needed.
     1527             :  * Return: 0 on success, negative errno on failure.
    1528             :  */
    1529           0 : int devm_pm_runtime_enable(struct device *dev)
    1530             : {
    1531           0 :         pm_runtime_enable(dev);
    1532             : 
    1533           0 :         return devm_add_action_or_reset(dev, pm_runtime_disable_action, dev);
    1534             : }
    1535             : EXPORT_SYMBOL_GPL(devm_pm_runtime_enable);
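
A probe sketch using the managed variant (hypothetical platform driver): the
matching pm_runtime_disable() runs automatically when the device is unbound,
so the error paths need no manual unwinding.

    #include <linux/platform_device.h>
    #include <linux/pm_runtime.h>

    static int foo_probe(struct platform_device *pdev)
    {
            struct device *dev = &pdev->dev;
            int ret;

            ret = devm_pm_runtime_enable(dev);
            if (ret)
                    return ret;

            /* ... rest of probe; no pm_runtime_disable() on error paths ... */
            return 0;
    }
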
    1536             : 
    1537             : /**
    1538             :  * pm_runtime_forbid - Block runtime PM of a device.
    1539             :  * @dev: Device to handle.
    1540             :  *
    1541             :  * Increase the device's usage count and clear its power.runtime_auto flag,
    1542             :  * so that it cannot be suspended at run time until pm_runtime_allow() is called
    1543             :  * for it.
    1544             :  */
    1545           0 : void pm_runtime_forbid(struct device *dev)
    1546             : {
    1547           0 :         spin_lock_irq(&dev->power.lock);
    1548           0 :         if (!dev->power.runtime_auto)
    1549             :                 goto out;
    1550             : 
    1551           0 :         dev->power.runtime_auto = false;
    1552           0 :         atomic_inc(&dev->power.usage_count);
    1553           0 :         rpm_resume(dev, 0);
    1554             : 
    1555             :  out:
    1556           0 :         spin_unlock_irq(&dev->power.lock);
    1557           0 : }
    1558             : EXPORT_SYMBOL_GPL(pm_runtime_forbid);
    1559             : 
    1560             : /**
    1561             :  * pm_runtime_allow - Unblock runtime PM of a device.
    1562             :  * @dev: Device to handle.
    1563             :  *
    1564             :  * Decrease the device's usage count and set its power.runtime_auto flag.
    1565             :  */
    1566           0 : void pm_runtime_allow(struct device *dev)
    1567             : {
    1568             :         int ret;
    1569             : 
    1570           0 :         spin_lock_irq(&dev->power.lock);
    1571           0 :         if (dev->power.runtime_auto)
    1572             :                 goto out;
    1573             : 
    1574           0 :         dev->power.runtime_auto = true;
    1575           0 :         ret = rpm_drop_usage_count(dev);
    1576           0 :         if (ret == 0)
    1577           0 :                 rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
    1578             :         else if (ret > 0)
    1579             :                 trace_rpm_usage(dev, RPM_AUTO | RPM_ASYNC);
    1580             : 
    1581             :  out:
    1582           0 :         spin_unlock_irq(&dev->power.lock);
    1583           0 : }
    1584             : EXPORT_SYMBOL_GPL(pm_runtime_allow);
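
This pair is normally driven by the sysfs power/control attribute: writing
"on" calls pm_runtime_forbid() and writing "auto" calls pm_runtime_allow().
A driver that wants runtime PM blocked until userspace opts in might do
(hypothetical sketch):

    #include <linux/pm_runtime.h>

    static int foo_probe(struct device *dev)
    {
            pm_runtime_forbid(dev);   /* stay active until "auto" is written */
            pm_runtime_enable(dev);
            return 0;
    }
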
    1585             : 
    1586             : /**
    1587             :  * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
    1588             :  * @dev: Device to handle.
    1589             :  *
    1590             :  * Set the power.no_callbacks flag, which tells the PM core that this
    1591             :  * device is power-managed through its parent and has no runtime PM
    1592             :  * callbacks of its own.  The runtime sysfs attributes will be removed.
    1593             :  */
    1594           0 : void pm_runtime_no_callbacks(struct device *dev)
    1595             : {
    1596           0 :         spin_lock_irq(&dev->power.lock);
    1597           0 :         dev->power.no_callbacks = 1;
    1598           0 :         spin_unlock_irq(&dev->power.lock);
    1599           0 :         if (device_is_registered(dev))
    1600           0 :                 rpm_sysfs_remove(dev);
    1601           0 : }
    1602             : EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
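
A sketch for a child device whose power is managed entirely through its
parent, e.g. one function of a multi-function device (hypothetical
foo_cell_probe()):

    #include <linux/pm_runtime.h>

    static int foo_cell_probe(struct device *dev)
    {
            pm_runtime_no_callbacks(dev);   /* no own callbacks, no sysfs knobs */
            pm_runtime_enable(dev);
            return 0;
    }
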
    1603             : 
    1604             : /**
    1605             :  * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
    1606             :  * @dev: Device to handle
    1607             :  *
    1608             :  * Set the power.irq_safe flag, which tells the PM core that the
    1609             :  * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
    1610             :  * always be invoked with the spinlock held and interrupts disabled.  It also
    1611             :  * causes the parent's usage counter to be permanently incremented, preventing
    1612             :  * the parent from runtime suspending -- otherwise an irq-safe child might have
    1613             :  * to wait for a non-irq-safe parent.
    1614             :  */
    1615           0 : void pm_runtime_irq_safe(struct device *dev)
    1616             : {
    1617           0 :         if (dev->parent)
    1618           0 :                 pm_runtime_get_sync(dev->parent);
    1619             : 
    1620           0 :         spin_lock_irq(&dev->power.lock);
    1621           0 :         dev->power.irq_safe = 1;
    1622           0 :         spin_unlock_irq(&dev->power.lock);
    1623           0 : }
    1624             : EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
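
A sketch of opting in (hypothetical probe): after this call the synchronous
helpers may be used from hardirq context, at the cost of the parent being
held active for the lifetime of the device.

    #include <linux/pm_runtime.h>

    static int foo_probe(struct device *dev)
    {
            pm_runtime_irq_safe(dev);  /* callbacks run with the lock held, IRQs off */
            pm_runtime_enable(dev);
            return 0;
    }
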
    1625             : 
    1626             : /**
    1627             :  * update_autosuspend - Handle a change to a device's autosuspend settings.
    1628             :  * @dev: Device to handle.
    1629             :  * @old_delay: The former autosuspend_delay value.
    1630             :  * @old_use: The former use_autosuspend value.
    1631             :  *
    1632             :  * Prevent runtime suspend if the new delay is negative and use_autosuspend is
    1633             :  * set; otherwise allow it.  Send an idle notification if suspends are allowed.
    1634             :  *
    1635             :  * This function must be called under dev->power.lock with interrupts disabled.
    1636             :  */
    1637           0 : static void update_autosuspend(struct device *dev, int old_delay, int old_use)
    1638             : {
    1639           0 :         int delay = dev->power.autosuspend_delay;
    1640             : 
    1641             :         /* Should runtime suspend be prevented now? */
    1642           0 :         if (dev->power.use_autosuspend && delay < 0) {
    1643             : 
    1644             :                 /* If it used to be allowed then prevent it. */
    1645           0 :                 if (!old_use || old_delay >= 0) {
    1646           0 :                         atomic_inc(&dev->power.usage_count);
    1647           0 :                         rpm_resume(dev, 0);
    1648             :                 } else {
    1649             :                         trace_rpm_usage(dev, 0);
    1650             :                 }
    1651             :         }
    1652             : 
    1653             :         /* Runtime suspend should be allowed now. */
    1654             :         else {
    1655             : 
    1656             :                 /* If it used to be prevented then allow it. */
    1657           0 :                 if (old_use && old_delay < 0)
    1658           0 :                         atomic_dec(&dev->power.usage_count);
    1659             : 
    1660             :                 /* Maybe we can autosuspend now. */
    1661           0 :                 rpm_idle(dev, RPM_AUTO);
    1662             :         }
    1663           0 : }
    1664             : 
    1665             : /**
    1666             :  * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
    1667             :  * @dev: Device to handle.
    1668             :  * @delay: Value of the new delay in milliseconds.
    1669             :  *
    1670             :  * Set the device's power.autosuspend_delay value.  If it changes to negative
    1671             :  * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
    1672             :  * changes the other way, allow runtime suspends.
    1673             :  */
    1674           0 : void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
    1675             : {
    1676             :         int old_delay, old_use;
    1677             : 
    1678           0 :         spin_lock_irq(&dev->power.lock);
    1679           0 :         old_delay = dev->power.autosuspend_delay;
    1680           0 :         old_use = dev->power.use_autosuspend;
    1681           0 :         dev->power.autosuspend_delay = delay;
    1682           0 :         update_autosuspend(dev, old_delay, old_use);
    1683           0 :         spin_unlock_irq(&dev->power.lock);
    1684           0 : }
    1685             : EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
    1686             : 
    1687             : /**
    1688             :  * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
    1689             :  * @dev: Device to handle.
    1690             :  * @use: New value for use_autosuspend.
    1691             :  *
    1692             :  * Set the device's power.use_autosuspend flag, and allow or prevent runtime
    1693             :  * suspends as needed.
    1694             :  */
    1695           0 : void __pm_runtime_use_autosuspend(struct device *dev, bool use)
    1696             : {
    1697             :         int old_delay, old_use;
    1698             : 
    1699           0 :         spin_lock_irq(&dev->power.lock);
    1700           0 :         old_delay = dev->power.autosuspend_delay;
    1701           0 :         old_use = dev->power.use_autosuspend;
    1702           0 :         dev->power.use_autosuspend = use;
    1703           0 :         update_autosuspend(dev, old_delay, old_use);
    1704           0 :         spin_unlock_irq(&dev->power.lock);
    1705           0 : }
    1706             : EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
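
The canonical autosuspend setup built on these two setters, as a probe-time
sketch (hypothetical; the 2000 ms delay is an assumption).  Each busy period
should then end with pm_runtime_mark_last_busy() followed by
pm_runtime_put_autosuspend().

    #include <linux/pm_runtime.h>

    static int foo_probe(struct device *dev)
    {
            pm_runtime_set_autosuspend_delay(dev, 2000);    /* 2 s idle window */
            pm_runtime_use_autosuspend(dev);
            pm_runtime_enable(dev);
            return 0;
    }
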
    1707             : 
    1708             : /**
    1709             :  * pm_runtime_init - Initialize runtime PM fields in given device object.
    1710             :  * @dev: Device object to initialize.
    1711             :  */
    1712         570 : void pm_runtime_init(struct device *dev)
    1713             : {
    1714         570 :         dev->power.runtime_status = RPM_SUSPENDED;
    1715         570 :         dev->power.last_status = RPM_INVALID;
    1716         570 :         dev->power.idle_notification = false;
    1717             : 
    1718         570 :         dev->power.disable_depth = 1;
    1719        1140 :         atomic_set(&dev->power.usage_count, 0);
    1720             : 
    1721         570 :         dev->power.runtime_error = 0;
    1722             : 
    1723        1140 :         atomic_set(&dev->power.child_count, 0);
    1724         570 :         pm_suspend_ignore_children(dev, false);
    1725         570 :         dev->power.runtime_auto = true;
    1726             : 
    1727         570 :         dev->power.request_pending = false;
    1728         570 :         dev->power.request = RPM_REQ_NONE;
    1729         570 :         dev->power.deferred_resume = false;
    1730         570 :         dev->power.needs_force_resume = 0;
    1731        1140 :         INIT_WORK(&dev->power.work, pm_runtime_work);
    1732             : 
    1733         570 :         dev->power.timer_expires = 0;
    1734         570 :         hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
    1735         570 :         dev->power.suspend_timer.function = pm_suspend_timer_fn;
    1736             : 
    1737         570 :         init_waitqueue_head(&dev->power.wait_queue);
    1738         570 : }
    1739             : 
    1740             : /**
    1741             :  * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
    1742             :  * @dev: Device object to re-initialize.
    1743             :  */
    1744          35 : void pm_runtime_reinit(struct device *dev)
    1745             : {
    1746          35 :         if (!pm_runtime_enabled(dev)) {
    1747          35 :                 if (dev->power.runtime_status == RPM_ACTIVE)
    1748             :                         pm_runtime_set_suspended(dev);
    1749          35 :                 if (dev->power.irq_safe) {
    1750           0 :                         spin_lock_irq(&dev->power.lock);
    1751           0 :                         dev->power.irq_safe = 0;
    1752           0 :                         spin_unlock_irq(&dev->power.lock);
    1753           0 :                         if (dev->parent)
    1754           0 :                                 pm_runtime_put(dev->parent);
    1755             :                 }
    1756             :         }
    1757          35 : }
    1758             : 
    1759             : /**
    1760             :  * pm_runtime_remove - Prepare for removing a device from device hierarchy.
    1761             :  * @dev: Device object being removed from device hierarchy.
    1762             :  */
    1763          18 : void pm_runtime_remove(struct device *dev)
    1764             : {
    1765          18 :         __pm_runtime_disable(dev, false);
    1766          18 :         pm_runtime_reinit(dev);
    1767          18 : }
    1768             : 
    1769             : /**
    1770             :  * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
    1771             :  * @dev: Consumer device.
    1772             :  */
    1773          17 : void pm_runtime_get_suppliers(struct device *dev)
    1774             : {
    1775             :         struct device_link *link;
    1776             :         int idx;
    1777             : 
    1778          17 :         idx = device_links_read_lock();
    1779             : 
    1780          17 :         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
    1781             :                                 device_links_read_lock_held())
    1782           0 :                 if (link->flags & DL_FLAG_PM_RUNTIME) {
    1783           0 :                         link->supplier_preactivated = true;
    1784           0 :                         pm_runtime_get_sync(link->supplier);
    1785             :                 }
    1786             : 
    1787          17 :         device_links_read_unlock(idx);
    1788          17 : }
    1789             : 
    1790             : /**
    1791             :  * pm_runtime_put_suppliers - Drop references to supplier devices.
    1792             :  * @dev: Consumer device.
    1793             :  */
    1794          17 : void pm_runtime_put_suppliers(struct device *dev)
    1795             : {
    1796             :         struct device_link *link;
    1797             :         int idx;
    1798             : 
    1799          17 :         idx = device_links_read_lock();
    1800             : 
    1801          17 :         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
    1802             :                                 device_links_read_lock_held())
    1803           0 :                 if (link->supplier_preactivated) {
    1804           0 :                         link->supplier_preactivated = false;
    1805           0 :                         pm_runtime_put(link->supplier);
    1806             :                 }
    1807             : 
    1808          17 :         device_links_read_unlock(idx);
    1809          17 : }
    1810             : 
    1811           0 : void pm_runtime_new_link(struct device *dev)
    1812             : {
    1813           0 :         spin_lock_irq(&dev->power.lock);
    1814           0 :         dev->power.links_count++;
    1815           0 :         spin_unlock_irq(&dev->power.lock);
    1816           0 : }
    1817             : 
    1818           0 : static void pm_runtime_drop_link_count(struct device *dev)
    1819             : {
    1820           0 :         spin_lock_irq(&dev->power.lock);
    1821           0 :         WARN_ON(dev->power.links_count == 0);
    1822           0 :         dev->power.links_count--;
    1823           0 :         spin_unlock_irq(&dev->power.lock);
    1824           0 : }
    1825             : 
    1826             : /**
    1827             :  * pm_runtime_drop_link - Prepare for device link removal.
    1828             :  * @link: Device link going away.
    1829             :  *
    1830             :  * Drop the link count of the consumer end of @link and decrement the supplier
    1831             :  * device's runtime PM usage counter as many times as needed to drop all of the
     1832             :  * runtime PM references to it from the consumer.
    1833             :  */
    1834           0 : void pm_runtime_drop_link(struct device_link *link)
    1835             : {
    1836           0 :         if (!(link->flags & DL_FLAG_PM_RUNTIME))
    1837             :                 return;
    1838             : 
    1839           0 :         pm_runtime_drop_link_count(link->consumer);
    1840           0 :         pm_runtime_release_supplier(link);
    1841           0 :         pm_request_idle(link->supplier);
    1842             : }
    1843             : 
    1844             : static bool pm_runtime_need_not_resume(struct device *dev)
    1845             : {
    1846           0 :         return atomic_read(&dev->power.usage_count) <= 1 &&
    1847           0 :                 (atomic_read(&dev->power.child_count) == 0 ||
    1848             :                  dev->power.ignore_children);
    1849             : }
    1850             : 
    1851             : /**
    1852             :  * pm_runtime_force_suspend - Force a device into suspend state if needed.
    1853             :  * @dev: Device to suspend.
    1854             :  *
    1855             :  * Disable runtime PM so we safely can check the device's runtime PM status and
    1856             :  * if it is active, invoke its ->runtime_suspend callback to suspend it and
    1857             :  * change its runtime PM status field to RPM_SUSPENDED.  Also, if the device's
    1858             :  * usage and children counters don't indicate that the device was in use before
    1859             :  * the system-wide transition under way, decrement its parent's children counter
    1860             :  * (if there is a parent).  Keep runtime PM disabled to preserve the state
    1861             :  * unless we encounter errors.
    1862             :  *
     1863             :  * Typically, this function is invoked from a system suspend callback to make
     1864             :  * sure the device is put into a low power state, and it should only be used
     1865             :  * during system-wide PM transitions to sleep states.  It assumes that the
     1866             :  * analogous pm_runtime_force_resume() will be used to resume the device.
    1867             :  *
    1868             :  * Do not use with DPM_FLAG_SMART_SUSPEND as this can lead to an inconsistent
    1869             :  * state where this function has called the ->runtime_suspend callback but the
    1870             :  * PM core marks the driver as runtime active.
    1871             :  */
    1872           0 : int pm_runtime_force_suspend(struct device *dev)
    1873             : {
    1874             :         int (*callback)(struct device *);
    1875             :         int ret;
    1876             : 
    1877           0 :         pm_runtime_disable(dev);
    1878           0 :         if (pm_runtime_status_suspended(dev))
    1879             :                 return 0;
    1880             : 
    1881           0 :         callback = RPM_GET_CALLBACK(dev, runtime_suspend);
    1882             : 
    1883           0 :         dev_pm_enable_wake_irq_check(dev, true);
    1884           0 :         ret = callback ? callback(dev) : 0;
    1885           0 :         if (ret)
    1886             :                 goto err;
    1887             : 
    1888           0 :         dev_pm_enable_wake_irq_complete(dev);
    1889             : 
    1890             :         /*
    1891             :          * If the device can stay in suspend after the system-wide transition
    1892             :          * to the working state that will follow, drop the children counter of
    1893             :          * its parent, but set its status to RPM_SUSPENDED anyway in case this
    1894             :          * function will be called again for it in the meantime.
    1895             :          */
    1896           0 :         if (pm_runtime_need_not_resume(dev)) {
    1897             :                 pm_runtime_set_suspended(dev);
    1898             :         } else {
    1899           0 :                 __update_runtime_status(dev, RPM_SUSPENDED);
    1900           0 :                 dev->power.needs_force_resume = 1;
    1901             :         }
    1902             : 
    1903             :         return 0;
    1904             : 
    1905             : err:
    1906           0 :         dev_pm_disable_wake_irq_check(dev, true);
    1907           0 :         pm_runtime_enable(dev);
    1908           0 :         return ret;
    1909             : }
    1910             : EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
    1911             : 
    1912             : /**
    1913             :  * pm_runtime_force_resume - Force a device into resume state if needed.
    1914             :  * @dev: Device to resume.
    1915             :  *
     1916             :  * Prior to invoking this function, the caller is expected to have brought the
     1917             :  * device into a low power state by a call to pm_runtime_force_suspend().  Here
     1918             :  * we reverse those actions and bring the device back to full power, if it is
     1919             :  * expected to be used on system resume.  Otherwise, we defer the resume so it
     1920             :  * can be managed via runtime PM.
    1921             :  *
    1922             :  * Typically this function may be invoked from a system resume callback.
    1923             :  */
    1924           0 : int pm_runtime_force_resume(struct device *dev)
    1925             : {
    1926             :         int (*callback)(struct device *);
    1927           0 :         int ret = 0;
    1928             : 
    1929           0 :         if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
    1930             :                 goto out;
    1931             : 
    1932             :         /*
    1933             :          * The value of the parent's children counter is correct already, so
    1934             :          * just update the status of the device.
    1935             :          */
    1936           0 :         __update_runtime_status(dev, RPM_ACTIVE);
    1937             : 
    1938           0 :         callback = RPM_GET_CALLBACK(dev, runtime_resume);
    1939             : 
    1940           0 :         dev_pm_disable_wake_irq_check(dev, false);
    1941           0 :         ret = callback ? callback(dev) : 0;
    1942           0 :         if (ret) {
    1943           0 :                 pm_runtime_set_suspended(dev);
    1944           0 :                 dev_pm_enable_wake_irq_check(dev, false);
    1945           0 :                 goto out;
    1946             :         }
    1947             : 
    1948             :         pm_runtime_mark_last_busy(dev);
    1949             : out:
    1950           0 :         dev->power.needs_force_resume = 0;
    1951           0 :         pm_runtime_enable(dev);
    1952           0 :         return ret;
    1953             : }
    1954             : EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
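
The usual consumer of this pair is a dev_pm_ops that reuses the runtime PM
callbacks for system sleep, along the lines of this sketch (hypothetical
foo_* callbacks):

    #include <linux/pm.h>
    #include <linux/pm_runtime.h>

    static int __maybe_unused foo_runtime_suspend(struct device *dev)
    {
            /* ... gate clocks, drop power ... */
            return 0;
    }

    static int __maybe_unused foo_runtime_resume(struct device *dev)
    {
            /* ... restore power, ungate clocks ... */
            return 0;
    }

    static const struct dev_pm_ops foo_pm_ops = {
            SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                    pm_runtime_force_resume)
            SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
    };
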

Generated by: LCOV version 1.14