// SPDX-License-Identifier: GPL-2.0

#include <linux/blk-pm.h>
#include <linux/blkdev.h>
#include <linux/pm_runtime.h>
#include "blk-mq.h"

/**
 * blk_pm_runtime_init - Block layer runtime PM initialization routine
 * @q: the queue of the device
 * @dev: the device the queue belongs to
 *
 * Description:
 *    Initialize runtime-PM-related fields for @q and start autosuspend for
 *    @dev. Drivers that want to take advantage of request-based runtime PM
 *    should call this function after @dev has been initialized and its
 *    request queue @q has been allocated, while runtime PM cannot happen
 *    yet (either because it is disabled/forbidden or because its usage_count
 *    is > 0). In most cases, drivers should call this function before any
 *    I/O has taken place.
 *
 *    This function sets up autosuspend for the device; the autosuspend delay
 *    is set to -1 to make runtime suspend impossible until an updated value
 *    is set either by the user or by the driver. Drivers do not need to
 *    touch any other autosuspend settings.
 *
 *    Block layer runtime PM is request based, so it only works for drivers
 *    that use requests as their I/O unit, not for those that use bios
 *    directly.
 */
void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
{
	q->dev = dev;
	q->rpm_status = RPM_ACTIVE;
	pm_runtime_set_autosuspend_delay(q->dev, -1);
	pm_runtime_use_autosuspend(q->dev);
}
EXPORT_SYMBOL(blk_pm_runtime_init);
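/*
 * Illustrative sketch (not part of blk-pm.c): how a request-based driver
 * might wire up block layer runtime PM once its device and request queue
 * exist.  my_driver_setup_pm() and the 5 second delay are assumptions made
 * for this example; blk_pm_runtime_init() and the pm_runtime_*() calls are
 * the real interfaces documented above.
 */
static void my_driver_setup_pm(struct request_queue *q, struct device *dev)
{
	/* Register @q with the block layer runtime PM machinery. */
	blk_pm_runtime_init(q, dev);

	/*
	 * blk_pm_runtime_init() leaves the autosuspend delay at -1, which
	 * keeps runtime suspend disabled.  Pick a real delay and allow
	 * runtime PM once the device is actually ready to be suspended.
	 */
	pm_runtime_set_autosuspend_delay(dev, 5000);
	pm_runtime_allow(dev);
}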
/**
 * blk_pre_runtime_suspend - Pre runtime suspend check
 * @q: the queue of the device
 *
 * Description:
 *    This function checks whether runtime suspend is allowed for the device
 *    by examining whether any requests are pending in the queue. If requests
 *    are pending, the device cannot be runtime suspended; otherwise, the
 *    queue's status is updated to SUSPENDING and the driver can proceed to
 *    suspend the device.
 *
 *    If suspend is not allowed, we mark last busy for the device so that the
 *    runtime PM core will try to autosuspend it some time later.
 *
 *    This function should be called near the start of the device's
 *    runtime_suspend callback.
 *
 * Return:
 *    0      - OK to runtime suspend the device
 *    -EBUSY - Device should not be runtime suspended
 */
int blk_pre_runtime_suspend(struct request_queue *q)
{
	int ret = 0;

	if (!q->dev)
		return ret;

	WARN_ON_ONCE(q->rpm_status != RPM_ACTIVE);

	spin_lock_irq(&q->queue_lock);
	q->rpm_status = RPM_SUSPENDING;
	spin_unlock_irq(&q->queue_lock);

	/*
	 * Increase the pm_only counter before checking whether any
	 * non-PM blk_queue_enter() calls are in progress, so that no new
	 * non-PM blk_queue_enter() calls can succeed before the pm_only
	 * counter is decreased again.
	 */
	blk_set_pm_only(q);
	ret = -EBUSY;
	/* Switch q_usage_counter from per-cpu to atomic mode. */
	blk_freeze_queue_start(q);
	/*
	 * Wait until atomic mode has been reached. Since that involves
	 * calling call_rcu(), it is guaranteed that later blk_queue_enter()
	 * calls see the pm-only state. See also
	 * http://lwn.net/Articles/573497/.
	 */
	percpu_ref_switch_to_atomic_sync(&q->q_usage_counter);
	if (percpu_ref_is_zero(&q->q_usage_counter))
		ret = 0;
	/* Switch q_usage_counter back to per-cpu mode. */
	blk_mq_unfreeze_queue(q);

	if (ret < 0) {
		spin_lock_irq(&q->queue_lock);
		q->rpm_status = RPM_ACTIVE;
		pm_runtime_mark_last_busy(q->dev);
		spin_unlock_irq(&q->queue_lock);

		blk_clear_pm_only(q);
	}

	return ret;
}
EXPORT_SYMBOL(blk_pre_runtime_suspend);

/**
 * blk_post_runtime_suspend - Post runtime suspend processing
 * @q: the queue of the device
 * @err: return value of the device's runtime_suspend function
 *
 * Description:
 *    Update the queue's runtime status according to the return value of the
 *    device's runtime_suspend function and, if the suspend failed, mark last
 *    busy for the device so that the PM core will try to autosuspend it at a
 *    later time.
 *
 *    This function should be called near the end of the device's
 *    runtime_suspend callback.
 */
void blk_post_runtime_suspend(struct request_queue *q, int err)
{
	if (!q->dev)
		return;

	spin_lock_irq(&q->queue_lock);
	if (!err) {
		q->rpm_status = RPM_SUSPENDED;
	} else {
		q->rpm_status = RPM_ACTIVE;
		pm_runtime_mark_last_busy(q->dev);
	}
	spin_unlock_irq(&q->queue_lock);

	if (err)
		blk_clear_pm_only(q);
}
EXPORT_SYMBOL(blk_post_runtime_suspend);

/**
 * blk_pre_runtime_resume - Pre runtime resume processing
 * @q: the queue of the device
 *
 * Description:
 *    Update the queue's runtime status to RESUMING in preparation for the
 *    runtime resume of the device.
 *
 *    This function should be called near the start of the device's
 *    runtime_resume callback.
 */
void blk_pre_runtime_resume(struct request_queue *q)
{
	if (!q->dev)
		return;

	spin_lock_irq(&q->queue_lock);
	q->rpm_status = RPM_RESUMING;
	spin_unlock_irq(&q->queue_lock);
}
EXPORT_SYMBOL(blk_pre_runtime_resume);

/**
 * blk_post_runtime_resume - Post runtime resume processing
 * @q: the queue of the device
 *
 * Description:
 *    For historical reasons, this routine merely calls blk_set_runtime_active()
 *    to do the real work of restarting the queue. It does this regardless of
 *    whether the device's runtime resume succeeded; even if it failed, the
 *    driver or error handler will need to communicate with the device.
 *
 *    This function should be called near the end of the device's
 *    runtime_resume callback.
 */
void blk_post_runtime_resume(struct request_queue *q)
{
	blk_set_runtime_active(q);
}
EXPORT_SYMBOL(blk_post_runtime_resume);
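/*
 * Illustrative sketch (not part of blk-pm.c): a driver's runtime PM
 * callbacks bracketing its own hardware suspend/resume work with the
 * helpers above.  my_runtime_suspend(), my_runtime_resume(),
 * my_hw_suspend(), my_hw_resume() and the drvdata layout are assumptions
 * made for this example.
 */
static int my_runtime_suspend(struct device *dev)
{
	struct request_queue *q = dev_get_drvdata(dev);	/* assumed layout */
	int err;

	err = blk_pre_runtime_suspend(q);
	if (err)
		return err;	/* requests pending: stay runtime active */

	err = my_hw_suspend(dev);	/* hypothetical hardware quiesce */
	blk_post_runtime_suspend(q, err);
	return err;
}

static int my_runtime_resume(struct device *dev)
{
	struct request_queue *q = dev_get_drvdata(dev);	/* assumed layout */
	int err;

	blk_pre_runtime_resume(q);
	err = my_hw_resume(dev);	/* hypothetical hardware wakeup */
	blk_post_runtime_resume(q);
	return err;
}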
/**
 * blk_set_runtime_active - Force runtime status of the queue to be active
 * @q: the queue of the device
 *
 * If the device is left runtime suspended during system suspend, the resume
 * hook typically resumes the device and corrects its runtime status
 * accordingly. However, that does not affect the queue's runtime PM status,
 * which is still "suspended". This prevents requests from being processed
 * from the queue.
 *
 * This function can be used in a driver's resume hook to correct the queue's
 * runtime PM status and re-enable peeking requests from the queue. It should
 * be called before the first request is added to the queue.
 *
 * This function is also called by blk_post_runtime_resume() for runtime
 * resumes. It does everything necessary to restart the queue.
 */
void blk_set_runtime_active(struct request_queue *q)
{
	int old_status;

	if (!q->dev)
		return;

	spin_lock_irq(&q->queue_lock);
	old_status = q->rpm_status;
	q->rpm_status = RPM_ACTIVE;
	pm_runtime_mark_last_busy(q->dev);
	pm_request_autosuspend(q->dev);
	spin_unlock_irq(&q->queue_lock);

	if (old_status != RPM_ACTIVE)
		blk_clear_pm_only(q);
}
EXPORT_SYMBOL(blk_set_runtime_active);
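/*
 * Illustrative sketch (not part of blk-pm.c): a driver's system resume
 * callback using blk_set_runtime_active() when the device was left runtime
 * suspended across system suspend and has just been powered up again.
 * my_system_resume() and the drvdata layout are assumptions made for this
 * example.
 */
static int my_system_resume(struct device *dev)
{
	struct request_queue *q = dev_get_drvdata(dev);	/* assumed layout */

	/*
	 * The hardware is functional again, but q->rpm_status may still be
	 * RPM_SUSPENDED; force it back to RPM_ACTIVE before the first
	 * request is added to the queue.
	 */
	blk_set_runtime_active(q);
	return 0;
}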