// SPDX-License-Identifier: GPL-2.0

#include <linux/blk-mq.h>
#include <linux/blk-pm.h>
#include <linux/blkdev.h>
#include <linux/pm_runtime.h>
#include "blk-mq.h"
#include "blk-mq-tag.h"

/**
 * blk_pm_runtime_init - Block layer runtime PM initialization routine
 * @q: the queue of the device
 * @dev: the device the queue belongs to
 *
 * Description:
 *    Initialize runtime-PM-related fields for @q and enable autosuspend for
 *    @dev. Drivers that want to take advantage of request-based runtime PM
 *    should call this function after @dev has been initialized and its
 *    request queue @q has been allocated, while runtime PM cannot yet happen
 *    for it (either because it is disabled/forbidden or because its
 *    usage_count > 0). In most cases, the driver should call this function
 *    before any I/O has taken place.
 *
 *    This function sets up autosuspend for the device; the autosuspend delay
 *    is set to -1 to make runtime suspend impossible until an updated value
 *    is set either by the user or by the driver. Drivers do not need to
 *    touch any other autosuspend settings.
 *
 *    Block layer runtime PM is request based, so it only works for drivers
 *    that use requests as their I/O unit, not for those that submit bios
 *    directly.
 */
void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
{
	q->dev = dev;
	q->rpm_status = RPM_ACTIVE;
	pm_runtime_set_autosuspend_delay(q->dev, -1);
	pm_runtime_use_autosuspend(q->dev);
}
EXPORT_SYMBOL(blk_pm_runtime_init);
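
/*
 * Illustrative sketch, not part of the upstream file: one way a request-based
 * driver might wire up blk_pm_runtime_init() from its probe path. The names
 * "my_blk_dev", "my_blk_probe" and the 5 second autosuspend delay are
 * assumptions made for this example; only blk_pm_runtime_init() and the
 * pm_runtime_*() helpers are real interfaces. Guarded by #if 0 so it is
 * never built.
 */
#if 0
struct my_blk_dev {
	struct device		*dev;	/* hypothetical device */
	struct request_queue	*queue;	/* hypothetical request queue */
};

static int my_blk_probe(struct my_blk_dev *mdev)
{
	/* The device and its request queue must exist before this call. */
	blk_pm_runtime_init(mdev->queue, mdev->dev);

	/*
	 * blk_pm_runtime_init() leaves the autosuspend delay at -1, so no
	 * runtime suspend can happen until the driver or user space picks
	 * a real delay.
	 */
	pm_runtime_set_autosuspend_delay(mdev->dev, 5000);
	pm_runtime_mark_last_busy(mdev->dev);
	return 0;
}
#endif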

/**
 * blk_pre_runtime_suspend - Pre runtime suspend check
 * @q: the queue of the device
 *
 * Description:
 *    Check whether runtime suspend is allowed for the device by examining
 *    whether any requests are pending in the queue. If requests are pending,
 *    the device cannot be runtime suspended; otherwise the queue's status is
 *    updated to SUSPENDING and the driver can proceed to suspend the device.
 *
 *    In the not-allowed case, mark the device as last busy so that the
 *    runtime PM core will try to autosuspend it again some time later.
 *
 *    This function should be called near the start of the device's
 *    runtime_suspend callback.
 *
 * Return:
 *    0      - OK to runtime suspend the device
 *    -EBUSY - Device should not be runtime suspended
 */
int blk_pre_runtime_suspend(struct request_queue *q)
{
	int ret = 0;

	if (!q->dev)
		return ret;

	WARN_ON_ONCE(q->rpm_status != RPM_ACTIVE);

	spin_lock_irq(&q->queue_lock);
	q->rpm_status = RPM_SUSPENDING;
	spin_unlock_irq(&q->queue_lock);

	/*
	 * Increase the pm_only counter before checking whether any
	 * non-PM blk_queue_enter() calls are in progress, so that no new
	 * non-PM blk_queue_enter() call can succeed before the pm_only
	 * counter is decreased again.
	 */
	blk_set_pm_only(q);
	ret = -EBUSY;
	/* Switch q_usage_counter from per-cpu to atomic mode. */
	blk_freeze_queue_start(q);
	/*
	 * Wait until atomic mode has been reached. Since that involves
	 * calling call_rcu(), it is guaranteed that later blk_queue_enter()
	 * calls see the pm-only state. See also
	 * http://lwn.net/Articles/573497/.
	 */
	percpu_ref_switch_to_atomic_sync(&q->q_usage_counter);
	if (percpu_ref_is_zero(&q->q_usage_counter))
		ret = 0;
	/* Switch q_usage_counter back to per-cpu mode. */
	blk_mq_unfreeze_queue(q);

	if (ret < 0) {
		spin_lock_irq(&q->queue_lock);
		q->rpm_status = RPM_ACTIVE;
		pm_runtime_mark_last_busy(q->dev);
		spin_unlock_irq(&q->queue_lock);

		blk_clear_pm_only(q);
	}

	return ret;
}
EXPORT_SYMBOL(blk_pre_runtime_suspend);

/**
 * blk_post_runtime_suspend - Post runtime suspend processing
 * @q: the queue of the device
 * @err: return value of the device's runtime_suspend function
 *
 * Description:
 *    Update the queue's runtime status according to the return value of the
 *    device's runtime suspend function and, on failure, mark the device as
 *    last busy so that the PM core will try to autosuspend it again at a
 *    later time.
 *
 *    This function should be called near the end of the device's
 *    runtime_suspend callback.
 */
void blk_post_runtime_suspend(struct request_queue *q, int err)
{
	if (!q->dev)
		return;

	spin_lock_irq(&q->queue_lock);
	if (!err) {
		q->rpm_status = RPM_SUSPENDED;
	} else {
		q->rpm_status = RPM_ACTIVE;
		pm_runtime_mark_last_busy(q->dev);
	}
	spin_unlock_irq(&q->queue_lock);

	if (err)
		blk_clear_pm_only(q);
}
EXPORT_SYMBOL(blk_post_runtime_suspend);

/**
 * blk_pre_runtime_resume - Pre runtime resume processing
 * @q: the queue of the device
 *
 * Description:
 *    Update the queue's runtime status to RESUMING in preparation for the
 *    runtime resume of the device.
 *
 *    This function should be called near the start of the device's
 *    runtime_resume callback.
 */
void blk_pre_runtime_resume(struct request_queue *q)
{
	if (!q->dev)
		return;

	spin_lock_irq(&q->queue_lock);
	q->rpm_status = RPM_RESUMING;
	spin_unlock_irq(&q->queue_lock);
}
EXPORT_SYMBOL(blk_pre_runtime_resume);

/**
 * blk_post_runtime_resume - Post runtime resume processing
 * @q: the queue of the device
 *
 * Description:
 *    For historical reasons, this routine merely calls blk_set_runtime_active()
 *    to do the real work of restarting the queue. It does this regardless of
 *    whether the device's runtime resume succeeded; even if it failed, the
 *    driver or error handler will need to communicate with the device.
 *
 *    This function should be called near the end of the device's
 *    runtime_resume callback.
 */
void blk_post_runtime_resume(struct request_queue *q)
{
	blk_set_runtime_active(q);
}
EXPORT_SYMBOL(blk_post_runtime_resume);
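
/*
 * Illustrative sketch, not part of the upstream file: a runtime_suspend /
 * runtime_resume callback pair built around the helpers above, reusing the
 * hypothetical struct my_blk_dev from the earlier sketch. The
 * my_hw_power_down()/my_hw_power_up() calls stand in for whatever the real
 * driver does to power its hardware. Guarded by #if 0 so it is never built.
 */
#if 0
static int my_blk_runtime_suspend(struct device *dev)
{
	struct my_blk_dev *mdev = dev_get_drvdata(dev);
	int err;

	/* Returns -EBUSY if requests are pending; the PM core retries later. */
	err = blk_pre_runtime_suspend(mdev->queue);
	if (err)
		return err;

	err = my_hw_power_down(mdev);		/* hypothetical helper */

	/* Record the outcome so the queue status tracks the device. */
	blk_post_runtime_suspend(mdev->queue, err);
	return err;
}

static int my_blk_runtime_resume(struct device *dev)
{
	struct my_blk_dev *mdev = dev_get_drvdata(dev);
	int err;

	blk_pre_runtime_resume(mdev->queue);
	err = my_hw_power_up(mdev);		/* hypothetical helper */

	/* Restart the queue even on failure so error handling can run. */
	blk_post_runtime_resume(mdev->queue);
	return err;
}
#endif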

/**
 * blk_set_runtime_active - Force the runtime status of the queue to active
 * @q: the queue of the device
 *
 * If the device is left runtime suspended during system suspend, the resume
 * hook typically resumes the device and corrects its runtime status
 * accordingly. However, that does not affect the queue's runtime PM status,
 * which is still "suspended". This prevents the queue from processing any
 * requests.
 *
 * This function can be used in a driver's resume hook to correct the queue's
 * runtime PM status and re-enable peeking requests from the queue. It should
 * be called before the first request is added to the queue.
 *
 * This function is also called by blk_post_runtime_resume() for runtime
 * resumes. It does everything necessary to restart the queue.
 */
void blk_set_runtime_active(struct request_queue *q)
{
	int old_status;

	if (!q->dev)
		return;

	spin_lock_irq(&q->queue_lock);
	old_status = q->rpm_status;
	q->rpm_status = RPM_ACTIVE;
	pm_runtime_mark_last_busy(q->dev);
	pm_request_autosuspend(q->dev);
	spin_unlock_irq(&q->queue_lock);

	if (old_status != RPM_ACTIVE)
		blk_clear_pm_only(q);
}
EXPORT_SYMBOL(blk_set_runtime_active);
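
/*
 * Illustrative sketch, not part of the upstream file: using
 * blk_set_runtime_active() from a driver's system resume hook when the device
 * was left runtime suspended across system sleep. Reuses the hypothetical
 * struct my_blk_dev and my_hw_power_up() from the sketches above. Guarded by
 * #if 0 so it is never built.
 */
#if 0
static int my_blk_system_resume(struct device *dev)
{
	struct my_blk_dev *mdev = dev_get_drvdata(dev);
	int err;

	err = my_hw_power_up(mdev);		/* hypothetical helper */
	if (err)
		return err;

	/* Tell the runtime PM core that the device is powered again ... */
	pm_runtime_disable(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	/* ... and mark the queue active before any new request arrives. */
	blk_set_runtime_active(mdev->queue);
	return 0;
}
#endif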