// SPDX-License-Identifier: GPL-2.0
/*
 * Devices PM QoS constraints management
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *
 * This module exposes the interface to kernel space for specifying
 * per-device PM QoS dependencies. It provides infrastructure for registration
 * of:
 *
 * Dependents on a QoS value: register requests
 * Watchers of QoS value: get notified when the target QoS value changes
 *
 * This QoS design is best effort based. Dependents register their QoS needs.
 * Watchers register to keep track of the current QoS needs of the system.
 * Watchers can register a per-device notification callback using the
 * dev_pm_qos_*_notifier API. The notification chain data is stored in the
 * per-device constraint data struct.
 *
 * Note about the per-device constraint data struct allocation:
 * . The per-device constraints data struct ptr is stored into the device
 *   dev_pm_info.
 * . To minimize the data usage by the per-device constraints, the data struct
 *   is only allocated at the first call to dev_pm_qos_add_request.
 * . The data is later freed when the device is removed from the system.
 * . A global mutex protects the constraints users from the data being
 *   allocated and freed.
 */
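
/*
 * Illustrative overview (not part of this file): a "dependent" registers a
 * request for the value it needs, while a "watcher" registers a notifier to
 * observe the aggregate target value. "my_req" and "my_nb" are made-up names
 * for the sketch; see the kerneldoc of the individual functions below:
 *
 *	static struct dev_pm_qos_request my_req;	// dependent's handle
 *	static struct notifier_block my_nb;		// watcher's handle
 *
 *	dev_pm_qos_add_request(dev, &my_req, DEV_PM_QOS_RESUME_LATENCY, 100);
 *	dev_pm_qos_add_notifier(dev, &my_nb, DEV_PM_QOS_RESUME_LATENCY);
 */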

#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <trace/events/power.h>

#include "power.h"

static DEFINE_MUTEX(dev_pm_qos_mtx);
static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);

/**
 * __dev_pm_qos_flags - Check PM QoS flags for a given device.
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 *
 * This routine must be called with dev->power.lock held.
 */
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
{
	struct dev_pm_qos *qos = dev->power.qos;
	struct pm_qos_flags *pqf;
	s32 val;

	lockdep_assert_held(&dev->power.lock);

	if (IS_ERR_OR_NULL(qos))
		return PM_QOS_FLAGS_UNDEFINED;

	pqf = &qos->flags;
	if (list_empty(&pqf->list))
		return PM_QOS_FLAGS_UNDEFINED;

	val = pqf->effective_flags & mask;
	if (val)
		return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME;

	return PM_QOS_FLAGS_NONE;
}

/**
 * dev_pm_qos_flags - Check PM QoS flags for a given device (locked).
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 */
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
{
	unsigned long irqflags;
	enum pm_qos_flags_status ret;

	spin_lock_irqsave(&dev->power.lock, irqflags);
	ret = __dev_pm_qos_flags(dev, mask);
	spin_unlock_irqrestore(&dev->power.lock, irqflags);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
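
/*
 * Illustrative usage (not part of this file): before removing power from a
 * device, a bus or platform driver can check whether any active request has
 * set PM_QOS_FLAG_NO_POWER_OFF for it. PM_QOS_FLAGS_SOME and PM_QOS_FLAGS_ALL
 * both compare greater than PM_QOS_FLAGS_NONE, while PM_QOS_FLAGS_UNDEFINED
 * (no constraints object) compares less:
 *
 *	if (dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF)
 *			> PM_QOS_FLAGS_NONE)
 *		return -EBUSY;	// at least one request forbids power-off
 */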

/**
 * __dev_pm_qos_resume_latency - Get resume latency constraint for a given device.
 * @dev: Device to get the PM QoS constraint value for.
 *
 * This routine must be called with dev->power.lock held.
 */
s32 __dev_pm_qos_resume_latency(struct device *dev)
{
	lockdep_assert_held(&dev->power.lock);

	return dev_pm_qos_raw_resume_latency(dev);
}

/**
 * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
 * @dev: Device to get the PM QoS constraint value for.
 * @type: QoS request type.
 */
s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type)
{
	struct dev_pm_qos *qos = dev->power.qos;
	unsigned long flags;
	s32 ret;

	spin_lock_irqsave(&dev->power.lock, flags);

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		ret = IS_ERR_OR_NULL(qos) ? PM_QOS_RESUME_LATENCY_NO_CONSTRAINT
			: pm_qos_read_value(&qos->resume_latency);
		break;
	case DEV_PM_QOS_MIN_FREQUENCY:
		ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE
			: freq_qos_read_value(&qos->freq, FREQ_QOS_MIN);
		break;
	case DEV_PM_QOS_MAX_FREQUENCY:
		ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE
			: freq_qos_read_value(&qos->freq, FREQ_QOS_MAX);
		break;
	default:
		WARN_ON(1);
		ret = 0;
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);

	return ret;
}
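
/*
 * Illustrative usage (not part of this file): a frequency-scaling driver
 * could clamp a requested operating point to the aggregate per-device QoS
 * limits before programming the hardware (units are whatever the freq QoS
 * users of the device agree on, kHz for cpufreq):
 *
 *	s32 min_f = dev_pm_qos_read_value(dev, DEV_PM_QOS_MIN_FREQUENCY);
 *	s32 max_f = dev_pm_qos_read_value(dev, DEV_PM_QOS_MAX_FREQUENCY);
 *
 *	target = clamp_t(s32, target, min_f, max_f);
 *
 * When no constraints object exists yet, the default values returned above
 * make the clamp a no-op.
 */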

/**
 * apply_constraint - Add/modify/remove device PM QoS request.
 * @req: Constraint request to apply
 * @action: Action to perform (add/update/remove).
 * @value: Value to assign to the QoS request.
 *
 * Internal function to update the constraints list using the PM QoS core
 * code and, if needed, call the per-device callbacks.
 */
static int apply_constraint(struct dev_pm_qos_request *req,
			    enum pm_qos_req_action action, s32 value)
{
	struct dev_pm_qos *qos = req->dev->power.qos;
	int ret;

	switch (req->type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		if (WARN_ON(action != PM_QOS_REMOVE_REQ && value < 0))
			value = 0;

		ret = pm_qos_update_target(&qos->resume_latency,
					   &req->data.pnode, action, value);
		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		ret = pm_qos_update_target(&qos->latency_tolerance,
					   &req->data.pnode, action, value);
		if (ret) {
			value = pm_qos_read_value(&qos->latency_tolerance);
			req->dev->power.set_latency_tolerance(req->dev, value);
		}
		break;
	case DEV_PM_QOS_MIN_FREQUENCY:
	case DEV_PM_QOS_MAX_FREQUENCY:
		ret = freq_qos_apply(&req->data.freq, action, value);
		break;
	case DEV_PM_QOS_FLAGS:
		ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
					  action, value);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/*
 * dev_pm_qos_constraints_allocate
 * @dev: device to allocate data for
 *
 * Called at the first call to add_request, for constraint data allocation.
 * Must be called with the dev_pm_qos_mtx mutex held.
 */
static int dev_pm_qos_constraints_allocate(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct pm_qos_constraints *c;
	struct blocking_notifier_head *n;

	qos = kzalloc(sizeof(*qos), GFP_KERNEL);
	if (!qos)
		return -ENOMEM;

	n = kzalloc(3 * sizeof(*n), GFP_KERNEL);
	if (!n) {
		kfree(qos);
		return -ENOMEM;
	}

	c = &qos->resume_latency;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->no_constraint_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
	c->type = PM_QOS_MIN;
	c->notifiers = n;
	BLOCKING_INIT_NOTIFIER_HEAD(n);

	c = &qos->latency_tolerance;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
	c->type = PM_QOS_MIN;

	freq_constraints_init(&qos->freq);

	INIT_LIST_HEAD(&qos->flags.list);

	spin_lock_irq(&dev->power.lock);
	dev->power.qos = qos;
	spin_unlock_irq(&dev->power.lock);

	return 0;
}

static void __dev_pm_qos_hide_latency_limit(struct device *dev);
static void __dev_pm_qos_hide_flags(struct device *dev);

/**
 * dev_pm_qos_constraints_destroy
 * @dev: target device
 *
 * Called from the device PM subsystem on device removal under device_pm_lock().
 */
void dev_pm_qos_constraints_destroy(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct dev_pm_qos_request *req, *tmp;
	struct pm_qos_constraints *c;
	struct pm_qos_flags *f;

	mutex_lock(&dev_pm_qos_sysfs_mtx);

	/*
	 * If the device's PM QoS resume latency limit or PM QoS flags have been
	 * exposed to user space, they have to be hidden at this point.
	 */
	pm_qos_sysfs_remove_resume_latency(dev);
	pm_qos_sysfs_remove_flags(dev);

	mutex_lock(&dev_pm_qos_mtx);

	__dev_pm_qos_hide_latency_limit(dev);
	__dev_pm_qos_hide_flags(dev);

	qos = dev->power.qos;
	if (!qos)
		goto out;

	/* Flush the constraints lists for the device. */
	c = &qos->resume_latency;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		/*
		 * Update constraints list and call the notification
		 * callbacks if needed
		 */
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	c = &qos->latency_tolerance;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	c = &qos->freq.min_freq;
	plist_for_each_entry_safe(req, tmp, &c->list, data.freq.pnode) {
		apply_constraint(req, PM_QOS_REMOVE_REQ,
				 PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	c = &qos->freq.max_freq;
	plist_for_each_entry_safe(req, tmp, &c->list, data.freq.pnode) {
		apply_constraint(req, PM_QOS_REMOVE_REQ,
				 PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	f = &qos->flags;
	list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	spin_lock_irq(&dev->power.lock);
	dev->power.qos = ERR_PTR(-ENODEV);
	spin_unlock_irq(&dev->power.lock);

	kfree(qos->resume_latency.notifiers);
	kfree(qos);

out:
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
}

static bool dev_pm_qos_invalid_req_type(struct device *dev,
					enum dev_pm_qos_req_type type)
{
	return type == DEV_PM_QOS_LATENCY_TOLERANCE &&
	       !dev->power.set_latency_tolerance;
}

static int __dev_pm_qos_add_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value)
{
	int ret = 0;

	if (!dev || !req || dev_pm_qos_invalid_req_type(dev, type))
		return -EINVAL;

	if (WARN(dev_pm_qos_request_active(req),
		 "%s() called for already added request\n", __func__))
		return -EINVAL;

	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	trace_dev_pm_qos_add_request(dev_name(dev), type, value);
	if (ret)
		return ret;

	req->dev = dev;
	req->type = type;
	if (req->type == DEV_PM_QOS_MIN_FREQUENCY)
		ret = freq_qos_add_request(&dev->power.qos->freq,
					   &req->data.freq,
					   FREQ_QOS_MIN, value);
	else if (req->type == DEV_PM_QOS_MAX_FREQUENCY)
		ret = freq_qos_add_request(&dev->power.qos->freq,
					   &req->data.freq,
					   FREQ_QOS_MAX, value);
	else
		ret = apply_constraint(req, PM_QOS_ADD_REQ, value);

	return ret;
}

/**
 * dev_pm_qos_add_request - inserts new qos request into the list
 * @dev: target device for the constraint
 * @req: pointer to a preallocated handle
 * @type: type of the request
 * @value: defines the qos request
 *
 * This function inserts a new entry in the device constraints list of
 * requested qos performance characteristics. It recomputes the aggregate
 * QoS expectations of parameters and initializes the dev_pm_qos_request
 * handle. Caller needs to save this handle for later use in updates and
 * removal.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
 * to allocate for data structures, -ENODEV if the device has just been removed
 * from the system.
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
			   enum dev_pm_qos_req_type type, s32 value)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_add_request(dev, req, type, value);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
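
/*
 * Illustrative lifecycle (not part of this file): a driver embeds the
 * request handle in its private data so it stays valid while the request is
 * active. "my_data" and "my_probe" are made-up names for the sketch:
 *
 *	struct my_data {
 *		struct dev_pm_qos_request req;
 *	};
 *
 *	static int my_probe(struct device *dev, struct my_data *d)
 *	{
 *		int ret;
 *
 *		ret = dev_pm_qos_add_request(dev, &d->req,
 *					     DEV_PM_QOS_RESUME_LATENCY, 100);
 *		if (ret < 0)
 *			return ret;	// 1 and 0 both mean success
 *
 *		return 0;
 *	}
 *
 * The matching teardown calls dev_pm_qos_remove_request(&d->req).
 */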

/**
 * __dev_pm_qos_update_request - Modify an existing device PM QoS request.
 * @req: PM QoS request to modify.
 * @new_value: New value to request.
 */
static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
				       s32 new_value)
{
	s32 curr_value;
	int ret = 0;

	if (!req) /* guard against callers passing in null */
		return -EINVAL;

	if (WARN(!dev_pm_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	if (IS_ERR_OR_NULL(req->dev->power.qos))
		return -ENODEV;

	switch (req->type) {
	case DEV_PM_QOS_RESUME_LATENCY:
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		curr_value = req->data.pnode.prio;
		break;
	case DEV_PM_QOS_MIN_FREQUENCY:
	case DEV_PM_QOS_MAX_FREQUENCY:
		curr_value = req->data.freq.pnode.prio;
		break;
	case DEV_PM_QOS_FLAGS:
		curr_value = req->data.flr.flags;
		break;
	default:
		return -EINVAL;
	}

	trace_dev_pm_qos_update_request(dev_name(req->dev), req->type,
					new_value);
	if (curr_value != new_value)
		ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value);

	return ret;
}

/**
 * dev_pm_qos_update_request - modifies an existing qos request
 * @req: handle to list element holding a dev_pm_qos request to use
 * @new_value: defines the qos request
 *
 * Updates an existing dev PM qos request along with updating the
 * target value.
 *
 * Attempts are made to make this code callable on hot code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system.
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_update_request(req, new_value);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);

static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
	int ret;

	if (!req) /* guard against callers passing in null */
		return -EINVAL;

	if (WARN(!dev_pm_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	if (IS_ERR_OR_NULL(req->dev->power.qos))
		return -ENODEV;

	trace_dev_pm_qos_remove_request(dev_name(req->dev), req->type,
					PM_QOS_DEFAULT_VALUE);
	ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
	memset(req, 0, sizeof(*req));
	return ret;
}

/**
 * dev_pm_qos_remove_request - removes an existing qos request
 * @req: handle to request list element
 *
 * Will remove pm qos request from the list of constraints and
 * recompute the current target value. Call this on slow code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system.
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_remove_request(req);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);

/**
 * dev_pm_qos_add_notifier - sets notification entry for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block managed by caller.
 * @type: request type.
 *
 * Will register the notifier into a notification chain that gets called
 * upon changes to the target value for the device.
 *
 * If the device's constraints object doesn't exist when this routine is called,
 * it will be created (or an error code will be returned if that fails).
 */
int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier,
			    enum dev_pm_qos_req_type type)
{
	int ret = 0;

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	if (ret)
		goto unlock;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
						       notifier);
		break;
	case DEV_PM_QOS_MIN_FREQUENCY:
		ret = freq_qos_add_notifier(&dev->power.qos->freq,
					    FREQ_QOS_MIN, notifier);
		break;
	case DEV_PM_QOS_MAX_FREQUENCY:
		ret = freq_qos_add_notifier(&dev->power.qos->freq,
					    FREQ_QOS_MAX, notifier);
		break;
	default:
		WARN_ON(1);
		ret = -EINVAL;
	}

unlock:
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
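
/*
 * Illustrative watcher (not part of this file): the notifier callback
 * receives the new aggregate target value in the "action" argument.
 * "my_nb" and "my_qos_notify" are made-up names for the sketch:
 *
 *	static int my_qos_notify(struct notifier_block *nb,
 *				 unsigned long action, void *data)
 *	{
 *		s32 new_limit = action;	// new aggregate resume latency
 *
 *		// react to the tightened or relaxed constraint here
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_qos_notify };
 *
 *	ret = dev_pm_qos_add_notifier(dev, &my_nb, DEV_PM_QOS_RESUME_LATENCY);
 *
 * The matching teardown calls dev_pm_qos_remove_notifier() with the same
 * arguments.
 */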

/**
 * dev_pm_qos_remove_notifier - deletes notification for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block to be removed.
 * @type: request type.
 *
 * Will remove the notifier from the notification chain that gets called
 * upon changes to the target value.
 */
int dev_pm_qos_remove_notifier(struct device *dev,
			       struct notifier_block *notifier,
			       enum dev_pm_qos_req_type type)
{
	int ret = 0;

	mutex_lock(&dev_pm_qos_mtx);

	/* Silently return if the constraints object is not present. */
	if (IS_ERR_OR_NULL(dev->power.qos))
		goto unlock;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		ret = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
							 notifier);
		break;
	case DEV_PM_QOS_MIN_FREQUENCY:
		ret = freq_qos_remove_notifier(&dev->power.qos->freq,
					       FREQ_QOS_MIN, notifier);
		break;
	case DEV_PM_QOS_MAX_FREQUENCY:
		ret = freq_qos_remove_notifier(&dev->power.qos->freq,
					       FREQ_QOS_MAX, notifier);
		break;
	default:
		WARN_ON(1);
		ret = -EINVAL;
	}

unlock:
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);

/**
 * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
 * @dev: Device whose ancestor to add the request for.
 * @req: Pointer to the preallocated handle.
 * @type: Type of the request.
 * @value: Constraint latency value.
 */
int dev_pm_qos_add_ancestor_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value)
{
	struct device *ancestor = dev->parent;
	int ret = -ENODEV;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		while (ancestor && !ancestor->power.ignore_children)
			ancestor = ancestor->parent;

		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		while (ancestor && !ancestor->power.set_latency_tolerance)
			ancestor = ancestor->parent;

		break;
	default:
		ancestor = NULL;
	}
	if (ancestor)
		ret = dev_pm_qos_add_request(ancestor, req, type, value);

	if (ret < 0)
		req->dev = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
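
/*
 * Illustrative usage (not part of this file): a child device, e.g. a slave
 * hanging off a controller, can bound the resume latency of the nearest
 * ancestor that has power.ignore_children set:
 *
 *	ret = dev_pm_qos_add_ancestor_request(dev, &req,
 *					      DEV_PM_QOS_RESUME_LATENCY, 20);
 *	if (ret < 0)
 *		...	// no suitable ancestor found (-ENODEV) or add failed
 *
 * A return value of 1 or 0 follows the dev_pm_qos_add_request() convention.
 */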

static void __dev_pm_qos_drop_user_request(struct device *dev,
					   enum dev_pm_qos_req_type type)
{
	struct dev_pm_qos_request *req = NULL;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		req = dev->power.qos->resume_latency_req;
		dev->power.qos->resume_latency_req = NULL;
		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		req = dev->power.qos->latency_tolerance_req;
		dev->power.qos->latency_tolerance_req = NULL;
		break;
	case DEV_PM_QOS_FLAGS:
		req = dev->power.qos->flags_req;
		dev->power.qos->flags_req = NULL;
		break;
	default:
		WARN_ON(1);
		return;
	}
	__dev_pm_qos_remove_request(req);
	kfree(req);
}

static void dev_pm_qos_drop_user_request(struct device *dev,
					 enum dev_pm_qos_req_type type)
{
	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_drop_user_request(dev, type);
	mutex_unlock(&dev_pm_qos_mtx);
}

/**
 * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
 * @dev: Device whose PM QoS latency limit is to be exposed to user space.
 * @value: Initial value of the latency limit.
 */
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev) || value < 0)
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	mutex_lock(&dev_pm_qos_sysfs_mtx);

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->resume_latency_req)
		ret = -EEXIST;

	if (ret < 0) {
		__dev_pm_qos_remove_request(req);
		kfree(req);
		mutex_unlock(&dev_pm_qos_mtx);
		goto out;
	}
	dev->power.qos->resume_latency_req = req;

	mutex_unlock(&dev_pm_qos_mtx);

	ret = pm_qos_sysfs_add_resume_latency(dev);
	if (ret)
		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);

out:
	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
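
/*
 * Illustrative usage (not part of this file): a driver that wants user space
 * to tune the device's resume latency budget exposes it once, after the
 * device has been registered:
 *
 *	ret = dev_pm_qos_expose_latency_limit(dev, 100);
 *
 * This creates a writable resume-latency attribute in the device's power/
 * sysfs directory, backed by the request allocated above;
 * dev_pm_qos_hide_latency_limit() undoes it.
 */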

static void __dev_pm_qos_hide_latency_limit(struct device *dev)
{
	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req)
		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
}

/**
 * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
 * @dev: Device whose PM QoS latency limit is to be hidden from user space.
 */
void dev_pm_qos_hide_latency_limit(struct device *dev)
{
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	pm_qos_sysfs_remove_resume_latency(dev);

	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_hide_latency_limit(dev);
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);

/**
 * dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space.
 * @dev: Device whose PM QoS flags are to be exposed to user space.
 * @val: Initial values of the flags.
 */
int dev_pm_qos_expose_flags(struct device *dev, s32 val)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev))
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->flags_req)
		ret = -EEXIST;

	if (ret < 0) {
		__dev_pm_qos_remove_request(req);
		kfree(req);
		mutex_unlock(&dev_pm_qos_mtx);
		goto out;
	}
	dev->power.qos->flags_req = req;

	mutex_unlock(&dev_pm_qos_mtx);

	ret = pm_qos_sysfs_add_flags(dev);
	if (ret)
		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);

out:
	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	pm_runtime_put(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
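
/*
 * Illustrative usage (not part of this file): a driver can let user space
 * decide whether the device may be powered off, then honor the aggregate
 * result in its suspend path:
 *
 *	ret = dev_pm_qos_expose_flags(dev, PM_QOS_FLAG_NO_POWER_OFF);
 *
 * User space then toggles the flag through the corresponding attribute in
 * the device's power/ sysfs directory, and the driver queries it with
 * dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF) as shown earlier.
 */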

static void __dev_pm_qos_hide_flags(struct device *dev)
{
	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)
		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
}

/**
 * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
 * @dev: Device whose PM QoS flags are to be hidden from user space.
 */
void dev_pm_qos_hide_flags(struct device *dev)
{
	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	pm_qos_sysfs_remove_flags(dev);

	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_hide_flags(dev);
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);

/**
 * dev_pm_qos_update_flags - Update PM QoS flags request owned by user space.
 * @dev: Device to update the PM QoS flags request for.
 * @mask: Flags to set/clear.
 * @set: Whether to set or clear the flags (true means set).
 */
int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
{
	s32 value;
	int ret;

	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
		ret = -EINVAL;
		goto out;
	}

	value = dev_pm_qos_requested_flags(dev);
	if (set)
		value |= mask;
	else
		value &= ~mask;

	ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);

out:
	mutex_unlock(&dev_pm_qos_mtx);
	pm_runtime_put(dev);
	return ret;
}

/**
 * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance.
 * @dev: Device to obtain the user space latency tolerance for.
 */
s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
{
	s32 ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = IS_ERR_OR_NULL(dev->power.qos)
		|| !dev->power.qos->latency_tolerance_req ?
			PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
			dev->power.qos->latency_tolerance_req->data.pnode.prio;
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}

/**
 * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance.
 * @dev: Device to update the user space latency tolerance for.
 * @val: New user space latency tolerance for @dev (negative values disable).
 */
int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos)
	    || !dev->power.qos->latency_tolerance_req) {
		struct dev_pm_qos_request *req;

		if (val < 0) {
			if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
				ret = 0;
			else
				ret = -EINVAL;
			goto out;
		}
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto out;
		}
		ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
		if (ret < 0) {
			kfree(req);
			goto out;
		}
		dev->power.qos->latency_tolerance_req = req;
	} else {
		if (val < 0) {
			__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
			ret = 0;
		} else {
			ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
		}
	}

out:
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_user_latency_tolerance);
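
/*
 * Illustrative flow (not part of this file): this is the backend of the
 * per-device latency tolerance sysfs attribute. Writing a non-negative
 * value creates or updates the user space request; writing the special
 * "no constraint" value drops it again:
 *
 *	// user space sets a 15 us tolerance
 *	dev_pm_qos_update_user_latency_tolerance(dev, 15);
 *
 *	// user space removes its constraint
 *	dev_pm_qos_update_user_latency_tolerance(dev,
 *			PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
 *
 * Either path ends up in apply_constraint(), which forwards the new
 * aggregate value to the device's ->set_latency_tolerance() callback.
 */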

/**
 * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace
 * @dev: Device whose latency tolerance to expose
 */
int dev_pm_qos_expose_latency_tolerance(struct device *dev)
{
	int ret;

	if (!dev->power.set_latency_tolerance)
		return -EINVAL;

	mutex_lock(&dev_pm_qos_sysfs_mtx);
	ret = pm_qos_sysfs_add_latency_tolerance(dev);
	mutex_unlock(&dev_pm_qos_sysfs_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_tolerance);

/**
 * dev_pm_qos_hide_latency_tolerance - Hide latency tolerance from userspace
 * @dev: Device whose latency tolerance to hide
 */
void dev_pm_qos_hide_latency_tolerance(struct device *dev)
{
	mutex_lock(&dev_pm_qos_sysfs_mtx);
	pm_qos_sysfs_remove_latency_tolerance(dev);
	mutex_unlock(&dev_pm_qos_sysfs_mtx);

	/* Remove the request from user space now */
	pm_runtime_get_sync(dev);
	dev_pm_qos_update_user_latency_tolerance(dev,
		PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
	pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_tolerance);