Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
qos.c
Go to the documentation of this file.
1 /*
2  * Devices PM QoS constraints management
3  *
4  * Copyright (C) 2011 Texas Instruments, Inc.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  *
11  * This module exposes the interface to kernel space for specifying
12  * per-device PM QoS dependencies. It provides infrastructure for registration
13  * of:
14  *
15  * Dependents on a QoS value : register requests
16  * Watchers of QoS value : get notified when target QoS value changes
17  *
18  * This QoS design is best effort based. Dependents register their QoS needs.
19  * Watchers register to keep track of the current QoS needs of the system.
20  * Watchers can register different types of notification callbacks:
21  * . a per-device notification callback using the dev_pm_qos_*_notifier API.
22  * The notification chain data is stored in the per-device constraint
23  * data struct.
24  * . a system-wide notification callback using the dev_pm_qos_*_global_notifier
25  * API. The notification chain data is stored in a static variable.
26  *
27  * Note about the per-device constraint data struct allocation:
 28  * The per-device constraints data struct ptr is stored into the device
29  * dev_pm_info.
30  * . To minimize the data usage by the per-device constraints, the data struct
31  * is only allocated at the first call to dev_pm_qos_add_request.
32  * . The data is later free'd when the device is removed from the system.
33  * . A global mutex protects the constraints users from the data being
34  * allocated and free'd.
35  */
36 
37 #include <linux/pm_qos.h>
38 #include <linux/spinlock.h>
39 #include <linux/slab.h>
40 #include <linux/device.h>
41 #include <linux/mutex.h>
42 #include <linux/export.h>
43 
44 #include "power.h"
45 
46 static DEFINE_MUTEX(dev_pm_qos_mtx);
47 
48 static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
49 
57 {
58  struct pm_qos_constraints *c = dev->power.constraints;
59 
60  return c ? pm_qos_read_value(c) : 0;
61 }
62 
68 {
69  unsigned long flags;
70  s32 ret;
71 
72  spin_lock_irqsave(&dev->power.lock, flags);
73  ret = __dev_pm_qos_read_value(dev);
74  spin_unlock_irqrestore(&dev->power.lock, flags);
75 
76  return ret;
77 }
78 
79 /*
80  * apply_constraint
81  * @req: constraint request to apply
82  * @action: action to perform add/update/remove, of type enum pm_qos_req_action
83  * @value: defines the qos request
84  *
85  * Internal function to update the constraints list using the PM QoS core
86  * code and if needed call the per-device and the global notification
87  * callbacks
88  */
89 static int apply_constraint(struct dev_pm_qos_request *req,
90  enum pm_qos_req_action action, int value)
91 {
92  int ret, curr_value;
93 
94  ret = pm_qos_update_target(req->dev->power.constraints,
95  &req->node, action, value);
96 
97  if (ret) {
98  /* Call the global callbacks if needed */
99  curr_value = pm_qos_read_value(req->dev->power.constraints);
100  blocking_notifier_call_chain(&dev_pm_notifiers,
101  (unsigned long)curr_value,
102  req);
103  }
104 
105  return ret;
106 }
107 
108 /*
109  * dev_pm_qos_constraints_allocate
110  * @dev: device to allocate data for
111  *
112  * Called at the first call to add_request, for constraint data allocation
113  * Must be called with the dev_pm_qos_mtx mutex held
114  */
115 static int dev_pm_qos_constraints_allocate(struct device *dev)
116 {
117  struct pm_qos_constraints *c;
118  struct blocking_notifier_head *n;
119 
120  c = kzalloc(sizeof(*c), GFP_KERNEL);
121  if (!c)
122  return -ENOMEM;
123 
124  n = kzalloc(sizeof(*n), GFP_KERNEL);
125  if (!n) {
126  kfree(c);
127  return -ENOMEM;
128  }
130 
131  plist_head_init(&c->list);
134  c->type = PM_QOS_MIN;
135  c->notifiers = n;
136 
137  spin_lock_irq(&dev->power.lock);
138  dev->power.constraints = c;
139  spin_unlock_irq(&dev->power.lock);
140 
141  return 0;
142 }
143 
152 {
153  mutex_lock(&dev_pm_qos_mtx);
154  dev->power.constraints = NULL;
155  dev->power.power_state = PMSG_ON;
156  mutex_unlock(&dev_pm_qos_mtx);
157 }
158 
166 {
167  struct dev_pm_qos_request *req, *tmp;
168  struct pm_qos_constraints *c;
169 
170  /*
171  * If the device's PM QoS resume latency limit has been exposed to user
172  * space, it has to be hidden at this point.
173  */
174  dev_pm_qos_hide_latency_limit(dev);
175 
176  mutex_lock(&dev_pm_qos_mtx);
177 
178  dev->power.power_state = PMSG_INVALID;
179  c = dev->power.constraints;
180  if (!c)
181  goto out;
182 
183  /* Flush the constraints list for the device */
184  plist_for_each_entry_safe(req, tmp, &c->list, node) {
185  /*
186  * Update constraints list and call the notification
187  * callbacks if needed
188  */
189  apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
190  memset(req, 0, sizeof(*req));
191  }
192 
193  spin_lock_irq(&dev->power.lock);
194  dev->power.constraints = NULL;
195  spin_unlock_irq(&dev->power.lock);
196 
197  kfree(c->notifiers);
198  kfree(c);
199 
200  out:
201  mutex_unlock(&dev_pm_qos_mtx);
202 }
203 
222 int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
223  s32 value)
224 {
225  int ret = 0;
226 
227  if (!dev || !req) /*guard against callers passing in null */
228  return -EINVAL;
229 
230  if (WARN(dev_pm_qos_request_active(req),
231  "%s() called for already added request\n", __func__))
232  return -EINVAL;
233 
234  req->dev = dev;
235 
236  mutex_lock(&dev_pm_qos_mtx);
237 
238  if (!dev->power.constraints) {
239  if (dev->power.power_state.event == PM_EVENT_INVALID) {
240  /* The device has been removed from the system. */
241  req->dev = NULL;
242  ret = -ENODEV;
243  goto out;
244  } else {
245  /*
246  * Allocate the constraints data on the first call to
247  * add_request, i.e. only if the data is not already
248  * allocated and if the device has not been removed.
249  */
250  ret = dev_pm_qos_constraints_allocate(dev);
251  }
252  }
253 
254  if (!ret)
255  ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
256 
257  out:
258  mutex_unlock(&dev_pm_qos_mtx);
259 
260  return ret;
261 }
263 
280  s32 new_value)
281 {
282  int ret = 0;
283 
284  if (!req) /*guard against callers passing in null */
285  return -EINVAL;
286 
287  if (WARN(!dev_pm_qos_request_active(req),
288  "%s() called for unknown object\n", __func__))
289  return -EINVAL;
290 
291  mutex_lock(&dev_pm_qos_mtx);
292 
293  if (req->dev->power.constraints) {
294  if (new_value != req->node.prio)
295  ret = apply_constraint(req, PM_QOS_UPDATE_REQ,
296  new_value);
297  } else {
298  /* Return if the device has been removed */
299  ret = -ENODEV;
300  }
301 
302  mutex_unlock(&dev_pm_qos_mtx);
303  return ret;
304 }
306 
320 {
321  int ret = 0;
322 
323  if (!req) /*guard against callers passing in null */
324  return -EINVAL;
325 
326  if (WARN(!dev_pm_qos_request_active(req),
327  "%s() called for unknown object\n", __func__))
328  return -EINVAL;
329 
330  mutex_lock(&dev_pm_qos_mtx);
331 
332  if (req->dev->power.constraints) {
333  ret = apply_constraint(req, PM_QOS_REMOVE_REQ,
335  memset(req, 0, sizeof(*req));
336  } else {
337  /* Return if the device has been removed */
338  ret = -ENODEV;
339  }
340 
341  mutex_unlock(&dev_pm_qos_mtx);
342  return ret;
343 }
345 
359 int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
360 {
361  int ret = 0;
362 
363  mutex_lock(&dev_pm_qos_mtx);
364 
365  if (!dev->power.constraints)
366  ret = dev->power.power_state.event != PM_EVENT_INVALID ?
367  dev_pm_qos_constraints_allocate(dev) : -ENODEV;
368 
369  if (!ret)
371  dev->power.constraints->notifiers, notifier);
372 
373  mutex_unlock(&dev_pm_qos_mtx);
374  return ret;
375 }
377 
389  struct notifier_block *notifier)
390 {
391  int retval = 0;
392 
393  mutex_lock(&dev_pm_qos_mtx);
394 
395  /* Silently return if the constraints object is not present. */
396  if (dev->power.constraints)
398  dev->power.constraints->notifiers,
399  notifier);
400 
401  mutex_unlock(&dev_pm_qos_mtx);
402  return retval;
403 }
405 
416 {
417  return blocking_notifier_chain_register(&dev_pm_notifiers, notifier);
418 }
420 
431 {
432  return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier);
433 }
435 
443  struct dev_pm_qos_request *req, s32 value)
444 {
445  struct device *ancestor = dev->parent;
446  int error = -ENODEV;
447 
448  while (ancestor && !ancestor->power.ignore_children)
449  ancestor = ancestor->parent;
450 
451  if (ancestor)
452  error = dev_pm_qos_add_request(ancestor, req, value);
453 
454  if (error < 0)
455  req->dev = NULL;
456 
457  return error;
458 }
460 
461 #ifdef CONFIG_PM_RUNTIME
462 static void __dev_pm_qos_drop_user_request(struct device *dev)
463 {
464  dev_pm_qos_remove_request(dev->power.pq_req);
465  dev->power.pq_req = NULL;
466 }
467 
473 int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
474 {
475  struct dev_pm_qos_request *req;
476  int ret;
477 
478  if (!device_is_registered(dev) || value < 0)
479  return -EINVAL;
480 
481  if (dev->power.pq_req)
482  return -EEXIST;
483 
484  req = kzalloc(sizeof(*req), GFP_KERNEL);
485  if (!req)
486  return -ENOMEM;
487 
488  ret = dev_pm_qos_add_request(dev, req, value);
489  if (ret < 0)
490  return ret;
491 
492  dev->power.pq_req = req;
493  ret = pm_qos_sysfs_add(dev);
494  if (ret)
495  __dev_pm_qos_drop_user_request(dev);
496 
497  return ret;
498 }
499 EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
500 
505 void dev_pm_qos_hide_latency_limit(struct device *dev)
506 {
507  if (dev->power.pq_req) {
508  pm_qos_sysfs_remove(dev);
509  __dev_pm_qos_drop_user_request(dev);
510  }
511 }
512 EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
513 #endif /* CONFIG_PM_RUNTIME */