devfreq.c
/*
 * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
 *	    for Non-CPU Devices.
 *
 * Copyright (C) 2011 Samsung Electronics
 *	MyungJoo Ham <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/opp.h>
#include <linux/devfreq.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/list.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include "governor.h"

static struct class *devfreq_class;

/*
 * devfreq_work periodically monitors every registered device.
 * The polling interval is determined by the minimum polling period
 * among all polling devfreq devices, with a resolution (and minimum)
 * of one jiffy.
 */
static bool polling;
static struct workqueue_struct *devfreq_wq;
static struct delayed_work devfreq_work;

/* wait for the removal to finish if this device is being removed */
static struct devfreq *wait_remove_device;

/* The list of all device-devfreq */
static LIST_HEAD(devfreq_list);
static DEFINE_MUTEX(devfreq_list_lock);

/**
 * find_device_devfreq() - find devfreq struct using device pointer
 * @dev:	device pointer used to lookup device devfreq.
 *
 * Search the list of device devfreqs and return the matched device's
 * devfreq info. devfreq_list_lock should be held by the caller.
 */
static struct devfreq *find_device_devfreq(struct device *dev)
{
	struct devfreq *tmp_devfreq;

	if (unlikely(IS_ERR_OR_NULL(dev))) {
		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	WARN(!mutex_is_locked(&devfreq_list_lock),
	     "devfreq_list_lock must be locked.");

	list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
		if (tmp_devfreq->dev.parent == dev)
			return tmp_devfreq;
	}

	return ERR_PTR(-ENODEV);
}

/**
 * update_devfreq() - Reevaluate the device and configure frequency.
 * @devfreq:	the devfreq instance.
 *
 * Note: Lock devfreq->lock before calling update_devfreq().  This
 * function is exported for governors.
 */
int update_devfreq(struct devfreq *devfreq)
{
	unsigned long freq;
	int err = 0;
	u32 flags = 0;

	if (!mutex_is_locked(&devfreq->lock)) {
		WARN(true, "devfreq->lock must be locked by the caller.\n");
		return -EINVAL;
	}

	/* Reevaluate the proper frequency */
	err = devfreq->governor->get_target_freq(devfreq, &freq);
	if (err)
		return err;

	/*
	 * Adjust the frequency with user freq and QoS.
	 *
	 * List from the highest priority:
	 *	max_freq (likely set by thermal when the device is too hot)
	 *	min_freq
	 */

	if (devfreq->min_freq && freq < devfreq->min_freq) {
		freq = devfreq->min_freq;
		flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
	}
	if (devfreq->max_freq && freq > devfreq->max_freq) {
		freq = devfreq->max_freq;
		flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
	}

	err = devfreq->profile->target(devfreq->dev.parent, &freq, flags);
	if (err)
		return err;

	devfreq->previous_freq = freq;
	return err;
}
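
/*
 * Example (illustrative sketch, not part of the upstream file): a caller
 * such as a governor or a notifier must hold devfreq->lock around
 * update_devfreq(), exactly as devfreq_notifier_call() below does:
 *
 *	mutex_lock(&devfreq->lock);
 *	err = update_devfreq(devfreq);
 *	mutex_unlock(&devfreq->lock);
 */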

/**
 * devfreq_notifier_call() - Notifier callback function.
 * @nb:		the notifier_block (supposed to be devfreq->nb)
 * @type:	not used
 * @devp:	not used
 *
 * Called by a notifier that uses devfreq->nb.
 */
static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
				 void *devp)
{
	struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
	int ret;

	mutex_lock(&devfreq->lock);
	ret = update_devfreq(devfreq);
	mutex_unlock(&devfreq->lock);

	return ret;
}

/**
 * _remove_devfreq() - Remove devfreq from the device.
 * @devfreq:	the devfreq struct
 * @skip:	skip calling device_unregister().
 *
 * Note that the caller should lock devfreq->lock before calling this.
 * _remove_devfreq() will unlock it and free devfreq internally.
 * devfreq_list_lock should be locked by the caller as well (unless the
 * governor uses no_central_polling).
 *
 * Lock usage:
 * devfreq->lock: locked before call.
 *		  unlocked at return (and freed)
 * devfreq_list_lock: locked before call.
 *		      kept locked at return.
 */
static void _remove_devfreq(struct devfreq *devfreq, bool skip)
{
	if (!mutex_is_locked(&devfreq->lock)) {
		WARN(true, "devfreq->lock must be locked by the caller.\n");
		return;
	}
	if (!devfreq->governor->no_central_polling &&
	    !mutex_is_locked(&devfreq_list_lock)) {
		WARN(true, "devfreq_list_lock must be locked by the caller.\n");
		return;
	}

	if (devfreq->being_removed)
		return;

	devfreq->being_removed = true;

	if (devfreq->profile->exit)
		devfreq->profile->exit(devfreq->dev.parent);

	if (devfreq->governor->exit)
		devfreq->governor->exit(devfreq);

	if (!skip && get_device(&devfreq->dev)) {
		device_unregister(&devfreq->dev);
		put_device(&devfreq->dev);
	}

	if (!devfreq->governor->no_central_polling)
		list_del(&devfreq->node);

	mutex_unlock(&devfreq->lock);
	mutex_destroy(&devfreq->lock);

	kfree(devfreq);
}

/**
 * devfreq_dev_release() - Callback for struct device to release the device.
 * @dev:	the devfreq device
 *
 * This calls _remove_devfreq() if _remove_devfreq() has not been called.
 * Note that devfreq_dev_release() could be called by _remove_devfreq() as
 * well as by others unregistering the device.
 */
static void devfreq_dev_release(struct device *dev)
{
	struct devfreq *devfreq = to_devfreq(dev);
	bool central_polling = !devfreq->governor->no_central_polling;

	/*
	 * If devfreq_dev_release() was called by device_unregister() of
	 * _remove_devfreq(), we cannot mutex_lock(&devfreq->lock) here and
	 * being_removed is already set. This also partially checks the case
	 * where devfreq_dev_release() is called from a thread other than
	 * the one that called _remove_devfreq(); however, that case is
	 * handled completely by the second being_removed check below.
	 *
	 * Because being_removed is never unset, we do not need to worry
	 * about race conditions on it.
	 */
	if (devfreq->being_removed)
		return;

	if (central_polling)
		mutex_lock(&devfreq_list_lock);

	mutex_lock(&devfreq->lock);

	/*
	 * Check the being_removed flag again for the case where
	 * devfreq_dev_release() was called in a thread other than the one
	 * that possibly called _remove_devfreq().
	 */
	if (devfreq->being_removed) {
		mutex_unlock(&devfreq->lock);
		goto out;
	}

	/* devfreq->lock is unlocked and devfreq is freed in _remove_devfreq() */
	_remove_devfreq(devfreq, true);

out:
	if (central_polling)
		mutex_unlock(&devfreq_list_lock);
}

/**
 * devfreq_monitor() - Periodically poll devfreq objects.
 * @work: the work struct used to run devfreq_monitor periodically.
 */
static void devfreq_monitor(struct work_struct *work)
{
	static unsigned long last_polled_at;
	struct devfreq *devfreq, *tmp;
	int error;
	unsigned long jiffies_passed;
	unsigned long next_jiffies = ULONG_MAX, now = jiffies;
	struct device *dev;

	/* Initially last_polled_at == 0, so every device is polled at bootup */
	jiffies_passed = now - last_polled_at;
	last_polled_at = now;
	if (jiffies_passed == 0)
		jiffies_passed = 1;

	mutex_lock(&devfreq_list_lock);
	list_for_each_entry_safe(devfreq, tmp, &devfreq_list, node) {
		mutex_lock(&devfreq->lock);
		dev = devfreq->dev.parent;

		/* Do not remove tmp for a while */
		wait_remove_device = tmp;

		if (devfreq->governor->no_central_polling ||
		    devfreq->next_polling == 0) {
			mutex_unlock(&devfreq->lock);
			continue;
		}
		mutex_unlock(&devfreq_list_lock);

		/*
		 * Reduce next_polling further if devfreq_wq incurred an
		 * extra delay (i.e., the CPU has been idle).
		 */
		if (devfreq->next_polling <= jiffies_passed) {
			error = update_devfreq(devfreq);

			/* Remove a devfreq with an error. */
			if (error && error != -EAGAIN) {

				dev_err(dev, "Due to update_devfreq error(%d), devfreq(%s) is removed from the device\n",
					error, devfreq->governor->name);

				/*
				 * Unlock devfreq before locking the list
				 * in order to avoid deadlock with
				 * find_device_devfreq or others
				 */
				mutex_unlock(&devfreq->lock);
				mutex_lock(&devfreq_list_lock);
				/* Check if devfreq is already removed */
				if (IS_ERR(find_device_devfreq(dev)))
					continue;
				mutex_lock(&devfreq->lock);
				/* This unlocks devfreq->lock and frees devfreq */
				_remove_devfreq(devfreq, false);
				continue;
			}
			devfreq->next_polling = devfreq->polling_jiffies;
		} else {
			devfreq->next_polling -= jiffies_passed;
		}

		if (devfreq->next_polling)
			next_jiffies = (next_jiffies > devfreq->next_polling) ?
				devfreq->next_polling : next_jiffies;

		mutex_unlock(&devfreq->lock);
		mutex_lock(&devfreq_list_lock);
	}
	wait_remove_device = NULL;
	mutex_unlock(&devfreq_list_lock);

	if (next_jiffies > 0 && next_jiffies < ULONG_MAX) {
		polling = true;
		queue_delayed_work(devfreq_wq, &devfreq_work, next_jiffies);
	} else {
		polling = false;
	}
}
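
/*
 * Worked example (illustrative, not from the upstream file): suppose device
 * A has polling_jiffies = 10, device B has polling_jiffies = 30, and 10
 * jiffies have passed since the last run.  A's next_polling (10) is <=
 * jiffies_passed, so A is updated and its next_polling resets to 10; B's
 * next_polling drops from 30 to 20.  next_jiffies becomes min(10, 20) = 10,
 * so devfreq_work is requeued 10 jiffies out.
 */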

/**
 * devfreq_add_device() - Add devfreq feature to the device
 * @dev:	the device to add devfreq feature.
 * @profile:	device-specific profile to run devfreq.
 * @governor:	the policy to choose frequency.
 * @data:	private data for the governor. The devfreq framework does not
 *		touch this value.
 */
struct devfreq *devfreq_add_device(struct device *dev,
				   struct devfreq_dev_profile *profile,
				   const struct devfreq_governor *governor,
				   void *data)
{
	struct devfreq *devfreq;
	int err = 0;

	if (!dev || !profile || !governor) {
		dev_err(dev, "%s: Invalid parameters.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	if (!governor->no_central_polling) {
		mutex_lock(&devfreq_list_lock);
		devfreq = find_device_devfreq(dev);
		mutex_unlock(&devfreq_list_lock);
		if (!IS_ERR(devfreq)) {
			dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
			err = -EINVAL;
			goto err_out;
		}
	}

	devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
	if (!devfreq) {
		dev_err(dev, "%s: Unable to create devfreq for the device\n",
			__func__);
		err = -ENOMEM;
		goto err_out;
	}

	mutex_init(&devfreq->lock);
	mutex_lock(&devfreq->lock);
	devfreq->dev.parent = dev;
	devfreq->dev.class = devfreq_class;
	devfreq->dev.release = devfreq_dev_release;
	devfreq->profile = profile;
	devfreq->governor = governor;
	devfreq->previous_freq = profile->initial_freq;
	devfreq->data = data;
	devfreq->next_polling = devfreq->polling_jiffies
			      = msecs_to_jiffies(devfreq->profile->polling_ms);
	devfreq->nb.notifier_call = devfreq_notifier_call;

	dev_set_name(&devfreq->dev, dev_name(dev));
	err = device_register(&devfreq->dev);
	if (err) {
		put_device(&devfreq->dev);
		goto err_dev;
	}

	if (governor->init)
		err = governor->init(devfreq);
	if (err)
		goto err_init;

	mutex_unlock(&devfreq->lock);

	if (governor->no_central_polling)
		goto out;

	mutex_lock(&devfreq_list_lock);

	list_add(&devfreq->node, &devfreq_list);

	if (devfreq_wq && devfreq->next_polling && !polling) {
		polling = true;
		queue_delayed_work(devfreq_wq, &devfreq_work,
				   devfreq->next_polling);
	}
	mutex_unlock(&devfreq_list_lock);
out:
	return devfreq;

err_init:
	device_unregister(&devfreq->dev);
err_dev:
	mutex_unlock(&devfreq->lock);
	kfree(devfreq);
err_out:
	return ERR_PTR(err);
}
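
/*
 * Example (illustrative sketch, not part of the upstream file): a driver
 * for a hypothetical "foo" device could register with devfreq from its
 * probe() roughly as follows.  The simple_ondemand governor consults
 * get_dev_status(), so the profile must provide it:
 *
 *	static int foo_target(struct device *dev, unsigned long *freq,
 *			      u32 flags)
 *	{
 *		// program the hardware clock to *freq; write back the
 *		// frequency actually set
 *		return 0;
 *	}
 *
 *	static int foo_get_dev_status(struct device *dev,
 *				      struct devfreq_dev_status *stat)
 *	{
 *		stat->current_frequency = ...;	// current rate in Hz
 *		stat->busy_time = ...;		// device-defined load numerator
 *		stat->total_time = ...;		// and denominator
 *		return 0;
 *	}
 *
 *	static struct devfreq_dev_profile foo_profile = {
 *		.initial_freq	= 200000,
 *		.polling_ms	= 100,
 *		.target		= foo_target,
 *		.get_dev_status	= foo_get_dev_status,
 *	};
 *
 *	// in foo_probe():
 *	df = devfreq_add_device(&pdev->dev, &foo_profile,
 *				&devfreq_simple_ondemand, NULL);
 *	if (IS_ERR(df))
 *		return PTR_ERR(df);
 */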

/**
 * devfreq_remove_device() - Remove devfreq feature from a device.
 * @devfreq:	the devfreq instance to be removed
 */
int devfreq_remove_device(struct devfreq *devfreq)
{
	bool central_polling;

	if (!devfreq)
		return -EINVAL;

	central_polling = !devfreq->governor->no_central_polling;

	if (central_polling) {
		mutex_lock(&devfreq_list_lock);
		while (wait_remove_device == devfreq) {
			mutex_unlock(&devfreq_list_lock);
			schedule();
			mutex_lock(&devfreq_list_lock);
		}
	}

	mutex_lock(&devfreq->lock);
	_remove_devfreq(devfreq, false); /* it unlocks devfreq->lock */

	if (central_polling)
		mutex_unlock(&devfreq_list_lock);

	return 0;
}

static ssize_t show_governor(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
}

static ssize_t show_freq(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
}

static ssize_t show_polling_interval(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms);
}

static ssize_t store_polling_interval(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned int value;
	int ret;

	ret = sscanf(buf, "%u", &value);
	if (ret != 1) {
		/* report an error instead of a partial count */
		ret = -EINVAL;
		goto out;
	}

	mutex_lock(&df->lock);
	df->profile->polling_ms = value;
	df->next_polling = df->polling_jiffies
			 = msecs_to_jiffies(value);
	mutex_unlock(&df->lock);

	ret = count;

	if (df->governor->no_central_polling)
		goto out;

	mutex_lock(&devfreq_list_lock);
	if (df->next_polling > 0 && !polling) {
		polling = true;
		queue_delayed_work(devfreq_wq, &devfreq_work,
				   df->next_polling);
	}
	mutex_unlock(&devfreq_list_lock);
out:
	return ret;
}

static ssize_t show_central_polling(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n",
		       !to_devfreq(dev)->governor->no_central_polling);
}

static ssize_t store_min_freq(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned long value;
	int ret;
	unsigned long max;

	ret = sscanf(buf, "%lu", &value);
	if (ret != 1) {
		ret = -EINVAL;
		goto out;
	}

	mutex_lock(&df->lock);
	max = df->max_freq;
	if (value && max && value > max) {
		ret = -EINVAL;
		goto unlock;
	}

	df->min_freq = value;
	update_devfreq(df);
	ret = count;
unlock:
	mutex_unlock(&df->lock);
out:
	return ret;
}

static ssize_t show_min_freq(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	return sprintf(buf, "%lu\n", to_devfreq(dev)->min_freq);
}

static ssize_t store_max_freq(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned long value;
	int ret;
	unsigned long min;

	ret = sscanf(buf, "%lu", &value);
	if (ret != 1) {
		ret = -EINVAL;
		goto out;
	}

	mutex_lock(&df->lock);
	min = df->min_freq;
	if (value && min && value < min) {
		ret = -EINVAL;
		goto unlock;
	}

	df->max_freq = value;
	update_devfreq(df);
	ret = count;
unlock:
	mutex_unlock(&df->lock);
out:
	return ret;
}

static ssize_t show_max_freq(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	return sprintf(buf, "%lu\n", to_devfreq(dev)->max_freq);
}

static struct device_attribute devfreq_attrs[] = {
	__ATTR(governor, S_IRUGO, show_governor, NULL),
	__ATTR(cur_freq, S_IRUGO, show_freq, NULL),
	__ATTR(central_polling, S_IRUGO, show_central_polling, NULL),
	__ATTR(polling_interval, S_IRUGO | S_IWUSR, show_polling_interval,
	       store_polling_interval),
	__ATTR(min_freq, S_IRUGO | S_IWUSR, show_min_freq, store_min_freq),
	__ATTR(max_freq, S_IRUGO | S_IWUSR, show_max_freq, store_max_freq),
	{ },
};
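
/*
 * These attributes are installed on every devfreq device via devfreq_class
 * (see devfreq_init() below) and appear under /sys/class/devfreq/<dev-name>/.
 * For example (illustrative; the device name is hypothetical), from
 * userspace:
 *
 *	cat /sys/class/devfreq/foo-busfreq/governor
 *	echo 100 > /sys/class/devfreq/foo-busfreq/polling_interval
 *	echo 400000 > /sys/class/devfreq/foo-busfreq/max_freq
 */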

/**
 * devfreq_start_polling() - Initialize data structure for devfreq framework
 *			     and start polling registered devfreq devices.
 */
static int __init devfreq_start_polling(void)
{
	mutex_lock(&devfreq_list_lock);
	polling = false;
	devfreq_wq = create_freezable_workqueue("devfreq_wq");
	INIT_DEFERRABLE_WORK(&devfreq_work, devfreq_monitor);
	mutex_unlock(&devfreq_list_lock);

	devfreq_monitor(&devfreq_work.work);
	return 0;
}
late_initcall(devfreq_start_polling);

static int __init devfreq_init(void)
{
	devfreq_class = class_create(THIS_MODULE, "devfreq");
	if (IS_ERR(devfreq_class)) {
		pr_err("%s: couldn't create class\n", __FILE__);
		return PTR_ERR(devfreq_class);
	}
	devfreq_class->dev_attrs = devfreq_attrs;
	return 0;
}
subsys_initcall(devfreq_init);

static void __exit devfreq_exit(void)
{
	class_destroy(devfreq_class);
}
module_exit(devfreq_exit);

/*
 * The following are helper functions for devfreq user device drivers
 * working with the OPP framework.
 */

/**
 * devfreq_recommended_opp() - Helper function to get proper OPP for the
 *			       freq value given to target callback.
 * @dev:	The devfreq user device. (parent of devfreq)
 * @freq:	The frequency given to target function
 * @flags:	Flags handed from devfreq framework.
 */
struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq,
				    u32 flags)
{
	struct opp *opp;

	if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
		/* The freq is an upper bound. opp should be lower */
		opp = opp_find_freq_floor(dev, freq);

		/* If not available, use the closest opp */
		if (opp == ERR_PTR(-ENODEV))
			opp = opp_find_freq_ceil(dev, freq);
	} else {
		/* The freq is a lower bound. opp should be higher */
		opp = opp_find_freq_ceil(dev, freq);

		/* If not available, use the closest opp */
		if (opp == ERR_PTR(-ENODEV))
			opp = opp_find_freq_floor(dev, freq);
	}

	return opp;
}
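
/*
 * Example (illustrative sketch, not part of the upstream file): a typical
 * target callback maps the requested frequency to a valid OPP first.  The
 * OPP library in this kernel expects RCU read-side protection around OPP
 * lookups, so a driver might do (the "foo" helper is hypothetical):
 *
 *	static int foo_target(struct device *dev, unsigned long *freq,
 *			      u32 flags)
 *	{
 *		struct opp *opp;
 *		unsigned long volt;
 *
 *		rcu_read_lock();
 *		opp = devfreq_recommended_opp(dev, freq, flags);
 *		if (IS_ERR(opp)) {
 *			rcu_read_unlock();
 *			return PTR_ERR(opp);
 *		}
 *		volt = opp_get_voltage(opp);	// *freq now holds the OPP rate
 *		rcu_read_unlock();
 *
 *		return foo_set_rate_and_voltage(dev, *freq, volt);
 *	}
 */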

/**
 * devfreq_register_opp_notifier() - Helper function to get devfreq notified
 *				     of freq transitions (OPP availability
 *				     changes)
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 */
int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	struct srcu_notifier_head *nh = opp_get_notifier(dev);

	if (IS_ERR(nh))
		return PTR_ERR(nh);
	return srcu_notifier_chain_register(nh, &devfreq->nb);
}

/**
 * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
 *				       notified for freq transitions
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 *
 * At exit() callback of devfreq_dev_profile, this must be included if
 * devfreq_recommended_opp is used in the device.
 */
int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	struct srcu_notifier_head *nh = opp_get_notifier(dev);

	if (IS_ERR(nh))
		return PTR_ERR(nh);
	return srcu_notifier_chain_unregister(nh, &devfreq->nb);
}

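/*
 * Example (illustrative, not part of the upstream file): the two OPP
 * notifier helpers are meant to be paired, e.g.:
 *
 *	// in probe(), after devfreq_add_device():
 *	err = devfreq_register_opp_notifier(&pdev->dev, df);
 *
 *	// in the devfreq_dev_profile's exit() callback:
 *	devfreq_unregister_opp_notifier(dev, df);
 */
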
MODULE_AUTHOR("MyungJoo Ham <[email protected]>");
MODULE_DESCRIPTION("devfreq class support");
MODULE_LICENSE("GPL");