Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
opp.c
Go to the documentation of this file.
/*
 * Generic OPP Interface
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 * Nishanth Menon
 * Romit Dasgupta
 * Kevin Hilman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
13 
14 #include <linux/kernel.h>
15 #include <linux/errno.h>
16 #include <linux/err.h>
17 #include <linux/init.h>
18 #include <linux/slab.h>
19 #include <linux/cpufreq.h>
20 #include <linux/device.h>
21 #include <linux/list.h>
22 #include <linux/rculist.h>
23 #include <linux/rcupdate.h>
24 #include <linux/opp.h>
25 #include <linux/of.h>
26 
/*
 * Internal data structure organization with the OPP layer library is as
 * follows:
 * dev_opp_list (root)
 *  |- device 1 (represents voltage domain 1)
 *  |    |- opp 1 (availability, freq, voltage)
 *  |    |- opp 2 ..
 *  ...  ...
 *  |    `- opp n ..
 *  |- device 2 (represents the next voltage domain)
 *  ...
 *  `- device m (represents mth voltage domain)
 * device 1, 2.. are represented by dev_opp structure while each opp
 * is represented by the opp structure.
 */
42 
60 struct opp {
61  struct list_head node;
62 
63  bool available;
64  unsigned long rate;
65  unsigned long u_volt;
66 
68 };
69 
85 struct device_opp {
86  struct list_head node;
87 
88  struct device *dev;
91 };
92 
/*
 * The root of the list of all devices. All device_opp structures branch off
 * from here, with each device_opp containing the list of opp it supports in
 * various states of availability.
 */
static LIST_HEAD(dev_opp_list);
/* Lock to allow exclusive modification to the device and opp lists */
static DEFINE_MUTEX(dev_opp_list_lock);
101 
116 static struct device_opp *find_device_opp(struct device *dev)
117 {
118  struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
119 
120  if (unlikely(IS_ERR_OR_NULL(dev))) {
121  pr_err("%s: Invalid parameters\n", __func__);
122  return ERR_PTR(-EINVAL);
123  }
124 
125  list_for_each_entry_rcu(tmp_dev_opp, &dev_opp_list, node) {
126  if (tmp_dev_opp->dev == dev) {
127  dev_opp = tmp_dev_opp;
128  break;
129  }
130  }
131 
132  return dev_opp;
133 }
134 
150 unsigned long opp_get_voltage(struct opp *opp)
151 {
152  struct opp *tmp_opp;
153  unsigned long v = 0;
154 
155  tmp_opp = rcu_dereference(opp);
156  if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
157  pr_err("%s: Invalid parameters\n", __func__);
158  else
159  v = tmp_opp->u_volt;
160 
161  return v;
162 }
163 
179 unsigned long opp_get_freq(struct opp *opp)
180 {
181  struct opp *tmp_opp;
182  unsigned long f = 0;
183 
184  tmp_opp = rcu_dereference(opp);
185  if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
186  pr_err("%s: Invalid parameters\n", __func__);
187  else
188  f = tmp_opp->rate;
189 
190  return f;
191 }
192 
204 int opp_get_opp_count(struct device *dev)
205 {
206  struct device_opp *dev_opp;
207  struct opp *temp_opp;
208  int count = 0;
209 
210  dev_opp = find_device_opp(dev);
211  if (IS_ERR(dev_opp)) {
212  int r = PTR_ERR(dev_opp);
213  dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
214  return r;
215  }
216 
217  list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
218  if (temp_opp->available)
219  count++;
220  }
221 
222  return count;
223 }
224 
248 struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
249  bool available)
250 {
251  struct device_opp *dev_opp;
252  struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);
253 
254  dev_opp = find_device_opp(dev);
255  if (IS_ERR(dev_opp)) {
256  int r = PTR_ERR(dev_opp);
257  dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
258  return ERR_PTR(r);
259  }
260 
261  list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
262  if (temp_opp->available == available &&
263  temp_opp->rate == freq) {
264  opp = temp_opp;
265  break;
266  }
267  }
268 
269  return opp;
270 }
271 
289 struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
290 {
291  struct device_opp *dev_opp;
292  struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);
293 
294  if (!dev || !freq) {
295  dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
296  return ERR_PTR(-EINVAL);
297  }
298 
299  dev_opp = find_device_opp(dev);
300  if (IS_ERR(dev_opp))
301  return opp;
302 
303  list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
304  if (temp_opp->available && temp_opp->rate >= *freq) {
305  opp = temp_opp;
306  *freq = opp->rate;
307  break;
308  }
309  }
310 
311  return opp;
312 }
313 
331 struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
332 {
333  struct device_opp *dev_opp;
334  struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);
335 
336  if (!dev || !freq) {
337  dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
338  return ERR_PTR(-EINVAL);
339  }
340 
341  dev_opp = find_device_opp(dev);
342  if (IS_ERR(dev_opp))
343  return opp;
344 
345  list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
346  if (temp_opp->available) {
347  /* go to the next node, before choosing prev */
348  if (temp_opp->rate > *freq)
349  break;
350  else
351  opp = temp_opp;
352  }
353  }
354  if (!IS_ERR(opp))
355  *freq = opp->rate;
356 
357  return opp;
358 }
359 
376 int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
377 {
378  struct device_opp *dev_opp = NULL;
379  struct opp *opp, *new_opp;
380  struct list_head *head;
381 
382  /* allocate new OPP node */
383  new_opp = kzalloc(sizeof(struct opp), GFP_KERNEL);
384  if (!new_opp) {
385  dev_warn(dev, "%s: Unable to create new OPP node\n", __func__);
386  return -ENOMEM;
387  }
388 
389  /* Hold our list modification lock here */
390  mutex_lock(&dev_opp_list_lock);
391 
392  /* Check for existing list for 'dev' */
393  dev_opp = find_device_opp(dev);
394  if (IS_ERR(dev_opp)) {
395  /*
396  * Allocate a new device OPP table. In the infrequent case
397  * where a new device is needed to be added, we pay this
398  * penalty.
399  */
400  dev_opp = kzalloc(sizeof(struct device_opp), GFP_KERNEL);
401  if (!dev_opp) {
402  mutex_unlock(&dev_opp_list_lock);
403  kfree(new_opp);
404  dev_warn(dev,
405  "%s: Unable to create device OPP structure\n",
406  __func__);
407  return -ENOMEM;
408  }
409 
410  dev_opp->dev = dev;
411  srcu_init_notifier_head(&dev_opp->head);
412  INIT_LIST_HEAD(&dev_opp->opp_list);
413 
414  /* Secure the device list modification */
415  list_add_rcu(&dev_opp->node, &dev_opp_list);
416  }
417 
418  /* populate the opp table */
419  new_opp->dev_opp = dev_opp;
420  new_opp->rate = freq;
421  new_opp->u_volt = u_volt;
422  new_opp->available = true;
423 
424  /* Insert new OPP in order of increasing frequency */
425  head = &dev_opp->opp_list;
426  list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
427  if (new_opp->rate < opp->rate)
428  break;
429  else
430  head = &opp->node;
431  }
432 
433  list_add_rcu(&new_opp->node, head);
434  mutex_unlock(&dev_opp_list_lock);
435 
436  /*
437  * Notify the changes in the availability of the operable
438  * frequency/voltage list.
439  */
440  srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ADD, new_opp);
441  return 0;
442 }
443 
463 static int opp_set_availability(struct device *dev, unsigned long freq,
464  bool availability_req)
465 {
466  struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
467  struct opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
468  int r = 0;
469 
470  /* keep the node allocated */
471  new_opp = kmalloc(sizeof(struct opp), GFP_KERNEL);
472  if (!new_opp) {
473  dev_warn(dev, "%s: Unable to create OPP\n", __func__);
474  return -ENOMEM;
475  }
476 
477  mutex_lock(&dev_opp_list_lock);
478 
479  /* Find the device_opp */
480  list_for_each_entry(tmp_dev_opp, &dev_opp_list, node) {
481  if (dev == tmp_dev_opp->dev) {
482  dev_opp = tmp_dev_opp;
483  break;
484  }
485  }
486  if (IS_ERR(dev_opp)) {
487  r = PTR_ERR(dev_opp);
488  dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
489  goto unlock;
490  }
491 
492  /* Do we have the frequency? */
493  list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
494  if (tmp_opp->rate == freq) {
495  opp = tmp_opp;
496  break;
497  }
498  }
499  if (IS_ERR(opp)) {
500  r = PTR_ERR(opp);
501  goto unlock;
502  }
503 
504  /* Is update really needed? */
505  if (opp->available == availability_req)
506  goto unlock;
507  /* copy the old data over */
508  *new_opp = *opp;
509 
510  /* plug in new node */
511  new_opp->available = availability_req;
512 
513  list_replace_rcu(&opp->node, &new_opp->node);
514  mutex_unlock(&dev_opp_list_lock);
515  synchronize_rcu();
516 
517  /* Notify the change of the OPP availability */
518  if (availability_req)
520  new_opp);
521  else
523  new_opp);
524 
525  /* clean up old opp */
526  new_opp = opp;
527  goto out;
528 
529 unlock:
530  mutex_unlock(&dev_opp_list_lock);
531 out:
532  kfree(new_opp);
533  return r;
534 }
535 
551 int opp_enable(struct device *dev, unsigned long freq)
552 {
553  return opp_set_availability(dev, freq, true);
554 }
555 
572 int opp_disable(struct device *dev, unsigned long freq)
573 {
574  return opp_set_availability(dev, freq, false);
575 }
576 
577 #ifdef CONFIG_CPU_FREQ
578 
602 int opp_init_cpufreq_table(struct device *dev,
604 {
605  struct device_opp *dev_opp;
606  struct opp *opp;
608  int i = 0;
609 
610  /* Pretend as if I am an updater */
611  mutex_lock(&dev_opp_list_lock);
612 
613  dev_opp = find_device_opp(dev);
614  if (IS_ERR(dev_opp)) {
615  int r = PTR_ERR(dev_opp);
616  mutex_unlock(&dev_opp_list_lock);
617  dev_err(dev, "%s: Device OPP not found (%d)\n", __func__, r);
618  return r;
619  }
620 
621  freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
622  (opp_get_opp_count(dev) + 1), GFP_KERNEL);
623  if (!freq_table) {
624  mutex_unlock(&dev_opp_list_lock);
625  dev_warn(dev, "%s: Unable to allocate frequency table\n",
626  __func__);
627  return -ENOMEM;
628  }
629 
630  list_for_each_entry(opp, &dev_opp->opp_list, node) {
631  if (opp->available) {
632  freq_table[i].index = i;
633  freq_table[i].frequency = opp->rate / 1000;
634  i++;
635  }
636  }
637  mutex_unlock(&dev_opp_list_lock);
638 
639  freq_table[i].index = i;
640  freq_table[i].frequency = CPUFREQ_TABLE_END;
641 
642  *table = &freq_table[0];
643 
644  return 0;
645 }
646 
654 void opp_free_cpufreq_table(struct device *dev,
655  struct cpufreq_frequency_table **table)
656 {
657  if (!table)
658  return;
659 
660  kfree(*table);
661  *table = NULL;
662 }
663 #endif /* CONFIG_CPU_FREQ */
664 
670 {
671  struct device_opp *dev_opp = find_device_opp(dev);
672 
673  if (IS_ERR(dev_opp))
674  return ERR_CAST(dev_opp); /* matching type */
675 
676  return &dev_opp->head;
677 }
678 
#ifdef CONFIG_OF

/**
 * of_init_opp_table() - Initialize opp table from device tree
 * @dev:	device pointer used to look up device OPPs
 *
 * Registers the initial OPP table for @dev from the "operating-points"
 * device-tree property. Returns -ENODEV if the property is absent,
 * -ENODATA if it has no value, -EINVAL if the entry count is odd, else 0.
 *
 * Fix: the original decremented nr only when opp_add() succeeded
 * ("continue" skipped "nr -= 2"), so a persistent opp_add() failure
 * spun forever. Decrement unconditionally: the tuple has been consumed
 * (val already advanced) whether or not the add succeeded.
 */
int of_init_opp_table(struct device *dev)
{
	const struct property *prop;
	const __be32 *val;
	int nr;

	prop = of_find_property(dev->of_node, "operating-points", NULL);
	if (!prop)
		return -ENODEV;
	if (!prop->value)
		return -ENODATA;

	/*
	 * Each OPP is a set of tuples consisting of frequency and
	 * voltage like <freq-kHz vol-uV>.
	 */
	nr = prop->length / sizeof(u32);
	if (nr % 2) {
		dev_err(dev, "%s: Invalid OPP list\n", __func__);
		return -EINVAL;
	}

	val = prop->value;
	while (nr) {
		unsigned long freq = be32_to_cpup(val++) * 1000;
		unsigned long volt = be32_to_cpup(val++);

		if (opp_add(dev, freq, volt))
			dev_warn(dev, "%s: Failed to add OPP %ld\n",
				 __func__, freq);
		nr -= 2;
	}

	return 0;
}
#endif