Linux Kernel 3.7.1
cpu_cooling.c
/*
 * linux/drivers/thermal/cpu_cooling.c
 *
 * Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
 * Copyright (C) 2012 Amit Daniel <[email protected]>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/thermal.h>
#include <linux/platform_device.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/cpu_cooling.h>

/*
 * struct cpufreq_cooling_device - data for each registered cooling device
 * @id: unique integer value corresponding to each cpufreq_cooling_device
 *	registered.
 * @cool_dev: thermal_cooling_device pointer of the registered cooling device.
 * @cpufreq_state: integer value representing the current cooling state.
 * @cpufreq_val: integer value representing the absolute value of the clipped
 *	frequency.
 * @allowed_cpus: all the cpus involved for this cpufreq_cooling_device.
 * @node: list_head to link all cpufreq_cooling_device together.
 *
 * The registered devices form a list whose head is cooling_cpufreq_list.
 */
struct cpufreq_cooling_device {
        int id;
        struct thermal_cooling_device *cool_dev;
        unsigned int cpufreq_state;
        unsigned int cpufreq_val;
        struct cpumask allowed_cpus;
        struct list_head node;
};
static LIST_HEAD(cooling_cpufreq_list);
static DEFINE_IDR(cpufreq_idr);

static struct mutex cooling_cpufreq_lock;

/* notify_table passes value to the CPUFREQ_ADJUST callback function. */
#define NOTIFY_INVALID NULL
struct cpufreq_cooling_device *notify_device;

/*
 * get_idr - function to get a unique id.
 * @idr: struct idr * handle used to create an id.
 * @id: int * value generated by this function.
 */
static int get_idr(struct idr *idr, int *id)
{
        int err;
again:
        if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
                return -ENOMEM;

        mutex_lock(&cooling_cpufreq_lock);
        err = idr_get_new(idr, NULL, id);
        mutex_unlock(&cooling_cpufreq_lock);

        if (unlikely(err == -EAGAIN))
                goto again;
        else if (unlikely(err))
                return err;

        *id = *id & MAX_IDR_MASK;
        return 0;
}

/*
 * release_idr - function to free the unique id.
 * @idr: struct idr * handle used for creating the id.
 * @id: int value representing the unique id.
 */
static void release_idr(struct idr *idr, int id)
{
        mutex_lock(&cooling_cpufreq_lock);
        idr_remove(idr, id);
        mutex_unlock(&cooling_cpufreq_lock);
}

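/*
 * Note on the pattern above (added for clarity, not in the original file):
 * idr_pre_get() preallocates an IDR layer outside the lock; if a concurrent
 * allocator consumes it, idr_get_new() returns -EAGAIN and the preallocation
 * is simply repeated before retrying the allocation.
 */
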
/* Below code defines functions to be used for cpufreq as cooling device */

/*
 * is_cpufreq_valid - function to check whether a cpu has a valid cpufreq policy.
 * @cpu: cpu for which the check is required.
 */
static int is_cpufreq_valid(int cpu)
{
        struct cpufreq_policy policy;
        return !cpufreq_get_policy(&policy, cpu);
}

/*
 * get_cpu_frequency - get the absolute frequency for a given cooling level.
 * @cpu: cpu for which the frequency table is looked up.
 * @level: cooling level, interpreted as an index into the frequency table
 *	(level 0 corresponds to the highest frequency).
 *
 * Returns 0 on error, otherwise the matched frequency in kHz.
 */
static unsigned int get_cpu_frequency(unsigned int cpu, unsigned long level)
{
        int ret = 0, i = 0;
        unsigned long level_index;
        bool descend = false;
        struct cpufreq_frequency_table *table =
                                        cpufreq_frequency_get_table(cpu);
        if (!table)
                return ret;

        while (table[i].frequency != CPUFREQ_TABLE_END) {
                if (table[i].frequency == CPUFREQ_ENTRY_INVALID) {
                        i++;    /* skip invalid entries; without the increment the loop never terminates */
                        continue;
                }

                /* check if the table is in ascending or descending order */
                if ((table[i + 1].frequency != CPUFREQ_TABLE_END) &&
                    (table[i + 1].frequency < table[i].frequency) && !descend)
                        descend = true;

                /* return if the level matched and the table is in descending order */
                if (descend && i == level)
                        return table[i].frequency;
                i++;
        }
        i--;

        if (level > i || descend)
                return ret;
        level_index = i - level;

        /* scan the table in reverse order and match the level */
        while (i >= 0) {
                if (table[i].frequency == CPUFREQ_ENTRY_INVALID) {
                        i--;    /* skip invalid entries; without the decrement the loop never terminates */
                        continue;
                }
                /* return if the level matched */
                if (i == level_index)
                        return table[i].frequency;
                i--;
        }
        return ret;
}

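/*
 * Worked example (illustrative, not part of the original source): for a
 * descending table {1600000, 1200000, 800000, CPUFREQ_TABLE_END},
 * get_cpu_frequency(cpu, 0) returns 1600000 and get_cpu_frequency(cpu, 2)
 * returns 800000. For the same frequencies stored in ascending order, the
 * reverse scan matches level_index = last_index - level, so level 0 again
 * selects the highest frequency.
 */
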
/*
 * cpufreq_apply_cooling - function to apply frequency clipping.
 * @cpufreq_device: cpufreq_cooling_device pointer containing frequency
 *	clipping data.
 * @cooling_state: value of the cooling state.
 */
static int cpufreq_apply_cooling(struct cpufreq_cooling_device *cpufreq_device,
                                unsigned long cooling_state)
{
        unsigned int cpuid, clip_freq;
        struct cpumask *maskPtr = &cpufreq_device->allowed_cpus;
        unsigned int cpu = cpumask_any(maskPtr);

        /* Check if the old cooling action is same as new cooling action */
        if (cpufreq_device->cpufreq_state == cooling_state)
                return 0;

        clip_freq = get_cpu_frequency(cpu, cooling_state);
        if (!clip_freq)
                return -EINVAL;

        cpufreq_device->cpufreq_state = cooling_state;
        cpufreq_device->cpufreq_val = clip_freq;
        notify_device = cpufreq_device;

        for_each_cpu(cpuid, maskPtr) {
                if (is_cpufreq_valid(cpuid))
                        cpufreq_update_policy(cpuid);
        }

        notify_device = NOTIFY_INVALID;

        return 0;
}

/*
 * cpufreq_thermal_notifier - notifier callback for cpufreq policy change.
 * @nb: struct notifier_block * with callback info.
 * @event: value showing the cpufreq event for which this function was invoked.
 * @data: callback-specific data.
 */
static int cpufreq_thermal_notifier(struct notifier_block *nb,
                                        unsigned long event, void *data)
{
        struct cpufreq_policy *policy = data;
        unsigned long max_freq = 0;

        if (event != CPUFREQ_ADJUST || notify_device == NOTIFY_INVALID)
                return 0;

        if (cpumask_test_cpu(policy->cpu, &notify_device->allowed_cpus))
                max_freq = notify_device->cpufreq_val;

        /* Never exceed user_policy.max */
        if (max_freq > policy->user_policy.max)
                max_freq = policy->user_policy.max;

        if (policy->max != max_freq)
                cpufreq_verify_within_limits(policy, 0, max_freq);

        return 0;
}

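/*
 * Note on the handshake above (added for clarity, not in the original file):
 * cpufreq_apply_cooling() publishes the clipped frequency through the global
 * notify_device pointer and then calls cpufreq_update_policy() on each
 * affected cpu. Policy re-evaluation raises CPUFREQ_ADJUST, which lands in
 * cpufreq_thermal_notifier() and clamps policy->max to cpufreq_val before
 * notify_device is reset to NOTIFY_INVALID.
 */
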
/*
 * cpufreq cooling device callback functions are defined below
 */

/*
 * cpufreq_get_max_state - callback function to get the max cooling state.
 * @cdev: thermal cooling device pointer.
 * @state: fill this variable with the max cooling state.
 */
static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
                                 unsigned long *state)
{
        int ret = -EINVAL, i = 0;
        struct cpufreq_cooling_device *cpufreq_device = NULL, *cpufreq_dev;
        struct cpumask *maskPtr;
        unsigned int cpu;
        struct cpufreq_frequency_table *table;

        mutex_lock(&cooling_cpufreq_lock);
        /* cpufreq_device stays NULL if no registered device matches cdev */
        list_for_each_entry(cpufreq_dev, &cooling_cpufreq_list, node) {
                if (cpufreq_dev->cool_dev == cdev) {
                        cpufreq_device = cpufreq_dev;
                        break;
                }
        }
        if (cpufreq_device == NULL)
                goto return_get_max_state;

        maskPtr = &cpufreq_device->allowed_cpus;
        cpu = cpumask_any(maskPtr);
        table = cpufreq_frequency_get_table(cpu);
        if (!table) {
                *state = 0;
                ret = 0;
                goto return_get_max_state;
        }

        while (table[i].frequency != CPUFREQ_TABLE_END) {
                if (table[i].frequency == CPUFREQ_ENTRY_INVALID) {
                        i++;    /* an invalid entry still occupies a slot; without the increment the loop never terminates */
                        continue;
                }
                i++;
        }
        if (i > 0) {
                *state = --i;
                ret = 0;
        }

return_get_max_state:
        mutex_unlock(&cooling_cpufreq_lock);
        return ret;
}

/*
 * cpufreq_get_cur_state - callback function to get the current cooling state.
 * @cdev: thermal cooling device pointer.
 * @state: fill this variable with the current cooling state.
 */
static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev,
                                 unsigned long *state)
{
        int ret = -EINVAL;
        struct cpufreq_cooling_device *cpufreq_device;

        mutex_lock(&cooling_cpufreq_lock);
        list_for_each_entry(cpufreq_device, &cooling_cpufreq_list, node) {
                if (cpufreq_device->cool_dev == cdev) {
                        *state = cpufreq_device->cpufreq_state;
                        ret = 0;
                        break;
                }
        }
        mutex_unlock(&cooling_cpufreq_lock);

        return ret;
}

/*
 * cpufreq_set_cur_state - callback function to set the current cooling state.
 * @cdev: thermal cooling device pointer.
 * @state: set this variable to the current cooling state.
 */
static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
                                 unsigned long state)
{
        int ret = -EINVAL;
        struct cpufreq_cooling_device *cpufreq_device;

        mutex_lock(&cooling_cpufreq_lock);
        list_for_each_entry(cpufreq_device, &cooling_cpufreq_list, node) {
                if (cpufreq_device->cool_dev == cdev) {
                        ret = 0;
                        break;
                }
        }
        if (!ret)
                ret = cpufreq_apply_cooling(cpufreq_device, state);

        mutex_unlock(&cooling_cpufreq_lock);

        return ret;
}

/* Bind cpufreq callbacks to thermal cooling device ops */
static struct thermal_cooling_device_ops const cpufreq_cooling_ops = {
        .get_max_state = cpufreq_get_max_state,
        .get_cur_state = cpufreq_get_cur_state,
        .set_cur_state = cpufreq_set_cur_state,
};
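/*
 * Note (added for clarity, not in the original file): the thermal core
 * drives these callbacks once a thermal zone is bound to this cooling
 * device. get_max_state() reports the deepest valid cooling state, and
 * set_cur_state(n) clips the cpu frequency to the n-th frequency-table
 * level, with state 0 meaning no throttling.
 */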

/* Notifier for cpufreq policy change */
static struct notifier_block thermal_cpufreq_notifier_block = {
        .notifier_call = cpufreq_thermal_notifier,
};

/*
 * cpufreq_cooling_register - function to create cpufreq cooling device.
 * @clip_cpus: cpumask of cpus where the frequency constraints will happen.
 */
struct thermal_cooling_device *cpufreq_cooling_register(
        struct cpumask *clip_cpus)
{
        struct thermal_cooling_device *cool_dev;
        struct cpufreq_cooling_device *cpufreq_dev = NULL;
        unsigned int cpufreq_dev_count = 0, min = 0, max = 0;
        char dev_name[THERMAL_NAME_LENGTH];
        int ret = 0, i;
        struct cpufreq_policy policy;

        list_for_each_entry(cpufreq_dev, &cooling_cpufreq_list, node)
                cpufreq_dev_count++;

        /* Verify that all the clip cpus have same freq_min, freq_max limit */
        for_each_cpu(i, clip_cpus) {
                /* skip a cpu whose cpufreq policy is not found rather than returning an error */
                if (cpufreq_get_policy(&policy, i))
                        continue;
                if (min == 0 && max == 0) {
                        min = policy.cpuinfo.min_freq;
                        max = policy.cpuinfo.max_freq;
                } else {
                        if (min != policy.cpuinfo.min_freq ||
                            max != policy.cpuinfo.max_freq)
                                return ERR_PTR(-EINVAL);
                }
        }
        cpufreq_dev = kzalloc(sizeof(struct cpufreq_cooling_device),
                              GFP_KERNEL);
        if (!cpufreq_dev)
                return ERR_PTR(-ENOMEM);

        cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus);

        if (cpufreq_dev_count == 0)
                mutex_init(&cooling_cpufreq_lock);

        ret = get_idr(&cpufreq_idr, &cpufreq_dev->id);
        if (ret) {
                kfree(cpufreq_dev);
                return ERR_PTR(-EINVAL);
        }

        sprintf(dev_name, "thermal-cpufreq-%d", cpufreq_dev->id);

        cool_dev = thermal_cooling_device_register(dev_name, cpufreq_dev,
                                                   &cpufreq_cooling_ops);
        if (!cool_dev) {
                release_idr(&cpufreq_idr, cpufreq_dev->id);
                kfree(cpufreq_dev);
                return ERR_PTR(-EINVAL);
        }
        cpufreq_dev->cool_dev = cool_dev;
        cpufreq_dev->cpufreq_state = 0;
        mutex_lock(&cooling_cpufreq_lock);
        list_add_tail(&cpufreq_dev->node, &cooling_cpufreq_list);

        /* Register the notifier for first cpufreq cooling device */
        if (cpufreq_dev_count == 0)
                cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
                                          CPUFREQ_POLICY_NOTIFIER);

        mutex_unlock(&cooling_cpufreq_lock);
        return cool_dev;
}
EXPORT_SYMBOL(cpufreq_cooling_register);

/*
 * cpufreq_cooling_unregister - function to remove cpufreq cooling device.
 * @cdev: thermal cooling device pointer.
 */
void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
        struct cpufreq_cooling_device *cpufreq_dev = NULL, *pos;
        unsigned int cpufreq_dev_count = 0;

        mutex_lock(&cooling_cpufreq_lock);
        /* count every registered device so the "last device" test below is reliable */
        list_for_each_entry(pos, &cooling_cpufreq_list, node) {
                if (pos->cool_dev == cdev)
                        cpufreq_dev = pos;
                cpufreq_dev_count++;
        }

        if (!cpufreq_dev) {
                mutex_unlock(&cooling_cpufreq_lock);
                return;
        }

        list_del(&cpufreq_dev->node);

        /* Unregister the notifier for the last cpufreq cooling device */
        if (cpufreq_dev_count == 1) {
                cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
                                            CPUFREQ_POLICY_NOTIFIER);
        }
        mutex_unlock(&cooling_cpufreq_lock);

        thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
        release_idr(&cpufreq_idr, cpufreq_dev->id);
        if (cpufreq_dev_count == 1)
                mutex_destroy(&cooling_cpufreq_lock);
        kfree(cpufreq_dev);
}
EXPORT_SYMBOL(cpufreq_cooling_unregister);
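
A minimal usage sketch, assuming a hypothetical platform thermal driver: the driver creates the cooling device for CPU0's frequency domain at probe time and releases it on teardown. The names my_cooling_dev, my_thermal_probe and my_thermal_remove are illustrative, not part of this file.

/* Hypothetical caller of the API exported above. */
#include <linux/cpumask.h>
#include <linux/cpu_cooling.h>
#include <linux/err.h>
#include <linux/thermal.h>

static struct thermal_cooling_device *my_cooling_dev;   /* illustrative */

static int my_thermal_probe(void)
{
        struct cpumask clip_cpus;

        /* clip CPU0's frequency domain; the register copies this mask */
        cpumask_clear(&clip_cpus);
        cpumask_set_cpu(0, &clip_cpus);

        my_cooling_dev = cpufreq_cooling_register(&clip_cpus);
        if (IS_ERR(my_cooling_dev))
                return PTR_ERR(my_cooling_dev);

        /* the thermal core can now bind this device to a trip point */
        return 0;
}

static void my_thermal_remove(void)
{
        cpufreq_cooling_unregister(my_cooling_dev);
}

Since cpufreq_cooling_register() returns ERR_PTR() on every failure path, IS_ERR() is the appropriate check; a NULL test would miss the error encodings.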