Linux Kernel 3.7.1
core.c
/*
 * SuperH clock framework
 *
 * Copyright (C) 2005 - 2010 Paul Mundt
 *
 * This clock framework is derived from the OMAP version by:
 *
 *      Copyright (C) 2004 - 2008 Nokia Corporation
 *      Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#define pr_fmt(fmt) "clock: " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/syscore_ops.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/cpufreq.h>
#include <linux/clk.h>
#include <linux/sh_clk.h>

static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);
static DEFINE_MUTEX(clock_list_sem);

/* clock disable operations are not passed on to hardware during boot */
static int allow_disable;

void clk_rate_table_build(struct clk *clk,
                          struct cpufreq_frequency_table *freq_table,
                          int nr_freqs,
                          struct clk_div_mult_table *src_table,
                          unsigned long *bitmap)
{
        unsigned long mult, div;
        unsigned long freq;
        int i;

        clk->nr_freqs = nr_freqs;

        for (i = 0; i < nr_freqs; i++) {
                div = 1;
                mult = 1;

                if (src_table->divisors && i < src_table->nr_divisors)
                        div = src_table->divisors[i];

                if (src_table->multipliers && i < src_table->nr_multipliers)
                        mult = src_table->multipliers[i];

                if (!div || !mult || (bitmap && !test_bit(i, bitmap)))
                        freq = CPUFREQ_ENTRY_INVALID;
                else
                        freq = clk->parent->rate * mult / div;

                freq_table[i].index = i;
                freq_table[i].frequency = freq;
        }

        /* Termination entry */
        freq_table[i].index = i;
        freq_table[i].frequency = CPUFREQ_TABLE_END;
}
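
/*
 * Usage sketch (an editorial illustration, not part of the original file):
 * a CPG driver with a fixed divisor list might build its cpufreq table as
 * below; my_divisors, my_div_table, my_freq_table and my_clk are
 * hypothetical names.  Note the extra slot for the termination entry.
 *
 *      static int my_divisors[] = { 1, 2, 3, 4, 6, 8 };
 *      static struct clk_div_mult_table my_div_table = {
 *              .divisors = my_divisors,
 *              .nr_divisors = ARRAY_SIZE(my_divisors),
 *      };
 *      static struct cpufreq_frequency_table my_freq_table[7];
 *
 *      clk_rate_table_build(&my_clk, my_freq_table, ARRAY_SIZE(my_divisors),
 *                           &my_div_table, NULL);
 */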

struct clk_rate_round_data;

struct clk_rate_round_data {
        unsigned long rate;
        unsigned int min, max;
        long (*func)(unsigned int, struct clk_rate_round_data *);
        void *arg;
};

#define for_each_frequency(pos, r, freq)                        \
        for (pos = r->min, freq = r->func(pos, r);              \
             pos <= r->max; pos++, freq = r->func(pos, r))      \
                if (unlikely(freq == 0))                        \
                        ;                                       \
                else

static long clk_rate_round_helper(struct clk_rate_round_data *rounder)
{
        unsigned long rate_error, rate_error_prev = ~0UL;
        unsigned long highest, lowest, freq;
        long rate_best_fit = -ENOENT;
        int i;

        highest = 0;
        lowest = ~0UL;

        for_each_frequency(i, rounder, freq) {
                if (freq > highest)
                        highest = freq;
                if (freq < lowest)
                        lowest = freq;

                rate_error = abs(freq - rounder->rate);
                if (rate_error < rate_error_prev) {
                        rate_best_fit = freq;
                        rate_error_prev = rate_error;
                }

                if (rate_error == 0)
                        break;
        }

        if (rounder->rate >= highest)
                rate_best_fit = highest;
        if (rounder->rate <= lowest)
                rate_best_fit = lowest;

        return rate_best_fit;
}
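
/*
 * Worked example of the best-fit search above (numbers are illustrative):
 * with candidate rates of 33, 66 and 133 MHz and a request of 100 MHz, the
 * error terms are 67, 34 and 33 MHz, so 133 MHz is returned.  Requests
 * outside the candidate range are clamped by the trailing checks, e.g. a
 * 10 MHz request returns the 33 MHz floor.
 */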

static long clk_rate_table_iter(unsigned int pos,
                                struct clk_rate_round_data *rounder)
{
        struct cpufreq_frequency_table *freq_table = rounder->arg;
        unsigned long freq = freq_table[pos].frequency;

        if (freq == CPUFREQ_ENTRY_INVALID)
                freq = 0;

        return freq;
}

long clk_rate_table_round(struct clk *clk,
                          struct cpufreq_frequency_table *freq_table,
                          unsigned long rate)
{
        struct clk_rate_round_data table_round = {
                .min = 0,
                .max = clk->nr_freqs - 1,
                .func = clk_rate_table_iter,
                .arg = freq_table,
                .rate = rate,
        };

        if (clk->nr_freqs < 1)
                return -ENOSYS;

        return clk_rate_round_helper(&table_round);
}
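
/*
 * Sketch (hypothetical driver code, not from this file): a clock whose
 * rates come from a pre-built frequency table can implement its
 * .round_rate op as a thin wrapper around the helper above:
 *
 *      static long my_clk_round_rate(struct clk *clk, unsigned long rate)
 *      {
 *              return clk_rate_table_round(clk, clk->freq_table, rate);
 *      }
 */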

static long clk_rate_div_range_iter(unsigned int pos,
                                    struct clk_rate_round_data *rounder)
{
        return clk_get_rate(rounder->arg) / pos;
}

long clk_rate_div_range_round(struct clk *clk, unsigned int div_min,
                              unsigned int div_max, unsigned long rate)
{
        struct clk_rate_round_data div_range_round = {
                .min = div_min,
                .max = div_max,
                .func = clk_rate_div_range_iter,
                .arg = clk_get_parent(clk),
                .rate = rate,
        };

        return clk_rate_round_helper(&div_range_round);
}

static long clk_rate_mult_range_iter(unsigned int pos,
                                     struct clk_rate_round_data *rounder)
{
        return clk_get_rate(rounder->arg) * pos;
}

long clk_rate_mult_range_round(struct clk *clk, unsigned int mult_min,
                               unsigned int mult_max, unsigned long rate)
{
        struct clk_rate_round_data mult_range_round = {
                .min = mult_min,
                .max = mult_max,
                .func = clk_rate_mult_range_iter,
                .arg = clk_get_parent(clk),
                .rate = rate,
        };

        return clk_rate_round_helper(&mult_range_round);
}
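
/*
 * Sketch (hypothetical): for a clock derived from its parent by an integer
 * divisor in [1, 64], the range helper above is a ready-made .round_rate
 * implementation; clk_rate_mult_range_round() plays the same role for
 * multiplied clocks.
 *
 *      static long my_div_round_rate(struct clk *clk, unsigned long rate)
 *      {
 *              return clk_rate_div_range_round(clk, 1, 64, rate);
 *      }
 */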

int clk_rate_table_find(struct clk *clk,
                        struct cpufreq_frequency_table *freq_table,
                        unsigned long rate)
{
        int i;

        for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
                unsigned long freq = freq_table[i].frequency;

                if (freq == CPUFREQ_ENTRY_INVALID)
                        continue;

                if (freq == rate)
                        return i;
        }

        return -ENOENT;
}
213 
214 /* Used for clocks that always have same value as the parent clock */
215 unsigned long followparent_recalc(struct clk *clk)
216 {
217  return clk->parent ? clk->parent->rate : 0;
218 }
219 
220 int clk_reparent(struct clk *child, struct clk *parent)
221 {
222  list_del_init(&child->sibling);
223  if (parent)
224  list_add(&child->sibling, &parent->children);
225  child->parent = parent;
226 
227  return 0;
228 }
229 
230 /* Propagate rate to children */
231 void propagate_rate(struct clk *tclk)
232 {
233  struct clk *clkp;
234 
235  list_for_each_entry(clkp, &tclk->children, sibling) {
236  if (clkp->ops && clkp->ops->recalc)
237  clkp->rate = clkp->ops->recalc(clkp);
238 
239  propagate_rate(clkp);
240  }
241 }
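
/*
 * Example of the propagation above (values are illustrative): if a PLL is
 * reprogrammed from 400 MHz to 800 MHz, propagate_rate() visits each child;
 * a child divider with a fixed /4 ratio and a .recalc op is updated from
 * 100 MHz to 200 MHz, and its own children are then revisited recursively.
 */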

static void __clk_disable(struct clk *clk)
{
        if (WARN(!clk->usecount, "Trying to disable clock %p with 0 usecount\n",
                 clk))
                return;

        if (!(--clk->usecount)) {
                if (likely(allow_disable && clk->ops && clk->ops->disable))
                        clk->ops->disable(clk);
                if (likely(clk->parent))
                        __clk_disable(clk->parent);
        }
}

void clk_disable(struct clk *clk)
{
        unsigned long flags;

        if (!clk)
                return;

        spin_lock_irqsave(&clock_lock, flags);
        __clk_disable(clk);
        spin_unlock_irqrestore(&clock_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int __clk_enable(struct clk *clk)
{
        int ret = 0;

        if (clk->usecount++ == 0) {
                if (clk->parent) {
                        ret = __clk_enable(clk->parent);
                        if (unlikely(ret))
                                goto err;
                }

                if (clk->ops && clk->ops->enable) {
                        ret = clk->ops->enable(clk);
                        if (ret) {
                                if (clk->parent)
                                        __clk_disable(clk->parent);
                                goto err;
                        }
                }
        }

        return ret;
err:
        clk->usecount--;
        return ret;
}

int clk_enable(struct clk *clk)
{
        unsigned long flags;
        int ret;

        if (!clk)
                return -EINVAL;

        spin_lock_irqsave(&clock_lock, flags);
        ret = __clk_enable(clk);
        spin_unlock_irqrestore(&clock_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);

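/*
 * Typical consumer pattern (a sketch using the generic clk API from
 * <linux/clk.h>; the "sci_fck" clock id is hypothetical):
 *
 *      struct clk *clk = clk_get(dev, "sci_fck");
 *      if (!IS_ERR(clk)) {
 *              clk_enable(clk);        // parents are enabled first
 *              ...
 *              clk_disable(clk);       // gated once usecount drops to 0
 *              clk_put(clk);
 *      }
 */
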
static LIST_HEAD(root_clks);

/**
 * recalculate_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent), which if the
 * clock's .recalc is set correctly, should also propagate their rates.
 * Called at init.
 */
void recalculate_root_clocks(void)
{
        struct clk *clkp;

        list_for_each_entry(clkp, &root_clks, sibling) {
                if (clkp->ops && clkp->ops->recalc)
                        clkp->rate = clkp->ops->recalc(clkp);
                propagate_rate(clkp);
        }
}

static struct clk_mapping dummy_mapping;

static struct clk *lookup_root_clock(struct clk *clk)
{
        while (clk->parent)
                clk = clk->parent;

        return clk;
}

static int clk_establish_mapping(struct clk *clk)
{
        struct clk_mapping *mapping = clk->mapping;

        /*
         * Propagate mappings.
         */
        if (!mapping) {
                struct clk *clkp;

                /*
                 * dummy mapping for root clocks with no specified ranges
                 */
                if (!clk->parent) {
                        clk->mapping = &dummy_mapping;
                        goto out;
                }

                /*
                 * If we're on a child clock and it provides no mapping of its
                 * own, inherit the mapping from its root clock.
                 */
                clkp = lookup_root_clock(clk);
                mapping = clkp->mapping;
                BUG_ON(!mapping);
        }

        /*
         * Establish initial mapping.
         */
        if (!mapping->base && mapping->phys) {
                kref_init(&mapping->ref);

                mapping->base = ioremap_nocache(mapping->phys, mapping->len);
                if (unlikely(!mapping->base))
                        return -ENXIO;
        } else if (mapping->base) {
                /*
                 * Bump the refcount for an existing mapping
                 */
                kref_get(&mapping->ref);
        }

        clk->mapping = mapping;
out:
        clk->mapped_reg = clk->mapping->base;
        clk->mapped_reg += (phys_addr_t)clk->enable_reg - clk->mapping->phys;
        return 0;
}
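
/*
 * Mapping sketch (hypothetical addresses): a platform can describe its CPG
 * register window once and share it; as implemented above, child clocks
 * without a mapping of their own inherit the root clock's.
 *
 *      static struct clk_mapping cpg_mapping = {
 *              .phys = 0xffc80000,
 *              .len  = 0x1000,
 *      };
 *
 *      static struct clk root_clk = {
 *              .mapping = &cpg_mapping,
 *      };
 */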

static void clk_destroy_mapping(struct kref *kref)
{
        struct clk_mapping *mapping;

        mapping = container_of(kref, struct clk_mapping, ref);

        iounmap(mapping->base);
}

static void clk_teardown_mapping(struct clk *clk)
{
        struct clk_mapping *mapping = clk->mapping;

        /* Nothing to do */
        if (mapping == &dummy_mapping)
                goto out;

        kref_put(&mapping->ref, clk_destroy_mapping);
        clk->mapping = NULL;
out:
        clk->mapped_reg = NULL;
}

int clk_register(struct clk *clk)
{
        int ret;

        if (IS_ERR_OR_NULL(clk))
                return -EINVAL;

        /*
         * trap out already registered clocks
         */
        if (clk->node.next || clk->node.prev)
                return 0;

        mutex_lock(&clock_list_sem);

        INIT_LIST_HEAD(&clk->children);
        clk->usecount = 0;

        ret = clk_establish_mapping(clk);
        if (unlikely(ret))
                goto out_unlock;

        if (clk->parent)
                list_add(&clk->sibling, &clk->parent->children);
        else
                list_add(&clk->sibling, &root_clks);

        list_add(&clk->node, &clock_list);

#ifdef CONFIG_SH_CLK_CPG_LEGACY
        if (clk->ops && clk->ops->init)
                clk->ops->init(clk);
#endif

out_unlock:
        mutex_unlock(&clock_list_sem);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_register);

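/*
 * Registration sketch (illustrative only; the rate and array are made up):
 *
 *      static struct clk extal_clk = {
 *              .rate = 33333333,
 *      };
 *      static struct clk *clks[] = { &extal_clk };
 *      int i, ret = 0;
 *
 *      for (i = 0; i < ARRAY_SIZE(clks); i++)
 *              ret |= clk_register(clks[i]);
 */
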
void clk_unregister(struct clk *clk)
{
        mutex_lock(&clock_list_sem);
        list_del(&clk->sibling);
        list_del(&clk->node);
        clk_teardown_mapping(clk);
        mutex_unlock(&clock_list_sem);
}
EXPORT_SYMBOL_GPL(clk_unregister);

void clk_enable_init_clocks(void)
{
        struct clk *clkp;

        list_for_each_entry(clkp, &clock_list, node)
                if (clkp->flags & CLK_ENABLE_ON_INIT)
                        clk_enable(clkp);
}

unsigned long clk_get_rate(struct clk *clk)
{
        return clk->rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
        int ret = -EOPNOTSUPP;
        unsigned long flags;

        spin_lock_irqsave(&clock_lock, flags);

        if (likely(clk->ops && clk->ops->set_rate)) {
                ret = clk->ops->set_rate(clk, rate);
                if (ret != 0)
                        goto out_unlock;
        } else {
                clk->rate = rate;
                ret = 0;
        }

        if (clk->ops && clk->ops->recalc)
                clk->rate = clk->ops->recalc(clk);

        propagate_rate(clk);

out_unlock:
        spin_unlock_irqrestore(&clock_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
        unsigned long flags;
        int ret = -EINVAL;

        if (!parent || !clk)
                return ret;
        if (clk->parent == parent)
                return 0;

        spin_lock_irqsave(&clock_lock, flags);
        if (clk->usecount == 0) {
                if (clk->ops->set_parent)
                        ret = clk->ops->set_parent(clk, parent);
                else
                        ret = clk_reparent(clk, parent);

                if (ret == 0) {
                        if (clk->ops->recalc)
                                clk->rate = clk->ops->recalc(clk);
                        pr_debug("set parent of %p to %p (new rate %ld)\n",
                                 clk, clk->parent, clk->rate);
                        propagate_rate(clk);
                }
        } else
                ret = -EBUSY;
        spin_unlock_irqrestore(&clock_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);

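/*
 * Note on the -EBUSY path above: reparenting is only permitted while the
 * clock's usecount is zero, so a hypothetical caller switching parents
 * would disable the clock around the call:
 *
 *      clk_disable(clk);
 *      ret = clk_set_parent(clk, &pll2_clk);   // pll2_clk is illustrative
 *      clk_enable(clk);
 */
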
struct clk *clk_get_parent(struct clk *clk)
{
        return clk->parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
        if (likely(clk->ops && clk->ops->round_rate)) {
                unsigned long flags, rounded;

                spin_lock_irqsave(&clock_lock, flags);
                rounded = clk->ops->round_rate(clk, rate);
                spin_unlock_irqrestore(&clock_lock, flags);

                return rounded;
        }

        return clk_get_rate(clk);
}
EXPORT_SYMBOL_GPL(clk_round_rate);

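/*
 * Round-then-set sketch: clk_set_rate() hands the request straight to the
 * .set_rate op, so callers usually round first to a rate the hardware can
 * produce (the 48 MHz target is an arbitrary example):
 *
 *      long rounded = clk_round_rate(clk, 48000000);
 *      if (rounded > 0)
 *              ret = clk_set_rate(clk, rounded);
 */
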
long clk_round_parent(struct clk *clk, unsigned long target,
                      unsigned long *best_freq, unsigned long *parent_freq,
                      unsigned int div_min, unsigned int div_max)
{
        struct cpufreq_frequency_table *freq, *best = NULL;
        unsigned long error = ULONG_MAX, freq_high, freq_low, div;
        struct clk *parent = clk_get_parent(clk);

        if (!parent) {
                *parent_freq = 0;
                *best_freq = clk_round_rate(clk, target);
                return abs(target - *best_freq);
        }

        for (freq = parent->freq_table; freq->frequency != CPUFREQ_TABLE_END;
             freq++) {
                if (freq->frequency == CPUFREQ_ENTRY_INVALID)
                        continue;

                if (unlikely(freq->frequency / target <= div_min - 1)) {
                        unsigned long freq_max;

                        freq_max = (freq->frequency + div_min / 2) / div_min;
                        if (error > target - freq_max) {
                                error = target - freq_max;
                                best = freq;
                                if (best_freq)
                                        *best_freq = freq_max;
                        }

                        pr_debug("too low freq %u, error %lu\n", freq->frequency,
                                 target - freq_max);

                        if (!error)
                                break;

                        continue;
                }

                if (unlikely(freq->frequency / target >= div_max)) {
                        unsigned long freq_min;

                        freq_min = (freq->frequency + div_max / 2) / div_max;
                        if (error > freq_min - target) {
                                error = freq_min - target;
                                best = freq;
                                if (best_freq)
                                        *best_freq = freq_min;
                        }

                        pr_debug("too high freq %u, error %lu\n", freq->frequency,
                                 freq_min - target);

                        if (!error)
                                break;

                        continue;
                }

                div = freq->frequency / target;
                freq_high = freq->frequency / div;
                freq_low = freq->frequency / (div + 1);

                if (freq_high - target < error) {
                        error = freq_high - target;
                        best = freq;
                        if (best_freq)
                                *best_freq = freq_high;
                }

                if (target - freq_low < error) {
                        error = target - freq_low;
                        best = freq;
                        if (best_freq)
                                *best_freq = freq_low;
                }

                pr_debug("%u / %lu = %lu, / %lu = %lu, best %lu, parent %u\n",
                         freq->frequency, div, freq_high, div + 1, freq_low,
                         *best_freq, best->frequency);

                if (!error)
                        break;
        }

        if (parent_freq)
                *parent_freq = best->frequency;

        return error;
}
EXPORT_SYMBOL_GPL(clk_round_parent);

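/*
 * Worked example for clk_round_parent() (numbers are illustrative): for a
 * 400 MHz parent table entry, a 90 MHz target and a div range of [1, 8],
 * div computes to 400 / 90 = 4, giving candidates 400/4 = 100 MHz and
 * 400/5 = 80 MHz.  Both miss by 10 MHz; the strict comparisons above keep
 * the first candidate, so *best_freq becomes 100 MHz.
 */
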
#ifdef CONFIG_PM
static void clks_core_resume(void)
{
        struct clk *clkp;

        list_for_each_entry(clkp, &clock_list, node) {
                if (likely(clkp->usecount && clkp->ops)) {
                        unsigned long rate = clkp->rate;

                        if (likely(clkp->ops->set_parent))
                                clkp->ops->set_parent(clkp, clkp->parent);
                        if (likely(clkp->ops->set_rate))
                                clkp->ops->set_rate(clkp, rate);
                        else if (likely(clkp->ops->recalc))
                                clkp->rate = clkp->ops->recalc(clkp);
                }
        }
}

static struct syscore_ops clks_syscore_ops = {
        .resume = clks_core_resume,
};

static int __init clk_syscore_init(void)
{
        register_syscore_ops(&clks_syscore_ops);

        return 0;
}
subsys_initcall(clk_syscore_init);
#endif

static int __init clk_late_init(void)
{
        unsigned long flags;
        struct clk *clk;

        /* disable all clocks with zero use count */
        mutex_lock(&clock_list_sem);
        spin_lock_irqsave(&clock_lock, flags);

        list_for_each_entry(clk, &clock_list, node)
                if (!clk->usecount && clk->ops && clk->ops->disable)
                        clk->ops->disable(clk);

        /* from now on allow clock disable operations */
        allow_disable = 1;

        spin_unlock_irqrestore(&clock_lock, flags);
        mutex_unlock(&clock_list_sem);
        return 0;
}
late_initcall(clk_late_init);