Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
clock.c
Go to the documentation of this file.
1 /*
2  * Clock and PLL control for DaVinci devices
3  *
4  * Copyright (C) 2006-2007 Texas Instruments.
5  * Copyright (C) 2008-2009 Deep Root Systems, LLC
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  */
12 
13 #include <linux/module.h>
14 #include <linux/kernel.h>
15 #include <linux/init.h>
16 #include <linux/errno.h>
17 #include <linux/clk.h>
18 #include <linux/err.h>
19 #include <linux/mutex.h>
20 #include <linux/io.h>
21 #include <linux/delay.h>
22 
23 #include <mach/hardware.h>
24 
25 #include <mach/clock.h>
26 #include <mach/psc.h>
27 #include <mach/cputype.h>
28 #include "clock.h"
29 
/* All registered clocks, linked via clk->node */
static LIST_HEAD(clocks);
/* Protects the clocks list and the parent/child tree topology */
static DEFINE_MUTEX(clocks_mutex);
/* Protects usecounts and PSC/PLL hardware accesses; taken IRQ-safe */
static DEFINE_SPINLOCK(clockfw_lock);
33 
34 static void __clk_enable(struct clk *clk)
35 {
36  if (clk->parent)
37  __clk_enable(clk->parent);
38  if (clk->usecount++ == 0 && (clk->flags & CLK_PSC))
39  davinci_psc_config(clk->domain, clk->gpsc, clk->lpsc,
40  true, clk->flags);
41 }
42 
43 static void __clk_disable(struct clk *clk)
44 {
45  if (WARN_ON(clk->usecount == 0))
46  return;
47  if (--clk->usecount == 0 && !(clk->flags & CLK_PLL) &&
48  (clk->flags & CLK_PSC))
49  davinci_psc_config(clk->domain, clk->gpsc, clk->lpsc,
50  false, clk->flags);
51  if (clk->parent)
52  __clk_disable(clk->parent);
53 }
54 
55 int clk_enable(struct clk *clk)
56 {
57  unsigned long flags;
58 
59  if (clk == NULL || IS_ERR(clk))
60  return -EINVAL;
61 
62  spin_lock_irqsave(&clockfw_lock, flags);
63  __clk_enable(clk);
64  spin_unlock_irqrestore(&clockfw_lock, flags);
65 
66  return 0;
67 }
69 
70 void clk_disable(struct clk *clk)
71 {
72  unsigned long flags;
73 
74  if (clk == NULL || IS_ERR(clk))
75  return;
76 
77  spin_lock_irqsave(&clockfw_lock, flags);
78  __clk_disable(clk);
79  spin_unlock_irqrestore(&clockfw_lock, flags);
80 }
82 
83 unsigned long clk_get_rate(struct clk *clk)
84 {
85  if (clk == NULL || IS_ERR(clk))
86  return -EINVAL;
87 
88  return clk->rate;
89 }
91 
92 long clk_round_rate(struct clk *clk, unsigned long rate)
93 {
94  if (clk == NULL || IS_ERR(clk))
95  return -EINVAL;
96 
97  if (clk->round_rate)
98  return clk->round_rate(clk, rate);
99 
100  return clk->rate;
101 }
103 
104 /* Propagate rate to children */
105 static void propagate_rate(struct clk *root)
106 {
107  struct clk *clk;
108 
109  list_for_each_entry(clk, &root->children, childnode) {
110  if (clk->recalc)
111  clk->rate = clk->recalc(clk);
112  propagate_rate(clk);
113  }
114 }
115 
116 int clk_set_rate(struct clk *clk, unsigned long rate)
117 {
118  unsigned long flags;
119  int ret = -EINVAL;
120 
121  if (clk == NULL || IS_ERR(clk))
122  return ret;
123 
124  if (clk->set_rate)
125  ret = clk->set_rate(clk, rate);
126 
127  spin_lock_irqsave(&clockfw_lock, flags);
128  if (ret == 0) {
129  if (clk->recalc)
130  clk->rate = clk->recalc(clk);
131  propagate_rate(clk);
132  }
133  spin_unlock_irqrestore(&clockfw_lock, flags);
134 
135  return ret;
136 }
138 
139 int clk_set_parent(struct clk *clk, struct clk *parent)
140 {
141  unsigned long flags;
142 
143  if (clk == NULL || IS_ERR(clk))
144  return -EINVAL;
145 
146  /* Cannot change parent on enabled clock */
147  if (WARN_ON(clk->usecount))
148  return -EINVAL;
149 
150  mutex_lock(&clocks_mutex);
151  clk->parent = parent;
152  list_del_init(&clk->childnode);
153  list_add(&clk->childnode, &clk->parent->children);
154  mutex_unlock(&clocks_mutex);
155 
156  spin_lock_irqsave(&clockfw_lock, flags);
157  if (clk->recalc)
158  clk->rate = clk->recalc(clk);
159  propagate_rate(clk);
160  spin_unlock_irqrestore(&clockfw_lock, flags);
161 
162  return 0;
163 }
165 
166 int clk_register(struct clk *clk)
167 {
168  if (clk == NULL || IS_ERR(clk))
169  return -EINVAL;
170 
171  if (WARN(clk->parent && !clk->parent->rate,
172  "CLK: %s parent %s has no rate!\n",
173  clk->name, clk->parent->name))
174  return -EINVAL;
175 
176  INIT_LIST_HEAD(&clk->children);
177 
178  mutex_lock(&clocks_mutex);
179  list_add_tail(&clk->node, &clocks);
180  if (clk->parent)
181  list_add_tail(&clk->childnode, &clk->parent->children);
182  mutex_unlock(&clocks_mutex);
183 
184  /* If rate is already set, use it */
185  if (clk->rate)
186  return 0;
187 
188  /* Else, see if there is a way to calculate it */
189  if (clk->recalc)
190  clk->rate = clk->recalc(clk);
191 
192  /* Otherwise, default to parent rate */
193  else if (clk->parent)
194  clk->rate = clk->parent->rate;
195 
196  return 0;
197 }
199 
200 void clk_unregister(struct clk *clk)
201 {
202  if (clk == NULL || IS_ERR(clk))
203  return;
204 
205  mutex_lock(&clocks_mutex);
206  list_del(&clk->node);
207  list_del(&clk->childnode);
208  mutex_unlock(&clocks_mutex);
209 }
211 
212 #ifdef CONFIG_DAVINCI_RESET_CLOCKS
213 /*
214  * Disable any unused clocks left on by the bootloader
215  */
216 int __init davinci_clk_disable_unused(void)
217 {
218  struct clk *ck;
219 
220  spin_lock_irq(&clockfw_lock);
221  list_for_each_entry(ck, &clocks, node) {
222  if (ck->usecount > 0)
223  continue;
224  if (!(ck->flags & CLK_PSC))
225  continue;
226 
227  /* ignore if in Disabled or SwRstDisable states */
228  if (!davinci_psc_is_clk_active(ck->gpsc, ck->lpsc))
229  continue;
230 
231  pr_debug("Clocks: disable unused %s\n", ck->name);
232 
233  davinci_psc_config(ck->domain, ck->gpsc, ck->lpsc,
234  false, ck->flags);
235  }
236  spin_unlock_irq(&clockfw_lock);
237 
238  return 0;
239 }
240 #endif
241 
242 static unsigned long clk_sysclk_recalc(struct clk *clk)
243 {
244  u32 v, plldiv;
245  struct pll_data *pll;
246  unsigned long rate = clk->rate;
247 
248  /* If this is the PLL base clock, no more calculations needed */
249  if (clk->pll_data)
250  return rate;
251 
252  if (WARN_ON(!clk->parent))
253  return rate;
254 
255  rate = clk->parent->rate;
256 
257  /* Otherwise, the parent must be a PLL */
258  if (WARN_ON(!clk->parent->pll_data))
259  return rate;
260 
261  pll = clk->parent->pll_data;
262 
263  /* If pre-PLL, source clock is before the multiplier and divider(s) */
264  if (clk->flags & PRE_PLL)
265  rate = pll->input_rate;
266 
267  if (!clk->div_reg)
268  return rate;
269 
270  v = __raw_readl(pll->base + clk->div_reg);
271  if (v & PLLDIV_EN) {
272  plldiv = (v & pll->div_ratio_mask) + 1;
273  if (plldiv)
274  rate /= plldiv;
275  }
276 
277  return rate;
278 }
279 
/*
 * davinci_set_sysclk_rate - program the divider of a PLL-derived sysclk
 * @clk: the sysclk to change (must have a divider and a PLL parent)
 * @rate: requested output rate in Hz
 *
 * Picks a divider ratio for the PLL output (or the PLL input for
 * PRE_PLL clocks): the closest ratio when the result stays within
 * clk->maxrate, otherwise rounding up so the output never exceeds the
 * request.  The new ratio is committed through the PLL's GO protocol.
 * Returns 0 on success or -EINVAL.
 */
int davinci_set_sysclk_rate(struct clk *clk, unsigned long rate)
{
	unsigned v;
	struct pll_data *pll;
	unsigned long input;
	unsigned ratio = 0;

	/* If this is the PLL base clock, wrong function to call */
	if (clk->pll_data)
		return -EINVAL;

	/* There must be a parent... */
	if (WARN_ON(!clk->parent))
		return -EINVAL;

	/* ... the parent must be a PLL... */
	if (WARN_ON(!clk->parent->pll_data))
		return -EINVAL;

	/* ... and this clock must have a divider. */
	if (WARN_ON(!clk->div_reg))
		return -EINVAL;

	pll = clk->parent->pll_data;

	input = clk->parent->rate;

	/* If pre-PLL, source clock is before the multiplier and divider(s) */
	if (clk->flags & PRE_PLL)
		input = pll->input_rate;

	/* If input <= rate, ratio stays 0 => divide-by-one below. */
	if (input > rate) {
		/*
		 * Can afford to provide an output little higher than requested
		 * only if maximum rate supported by hardware on this sysclk
		 * is known.
		 */
		if (clk->maxrate) {
			ratio = DIV_ROUND_CLOSEST(input, rate);
			if (input / ratio > clk->maxrate)
				ratio = 0;
		}

		if (ratio == 0)
			ratio = DIV_ROUND_UP(input, rate);

		/* The register field encodes (divider - 1). */
		ratio--;
	}

	if (ratio > pll->div_ratio_mask)
		return -EINVAL;

	/* Wait for any in-progress GO operation to complete. */
	do {
		v = __raw_readl(pll->base + PLLSTAT);
	} while (v & PLLSTAT_GOSTAT);

	/* Program the new ratio into this clock's divider register. */
	v = __raw_readl(pll->base + clk->div_reg);
	v &= ~pll->div_ratio_mask;
	v |= ratio | PLLDIV_EN;
	__raw_writel(v, pll->base + clk->div_reg);

	/* Kick off the divider change with the GOSET command bit. */
	v = __raw_readl(pll->base + PLLCMD);
	v |= PLLCMD_GOSET;
	__raw_writel(v, pll->base + PLLCMD);

	/* Wait for the new divider to take effect. */
	do {
		v = __raw_readl(pll->base + PLLSTAT);
	} while (v & PLLSTAT_GOSTAT);

	return 0;
}
352 
353 static unsigned long clk_leafclk_recalc(struct clk *clk)
354 {
355  if (WARN_ON(!clk->parent))
356  return clk->rate;
357 
358  return clk->parent->rate;
359 }
360 
361 int davinci_simple_set_rate(struct clk *clk, unsigned long rate)
362 {
363  clk->rate = rate;
364  return 0;
365 }
366 
/*
 * Recalculate the output rate of a PLL base clock from the PLLCTL,
 * PLLM, PREDIV and POSTDIV registers.  Also caches the PLL input rate
 * (the parent's rate) in pll->input_rate for use by sysclk recalc.
 */
static unsigned long clk_pllclk_recalc(struct clk *clk)
{
	u32 ctrl, mult = 1, prediv = 1, postdiv = 1;
	u8 bypass;
	struct pll_data *pll = clk->pll_data;
	unsigned long rate = clk->rate;

	ctrl = __raw_readl(pll->base + PLLCTL);
	rate = pll->input_rate = clk->parent->rate;

	if (ctrl & PLLCTL_PLLEN) {
		bypass = 0;
		mult = __raw_readl(pll->base + PLLM);
		/* DM365 doubles the programmed multiplier value. */
		if (cpu_is_davinci_dm365())
			mult = 2 * (mult & PLLM_PLLM_MASK);
		else
			mult = (mult & PLLM_PLLM_MASK) + 1;
	} else
		bypass = 1;

	if (pll->flags & PLL_HAS_PREDIV) {
		prediv = __raw_readl(pll->base + PREDIV);
		/* Divider fields encode (ratio - 1) when enabled. */
		if (prediv & PLLDIV_EN)
			prediv = (prediv & pll->div_ratio_mask) + 1;
		else
			prediv = 1;
	}

	/* pre-divider is fixed, but (some?) chips won't report that */
	if (cpu_is_davinci_dm355() && pll->num == 1)
		prediv = 8;

	if (pll->flags & PLL_HAS_POSTDIV) {
		postdiv = __raw_readl(pll->base + POSTDIV);
		if (postdiv & PLLDIV_EN)
			postdiv = (postdiv & pll->div_ratio_mask) + 1;
		else
			postdiv = 1;
	}

	/* In bypass mode the input rate passes through unmodified. */
	if (!bypass) {
		rate /= prediv;
		rate *= mult;
		rate /= postdiv;
	}

	pr_debug("PLL%d: input = %lu MHz [ ",
		 pll->num, clk->parent->rate / 1000000);
	if (bypass)
		pr_debug("bypass ");
	if (prediv > 1)
		pr_debug("/ %d ", prediv);
	if (mult > 1)
		pr_debug("* %d ", mult);
	if (postdiv > 1)
		pr_debug("/ %d ", postdiv);
	pr_debug("] --> %lu MHz output.\n", rate / 1000000);

	return rate;
}
427 
438 int davinci_set_pllrate(struct pll_data *pll, unsigned int prediv,
439  unsigned int mult, unsigned int postdiv)
440 {
441  u32 ctrl;
442  unsigned int locktime;
443  unsigned long flags;
444 
445  if (pll->base == NULL)
446  return -EINVAL;
447 
448  /*
449  * PLL lock time required per OMAP-L138 datasheet is
450  * (2000 * prediv)/sqrt(pllm) OSCIN cycles. We approximate sqrt(pllm)
451  * as 4 and OSCIN cycle as 25 MHz.
452  */
453  if (prediv) {
454  locktime = ((2000 * prediv) / 100);
455  prediv = (prediv - 1) | PLLDIV_EN;
456  } else {
457  locktime = PLL_LOCK_TIME;
458  }
459  if (postdiv)
460  postdiv = (postdiv - 1) | PLLDIV_EN;
461  if (mult)
462  mult = mult - 1;
463 
464  /* Protect against simultaneous calls to PLL setting seqeunce */
465  spin_lock_irqsave(&clockfw_lock, flags);
466 
467  ctrl = __raw_readl(pll->base + PLLCTL);
468 
469  /* Switch the PLL to bypass mode */
470  ctrl &= ~(PLLCTL_PLLENSRC | PLLCTL_PLLEN);
471  __raw_writel(ctrl, pll->base + PLLCTL);
472 
474 
475  /* Reset and enable PLL */
476  ctrl &= ~(PLLCTL_PLLRST | PLLCTL_PLLDIS);
477  __raw_writel(ctrl, pll->base + PLLCTL);
478 
479  if (pll->flags & PLL_HAS_PREDIV)
480  __raw_writel(prediv, pll->base + PREDIV);
481 
482  __raw_writel(mult, pll->base + PLLM);
483 
484  if (pll->flags & PLL_HAS_POSTDIV)
485  __raw_writel(postdiv, pll->base + POSTDIV);
486 
488 
489  /* Bring PLL out of reset */
490  ctrl |= PLLCTL_PLLRST;
491  __raw_writel(ctrl, pll->base + PLLCTL);
492 
493  udelay(locktime);
494 
495  /* Remove PLL from bypass mode */
496  ctrl |= PLLCTL_PLLEN;
497  __raw_writel(ctrl, pll->base + PLLCTL);
498 
499  spin_unlock_irqrestore(&clockfw_lock, flags);
500 
501  return 0;
502 }
504 
520 int davinci_set_refclk_rate(unsigned long rate)
521 {
522  struct clk *refclk;
523 
524  refclk = clk_get(NULL, "ref");
525  if (IS_ERR(refclk)) {
526  pr_err("%s: failed to get reference clock.\n", __func__);
527  return PTR_ERR(refclk);
528  }
529 
530  clk_set_rate(refclk, rate);
531 
532  clk_put(refclk);
533 
534  return 0;
535 }
536 
537 int __init davinci_clk_init(struct clk_lookup *clocks)
538  {
539  struct clk_lookup *c;
540  struct clk *clk;
541  size_t num_clocks = 0;
542 
543  for (c = clocks; c->clk; c++) {
544  clk = c->clk;
545 
546  if (!clk->recalc) {
547 
548  /* Check if clock is a PLL */
549  if (clk->pll_data)
550  clk->recalc = clk_pllclk_recalc;
551 
552  /* Else, if it is a PLL-derived clock */
553  else if (clk->flags & CLK_PLL)
554  clk->recalc = clk_sysclk_recalc;
555 
556  /* Otherwise, it is a leaf clock (PSC clock) */
557  else if (clk->parent)
558  clk->recalc = clk_leafclk_recalc;
559  }
560 
561  if (clk->pll_data) {
562  struct pll_data *pll = clk->pll_data;
563 
564  if (!pll->div_ratio_mask)
566 
567  if (pll->phys_base && !pll->base) {
568  pll->base = ioremap(pll->phys_base, SZ_4K);
569  WARN_ON(!pll->base);
570  }
571  }
572 
573  if (clk->recalc)
574  clk->rate = clk->recalc(clk);
575 
576  if (clk->lpsc)
577  clk->flags |= CLK_PSC;
578 
579  clk_register(clk);
580  num_clocks++;
581 
582  /* Turn on clocks that Linux doesn't otherwise manage */
583  if (clk->flags & ALWAYS_ENABLED)
584  clk_enable(clk);
585  }
586 
587  clkdev_add_table(clocks, num_clocks);
588 
589  return 0;
590 }
591 
592 #ifdef CONFIG_DEBUG_FS
593 
594 #include <linux/debugfs.h>
595 #include <linux/seq_file.h>
596 
597 #define CLKNAME_MAX 10 /* longest clock name */
598 #define NEST_DELTA 2
599 #define NEST_MAX 4
600 
/*
 * Print one clock and, recursively, all of its children into the seq
 * file, indenting NEST_DELTA columns per tree level.
 *
 * Caller must hold clocks_mutex so the child lists are stable.
 */
static void
dump_clock(struct seq_file *s, unsigned nest, struct clk *parent)
{
	char *state;
	char buf[CLKNAME_MAX + NEST_DELTA * NEST_MAX];
	struct clk *clk;
	unsigned i;

	/* Tag the clock with the hardware block that controls it. */
	if (parent->flags & CLK_PLL)
		state = "pll";
	else if (parent->flags & CLK_PSC)
		state = "psc";
	else
		state = "";

	/* <nest spaces> name <pad to end> */
	memset(buf, ' ', sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = 0;
	i = strlen(parent->name);
	memcpy(buf + nest, parent->name,
	       min(i, (unsigned)(sizeof(buf) - 1 - nest)));

	seq_printf(s, "%s users=%2d %-3s %9ld Hz\n",
		   buf, parent->usecount, state, clk_get_rate(parent));
	/* REVISIT show device associations too */

	/* cost is now small, but not linear... */
	list_for_each_entry(clk, &parent->children, childnode) {
		dump_clock(s, nest + NEST_DELTA, clk);
	}
}
632 
633 static int davinci_ck_show(struct seq_file *m, void *v)
634 {
635  struct clk *clk;
636 
637  /*
638  * Show clock tree; We trust nonzero usecounts equate to PSC enables...
639  */
640  mutex_lock(&clocks_mutex);
641  list_for_each_entry(clk, &clocks, node)
642  if (!clk->parent)
643  dump_clock(m, 0, clk);
644  mutex_unlock(&clocks_mutex);
645 
646  return 0;
647 }
648 
649 static int davinci_ck_open(struct inode *inode, struct file *file)
650 {
651  return single_open(file, davinci_ck_show, NULL);
652 }
653 
/* File operations for the "davinci_clocks" debugfs node. */
static const struct file_operations davinci_ck_operations = {
	.open = davinci_ck_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
660 
661 static int __init davinci_clk_debugfs_init(void)
662 {
663  debugfs_create_file("davinci_clocks", S_IFREG | S_IRUGO, NULL, NULL,
664  &davinci_ck_operations);
665  return 0;
666 
667 }
668 device_initcall(davinci_clk_debugfs_init);
669 #endif /* CONFIG_DEBUG_FS */