Linux Kernel  3.7.1
cpg.c
/*
 * Helper routines for SuperH Clock Pulse Generator blocks (CPG).
 *
 * Copyright (C) 2010 Magnus Damm
 * Copyright (C) 2010 - 2012 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/sh_clk.h>

#define CPG_CKSTP_BIT	BIT(8)

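/*
 * Register access helpers: the CPG enable/divider registers are 8, 16 or
 * 32 bits wide depending on the SoC. The width is encoded in clk->flags
 * (CLK_ENABLE_REG_8BIT / CLK_ENABLE_REG_16BIT), with 32-bit access as the
 * default, and all accesses go through the pre-mapped clk->mapped_reg.
 */
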
static unsigned int sh_clk_read(struct clk *clk)
{
        if (clk->flags & CLK_ENABLE_REG_8BIT)
                return ioread8(clk->mapped_reg);
        else if (clk->flags & CLK_ENABLE_REG_16BIT)
                return ioread16(clk->mapped_reg);

        return ioread32(clk->mapped_reg);
}

static void sh_clk_write(int value, struct clk *clk)
{
        if (clk->flags & CLK_ENABLE_REG_8BIT)
                iowrite8(value, clk->mapped_reg);
        else if (clk->flags & CLK_ENABLE_REG_16BIT)
                iowrite16(value, clk->mapped_reg);
        else
                iowrite32(value, clk->mapped_reg);
}

static int sh_clk_mstp_enable(struct clk *clk)
{
        sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
        return 0;
}

static void sh_clk_mstp_disable(struct clk *clk)
{
        sh_clk_write(sh_clk_read(clk) | (1 << clk->enable_bit), clk);
}

static struct sh_clk_ops sh_clk_mstp_clk_ops = {
        .enable = sh_clk_mstp_enable,
        .disable = sh_clk_mstp_disable,
        .recalc = followparent_recalc,
};

int __init sh_clk_mstp_register(struct clk *clks, int nr)
{
        struct clk *clkp;
        int ret = 0;
        int k;

        for (k = 0; !ret && (k < nr); k++) {
                clkp = clks + k;
                clkp->ops = &sh_clk_mstp_clk_ops;
                ret |= clk_register(clkp);
        }

        return ret;
}

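/*
 * Illustrative sketch, not part of cpg.c: how an SoC clock file typically
 * feeds a block of MSTP (module stop) gate clocks to sh_clk_mstp_register().
 * The parent clock, register address and bit number below are made up for
 * the example; real values come from the SoC's hardware manual.
 */
#if 0	/* example only */
extern struct clk extal_clk;			/* hypothetical parent */

static struct clk mstp_clks[] = {
        {
                .parent         = &extal_clk,
                .enable_reg     = (void __iomem *)0xe6150130,	/* made-up MSTPCR */
                .enable_bit     = 15,				/* made-up bit */
        },
};

static int __init example_clk_init(void)
{
        return sh_clk_mstp_register(mstp_clks, ARRAY_SIZE(mstp_clks));
}
#endif
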
/*
 * Div/mult table lookup helpers
 */
static inline struct clk_div_table *clk_to_div_table(struct clk *clk)
{
        return clk->priv;
}

static inline struct clk_div_mult_table *clk_to_div_mult_table(struct clk *clk)
{
        return clk_to_div_table(clk)->div_mult_table;
}

/*
 * Common div ops
 */
static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
{
        return clk_rate_table_round(clk, clk->freq_table, rate);
}

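/*
 * For the divider clocks below, clk->enable_bit is reused as the bit
 * position of the divisor field within the control register and
 * clk->div_mask as its width mask; the raw field value indexes directly
 * into the frequency table built by clk_rate_table_build(). For example,
 * with enable_bit = 0 and div_mask = 0x3f, a register value of 0x0b
 * selects freq_table[11].
 */
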
static unsigned long sh_clk_div_recalc(struct clk *clk)
{
        struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
        unsigned int idx;

        clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
                             table, clk->arch_flags ? &clk->arch_flags : NULL);

        idx = (sh_clk_read(clk) >> clk->enable_bit) & clk->div_mask;

        return clk->freq_table[idx].frequency;
}

static int sh_clk_div_set_rate(struct clk *clk, unsigned long rate)
{
        struct clk_div_table *dt = clk_to_div_table(clk);
        unsigned long value;
        int idx;

        idx = clk_rate_table_find(clk, clk->freq_table, rate);
        if (idx < 0)
                return idx;

        value = sh_clk_read(clk);
        value &= ~(clk->div_mask << clk->enable_bit);
        value |= (idx << clk->enable_bit);
        sh_clk_write(value, clk);

        /* XXX: Should use a post-change notifier */
        if (dt->kick)
                dt->kick(clk);

        return 0;
}

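/*
 * Illustrative call path, not part of cpg.c: consumers go through the
 * generic clock API and the SH clock framework dispatches to the ops
 * installed below, e.g. (the clock name is hypothetical):
 *
 *	struct clk *clk = clk_get(dev, "sdhi0_clk");
 *	clk_set_rate(clk, clk_round_rate(clk, 48000000));
 *
 * which ends up in sh_clk_div_round_rate() and sh_clk_div_set_rate().
 */
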
static int sh_clk_div_enable(struct clk *clk)
{
        sh_clk_write(sh_clk_read(clk) & ~CPG_CKSTP_BIT, clk);
        return 0;
}

static void sh_clk_div_disable(struct clk *clk)
{
        unsigned int val;

        val = sh_clk_read(clk);
        val |= CPG_CKSTP_BIT;

        /*
         * div6 clocks require the divisor field to be non-zero or the
         * above CKSTP toggle silently fails. Ensure that the divisor
         * array is reset to its initial state on disable.
         */
        if (clk->flags & CLK_MASK_DIV_ON_DISABLE)
                val |= clk->div_mask;

        sh_clk_write(val, clk);
}

static struct sh_clk_ops sh_clk_div_clk_ops = {
        .recalc = sh_clk_div_recalc,
        .set_rate = sh_clk_div_set_rate,
        .round_rate = sh_clk_div_round_rate,
};

static struct sh_clk_ops sh_clk_div_enable_clk_ops = {
        .recalc = sh_clk_div_recalc,
        .set_rate = sh_clk_div_set_rate,
        .round_rate = sh_clk_div_round_rate,
        .enable = sh_clk_div_enable,
        .disable = sh_clk_div_disable,
};

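/*
 * sh_clk_div_clk_ops covers dividers that are always running, while
 * sh_clk_div_enable_clk_ops additionally gates the clock via the CKSTP
 * bit through sh_clk_div_enable()/sh_clk_div_disable() above.
 */
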
static int __init sh_clk_init_parent(struct clk *clk)
{
        u32 val;

        if (clk->parent)
                return 0;

        if (!clk->parent_table || !clk->parent_num)
                return 0;

        if (!clk->src_width) {
                pr_err("sh_clk_init_parent: cannot select parent clock\n");
                return -EINVAL;
        }

        val = (sh_clk_read(clk) >> clk->src_shift);
        val &= (1 << clk->src_width) - 1;

        if (val >= clk->parent_num) {
                pr_err("sh_clk_init_parent: parent table size failed\n");
                return -EINVAL;
        }

        clk_reparent(clk, clk->parent_table[val]);
        if (!clk->parent) {
                pr_err("sh_clk_init_parent: unable to set parent");
                return -EINVAL;
        }

        return 0;
}

static int __init sh_clk_div_register_ops(struct clk *clks, int nr,
                        struct clk_div_table *table, struct sh_clk_ops *ops)
{
        struct clk *clkp;
        void *freq_table;
        int nr_divs = table->div_mult_table->nr_divisors;
        int freq_table_size = sizeof(struct cpufreq_frequency_table);
        int ret = 0;
        int k;

        freq_table_size *= (nr_divs + 1);
        freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
        if (!freq_table) {
                pr_err("%s: unable to alloc memory\n", __func__);
                return -ENOMEM;
        }

        for (k = 0; !ret && (k < nr); k++) {
                clkp = clks + k;

                clkp->ops = ops;
                clkp->priv = table;

                clkp->freq_table = freq_table + (k * freq_table_size);
                clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;

                ret = clk_register(clkp);
                if (ret == 0)
                        ret = sh_clk_init_parent(clkp);
        }

        return ret;
}

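/*
 * Note on the allocation above: a single kzalloc() provides nr per-clock
 * frequency tables back to back, each with room for nr_divs entries plus
 * a CPUFREQ_TABLE_END terminator; clkp->freq_table is simply pointed at
 * the k-th slice.
 */
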
/*
 * div6 support
 */
static int sh_clk_div6_divisors[64] = {
        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
        17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
        33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
        49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
};

static struct clk_div_mult_table div6_div_mult_table = {
        .divisors = sh_clk_div6_divisors,
        .nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
};

static struct clk_div_table sh_clk_div6_table = {
        .div_mult_table = &div6_div_mult_table,
};

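/*
 * sh_clk_div6_divisors[] above maps a raw 6-bit divisor field value n to a
 * divisor of n + 1, i.e. a field value of 0 means divide-by-1 and 0x3f
 * means divide-by-64.
 */
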
static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
{
        struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
        u32 value;
        int ret, i;

        if (!clk->parent_table || !clk->parent_num)
                return -EINVAL;

        /* Search the parent */
        for (i = 0; i < clk->parent_num; i++)
                if (clk->parent_table[i] == parent)
                        break;

        if (i == clk->parent_num)
                return -ENODEV;

        ret = clk_reparent(clk, parent);
        if (ret < 0)
                return ret;

        value = sh_clk_read(clk) &
                ~(((1 << clk->src_width) - 1) << clk->src_shift);

        sh_clk_write(value | (i << clk->src_shift), clk);

        /* Rebuild the frequency table */
        clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
                             table, NULL);

        return 0;
}

static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
        .recalc = sh_clk_div_recalc,
        .round_rate = sh_clk_div_round_rate,
        .set_rate = sh_clk_div_set_rate,
        .enable = sh_clk_div_enable,
        .disable = sh_clk_div_disable,
        .set_parent = sh_clk_div6_set_parent,
};

int __init sh_clk_div6_register(struct clk *clks, int nr)
{
        return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
                                       &sh_clk_div_enable_clk_ops);
}

int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
{
        return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
                                       &sh_clk_div6_reparent_clk_ops);
}

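/*
 * Illustrative sketch, not part of cpg.c: a minimal div6 clock as an SoC
 * clock file might declare it. The register address is made up; the
 * assumption here is the usual DIV6 layout with the 6-bit divisor field
 * at bit 0, so enable_bit (reused as the field shift above) is 0 and
 * div_mask is 0x3f.
 */
#if 0	/* example only */
extern struct clk pll_clk;			/* hypothetical parent */

static struct clk div6_clks[] = {
        {
                .parent         = &pll_clk,
                .enable_reg     = (void __iomem *)0xe6150080,	/* made-up DIV6CR */
                .enable_bit     = 0,
                .div_mask       = 0x3f,
                .flags          = CLK_MASK_DIV_ON_DISABLE,
        },
};

/* typically: sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks)); */
#endif
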
/*
 * div4 support
 */
static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
{
        struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
        u32 value;
        int ret;

        /* we really need a better way to determine parent index, but for
         * now assume internal parent comes with CLK_ENABLE_ON_INIT set,
         * no CLK_ENABLE_ON_INIT means external clock...
         */

        if (parent->flags & CLK_ENABLE_ON_INIT)
                value = sh_clk_read(clk) & ~(1 << 7);
        else
                value = sh_clk_read(clk) | (1 << 7);

        ret = clk_reparent(clk, parent);
        if (ret < 0)
                return ret;

        sh_clk_write(value, clk);

        /* Rebuild the frequency table */
        clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
                             table, &clk->arch_flags);

        return 0;
}

static struct sh_clk_ops sh_clk_div4_reparent_clk_ops = {
        .recalc = sh_clk_div_recalc,
        .set_rate = sh_clk_div_set_rate,
        .round_rate = sh_clk_div_round_rate,
        .enable = sh_clk_div_enable,
        .disable = sh_clk_div_disable,
        .set_parent = sh_clk_div4_set_parent,
};

int __init sh_clk_div4_register(struct clk *clks, int nr,
                                struct clk_div4_table *table)
{
        return sh_clk_div_register_ops(clks, nr, table, &sh_clk_div_clk_ops);
}

int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
                                struct clk_div4_table *table)
{
        return sh_clk_div_register_ops(clks, nr, table,
                                       &sh_clk_div_enable_clk_ops);
}

int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
                                struct clk_div4_table *table)
{
        return sh_clk_div_register_ops(clks, nr, table,
                                       &sh_clk_div4_reparent_clk_ops);
}
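
/*
 * Illustrative sketch, not part of cpg.c: a minimal div4 table as an SoC
 * clock file might supply to sh_clk_div4_register(). The divisor list and
 * the frqcr_kick() helper are made up for the example; a real kick callback
 * would trigger the SoC's frequency-change kick bit after the divider
 * register is updated (see the dt->kick() call in sh_clk_div_set_rate()
 * above).
 */
#if 0	/* example only */
static int div4_divisors[] = { 2, 3, 4, 6, 8, 12, 16, 24 };

static struct clk_div_mult_table div4_div_mult_table = {
        .divisors = div4_divisors,
        .nr_divisors = ARRAY_SIZE(div4_divisors),
};

static void frqcr_kick(struct clk *clk)
{
        /* SoC-specific: trigger the frequency change here */
}

static struct clk_div4_table div4_table = {
        .div_mult_table = &div4_div_mult_table,
        .kick           = frqcr_kick,
};

/* typically: sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks), &div4_table); */
#endif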