Linux Kernel 3.7.1
tcb_clksrc.c

#include <linux/init.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/atmel_tc.h>

/*
 * We're configured to use a specific TC block, one that's not hooked
 * up to external hardware, to provide a time solution:
 *
 *   - Two channels combine to create a free-running 32 bit counter
 *     with a base rate of 5+ MHz, packaged as a clocksource (with
 *     resolution better than 200 nsec).
 *   - Some chips support a 32 bit counter.  A single channel is used for
 *     this 32 bit free-running counter; the second channel is not used.
 *
 *   - The third channel may be used to provide a 16-bit clockevent
 *     source, used in either periodic or oneshot mode.  This runs
 *     at 32 KiHz, and can handle delays of up to two seconds.
 *
 * A boot clocksource and clockevent source are also currently needed,
 * unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so
 * this code can be used when init_timers() is called, well before most
 * devices are set up.  (Some low end AT91 parts, which can run uClinux,
 * have only the timers in one TC block... they currently don't support
 * the tclib code, because of that initialization issue.)
 *
 * REVISIT behavior during system suspend states... we should disable
 * all clocks and save the power.  Easily done for clockevent devices,
 * but clocksources won't necessarily get the needed notifications.
 * For deeper system sleep states, this will be mandatory...
 */

static void __iomem *tcaddr;

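/*
 * Read the two chained 16-bit channels as one coherent 32-bit value:
 * sample the upper half (channel 1), then the lower half (channel 0),
 * and retry if the upper half changed in between (i.e. channel 0
 * wrapped during the read).  Interrupts are masked so the two reads
 * stay close together.
 */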
static cycle_t tc_get_cycles(struct clocksource *cs)
{
        unsigned long flags;
        u32 lower, upper;

        raw_local_irq_save(flags);
        do {
                upper = __raw_readl(tcaddr + ATMEL_TC_REG(1, CV));
                lower = __raw_readl(tcaddr + ATMEL_TC_REG(0, CV));
        } while (upper != __raw_readl(tcaddr + ATMEL_TC_REG(1, CV)));

        raw_local_irq_restore(flags);
        return (upper << 16) | lower;
}

static cycle_t tc_get_cycles32(struct clocksource *cs)
{
        return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV));
}

static struct clocksource clksrc = {
        .name           = "tcb_clksrc",
        .rating         = 200,
        .read           = tc_get_cycles,
        .mask           = CLOCKSOURCE_MASK(32),
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
};

#ifdef CONFIG_GENERIC_CLOCKEVENTS

struct tc_clkevt_device {
        struct clock_event_device clkevt;
        struct clk *clk;
        void __iomem *regs;
};

static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt)
{
        return container_of(clkevt, struct tc_clkevt_device, clkevt);
}

/* For now, we always use the 32K clock ... this optimizes for NO_HZ,
 * because using one of the divided clocks would usually mean the
 * tick rate can never be less than several dozen Hz (vs 0.5 Hz).
 *
 * A divided clock could be good for high resolution timers, since
 * 30.5 usec resolution can seem "low".
 */
static u32 timer_clock;

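/*
 * Switch channel 2 between periodic and oneshot mode.  Any previous
 * configuration is torn down first (interrupts masked, channel clock
 * stopped, peripheral clock disabled) before the new waveform mode is
 * programmed.
 */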
static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
{
        struct tc_clkevt_device *tcd = to_tc_clkevt(d);
        void __iomem *regs = tcd->regs;

        if (tcd->clkevt.mode == CLOCK_EVT_MODE_PERIODIC
                        || tcd->clkevt.mode == CLOCK_EVT_MODE_ONESHOT) {
                __raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
                __raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
                clk_disable(tcd->clk);
        }

        switch (m) {

        /* By not making the gentime core emulate periodic mode on top
         * of oneshot, we get lower overhead and improved accuracy.
         */
        case CLOCK_EVT_MODE_PERIODIC:
                clk_enable(tcd->clk);

                /* slow clock, count up to RC, then irq and restart */
                __raw_writel(timer_clock
                                | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
                                regs + ATMEL_TC_REG(2, CMR));
                __raw_writel((32768 + HZ/2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));

                /* Enable clock and interrupts on RC compare */
                __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));

                /* go go gadget! */
                __raw_writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
                                regs + ATMEL_TC_REG(2, CCR));
                break;

        case CLOCK_EVT_MODE_ONESHOT:
                clk_enable(tcd->clk);

                /* slow clock, count up to RC, then irq and stop */
                __raw_writel(timer_clock | ATMEL_TC_CPCSTOP
                                | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
                                regs + ATMEL_TC_REG(2, CMR));
                __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));

                /* set_next_event() configures and starts the timer */
                break;

        default:
                break;
        }
}

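/*
 * Program the next expiry: write the delta into RC, then enable the
 * channel clock and issue a software trigger so the counter restarts
 * from zero and fires the RC compare interrupt.
 */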
static int tc_next_event(unsigned long delta, struct clock_event_device *d)
{
        __raw_writel(delta, tcaddr + ATMEL_TC_REG(2, RC));

        /* go go gadget! */
        __raw_writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
                        tcaddr + ATMEL_TC_REG(2, CCR));
        return 0;
}

static struct tc_clkevt_device clkevt = {
        .clkevt = {
                .name           = "tc_clkevt",
                .features       = CLOCK_EVT_FEAT_PERIODIC
                                        | CLOCK_EVT_FEAT_ONESHOT,
                .shift          = 32,
                /* Should be lower than at91rm9200's system timer */
                .rating         = 125,
                .set_next_event = tc_next_event,
                .set_mode       = tc_mode,
        },
};

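/*
 * Channel 2 interrupt handler: check the channel status register and
 * forward the event to the clockevent core only when the RC compare
 * flag (CPCS) is set; otherwise the interrupt is not ours.
 */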
static irqreturn_t ch2_irq(int irq, void *handle)
{
        struct tc_clkevt_device *dev = handle;
        unsigned int sr;

        sr = __raw_readl(dev->regs + ATMEL_TC_REG(2, SR));
        if (sr & ATMEL_TC_CPCS) {
                dev->clkevt.event_handler(&dev->clkevt);
                return IRQ_HANDLED;
        }

        return IRQ_NONE;
}

static struct irqaction tc_irqaction = {
        .name           = "tc_clkevt",
        .flags          = IRQF_TIMER | IRQF_DISABLED,
        .handler        = ch2_irq,
};

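/*
 * Register channel 2 as a clockevent device.  The mult/shift pair is
 * derived from the fixed 32768 Hz slow clock, the 16-bit RC register
 * bounds max_delta_ns, and the device is registered before its
 * interrupt handler is installed.
 */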
static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
{
        struct clk *t2_clk = tc->clk[2];
        int irq = tc->irq[2];

        clkevt.regs = tc->regs;
        clkevt.clk = t2_clk;
        tc_irqaction.dev_id = &clkevt;

        timer_clock = clk32k_divisor_idx;

        clkevt.clkevt.mult = div_sc(32768, NSEC_PER_SEC, clkevt.clkevt.shift);
        clkevt.clkevt.max_delta_ns
                = clockevent_delta2ns(0xffff, &clkevt.clkevt);
        clkevt.clkevt.min_delta_ns = clockevent_delta2ns(1, &clkevt.clkevt) + 1;
        clkevt.clkevt.cpumask = cpumask_of(0);

        clockevents_register_device(&clkevt.clkevt);

        setup_irq(irq, &tc_irqaction);
}

#else /* !CONFIG_GENERIC_CLOCKEVENTS */

static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
{
        /* NOTHING */
}

#endif

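/*
 * Build the 32-bit clocksource from two chained 16-bit channels:
 * channel 0 runs from the divided master clock and drives a square wave
 * on TIOA0 (set at RA = 0, cleared at RC = 0x8000), so TIOA0 rises once
 * per channel 0 wrap; channel 1 counts those edges via XC1 and therefore
 * holds the upper 16 bits read by tc_get_cycles().
 */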
static void __init tcb_setup_dual_chan(struct atmel_tc *tc, int mck_divisor_idx)
{
        /* channel 0:  waveform mode, input mclk/8, clock TIOA0 on overflow */
        __raw_writel(mck_divisor_idx                    /* likely divide-by-8 */
                        | ATMEL_TC_WAVE
                        | ATMEL_TC_WAVESEL_UP           /* free-run */
                        | ATMEL_TC_ACPA_SET             /* TIOA0 rises at 0 */
                        | ATMEL_TC_ACPC_CLEAR,          /* (duty cycle 50%) */
                        tcaddr + ATMEL_TC_REG(0, CMR));
        __raw_writel(0x0000, tcaddr + ATMEL_TC_REG(0, RA));
        __raw_writel(0x8000, tcaddr + ATMEL_TC_REG(0, RC));
        __raw_writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR));      /* no irqs */
        __raw_writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));

        /* channel 1:  waveform mode, input TIOA0 */
        __raw_writel(ATMEL_TC_XC1                       /* input: TIOA0 */
                        | ATMEL_TC_WAVE
                        | ATMEL_TC_WAVESEL_UP,          /* free-run */
                        tcaddr + ATMEL_TC_REG(1, CMR));
        __raw_writel(0xff, tcaddr + ATMEL_TC_REG(1, IDR));      /* no irqs */
        __raw_writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(1, CCR));

        /* chain channel 0 to channel 1 */
        __raw_writel(ATMEL_TC_TC1XC1S_TIOA0, tcaddr + ATMEL_TC_BMR);
        /* then reset all the timers */
        __raw_writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
}

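/*
 * On chips whose TC channels are already 32 bits wide a single
 * free-running channel is enough; channel 1 stays unused and the
 * clocksource is read through tc_get_cycles32().
 */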
static void __init tcb_setup_single_chan(struct atmel_tc *tc, int mck_divisor_idx)
{
        /* channel 0:  waveform mode, input mclk/8 */
        __raw_writel(mck_divisor_idx                    /* likely divide-by-8 */
                        | ATMEL_TC_WAVE
                        | ATMEL_TC_WAVESEL_UP,          /* free-run */
                        tcaddr + ATMEL_TC_REG(0, CMR));
        __raw_writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR));      /* no irqs */
        __raw_writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));

        /* then reset all the timers */
        __raw_writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
}

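/*
 * Probe the configured TC block, walk the divisor table to pick a master
 * clock prescaler giving a counter rate above 5 MHz (remembering which
 * table slot is the 32 KiHz slow clock for the clockevent), set up one or
 * two channels depending on the counter width, then register the
 * clocksource and, on channel 2, the clockevent device.
 */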
static int __init tcb_clksrc_init(void)
{
        static char bootinfo[] __initdata
                = KERN_DEBUG "%s: tc%d at %d.%03d MHz\n";

        struct platform_device *pdev;
        struct atmel_tc *tc;
        struct clk *t0_clk;
        u32 rate, divided_rate = 0;
        int best_divisor_idx = -1;
        int clk32k_divisor_idx = -1;
        int i;

        tc = atmel_tc_alloc(CONFIG_ATMEL_TCB_CLKSRC_BLOCK, clksrc.name);
        if (!tc) {
                pr_debug("can't alloc TC for clocksource\n");
                return -ENODEV;
        }
        tcaddr = tc->regs;
        pdev = tc->pdev;

        t0_clk = tc->clk[0];
        clk_enable(t0_clk);

        /* How fast will we be counting?  Pick something over 5 MHz.  */
        rate = (u32) clk_get_rate(t0_clk);
        for (i = 0; i < 5; i++) {
                unsigned divisor = atmel_tc_divisors[i];
                unsigned tmp;

                /* remember 32 KiHz clock for later */
                if (!divisor) {
                        clk32k_divisor_idx = i;
                        continue;
                }

                tmp = rate / divisor;
                pr_debug("TC: %u / %-3u [%d] --> %u\n", rate, divisor, i, tmp);
                if (best_divisor_idx > 0) {
                        if (tmp < 5 * 1000 * 1000)
                                continue;
                }
                divided_rate = tmp;
                best_divisor_idx = i;
        }

        printk(bootinfo, clksrc.name, CONFIG_ATMEL_TCB_CLKSRC_BLOCK,
                        divided_rate / 1000000,
                        ((divided_rate + 500000) % 1000000) / 1000);

        if (tc->tcb_config && tc->tcb_config->counter_width == 32) {
                /* use the appropriate function to read the 32 bit counter */
                clksrc.read = tc_get_cycles32;
                /* setup only channel 0 */
                tcb_setup_single_chan(tc, best_divisor_idx);
        } else {
                /* tclib will give us three clocks no matter what the
                 * underlying platform supports.
                 */
                clk_enable(tc->clk[1]);
                /* setup both channel 0 & 1 */
                tcb_setup_dual_chan(tc, best_divisor_idx);
        }

        /* and away we go! */
        clocksource_register_hz(&clksrc, divided_rate);

        /* channel 2:  periodic and oneshot timer support */
        setup_clkevents(tc, clk32k_divisor_idx);

        return 0;
}
arch_initcall(tcb_clksrc_init);