timer.c (Linux Kernel 3.7.1)
/*
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2009-2012, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include <asm/mach/time.h>
#include <asm/hardware/gic.h>
#include <asm/localtimer.h>
#include <asm/sched_clock.h>

#include "common.h"

#define TIMER_MATCH_VAL                 0x0000
#define TIMER_COUNT_VAL                 0x0004
#define TIMER_ENABLE                    0x0008
#define TIMER_ENABLE_CLR_ON_MATCH_EN    BIT(1)
#define TIMER_ENABLE_EN                 BIT(0)
#define TIMER_CLEAR                     0x000C
#define DGT_CLK_CTL_DIV_4               0x3

#define GPT_HZ 32768

#define MSM_DGT_SHIFT 5

static void __iomem *event_base;

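/*
 * Timer match interrupt, shared between the boot-CPU clockevent and
 * (when enabled) the per-CPU local timers: in one-shot mode the timer is
 * switched off before the event handler runs, so it will not fire again
 * until set_next_event() reprograms it.
 */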
static irqreturn_t msm_timer_interrupt(int irq, void *dev_id)
{
        struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
        /* Stop the timer tick */
        if (evt->mode == CLOCK_EVT_MODE_ONESHOT) {
                u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE);
                ctrl &= ~TIMER_ENABLE_EN;
                writel_relaxed(ctrl, event_base + TIMER_ENABLE);
        }
        evt->event_handler(evt);
        return IRQ_HANDLED;
}

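/*
 * Program the next event: clear the counter, load the match value and
 * (re)enable the timer; 'cycles' counts from the freshly cleared counter.
 */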
static int msm_timer_set_next_event(unsigned long cycles,
                                    struct clock_event_device *evt)
{
        u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE);

        writel_relaxed(0, event_base + TIMER_CLEAR);
        writel_relaxed(cycles, event_base + TIMER_MATCH_VAL);
        writel_relaxed(ctrl | TIMER_ENABLE_EN, event_base + TIMER_ENABLE);
        return 0;
}

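/*
 * Only one-shot operation is used: every mode change drops the enable
 * bits, and set_next_event() switches the timer back on when needed.
 */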
static void msm_timer_set_mode(enum clock_event_mode mode,
                               struct clock_event_device *evt)
{
        u32 ctrl;

        ctrl = readl_relaxed(event_base + TIMER_ENABLE);
        ctrl &= ~(TIMER_ENABLE_EN | TIMER_ENABLE_CLR_ON_MATCH_EN);

        switch (mode) {
        case CLOCK_EVT_MODE_RESUME:
        case CLOCK_EVT_MODE_PERIODIC:
                break;
        case CLOCK_EVT_MODE_ONESHOT:
                /* Timer is enabled in set_next_event */
                break;
        case CLOCK_EVT_MODE_UNUSED:
        case CLOCK_EVT_MODE_SHUTDOWN:
                break;
        }
        writel_relaxed(ctrl, event_base + TIMER_ENABLE);
}

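/*
 * Clockevent for the boot CPU; secondary CPUs receive their own copies
 * in msm_local_timer_setup() below.
 */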
static struct clock_event_device msm_clockevent = {
        .name           = "gp_timer",
        .features       = CLOCK_EVT_FEAT_ONESHOT,
        .rating         = 200,
        .set_next_event = msm_timer_set_next_event,
        .set_mode       = msm_timer_set_mode,
};

static union {
        struct clock_event_device *evt;
        struct clock_event_device * __percpu *percpu_evt;
} msm_evt;

static void __iomem *source_base;

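/*
 * Clocksource read callbacks for the DGT: the shifted variant drops the
 * unreliable low-order bits present on some targets.
 */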
static notrace cycle_t msm_read_timer_count(struct clocksource *cs)
{
        return readl_relaxed(source_base + TIMER_COUNT_VAL);
}

static notrace cycle_t msm_read_timer_count_shift(struct clocksource *cs)
{
        /*
         * Shift timer count down by a constant due to unreliable lower bits
         * on some targets.
         */
        return msm_read_timer_count(cs) >> MSM_DGT_SHIFT;
}

static struct clocksource msm_clocksource = {
        .name   = "dg_timer",
        .rating = 300,
        .read   = msm_read_timer_count,
        .mask   = CLOCKSOURCE_MASK(32),
        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
};

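/*
 * Optional per-CPU local timers: each secondary CPU gets a clockevent
 * modelled on msm_clockevent and driven through the same per-CPU IRQ.
 */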
#ifdef CONFIG_LOCAL_TIMERS
static int __cpuinit msm_local_timer_setup(struct clock_event_device *evt)
{
        /* Use existing clock_event for cpu 0 */
        if (!smp_processor_id())
                return 0;

        writel_relaxed(0, event_base + TIMER_ENABLE);
        writel_relaxed(0, event_base + TIMER_CLEAR);
        writel_relaxed(~0, event_base + TIMER_MATCH_VAL);
        evt->irq = msm_clockevent.irq;
        evt->name = "local_timer";
        evt->features = msm_clockevent.features;
        evt->rating = msm_clockevent.rating;
        evt->set_mode = msm_timer_set_mode;
        evt->set_next_event = msm_timer_set_next_event;
        evt->shift = msm_clockevent.shift;
        evt->mult = div_sc(GPT_HZ, NSEC_PER_SEC, evt->shift);
        evt->max_delta_ns = clockevent_delta2ns(0xf0000000, evt);
        evt->min_delta_ns = clockevent_delta2ns(4, evt);

        *__this_cpu_ptr(msm_evt.percpu_evt) = evt;
        clockevents_register_device(evt);
        enable_percpu_irq(evt->irq, IRQ_TYPE_EDGE_RISING);
        return 0;
}

static void msm_local_timer_stop(struct clock_event_device *evt)
{
        evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
        disable_percpu_irq(evt->irq);
}

static struct local_timer_ops msm_local_timer_ops __cpuinitdata = {
        .setup  = msm_local_timer_setup,
        .stop   = msm_local_timer_stop,
};
#endif /* CONFIG_LOCAL_TIMERS */

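/* sched_clock is fed directly from the DGT clocksource */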
static notrace u32 msm_sched_clock_read(void)
{
        return msm_clocksource.read(&msm_clocksource);
}

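/*
 * Common initialisation: register the GPT-based clockevent (GPT_HZ) and
 * the DGT-based clocksource plus sched_clock running at dgt_hz.
 */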
static void __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq,
                                  bool percpu)
{
        struct clock_event_device *ce = &msm_clockevent;
        struct clocksource *cs = &msm_clocksource;
        int res;

        writel_relaxed(0, event_base + TIMER_ENABLE);
        writel_relaxed(0, event_base + TIMER_CLEAR);
        writel_relaxed(~0, event_base + TIMER_MATCH_VAL);
        ce->cpumask = cpumask_of(0);
        ce->irq = irq;

        clockevents_config_and_register(ce, GPT_HZ, 4, 0xffffffff);
        if (percpu) {
                msm_evt.percpu_evt = alloc_percpu(struct clock_event_device *);
                if (!msm_evt.percpu_evt) {
                        pr_err("memory allocation failed for %s\n", ce->name);
                        goto err;
                }
                *__this_cpu_ptr(msm_evt.percpu_evt) = ce;
                res = request_percpu_irq(ce->irq, msm_timer_interrupt,
                                         ce->name, msm_evt.percpu_evt);
                if (!res) {
                        enable_percpu_irq(ce->irq, IRQ_TYPE_EDGE_RISING);
#ifdef CONFIG_LOCAL_TIMERS
                        local_timer_register(&msm_local_timer_ops);
#endif
                }
        } else {
                msm_evt.evt = ce;
                res = request_irq(ce->irq, msm_timer_interrupt,
                                  IRQF_TIMER | IRQF_NOBALANCING |
                                  IRQF_TRIGGER_RISING, ce->name, &msm_evt.evt);
        }

        if (res)
                pr_err("request_irq failed for %s\n", ce->name);
err:
        writel_relaxed(TIMER_ENABLE_EN, source_base + TIMER_ENABLE);
        res = clocksource_register_hz(cs, dgt_hz);
        if (res)
                pr_err("clocksource_register failed\n");
        setup_sched_clock(msm_sched_clock_read, sched_bits, dgt_hz);
}

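/*
 * Device-tree probing: the "qcom,msm-gpt" node supplies the clockevent,
 * the "qcom,msm-dgt" node the clocksource.
 */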
#ifdef CONFIG_OF
static const struct of_device_id msm_dgt_match[] __initconst = {
        { .compatible = "qcom,msm-dgt" },
        { },
};

static const struct of_device_id msm_gpt_match[] __initconst = {
        { .compatible = "qcom,msm-gpt" },
        { },
};

static void __init msm_dt_timer_init(void)
{
        struct device_node *np;
        u32 freq;
        int irq;
        struct resource res;
        u32 percpu_offset;
        void __iomem *dgt_clk_ctl;

        np = of_find_matching_node(NULL, msm_gpt_match);
        if (!np) {
                pr_err("Can't find GPT DT node\n");
                return;
        }

        event_base = of_iomap(np, 0);
        if (!event_base) {
                pr_err("Failed to map event base\n");
                return;
        }

        irq = irq_of_parse_and_map(np, 0);
        if (irq <= 0) {
                pr_err("Can't get irq\n");
                return;
        }
        of_node_put(np);

        np = of_find_matching_node(NULL, msm_dgt_match);
        if (!np) {
                pr_err("Can't find DGT DT node\n");
                return;
        }

        if (of_property_read_u32(np, "cpu-offset", &percpu_offset))
                percpu_offset = 0;

        if (of_address_to_resource(np, 0, &res)) {
                pr_err("Failed to parse DGT resource\n");
                return;
        }

        source_base = ioremap(res.start + percpu_offset, resource_size(&res));
        if (!source_base) {
                pr_err("Failed to map source base\n");
                return;
        }

        if (!of_address_to_resource(np, 1, &res)) {
                dgt_clk_ctl = ioremap(res.start + percpu_offset,
                                      resource_size(&res));
                if (!dgt_clk_ctl) {
                        pr_err("Failed to map DGT control base\n");
                        return;
                }
                writel_relaxed(DGT_CLK_CTL_DIV_4, dgt_clk_ctl);
                iounmap(dgt_clk_ctl);
        }

        if (of_property_read_u32(np, "clock-frequency", &freq)) {
                pr_err("Unknown frequency\n");
                return;
        }
        of_node_put(np);

        msm_timer_init(freq, 32, irq, !!percpu_offset);
}

struct sys_timer msm_dt_timer = {
        .init = msm_dt_timer_init
};
#endif

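/* Legacy (non-DT) board support below: fixed per-SoC physical addresses */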
static int __init msm_timer_map(phys_addr_t event, phys_addr_t source)
{
        event_base = ioremap(event, SZ_64);
        if (!event_base) {
                pr_err("Failed to map event base\n");
                return 1;
        }
        source_base = ioremap(source, SZ_64);
        if (!source_base) {
                pr_err("Failed to map source base\n");
                return 1;
        }
        return 0;
}

static void __init msm7x01_timer_init(void)
{
        struct clocksource *cs = &msm_clocksource;

        if (msm_timer_map(0xc0100000, 0xc0100010))
                return;
        cs->read = msm_read_timer_count_shift;
        cs->mask = CLOCKSOURCE_MASK((32 - MSM_DGT_SHIFT));
        /* 600 KHz */
        msm_timer_init(19200000 >> MSM_DGT_SHIFT, 32 - MSM_DGT_SHIFT, 7,
                       false);
}

struct sys_timer msm7x01_timer = {
        .init = msm7x01_timer_init
};

static void __init msm7x30_timer_init(void)
{
        if (msm_timer_map(0xc0100004, 0xc0100024))
                return;
        msm_timer_init(24576000 / 4, 32, 1, false);
}

struct sys_timer msm7x30_timer = {
        .init = msm7x30_timer_init
};

static void __init qsd8x50_timer_init(void)
{
        if (msm_timer_map(0xAC100000, 0xAC100010))
                return;
        msm_timer_init(19200000 / 4, 32, 7, false);
}

struct sys_timer qsd8x50_timer = {
        .init = qsd8x50_timer_init
};
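/*
 * Usage note (not part of the original file): on these non-DT platforms a
 * board file selects one of the sys_timer descriptors above via its
 * machine descriptor. A minimal sketch, assuming a 3.7-era board file and
 * an illustrative machine name:
 *
 *      MACHINE_START(MSM7X30_SURF, "QCT MSM7X30 SURF")
 *              ...
 *              .timer = &msm7x30_timer,
 *              ...
 *      MACHINE_END
 *
 * Device-tree boards instead point .timer at msm_dt_timer and let
 * msm_dt_timer_init() discover the hardware from the "qcom,msm-gpt" and
 * "qcom,msm-dgt" nodes.
 */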