clockevents.c
/*
 * linux/kernel/time/clockevents.c
 *
 * This file contains functions which manage clock event devices.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/smp.h>

#include "tick-internal.h"

/* The registered clock event devices */
static LIST_HEAD(clockevent_devices);
static LIST_HEAD(clockevents_released);

/* Notification for clock events */
static RAW_NOTIFIER_HEAD(clockevents_chain);

/* Protection for the above */
static DEFINE_RAW_SPINLOCK(clockevents_lock);

/**
 * clockevent_delta2ns - Convert a latch value (device ticks) to nanoseconds
 * @latch:      value to convert
 * @evt:        pointer to clock event device descriptor
 *
 * Math helper, returns latch value converted to nanoseconds (bound checked)
 */
u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
{
        u64 clc = (u64) latch << evt->shift;

        if (unlikely(!evt->mult)) {
                evt->mult = 1;
                WARN_ON(1);
        }

        do_div(clc, evt->mult);
        if (clc < 1000)
                clc = 1000;
        if (clc > KTIME_MAX)
                clc = KTIME_MAX;

        return clc;
}
EXPORT_SYMBOL_GPL(clockevent_delta2ns);

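/*
 * Worked example (illustrative values, not part of the kernel source):
 * for a 1 MHz device, clockevents_calc_mult_shift() typically yields
 * shift = 31 and mult ~= 2147484, i.e. mult / 2^shift ~= 0.001 cycles
 * per nanosecond. Converting a latch of 10000 ticks:
 *
 *      clc = (10000 << 31) / 2147484 ~= 9999998 ns ~= 10 ms
 *
 * which matches the expected 10000 ticks / 1 MHz = 10 ms. The clamps
 * above bound the result to [1000, KTIME_MAX] nanoseconds.
 */
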
/**
 * clockevents_set_mode - set the operating mode of a clock event device
 * @dev:        device to modify
 * @mode:       new mode
 *
 * Must be called with interrupts disabled !
 */
void clockevents_set_mode(struct clock_event_device *dev,
                          enum clock_event_mode mode)
{
        if (dev->mode != mode) {
                dev->set_mode(mode, dev);
                dev->mode = mode;

                /*
                 * A nsec2cyc multiplicator of 0 is invalid and we'd crash
                 * on it, so fix it up and emit a warning:
                 */
                if (mode == CLOCK_EVT_MODE_ONESHOT) {
                        if (unlikely(!dev->mult)) {
                                dev->mult = 1;
                                WARN_ON(1);
                        }
                }
        }
}

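/*
 * Driver-side sketch (hypothetical hardware, names invented): the
 * set_mode callback invoked above typically switches on the requested
 * mode and programs the device accordingly.
 */
#if 0   /* illustration only */
static void my_timer_set_mode(enum clock_event_mode mode,
                              struct clock_event_device *evt)
{
        switch (mode) {
        case CLOCK_EVT_MODE_PERIODIC:
                /* program a free-running HZ-rate periodic interrupt */
                break;
        case CLOCK_EVT_MODE_ONESHOT:
                /* stop the counter; set_next_event() arms each expiry */
                break;
        case CLOCK_EVT_MODE_SHUTDOWN:
        case CLOCK_EVT_MODE_UNUSED:
                /* disable the timer and mask its interrupt */
                break;
        case CLOCK_EVT_MODE_RESUME:
                /* restore any state lost across suspend */
                break;
        }
}
#endif
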
/**
 * clockevents_shutdown - shutdown the device and clear next_event
 * @dev:        device to shutdown
 */
void clockevents_shutdown(struct clock_event_device *dev)
{
        clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
        dev->next_event.tv64 = KTIME_MAX;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST

/* Limit min_delta to a jiffie */
#define MIN_DELTA_LIMIT         (NSEC_PER_SEC / HZ)

/**
 * clockevents_increase_min_delta - raise minimum delta of a clock event device
 * @dev:        device to increase the minimum delta
 *
 * Returns 0 on success, -ETIME when the minimum delta reached the limit.
 */
static int clockevents_increase_min_delta(struct clock_event_device *dev)
{
        /* Nothing to do if we already reached the limit */
        if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
                printk(KERN_WARNING "CE: Reprogramming failure. Giving up\n");
                dev->next_event.tv64 = KTIME_MAX;
                return -ETIME;
        }

        if (dev->min_delta_ns < 5000)
                dev->min_delta_ns = 5000;
        else
                dev->min_delta_ns += dev->min_delta_ns >> 1;

        if (dev->min_delta_ns > MIN_DELTA_LIMIT)
                dev->min_delta_ns = MIN_DELTA_LIMIT;

        printk(KERN_WARNING "CE: %s increased min_delta_ns to %llu nsec\n",
               dev->name ? dev->name : "?",
               (unsigned long long) dev->min_delta_ns);
        return 0;
}

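/*
 * Illustrative numbers for the backoff above: with HZ = 250,
 * MIN_DELTA_LIMIT = 1000000000 / 250 = 4000000 ns. A device whose
 * min_delta_ns starts at 1000 is raised on successive failures to
 * 5000, then 7500, 11250, 16875, ... (+50% each round) until a
 * reprogram succeeds or the 4 ms limit is hit and we give up with
 * -ETIME.
 */
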
/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev:        device to program
 *
 * Returns 0 on success, -ETIME when the retry loop failed.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
        unsigned long long clc;
        int64_t delta;
        int i;

        for (i = 0;;) {
                delta = dev->min_delta_ns;
                dev->next_event = ktime_add_ns(ktime_get(), delta);

                if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
                        return 0;

                dev->retries++;
                clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
                if (dev->set_next_event((unsigned long) clc, dev) == 0)
                        return 0;

                if (++i > 2) {
                        /*
                         * We tried 3 times to program the device with the
                         * given min_delta_ns. Try to increase the minimum
                         * delta, if that fails as well get out of here.
                         */
                        if (clockevents_increase_min_delta(dev))
                                return -ETIME;
                        i = 0;
                }
        }
}

#else  /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */

/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev:        device to program
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
        unsigned long long clc;
        int64_t delta;

        delta = dev->min_delta_ns;
        dev->next_event = ktime_add_ns(ktime_get(), delta);

        if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
                return 0;

        dev->retries++;
        clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
        return dev->set_next_event((unsigned long) clc, dev);
}

#endif /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */

/**
 * clockevents_program_event - Reprogram the clock event device.
 * @dev:        device to program
 * @expires:    absolute expiry time (monotonic clock)
 * @force:      program minimum delay if expires can not be set
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
                              bool force)
{
        unsigned long long clc;
        int64_t delta;
        int rc;

        if (unlikely(expires.tv64 < 0)) {
                WARN_ON_ONCE(1);
                return -ETIME;
        }

        dev->next_event = expires;

        if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
                return 0;

        /* Shortcut for clockevent devices that can deal with ktime. */
        if (dev->features & CLOCK_EVT_FEAT_KTIME)
                return dev->set_next_ktime(expires, dev);

        delta = ktime_to_ns(ktime_sub(expires, ktime_get()));
        if (delta <= 0)
                return force ? clockevents_program_min_delta(dev) : -ETIME;

        delta = min(delta, (int64_t) dev->max_delta_ns);
        delta = max(delta, (int64_t) dev->min_delta_ns);

        clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
        rc = dev->set_next_event((unsigned long) clc, dev);

        return (rc && force) ? clockevents_program_min_delta(dev) : rc;
}

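/*
 * Caller-side sketch (hypothetical helper, not from this file): tick
 * and hrtimer code passes an absolute CLOCK_MONOTONIC expiry and lets
 * clockevents_program_event() do the delta, mult/shift and min/max
 * clamping work.
 */
#if 0   /* illustration only */
static int arm_timer_in_1ms(struct clock_event_device *dev)
{
        ktime_t expires = ktime_add_ns(ktime_get(), NSEC_PER_MSEC);

        /* force == false: report -ETIME instead of retrying with min_delta */
        return clockevents_program_event(dev, expires, false);
}
#endif
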
/**
 * clockevents_register_notifier - register a clock events change listener
 */
int clockevents_register_notifier(struct notifier_block *nb)
{
        unsigned long flags;
        int ret;

        raw_spin_lock_irqsave(&clockevents_lock, flags);
        ret = raw_notifier_chain_register(&clockevents_chain, nb);
        raw_spin_unlock_irqrestore(&clockevents_lock, flags);

        return ret;
}

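/*
 * Listener sketch (hypothetical; in-tree the tick layer is the user of
 * this chain): the callback receives one of the CLOCK_EVT_NOTIFY_*
 * reasons and a reason-specific pointer.
 */
#if 0   /* illustration only */
static int my_clockevents_cb(struct notifier_block *nb,
                             unsigned long reason, void *data)
{
        if (reason == CLOCK_EVT_NOTIFY_ADD) {
                /* decide whether to adopt the newly added device */
                struct clock_event_device *dev = data;
        }
        return NOTIFY_OK;
}

static struct notifier_block my_clockevents_nb = {
        .notifier_call = my_clockevents_cb,
};

/* clockevents_register_notifier(&my_clockevents_nb); */
#endif
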
/*
 * Notify about a clock event change. Called with clockevents_lock
 * held.
 */
static void clockevents_do_notify(unsigned long reason, void *dev)
{
        raw_notifier_call_chain(&clockevents_chain, reason, dev);
}

/*
 * Called after a notify add to make devices available which were
 * released from the notifier call.
 */
static void clockevents_notify_released(void)
{
        struct clock_event_device *dev;

        while (!list_empty(&clockevents_released)) {
                dev = list_entry(clockevents_released.next,
                                 struct clock_event_device, list);
                list_del(&dev->list);
                list_add(&dev->list, &clockevent_devices);
                clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev);
        }
}

/**
 * clockevents_register_device - register a clock event device
 * @dev:        device to register
 */
void clockevents_register_device(struct clock_event_device *dev)
{
        unsigned long flags;

        BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
        if (!dev->cpumask) {
                WARN_ON(num_possible_cpus() > 1);
                dev->cpumask = cpumask_of(smp_processor_id());
        }

        raw_spin_lock_irqsave(&clockevents_lock, flags);

        list_add(&dev->list, &clockevent_devices);
        clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev);
        clockevents_notify_released();

        raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_register_device);

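/*
 * Driver-side sketch (invented device and values): a driver fills in
 * callbacks, rating and cpumask, then registers. my_timer_set_mode and
 * my_timer_set_next_event are hypothetical.
 */
#if 0   /* illustration only */
static struct clock_event_device my_clockevent = {
        .name           = "my-timer",
        .features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
        .rating         = 300,
        .set_mode       = my_timer_set_mode,
        .set_next_event = my_timer_set_next_event,
};

static void __init my_timer_init(void)
{
        my_clockevent.cpumask = cpumask_of(smp_processor_id());
        /* 1 MHz input clock, 2..0xffffffff programmable ticks */
        clockevents_config_and_register(&my_clockevent, 1000000, 2, 0xffffffff);
}
#endif
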
void clockevents_config(struct clock_event_device *dev, u32 freq)
{
        u64 sec;

        if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
                return;

        /*
         * Calculate the maximum number of seconds we can sleep. Limit
         * to 10 minutes for hardware which can program more than
         * 32bit ticks so we still get reasonable conversion values.
         */
        sec = dev->max_delta_ticks;
        do_div(sec, freq);
        if (!sec)
                sec = 1;
        else if (sec > 600 && dev->max_delta_ticks > UINT_MAX)
                sec = 600;

        clockevents_calc_mult_shift(dev, freq, sec);
        dev->min_delta_ns = clockevent_delta2ns(dev->min_delta_ticks, dev);
        dev->max_delta_ns = clockevent_delta2ns(dev->max_delta_ticks, dev);
}

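/*
 * Illustrative cases for the clamping above: a 16 bit timer at
 * 32768 Hz has max_delta_ticks = 0xffff, so sec = 0xffff / 32768 = 1
 * and mult/shift are optimized for a one second range. A 64 bit
 * counter at 24 MHz could sleep for thousands of seconds, but sec is
 * clamped to 600 so the chosen mult/shift keeps reasonable conversion
 * accuracy over a ten minute range.
 */
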
/**
 * clockevents_config_and_register - Configure and register a clock event device
 * @dev:        device to register
 * @freq:       The clock frequency
 * @min_delta:  The minimum clock ticks to program in oneshot mode
 * @max_delta:  The maximum clock ticks to program in oneshot mode
 *
 * min/max_delta can be 0 for devices which do not support oneshot mode.
 */
void clockevents_config_and_register(struct clock_event_device *dev,
                                     u32 freq, unsigned long min_delta,
                                     unsigned long max_delta)
{
        dev->min_delta_ticks = min_delta;
        dev->max_delta_ticks = max_delta;
        clockevents_config(dev, freq);
        clockevents_register_device(dev);
}

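/*
 * Example call (invented values): a 32 bit down-counter clocked at
 * 24 MHz that cannot program deltas below 15 ticks would use
 *
 *      clockevents_config_and_register(dev, 24000000, 15, 0xffffffff);
 */
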
/**
 * clockevents_update_freq - Update frequency and reprogram a clock event device.
 * @dev:        device to modify
 * @freq:       new device frequency
 *
 * Reconfigure and reprogram a clock event device in oneshot
 * mode. Must be called on the cpu for which the device delivers per
 * cpu timer events with interrupts disabled!
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
        clockevents_config(dev, freq);

        if (dev->mode != CLOCK_EVT_MODE_ONESHOT)
                return 0;

        return clockevents_program_event(dev, dev->next_event, false);
}

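/*
 * Example caller (sketch): a timer that ticks at the CPU frequency
 * would call, from its cpufreq transition notifier on the owning CPU
 * with interrupts disabled,
 *
 *      clockevents_update_freq(dev, new_rate_hz);
 *
 * so that mult/shift are recomputed and the pending event is
 * reprogrammed at the new rate.
 */
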
/*
 * Noop handler when we shut down an event device
 */
void clockevents_handle_noop(struct clock_event_device *dev)
{
}

/**
 * clockevents_exchange_device - release and request clock devices
 * @old:        device to release (can be NULL)
 * @new:        device to request (can be NULL)
 *
 * Called from the notifier chain. clockevents_lock is held already
 */
void clockevents_exchange_device(struct clock_event_device *old,
                                 struct clock_event_device *new)
{
        unsigned long flags;

        local_irq_save(flags);
        /*
         * Caller releases a clock event device. We queue it into the
         * released list and do a notify add later.
         */
        if (old) {
                clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
                list_del(&old->list);
                list_add(&old->list, &clockevents_released);
        }

        if (new) {
                BUG_ON(new->mode != CLOCK_EVT_MODE_UNUSED);
                clockevents_shutdown(new);
        }
        local_irq_restore(flags);
}

/**
 * clockevents_suspend - suspend clock devices
 */
void clockevents_suspend(void)
{
        struct clock_event_device *dev;

        list_for_each_entry_reverse(dev, &clockevent_devices, list)
                if (dev->suspend)
                        dev->suspend(dev);
}

/**
 * clockevents_resume - resume clock devices
 */
void clockevents_resume(void)
{
        struct clock_event_device *dev;

        list_for_each_entry(dev, &clockevent_devices, list)
                if (dev->resume)
                        dev->resume(dev);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS

/**
 * clockevents_notify - notification about relevant events
 */
void clockevents_notify(unsigned long reason, void *arg)
{
        struct clock_event_device *dev, *tmp;
        unsigned long flags;
        int cpu;

        raw_spin_lock_irqsave(&clockevents_lock, flags);
        clockevents_do_notify(reason, arg);

        switch (reason) {
        case CLOCK_EVT_NOTIFY_CPU_DEAD:
                /*
                 * Unregister the clock event devices which were
                 * released from the users in the notify chain.
                 */
                list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
                        list_del(&dev->list);
                /*
                 * Now check whether the CPU has left unused per cpu devices
                 */
                cpu = *((int *)arg);
                list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
                        if (cpumask_test_cpu(cpu, dev->cpumask) &&
                            cpumask_weight(dev->cpumask) == 1 &&
                            !tick_is_broadcast_device(dev)) {
                                BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
                                list_del(&dev->list);
                        }
                }
                break;
        default:
                break;
        }
        raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_notify);
#endif
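
/*
 * Example caller (sketch): CPU hotplug code tells the clockevents
 * layer that a CPU died so its per-cpu device can be unlinked:
 *
 *      int cpu = ...;
 *      clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &cpu);
 */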