#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"
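/*
 * Tick devices
 */
DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
/*
 * Tick next event: keeps track of the tick time
 */
ktime_t tick_next_period;
ktime_t tick_period;
int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
static DEFINE_RAW_SPINLOCK(tick_device_lock);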
/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_device(int cpu)
{
	return &per_cpu(tick_cpu_device, cpu);
}
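/**
 * tick_is_oneshot_available - check for a oneshot capable event device
 */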
int tick_is_oneshot_available(void)
{
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);

	if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT))
		return 0;
	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 1;
	return tick_broadcast_oneshot_available();
}
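/*
 * Periodic tick
 */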
static void tick_periodic(int cpu)
{
	if (tick_do_timer_cpu == cpu) {
		write_seqlock(&xtime_lock);

		/* Keep track of the next tick event */
		tick_next_period = ktime_add(tick_next_period, tick_period);

		do_timer(1);
		write_sequnlock(&xtime_lock);
	}

	update_process_times(user_mode(get_irq_regs()));
	profile_tick(CPU_PROFILING);
}
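/*
 * Event handler for periodic ticks
 */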
void tick_handle_periodic(struct clock_event_device *dev)
{
	int cpu = smp_processor_id();
	ktime_t next;

	tick_periodic(cpu);

	if (dev->mode != CLOCK_EVT_MODE_ONESHOT)
		return;
	/*
	 * Setup the next period for devices, which do not have
	 * periodic mode:
	 */
	next = ktime_add(dev->next_event, tick_period);
	for (;;) {
		if (!clockevents_program_event(dev, next, ktime_get()))
			return;
		/*
		 * Only call tick_periodic() again when a real hardware
		 * clocksource is in use; otherwise incrementing jiffies
		 * here would advance time itself and trap us in this loop.
		 */
		if (timekeeping_valid_for_hres())
			tick_periodic(cpu);
		next = ktime_add(next, tick_period);
	}
}
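/*
 * Setup the device for a periodic tick
 */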
void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
{
	tick_set_periodic_handler(dev, broadcast);

	/* Broadcast setup ? */
	if (!tick_device_is_functional(dev))
		return;

	if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
	    !tick_broadcast_oneshot_active()) {
		clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC);
	} else {
		unsigned long seq;
		ktime_t next;

		do {
			seq = read_seqbegin(&xtime_lock);
			next = tick_next_period;
		} while (read_seqretry(&xtime_lock, seq));

		clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);

		for (;;) {
			if (!clockevents_program_event(dev, next, ktime_get()))
				return;
			next = ktime_add(next, tick_period);
		}
	}
}
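/*
 * Setup the tick device
 */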
static void tick_setup_device(struct tick_device *td,
			      struct clock_event_device *newdev, int cpu,
			      const struct cpumask *cpumask)
{
	ktime_t next_event;
	void (*handler)(struct clock_event_device *) = NULL;

	/* First device setup ? */
	if (!td->evtdev) {
		/* If no cpu took the do_timer update, assign it to this cpu: */
		if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
			tick_do_timer_cpu = cpu;
			tick_next_period = ktime_get();
			tick_period = ktime_set(0, NSEC_PER_SEC / HZ);
		}

		/* Startup in periodic mode first. */
		td->mode = TICKDEV_MODE_PERIODIC;
	} else {
		handler = td->evtdev->event_handler;
		next_event = td->evtdev->next_event;
		td->evtdev->event_handler = clockevents_handle_noop;
	}

	td->evtdev = newdev;

	/*
	 * When the device is not per cpu, pin the interrupt to the
	 * current cpu:
	 */
	if (!cpumask_equal(newdev->cpumask, cpumask))
		irq_set_affinity(newdev->irq, cpumask);

	/*
	 * When global broadcasting is active, check if the current
	 * device is registered as a placeholder for broadcast mode:
	 */
	if (tick_device_uses_broadcast(newdev, cpu))
		return;

	if (td->mode == TICKDEV_MODE_PERIODIC)
		tick_setup_periodic(newdev, 0);
	else
		tick_setup_oneshot(newdev, handler, next_event);
}
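/*
 * Check, if the new registered device should be used.
 */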
static int tick_check_new_device(struct clock_event_device *newdev)
{
	struct clock_event_device *curdev;
	struct tick_device *td;
	int cpu, ret = NOTIFY_OK;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_device_lock, flags);

	cpu = smp_processor_id();
	if (!cpumask_test_cpu(cpu, newdev->cpumask))
		goto out_bc;

	td = &per_cpu(tick_cpu_device, cpu);
	curdev = td->evtdev;

	/* cpu local device ? */
	if (!cpumask_equal(newdev->cpumask, cpumask_of(cpu))) {
		/* If the interrupt affinity can not be set, ignore it: */
		if (!irq_can_set_affinity(newdev->irq))
			goto out_bc;

		/* Do not replace a cpu local device by a non cpu local one: */
		if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))
			goto out_bc;
	}

	/*
	 * If we have an active device, then check the rating and the
	 * oneshot feature.
	 */
	if (curdev) {
		/* Prefer one shot capable devices ! */
		if ((curdev->features & CLOCK_EVT_FEAT_ONESHOT) &&
		    !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
			goto out_bc;
		/* Check the rating */
		if (curdev->rating >= newdev->rating)
			goto out_bc;
	}

	/*
	 * Replace the eventually existing device. If the current device
	 * is the broadcast device, do not give it back to the clockevents
	 * layer!
	 */
	if (tick_is_broadcast_device(curdev)) {
		clockevents_shutdown(curdev);
		curdev = NULL;
	}
	clockevents_exchange_device(curdev, newdev);
	tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_oneshot_notify();

	raw_spin_unlock_irqrestore(&tick_device_lock, flags);
	return NOTIFY_STOP;

out_bc:
	/* Can the new device be used as a broadcast device ? */
	if (tick_check_broadcast_device(newdev))
		ret = NOTIFY_STOP;

	raw_spin_unlock_irqrestore(&tick_device_lock, flags);
	return ret;
}
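/*
 * Transfer the do_timer job away from a dying cpu.
 *
 * Called with interrupts disabled.
 */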
static void tick_handover_do_timer(int *cpup)
{
	if (*cpup == tick_do_timer_cpu) {
		int cpu = cpumask_first(cpu_online_mask);

		tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu :
			TICK_DO_TIMER_NONE;
	}
}
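/*
 * Shutdown an event device on a given cpu:
 *
 * This is called on a life CPU, when a CPU is dead. So the only
 * valid entry point is CLOCK_EVT_NOTIFY_CPU_DEAD.
 */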
static void tick_shutdown(unsigned int *cpup)
{
	struct tick_device *td = &per_cpu(tick_cpu_device, *cpup);
	struct clock_event_device *dev = td->evtdev;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_device_lock, flags);
	td->mode = TICKDEV_MODE_PERIODIC;
	if (dev) {
		/*
		 * Prevent that the clock events layer tries to call
		 * the set mode function!
		 */
		dev->mode = CLOCK_EVT_MODE_UNUSED;
		clockevents_exchange_device(dev, NULL);
		td->evtdev = NULL;
	}
	raw_spin_unlock_irqrestore(&tick_device_lock, flags);
}
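/*
 * Shut the per-cpu tick device down across suspend; the broadcast
 * device is suspended separately by the notifier below.
 */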
static void tick_suspend(void)
{
	struct tick_device *td = &__get_cpu_var(tick_cpu_device);
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_device_lock, flags);
	clockevents_shutdown(td->evtdev);
	raw_spin_unlock_irqrestore(&tick_device_lock, flags);
}
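/*
 * Resume the per-cpu tick device. If the broadcast device did not
 * take over the resume, restart the tick in its previous mode.
 */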
static void tick_resume(void)
{
	struct tick_device *td = &__get_cpu_var(tick_cpu_device);
	unsigned long flags;
	int broadcast = tick_resume_broadcast();

	raw_spin_lock_irqsave(&tick_device_lock, flags);
	clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME);

	if (!broadcast) {
		if (td->mode == TICKDEV_MODE_PERIODIC)
			tick_setup_periodic(td->evtdev, 0);
		else
			tick_resume_oneshot();
	}
	raw_spin_unlock_irqrestore(&tick_device_lock, flags);
}
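/*
 * Notification about clock event devices
 */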
static int tick_notify(struct notifier_block *nb, unsigned long reason,
		       void *dev)
{
	switch (reason) {

	case CLOCK_EVT_NOTIFY_ADD:
		return tick_check_new_device(dev);

	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
		tick_broadcast_on_off(reason, dev);
		break;

	case CLOCK_EVT_NOTIFY_BROADCAST_ENTER:
	case CLOCK_EVT_NOTIFY_BROADCAST_EXIT:
		tick_broadcast_oneshot_control(reason);
		break;

	case CLOCK_EVT_NOTIFY_CPU_DYING:
		tick_handover_do_timer(dev);
		break;

	case CLOCK_EVT_NOTIFY_CPU_DEAD:
		tick_shutdown_broadcast_oneshot(dev);
		tick_shutdown_broadcast(dev);
		tick_shutdown(dev);
		break;

	case CLOCK_EVT_NOTIFY_SUSPEND:
		tick_suspend();
		tick_suspend_broadcast();
		break;

	case CLOCK_EVT_NOTIFY_RESUME:
		tick_resume();
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block tick_notifier = {
	.notifier_call = tick_notify,
};