#include <linux/hrtimer.h>
#include <linux/sched.h>

#include "tick-internal.h"
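/*
 * Broadcast support: one global clock event device emulates the local
 * tick for CPUs whose per-CPU timer stops in deep power states
 * (CLOCK_EVT_FEAT_C3STOP), e.g. the x86 local APIC timer in C3.
 */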
static struct tick_device tick_broadcast_device;
static int tick_broadcast_force;
#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_clear_oneshot(int cpu);
#else
static inline void tick_broadcast_clear_oneshot(int cpu) { }
#endif
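/*
 * Debug accessor for the broadcast device (used by timer_list.c).
 */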
struct tick_device *tick_get_broadcast_device(void)
{
        return &tick_broadcast_device;
}
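/*
 * Start the broadcast device in periodic mode.
 */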
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
        if (bc)
                tick_setup_periodic(bc, 1);
}
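/*
 * From tick_check_broadcast_device(): a newly registered clock event
 * device only takes over as broadcast device if it rates higher than
 * the current one and does not itself stop in deep power states.
 */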
        if ((tick_broadcast_device.evtdev &&
             tick_broadcast_device.evtdev->rating >= dev->rating) ||
            (dev->features & CLOCK_EVT_FEAT_C3STOP))
                return 0;

        tick_broadcast_device.evtdev = dev;
        tick_broadcast_start_periodic(dev);
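/*
 * From tick_is_broadcast_device(): check whether @dev is the current
 * broadcast device.
 */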
        return (dev && tick_broadcast_device.evtdev == dev);
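/*
 * From tick_device_uses_broadcast(): a device registered as
 * non-functional is only a placeholder, so its CPU is served by the
 * broadcast device.  A functional device without the C3STOP misfeature
 * needs no broadcast help, so any stale oneshot bit is cleared.
 */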
        if (!tick_device_is_functional(dev)) {
                tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
        } else if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
                tick_broadcast_clear_oneshot(cpu);
        }
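/*
 * Deliver the tick to all CPUs in @mask: the current CPU runs its own
 * event handler directly, the remaining CPUs are kicked through the
 * broadcast() method of the first device in the mask (typically an IPI).
 */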
static void tick_do_broadcast(struct cpumask *mask)
{
        int cpu = smp_processor_id();
        struct tick_device *td;

        /* Check whether the current CPU is in the mask. */
        if (cpumask_test_cpu(cpu, mask)) {
                cpumask_clear_cpu(cpu, mask);
                td = &per_cpu(tick_cpu_device, cpu);
                td->evtdev->event_handler(td->evtdev);
        }

        if (!cpumask_empty(mask)) {
                /* Kick the remaining CPUs via the first device's broadcast(). */
                td = &per_cpu(tick_cpu_device, cpumask_first(mask));
                td->evtdev->broadcast(mask);
        }
}
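/*
 * Periodic broadcast: deliver the tick to every online CPU that is
 * currently in the broadcast mask.
 */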
static void tick_do_periodic_broadcast(void)
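/*
 * Event handler of the broadcast device in periodic mode.  A device
 * that only supports oneshot is reprogrammed by hand for the next
 * period after every expiry.
 */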
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)

        tick_do_periodic_broadcast();

        /* The device is in periodic mode, no reprogramming necessary. */
        if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
                return;

        /* Otherwise program the next period by hand. */
        for (next = dev->next_event; ;) {

                tick_do_periodic_broadcast();
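/*
 * Power-state notification: move the current CPU into or out of the
 * periodic broadcast mask when it enters or leaves a state in which
 * its local clock event device may stop.
 */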
static void tick_do_broadcast_on_off(unsigned long *reason)

        struct clock_event_device *bc, *dev;
        struct tick_device *td;

        td = &per_cpu(tick_cpu_device, cpu);
        dev = td->evtdev;
        bc = tick_broadcast_device.evtdev;

        /* Is the current CPU's device affected by the power state at all? */
        if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))

        if (!tick_device_is_functional(dev))

        case CLOCK_EVT_NOTIFY_BROADCAST_ON:
        case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:

                if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)

                if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
                        tick_broadcast_force = 1;
        case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
                if (!tick_broadcast_force &&

                if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)

        } else if (bc_stopped) {
                if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
                        tick_broadcast_start_periodic(bc);
                else
                        tick_broadcast_setup_oneshot(bc);
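/*
 * From tick_broadcast_on_off(): the external entry point refuses to
 * act on an offline CPU and otherwise hands the request to
 * tick_do_broadcast_on_off().
 */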
276 "offline CPU #%d\n", *oncpu);
278 tick_do_broadcast_on_off(&reason);
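/*
 * From tick_set_periodic_handler(): when broadcasting is in effect,
 * the per-CPU device gets the broadcast-aware periodic handler.
 */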
        dev->event_handler = tick_handle_periodic_broadcast;
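/*
 * From tick_shutdown_broadcast(): remove a dead CPU from periodic
 * broadcasting and shut the broadcast device down once nobody needs it.
 */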
        struct clock_event_device *bc;
        unsigned int cpu = *cpup;

        bc = tick_broadcast_device.evtdev;

        if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
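/*
 * From tick_suspend_broadcast(): quiesce the broadcast device across
 * system suspend.
 */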
        struct clock_event_device *bc;

        bc = tick_broadcast_device.evtdev;
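/*
 * From tick_resume_broadcast(): on resume, restart the broadcast
 * device in whichever mode (periodic or oneshot) the tick layer is in.
 */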
        struct clock_event_device *bc;

        bc = tick_broadcast_device.evtdev;

        switch (tick_broadcast_device.mode) {
        case TICKDEV_MODE_PERIODIC:
                tick_broadcast_start_periodic(bc);
                break;
        case TICKDEV_MODE_ONESHOT:
                broadcast = tick_resume_broadcast_oneshot(bc);
                break;
        }
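/*
 * Oneshot broadcast support, used by NOHZ and high resolution timers.
 */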
#ifdef CONFIG_TICK_ONESHOT
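/*
 * Debug accessor for the oneshot broadcast mask (used by timer_list.c).
 */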
struct cpumask *tick_get_broadcast_oneshot_mask(void)
{
        return to_cpumask(tick_broadcast_oneshot_mask);
}
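/*
 * Program the broadcast device for the next expiry, switching it to
 * oneshot mode first if necessary.
 */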
static int tick_broadcast_set_event(ktime_t expires, int force)

        struct clock_event_device *bc = tick_broadcast_device.evtdev;

        if (bc->mode != CLOCK_EVT_MODE_ONESHOT)
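/*
 * Put the broadcast device back into oneshot mode after resume.
 */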
int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
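/*
 * Called from irq_enter() when idle was interrupted: reactivate the
 * per-CPU device of a CPU that was being served by the broadcast.
 */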
void tick_check_oneshot_broadcast(int cpu)

        struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
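/*
 * Event handler of the broadcast device in oneshot mode: wake every
 * CPU whose next event has expired and reprogram the device for the
 * earliest event still pending.
 */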
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)

        struct tick_device *td;

                td = &per_cpu(tick_cpu_device, cpu);
                if (td->evtdev->next_event.tv64 <= now.tv64)

                else if (td->evtdev->next_event.tv64 < next_event.tv64)
                        next_event.tv64 = td->evtdev->next_event.tv64;

        if (tick_broadcast_set_event(next_event, 0))
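/*
 * Idle enter/exit notification: on BROADCAST_ENTER the CPU is added to
 * the oneshot broadcast mask and the broadcast device is reprogrammed
 * if this CPU's event is earlier; on exit the CPU is removed again.
 */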
void tick_broadcast_oneshot_control(unsigned long reason)

        struct clock_event_device *bc, *dev;
        struct tick_device *td;

        /* Periodic broadcast mode does not care about idle enter/exit. */
        if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)

        td = &per_cpu(tick_cpu_device, cpu);
        dev = td->evtdev;

        if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))

        bc = tick_broadcast_device.evtdev;

        if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
                cpumask_set_cpu(cpu, tick_get_broadcast_oneshot_mask());
                if (dev->next_event.tv64 < bc->next_event.tv64)
                        tick_broadcast_set_event(dev->next_event, 1);
        } else {
                cpumask_clear_cpu(cpu,
                                  tick_get_broadcast_oneshot_mask());
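/*
 * Reset the oneshot broadcast bit for a CPU; called with the broadcast
 * lock held.
 */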
static void tick_broadcast_clear_oneshot(int cpu)
{
        cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask());
}
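/*
 * Seed the next_event of every device in @mask so that CPUs which were
 * still in periodic broadcast mode get a sensible first oneshot expiry.
 */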
static void tick_broadcast_init_next_event(struct cpumask *mask,
                                           ktime_t expires)

        struct tick_device *td;

                td = &per_cpu(tick_cpu_device, cpu);
                td->evtdev->next_event = expires;
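/*
 * Switch the broadcast device itself to oneshot mode.  CPUs that were
 * waiting for the periodic broadcast are carried over into the oneshot
 * mask and the device is programmed for their next tick.
 */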
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)

        /* Set it up only once! */
        if (bc->event_handler != tick_handle_oneshot_broadcast) {
                int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;

                bc->event_handler = tick_handle_oneshot_broadcast;

                /* This CPU takes over the do_timer duty. */
                tick_do_timer_cpu = cpu;

                cpumask_or(tick_get_broadcast_oneshot_mask(),
                           tick_get_broadcast_oneshot_mask(),
                           to_cpumask(tmpmask));

                if (was_periodic && !cpumask_empty(to_cpumask(tmpmask))) {
                        tick_broadcast_init_next_event(to_cpumask(tmpmask),

        } else {
                tick_broadcast_clear_oneshot(cpu);
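/*
 * Select oneshot operating mode for the broadcast device.
 */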
void tick_broadcast_switch_to_oneshot(void)

        struct clock_event_device *bc;

        tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
        bc = tick_broadcast_device.evtdev;
        if (bc)
                tick_broadcast_setup_oneshot(bc);
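/*
 * Remove a dead CPU from oneshot broadcasting: only its mask bit is
 * cleared, the broadcast device keeps running for the remaining CPUs.
 */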
void tick_shutdown_broadcast_oneshot(unsigned int *cpup)

        unsigned int cpu = *cpup;

        cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask());
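/*
 * Check whether the broadcast device is currently in oneshot mode.
 */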
int tick_broadcast_oneshot_active(void)
{
        return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}
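/*
 * Check whether the broadcast device can operate in oneshot mode at
 * all, i.e. whether NOHZ / high resolution mode may be enabled.
 */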
bool tick_broadcast_oneshot_available(void)
{
        struct clock_event_device *bc = tick_broadcast_device.evtdev;

        return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
}

#endif /* CONFIG_TICK_ONESHOT */