22 #include <linux/slab.h>
24 #include <asm/uv/uv_mmrs.h>
25 #include <asm/uv/uv_hub.h>
27 #include <asm/uv/uv.h>
31 #define RTC_NAME "sgi_rtc"
/*
 * Forward declarations for the two clock_event_device callbacks
 * installed in the clock_event_device_uv initializer below
 * (.set_next_event and .set_mode).
 */
34 static int uv_rtc_next_event(
unsigned long,
struct clock_event_device *);
35 static void uv_rtc_timer_setup(
enum clock_event_mode,
36 static struct clock_event_device *);
/*
 * Template clockevent device for the UV RTC.  One-shot only; the
 * per-cpu copies are made elsewhere (see the "*ced = clock_event_device_uv"
 * assignment later in this file).  .event_handler starts out NULL and is
 * filled in by the clockevents core at registration time.
 * NOTE(review): this listing is a partial extraction — fields such as
 * .name/.shift/.rating and the closing brace are not visible here.
 */
46 static struct clock_event_device clock_event_device_uv = {
48 .features = CLOCK_EVT_FEAT_ONESHOT,
52 .set_next_event = uv_rtc_next_event,
53 .set_mode = uv_rtc_timer_setup,
54 .event_handler =
NULL,
/*
 * Set to 1 by the "uvrtcevt" boot parameter (see uv_enable_evt_rtc()
 * and the __setup() hook below); presumably gates registration of the
 * RTC clockevent devices — TODO confirm against the hidden setup path.
 */
77 static int uv_rtc_evt_enable;
/*
 * Send an IPI to @cpu.  Only one body line is visible in this
 * extraction: the target pnode is derived from the cpu's APIC id.
 * NOTE(review): the apicid lookup and the actual MMR write that raises
 * the IPI are missing from this listing — confirm against the full file.
 */
84 static void uv_rtc_send_IPI(
int cpu)
90 pnode = uv_apicid_to_pnode(apicid);
/*
 * Return whether an RTC interrupt is pending on @pnode (used negated
 * by the caller below: "return !uv_intr_pending(pnode);").
 * NOTE(review): the body (the MMR read) is not visible in this listing.
 */
100 static int uv_intr_pending(
int pnode)
/*
 * NOTE(review): fragment of a function whose signature is not visible
 * in this extraction (original lines 115-139).  From what is visible:
 * it maps @cpu to its pnode, and at the end checks whether the RTC has
 * already passed @expires — if so, it reports success only when no
 * interrupt is left pending on the pnode.  Verify against the full file.
 */
115 int pnode = uv_cpu_to_pnode(cpu);
136 if (uv_read_rtc(
NULL) <= expires)
139 return !uv_intr_pending(pnode);
/*
 * Free the per-blade timer bookkeeping: walks every possible blade and
 * kfree()s its blade_info[] entry.  kfree(NULL) is a no-op, so entries
 * that were never allocated are safe.  Called on the allocation-failure
 * path of uv_rtc_allocate_timers() and on the failure path of
 * uv_rtc_setup_clock().
 */
146 static __init void uv_rtc_deallocate_timers(
void)
150 for_each_possible_blade(bid) {
151 kfree(blade_info[bid]);
/*
 * Allocate per-blade timer tracking structures.  Visible pieces:
 *  - each possible cpu is mapped to its blade id;
 *  - the allocation is sized by uv_blade_nr_possible_cpus(bid) (the
 *    per-cpu array is a flexible tail of the head structure, presumably
 *    — the kmalloc itself is not visible here);
 *  - on allocation failure everything allocated so far is torn down via
 *    uv_rtc_deallocate_timers();
 *  - head->ncpus and each head->cpu[bcpu].lcpu are recorded;
 *  - a second pass scans head->cpu[0..ncpus) and programs the hardware
 *    for the cpu holding the lowest expiry via uv_setup_intr(c, lowest).
 * NOTE(review): large parts of the body (lines 158-210 of the original)
 * are missing from this extraction — confirm against the full file.
 */
157 static __init int uv_rtc_allocate_timers(
void)
168 int bid = uv_cpu_to_blade_id(cpu);
174 (uv_blade_nr_possible_cpus(bid) *
178 uv_rtc_deallocate_timers();
182 head->
ncpus = uv_blade_nr_possible_cpus(bid);
187 head->
cpu[bcpu].lcpu =
cpu;
201 for (c = 0; c < head->
ncpus; c++) {
210 c = head->
cpu[bcpu].lcpu;
211 if (uv_setup_intr(c, lowest))
/*
 * Arm the RTC timer for @cpu to fire at @expires.
 * Visible logic: record @expires in the blade-local slot
 * (head->cpu[bcpu].expires); if this blade has no armed cpu yet
 * (next_cpu < 0), or this cpu is already the armed one, or the new
 * expiry is earlier than the currently armed one, reprogram the
 * hardware via uv_setup_intr().  If that fails (timer already expired,
 * presumably), rescan for the next timer with uv_rtc_find_next_timer().
 * Both exit paths drop head->lock with spin_unlock_irqrestore(); the
 * matching lock acquisition is not visible in this extraction.
 * NOTE(review): return values on the two paths are missing here —
 * confirm against the full file.
 */
225 static int uv_rtc_set_timer(
int cpu,
u64 expires)
227 int pnode = uv_cpu_to_pnode(cpu);
228 int bid = uv_cpu_to_blade_id(cpu);
231 u64 *
t = &head->
cpu[bcpu].expires;
241 if (next_cpu < 0 || bcpu == next_cpu ||
242 expires < head->cpu[next_cpu].expires) {
244 if (uv_setup_intr(cpu, expires)) {
246 uv_rtc_find_next_timer(head, pnode);
247 spin_unlock_irqrestore(&head->
lock, flags);
252 spin_unlock_irqrestore(&head->
lock, flags);
/*
 * Disarm @cpu's RTC timer.  Visible logic: if this cpu is the blade's
 * armed cpu (head->next_cpu == bcpu) and its expiry has already passed
 * (uv_read_rtc(NULL) >= *t), or @force is set, the timer is cleared and
 * uv_rtc_find_next_timer() picks the blade's next candidate.  Exits by
 * dropping head->lock; the lock acquisition and the return value are
 * not visible in this extraction.
 * NOTE(review): callers treat a return of 1 specially (see
 * uv_rtc_interrupt) — confirm the return convention against the full file.
 */
261 static int uv_rtc_unset_timer(
int cpu,
int force)
263 int pnode = uv_cpu_to_pnode(cpu);
264 int bid = uv_cpu_to_blade_id(cpu);
267 u64 *t = &head->
cpu[bcpu].expires;
273 if ((head->
next_cpu == bcpu && uv_read_rtc(
NULL) >= *t) || force)
280 uv_rtc_find_next_timer(head, pnode);
283 spin_unlock_irqrestore(&head->
lock, flags);
/*
 * NOTE(review): lone fragment — the enclosing function (original line
 * ~304, presumably the RTC read path given uv_read_rtc() callers above)
 * is not visible in this extraction.  Hub revision 1 evidently takes a
 * special branch here; confirm what that branch does against the full file.
 */
304 if (uv_get_min_hub_revision_id() == 1)
/*
 * clockevents .set_next_event callback: program the RTC to fire @delta
 * ticks from now for the (single) cpu in this device's cpumask.
 * Returns whatever uv_rtc_set_timer() returns.
 */
315 static int uv_rtc_next_event(
unsigned long delta,
316 struct clock_event_device *ced)
318 int ced_cpu = cpumask_first(ced->cpumask);
320 return uv_rtc_set_timer(ced_cpu, delta + uv_read_rtc(
NULL));
/*
 * clockevents .set_mode callback.  PERIODIC/ONESHOT/RESUME require no
 * action here (visible as empty fall-through cases); UNUSED/SHUTDOWN
 * force-disarm the cpu's timer via uv_rtc_unset_timer(cpu, 1).
 * NOTE(review): the switch header, break statements, and closing braces
 * are missing from this extraction — confirm against the full file.
 */
326 static void uv_rtc_timer_setup(
enum clock_event_mode
mode,
327 struct clock_event_device *
evt)
329 int ced_cpu = cpumask_first(evt->cpumask);
332 case CLOCK_EVT_MODE_PERIODIC:
333 case CLOCK_EVT_MODE_ONESHOT:
334 case CLOCK_EVT_MODE_RESUME:
337 case CLOCK_EVT_MODE_UNUSED:
338 case CLOCK_EVT_MODE_SHUTDOWN:
339 uv_rtc_unset_timer(ced_cpu, 1);
/*
 * RTC interrupt handler.  Looks up this cpu's per-cpu clockevent device;
 * bails out if it has no event handler installed yet.  Disarms the timer
 * (non-forced) and — only when uv_rtc_unset_timer() reports 1, i.e. the
 * timer had genuinely fired, presumably — invokes the event handler.
 * NOTE(review): the cpu variable's initialization and the early-return
 * bodies are not visible in this extraction.
 */
344 static void uv_rtc_interrupt(
void)
347 struct clock_event_device *ced = &
per_cpu(cpu_ced, cpu);
349 if (!ced || !ced->event_handler)
352 if (uv_rtc_unset_timer(cpu, 0) != 1)
355 ced->event_handler(ced);
/*
 * Boot-parameter handler: "uvrtcevt" on the kernel command line sets
 * uv_rtc_evt_enable, opting in to the UV RTC clockevent devices.
 * NOTE(review): the return statement is not visible in this extraction;
 * __setup handlers conventionally return 1 on success — confirm.
 */
358 static int __init uv_enable_evt_rtc(
char *
str)
360 uv_rtc_evt_enable = 1;
364 __setup(
"uvrtcevt", uv_enable_evt_rtc);
/*
 * NOTE(review): lone fragment — the enclosing function (original line
 * ~370, presumably the per-cpu clockevent registration path) is not
 * visible in this extraction.  It seeds the per-cpu device from the
 * clock_event_device_uv template defined above.
 */
370 *ced = clock_event_device_uv;
/*
 * Module init: set up the UV RTC as a clocksource/clockevent.  Visible
 * pieces: allocates the per-blade timer structures; derives the
 * clockevent's max_delta_ns from the clocksource mask; and tears the
 * timers back down via uv_rtc_deallocate_timers() on a failure path.
 * NOTE(review): the clocksource registration, the rc checks between
 * these lines, and the return statements are missing from this
 * extraction — confirm against the full file.
 */
375 static __init int uv_rtc_setup_clock(
void)
393 rc = uv_rtc_allocate_timers();
405 clock_event_device_uv.max_delta_ns = clocksource_uv.
mask *
411 uv_rtc_deallocate_timers();