11 #include <linux/kernel.h>
13 #include <linux/sched.h>
/* NOTE(review): this file is a corrupted/partial extraction of the ARM
 * sched_clock implementation — original line numbers are embedded in the
 * text ("29", "30", ...) and many lines are missing.  Comments below are
 * hedged accordingly. */
/* Forward declaration: timer callback that refreshes the sched_clock epoch
 * (defined further down) so it can be referenced by DEFINE_TIMER here. */
29 static void sched_clock_poll(
unsigned long wrap_ticks);
/* Periodic timer used to call sched_clock_poll() before the underlying
 * counter wraps; the re-arm interval is presumably set where the clock
 * source is registered — that code is in the missing lines. */
30 static DEFINE_TIMER(sched_clock_timer, sched_clock_poll, 0, 0);
/* IRQ-time accounting tri-state: >0 force on, 0 force off, -1 = auto
 * (enabled later only when rate >= 1000000 — see the check at original
 * line 158 below). */
31 static int irqtime = -1;
/* NOTE(review): fragment — the body of jiffy_sched_clock_read() is missing
 * from this file.  Presumably the jiffies-based fallback counter used until
 * a platform clock is registered — confirm against the upstream source. */
41 static u32 notrace jiffy_sched_clock_read(
void)
/* NOTE(review): this return belongs to a DIFFERENT truncated function whose
 * signature is missing (likely cyc_to_ns): it scales a cycle count to
 * nanoseconds as (cyc * mult) >> shift. */
50 return (cyc * mult) >>
shift;
/* Convert a raw counter sample to a sched_clock value in nanoseconds:
 * epoch_ns plus the ns-scaled delta since the epoch.  The delta
 * (cyc - epoch_cyc) & mask relies on unsigned wraparound so it stays
 * correct across one counter wrap within the mask width.
 * NOTE(review): fragment — the function body between the signature and
 * this return (epoch_cyc/epoch_ns loads, presumably a seqlock-style
 * retry) is missing from this file. */
53 static unsigned long long cyc_to_sched_clock(
u32 cyc,
u32 mask)
75 return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, cd.
mult, cd.
shift);
/* Advance the stored epoch: sample the current counter via
 * read_sched_clock() and fold the elapsed cycles since cd.epoch_cyc into
 * the ns epoch.  notrace: must not be instrumented, since sched_clock()
 * itself depends on this path.
 * NOTE(review): fragment — local declarations, the epoch store, and any
 * locking around the update are in lines missing from this file. */
81 static void notrace update_sched_clock(
void)
87 cyc = read_sched_clock();
89 cyc_to_ns((cyc - cd.
epoch_cyc) & sched_clock_mask,
/* Timer callback (installed by the DEFINE_TIMER at original line 30):
 * refreshes the epoch so the counter never wraps unobserved.
 * NOTE(review): fragment — the expected mod_timer() re-arm using
 * wrap_ticks is in lines missing from this file; confirm upstream. */
104 static void sched_clock_poll(
unsigned long wrap_ticks)
107 update_sched_clock();
/* NOTE(review): interior of the clock-source registration function (its
 * signature is missing from this file; presumably setup_sched_clock(read,
 * bits, rate)).  It installs the platform read callback, computes the mask,
 * wrap time and resolution, logs them, primes the epoch, and optionally
 * enables IRQ-time accounting. */
/* Guard against double registration: only the jiffy fallback may be
 * replaced. */
118 WARN_ON(read_sched_clock != jiffy_sched_clock_read);
119 read_sched_clock =
read;
/* NOTE(review): (1 << bits) is undefined behavior when bits >= 32 (shift
 * count >= width of int) — upstream later switched to a 64-bit form such
 * as CLOCKSOURCE_MASK(bits).  Cannot be fixed here: the surrounding lines
 * are missing.  Flagging for when the file is restored. */
120 sched_clock_mask = (1 <<
bits) - 1;
129 }
/* Scale the reported rate into a human-readable unit for the pr_info
 * below (r_unit); the kHz/MHz branches around this are partly missing. */
else if (r >= 1000) {
/* Nanoseconds until the counter wraps at its widest count ... */
136 wrap = cyc_to_ns((1ULL << bits) - 1, cd.
mult, cd.
shift);
/* ... and the resolution of a single counter tick. */
141 res = cyc_to_ns(1ULL, cd.
mult, cd.
shift);
142 pr_info(
"sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lums\n",
143 bits, r, r_unit, res, w);
/* Prime the epoch immediately so the first sched_clock() call is sane. */
150 update_sched_clock();
/* Auto-enable IRQ-time accounting only for clocks of at least 1 MHz
 * (irqtime == -1), or when explicitly forced on (irqtime > 0). */
158 if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
159 enable_sched_clock_irqtime();
161 pr_debug(
"Registered %pF as sched_clock source\n",
read);
/* NOTE(review): body fragment of (presumably) sched_clock() — the function
 * signature is missing from this file.  Samples the counter and converts
 * it to nanoseconds relative to the current epoch. */
166 u32 cyc = read_sched_clock();
167 return cyc_to_sched_clock(cyc, sched_clock_mask);
/* NOTE(review): fragment of (presumably) sched_clock_postinit() — the
 * signature and the body of the if (the fallback registration of the
 * jiffy-based clock) are missing.  The poll call primes the epoch and
 * starts the wrap timer. */
176 if (read_sched_clock == jiffy_sched_clock_read)
179 sched_clock_poll(sched_clock_timer.data);
/* syscore suspend hook: refresh the epoch one last time before the counter
 * may stop across suspend.
 * NOTE(review): fragment — the return statement (expected: return 0;) is
 * in lines missing from this file. */
182 static int sched_clock_suspend(
void)
184 sched_clock_poll(sched_clock_timer.data);
/* syscore resume hook — NOTE(review): fragment, body missing (presumably
 * resets the epoch after the counter restarts). */
189 static void sched_clock_resume(
void)
/* NOTE(review): designated-initializer fields of a struct syscore_ops
 * whose declaration line is missing from this file. */
197 .suspend = sched_clock_suspend,
198 .resume = sched_clock_resume,
/* Init-time registration of the syscore ops above — NOTE(review): fragment,
 * body (expected: register_syscore_ops(...); return 0;) is missing. */
201 static int __init sched_clock_syscore_init(
void)