sched_clock.c (Linux kernel 3.7.1)
/*
 * sched_clock.c: support for extending counters to full 64-bit ns counter
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/timer.h>

#include <asm/sched_clock.h>

struct clock_data {
	u64 epoch_ns;
	u32 epoch_cyc;
	u32 epoch_cyc_copy;
	u32 mult;
	u32 shift;
	bool suspended;
};
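/*
 * epoch_cyc/epoch_ns anchor a raw counter reading to the 64-bit
 * nanosecond time it corresponds to; sched_clock() then returns
 * epoch_ns plus the scaled delta from epoch_cyc. epoch_cyc_copy only
 * exists so that lockless readers can detect a concurrent update of
 * the pair (see cyc_to_sched_clock()). mult/shift hold the fixed-point
 * cycles-to-ns conversion factor.
 */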

static void sched_clock_poll(unsigned long wrap_ticks);
static DEFINE_TIMER(sched_clock_timer, sched_clock_poll, 0, 0);
static int irqtime = -1;

core_param(irqtime, irqtime, int, 0400);

static struct clock_data cd = {
	.mult	= NSEC_PER_SEC / HZ,
};
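/*
 * cd's default mult of NSEC_PER_SEC / HZ (with shift left at 0)
 * matches the jiffy-based fallback below: each elapsed jiffy advances
 * sched_clock() by exactly one tick period in nanoseconds.
 */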

static u32 __read_mostly sched_clock_mask = 0xffffffff;

static u32 notrace jiffy_sched_clock_read(void)
{
	return (u32)(jiffies - INITIAL_JIFFIES);
}

static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;

static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
	return (cyc * mult) >> shift;
}
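/*
 * Worked example (illustrative numbers, not taken from this file): for
 * a 32768Hz counter, mult = 1000000000 and shift = 15 convert exactly,
 * because (cyc * 1000000000) >> 15 == cyc * NSEC_PER_SEC / 32768.
 * One tick is then (1 * 1000000000) >> 15 = 30517ns, and 32768 ticks
 * are exactly 1000000000ns, i.e. one second.
 */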

static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask)
{
	u64 epoch_ns;
	u32 epoch_cyc;

	if (cd.suspended)
		return cd.epoch_ns;

	/*
	 * Load the epoch_cyc and epoch_ns atomically. We do this by
	 * ensuring that we always write epoch_cyc, epoch_ns and
	 * epoch_cyc_copy in strict order, and read them in strict order.
	 * If epoch_cyc and epoch_cyc_copy are not equal, then we're in
	 * the middle of an update, and we should repeat the load.
	 */
	do {
		epoch_cyc = cd.epoch_cyc;
		smp_rmb();
		epoch_ns = cd.epoch_ns;
		smp_rmb();
	} while (epoch_cyc != cd.epoch_cyc_copy);

	return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, cd.mult, cd.shift);
}
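/*
 * The retry loop above, paired with the ordered stores in
 * update_sched_clock(), is an open-coded sequence lock: the writer
 * stores epoch_cyc, epoch_ns and epoch_cyc_copy in that order with
 * write barriers between them, so a reader that observes
 * epoch_cyc == epoch_cyc_copy is guaranteed a consistent pair.
 */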

/*
 * Atomically update the sched_clock epoch.
 */
static void notrace update_sched_clock(void)
{
	unsigned long flags;
	u32 cyc;
	u64 ns;

	cyc = read_sched_clock();
	ns = cd.epoch_ns +
		cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
			  cd.mult, cd.shift);
	/*
	 * Write epoch_cyc and epoch_ns in a way that the update is
	 * detectable in cyc_to_sched_clock().
	 */
	raw_local_irq_save(flags);
	cd.epoch_cyc = cyc;
	smp_wmb();
	cd.epoch_ns = ns;
	smp_wmb();
	cd.epoch_cyc_copy = cyc;
	raw_local_irq_restore(flags);
}

static void sched_clock_poll(unsigned long wrap_ticks)
{
	mod_timer(&sched_clock_timer, round_jiffies(jiffies + wrap_ticks));
	update_sched_clock();
}
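/*
 * The wrap_ticks interval is chosen in setup_sched_clock() as roughly
 * 90% of the counter's wrap time, so update_sched_clock() is
 * guaranteed to run at least once per wrap; if the counter wrapped
 * more than once between updates, (cyc - epoch_cyc) & mask would
 * silently lose whole wrap periods.
 */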

void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
{
	unsigned long r, w;
	u64 res, wrap;
	char r_unit;

	BUG_ON(bits > 32);
	WARN_ON(read_sched_clock != jiffy_sched_clock_read);
	read_sched_clock = read;
	/*
	 * Note: for bits == 32 the shift below is formally undefined in
	 * C; on ARM a variable shift by 32 yields 0, so the mask still
	 * comes out as 0xffffffff as intended.
	 */
	sched_clock_mask = (1 << bits) - 1;

	/* calculate the mult/shift to convert counter ticks to ns. */
	clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 0);

	r = rate;
	if (r >= 4000000) {
		r /= 1000000;
		r_unit = 'M';
	} else if (r >= 1000) {
		r /= 1000;
		r_unit = 'k';
	} else
		r_unit = ' ';

	/* calculate how many ns until we wrap */
	wrap = cyc_to_ns((1ULL << bits) - 1, cd.mult, cd.shift);
	do_div(wrap, NSEC_PER_MSEC);
	w = wrap;

	/* calculate the ns resolution of this counter */
	res = cyc_to_ns(1ULL, cd.mult, cd.shift);
	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lums\n",
		bits, r, r_unit, res, w);

	/*
	 * Set the poll interval (the timer itself is started in
	 * sched_clock_postinit()) and set the initial epoch.
	 */
	sched_clock_timer.data = msecs_to_jiffies(w - (w / 10));
	update_sched_clock();

	/*
	 * Ensure that sched_clock() starts off at 0ns
	 */
	cd.epoch_ns = 0;

	/* Enable IRQ time accounting if we have a fast enough sched_clock */
	if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
		enable_sched_clock_irqtime();

	pr_debug("Registered %pF as sched_clock source\n", read);
}
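/*
 * Usage sketch (illustrative only; the names below are hypothetical,
 * not part of this file): a platform timer driver with a free-running
 * 32-bit up-counter at 24MHz would register it from its early init
 * code, e.g.:
 *
 *	static void __iomem *counter_base;	(mapped by the driver)
 *
 *	static u32 notrace example_counter_read(void)
 *	{
 *		return readl_relaxed(counter_base);
 *	}
 *
 *	setup_sched_clock(example_counter_read, 32, 24000000);
 *
 * The read function must be notrace (sched_clock() is used by the
 * tracer) and must count upwards monotonically for the wrap handling
 * above to work.
 */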

unsigned long long notrace sched_clock(void)
{
	u32 cyc = read_sched_clock();
	return cyc_to_sched_clock(cyc, sched_clock_mask);
}
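/*
 * The notrace annotation matters: the function tracer timestamps
 * events with sched_clock(), so tracing it (or the registered read
 * function) would recurse.
 */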

void __init sched_clock_postinit(void)
{
	/*
	 * If no sched_clock() function has been provided by this point,
	 * make the jiffy-based fallback the final one.
	 */
	if (read_sched_clock == jiffy_sched_clock_read)
		setup_sched_clock(jiffy_sched_clock_read, 32, HZ);

	sched_clock_poll(sched_clock_timer.data);
}

static int sched_clock_suspend(void)
{
	sched_clock_poll(sched_clock_timer.data);
	cd.suspended = true;
	return 0;
}

static void sched_clock_resume(void)
{
	cd.epoch_cyc = read_sched_clock();
	cd.epoch_cyc_copy = cd.epoch_cyc;
	cd.suspended = false;
}
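/*
 * Across suspend the underlying counter may stop or be reset, so
 * suspend takes one final epoch update and then freezes the value
 * returned by sched_clock() at epoch_ns (via cd.suspended), while
 * resume re-anchors epoch_cyc at the current reading. Time spent
 * suspended is therefore not reflected in sched_clock().
 */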

static struct syscore_ops sched_clock_ops = {
	.suspend	= sched_clock_suspend,
	.resume		= sched_clock_resume,
};

static int __init sched_clock_syscore_init(void)
{
	register_syscore_ops(&sched_clock_ops);
	return 0;
}
device_initcall(sched_clock_syscore_init);