processor_idle.c
/*
 * processor_idle - idle state cpuidle driver.
 * Adapted from drivers/idle/intel_idle.c and
 * drivers/acpi/processor_idle.c
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/cpuidle.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

#include <asm/paca.h>
#include <asm/reg.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/runlatch.h>

#include "plpar_wrappers.h"
#include "pseries.h"

static struct cpuidle_driver pseries_idle_driver = {
        .name = "pseries_idle",
        .owner = THIS_MODULE,
};

#define MAX_IDLE_STATE_COUNT    2

static int max_idle_state = MAX_IDLE_STATE_COUNT - 1;
static struct cpuidle_device __percpu *pseries_cpuidle_devices;
static struct cpuidle_state *cpuidle_state_table;

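/*
 * Snapshot the wall-clock time and the PURR (Processor Utilization of
 * Resources Register) on entry to idle, then set the lppaca idle bit
 * so the hypervisor can account this virtual processor as idle.
 */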
static inline void idle_loop_prolog(unsigned long *in_purr, ktime_t *kt_before)
{
        *kt_before = ktime_get_real();
        *in_purr = mfspr(SPRN_PURR);
        /*
         * Indicate to the HV that we are idle. Now would be
         * a good time to find other work to dispatch.
         */
        get_lppaca()->idle = 1;
}

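/*
 * Account the PURR cycles spent idle, clear the lppaca idle bit and
 * return the residency of this idle period in microseconds.
 */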
static inline s64 idle_loop_epilog(unsigned long in_purr, ktime_t kt_before)
{
        get_lppaca()->wait_state_cycles += mfspr(SPRN_PURR) - in_purr;
        get_lppaca()->idle = 0;

        return ktime_to_us(ktime_sub(ktime_get_real(), kt_before));
}

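/*
 * Snooze: poll for need_resched() while spinning at low SMT thread
 * priority, so sibling hardware threads get a larger share of the
 * core while this cpu has nothing to do.
 */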
static int snooze_loop(struct cpuidle_device *dev,
                        struct cpuidle_driver *drv,
                        int index)
{
        unsigned long in_purr;
        ktime_t kt_before;
        int cpu = dev->cpu;

        idle_loop_prolog(&in_purr, &kt_before);
        local_irq_enable();
        set_thread_flag(TIF_POLLING_NRFLAG);

        while ((!need_resched()) && cpu_online(cpu)) {
                ppc64_runlatch_off();
                HMT_low();
                HMT_very_low();
        }

        HMT_medium();
        clear_thread_flag(TIF_POLLING_NRFLAG);
        smp_mb();

        dev->last_residency =
                (int)idle_loop_epilog(in_purr, kt_before);
        return index;
}

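/*
 * Cede the processor to the hypervisor, but only once the lazily
 * (soft) disabled interrupt state has been resolved.
 */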
static void check_and_cede_processor(void)
{
        /*
         * Ensure our interrupt state is properly tracked,
         * also checks if no interrupt has occurred while we
         * were soft-disabled
         */
        if (prep_irq_for_idle()) {
                cede_processor();
#ifdef CONFIG_TRACE_IRQFLAGS
                /* Ensure that H_CEDE returns with IRQs on */
                if (WARN_ON(!(mfmsr() & MSR_EE)))
                        __hard_irq_enable();
#endif
        }
}

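/*
 * CEDE on a dedicated-processor partition: flag the VPA so that the
 * ceded cycles may be donated to other partitions, then cede until an
 * interrupt or a prod from another processor wakes us.
 */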
static int dedicated_cede_loop(struct cpuidle_device *dev,
                                struct cpuidle_driver *drv,
                                int index)
{
        unsigned long in_purr;
        ktime_t kt_before;

        idle_loop_prolog(&in_purr, &kt_before);
        get_lppaca()->donate_dedicated_cpu = 1;

        ppc64_runlatch_off();
        HMT_medium();
        check_and_cede_processor();

        get_lppaca()->donate_dedicated_cpu = 0;
        dev->last_residency =
                (int)idle_loop_epilog(in_purr, kt_before);
        return index;
}

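/* Cede loop for shared-processor partitions. */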
static int shared_cede_loop(struct cpuidle_device *dev,
                        struct cpuidle_driver *drv,
                        int index)
{
        unsigned long in_purr;
        ktime_t kt_before;

        idle_loop_prolog(&in_purr, &kt_before);

        /*
         * Yield the processor to the hypervisor. We return if
         * an external interrupt occurs (which are driven prior
         * to returning here) or if a prod occurs from another
         * processor. When returning here, external interrupts
         * are enabled.
         */
        check_and_cede_processor();

        dev->last_residency =
                (int)idle_loop_epilog(in_purr, kt_before);
        return index;
}

/*
 * States for dedicated partition case.
 */
static struct cpuidle_state dedicated_states[MAX_IDLE_STATE_COUNT] = {
        { /* Snooze */
                .name = "snooze",
                .desc = "snooze",
                .flags = CPUIDLE_FLAG_TIME_VALID,
                .exit_latency = 0,
                .target_residency = 0,
                .enter = &snooze_loop },
        { /* CEDE */
                .name = "CEDE",
                .desc = "CEDE",
                .flags = CPUIDLE_FLAG_TIME_VALID,
                .exit_latency = 10,
                .target_residency = 100,
                .enter = &dedicated_cede_loop },
};

/*
 * States for shared partition case.
 */
static struct cpuidle_state shared_states[MAX_IDLE_STATE_COUNT] = {
        { /* Shared Cede */
                .name = "Shared Cede",
                .desc = "Shared Cede",
                .flags = CPUIDLE_FLAG_TIME_VALID,
                .exit_latency = 0,
                .target_residency = 0,
                .enter = &shared_cede_loop },
};

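/*
 * Called from the powerpc sysfs code when smt_snooze_delay is written;
 * a negative residency disables the CEDE state on that cpu, otherwise
 * the value becomes the CEDE state's target residency.
 */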
void update_smt_snooze_delay(int cpu, int residency)
{
        struct cpuidle_driver *drv = cpuidle_get_driver();
        struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);

        if (cpuidle_state_table != dedicated_states)
                return;

        if (residency < 0) {
                /* Disable the CEDE state on that cpu */
                if (dev)
                        dev->states_usage[1].disable = 1;
        } else if (drv)
                drv->states[1].target_residency = residency;
}

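/*
 * CPU hotplug callback: enable the per-cpu cpuidle device when a cpu
 * comes online and disable it again when the cpu goes away.
 */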
static int pseries_cpuidle_add_cpu_notifier(struct notifier_block *n,
                        unsigned long action, void *hcpu)
{
        int hotcpu = (unsigned long)hcpu;
        struct cpuidle_device *dev =
                        per_cpu_ptr(pseries_cpuidle_devices, hotcpu);

        if (dev && cpuidle_get_driver()) {
                switch (action) {
                case CPU_ONLINE:
                case CPU_ONLINE_FROZEN:
                        cpuidle_pause_and_lock();
                        cpuidle_enable_device(dev);
                        cpuidle_resume_and_unlock();
                        break;

                case CPU_DEAD:
                case CPU_DEAD_FROZEN:
                        cpuidle_pause_and_lock();
                        cpuidle_disable_device(dev);
                        cpuidle_resume_and_unlock();
                        break;

                default:
                        return NOTIFY_DONE;
                }
        }
        return NOTIFY_OK;
}

static struct notifier_block setup_hotplug_notifier = {
        .notifier_call = pseries_cpuidle_add_cpu_notifier,
};

/*
 * pseries_cpuidle_driver_init()
 * Copy the enabled states from the chosen state table into the driver,
 * honouring max_idle_state.
 */
static int pseries_cpuidle_driver_init(void)
{
        int idle_state;
        struct cpuidle_driver *drv = &pseries_idle_driver;

        drv->state_count = 0;

        for (idle_state = 0; idle_state < MAX_IDLE_STATE_COUNT; ++idle_state) {

                if (idle_state > max_idle_state)
                        break;

                /* is the state not enabled? */
                if (cpuidle_state_table[idle_state].enter == NULL)
                        continue;

                drv->states[drv->state_count] = /* structure copy */
                        cpuidle_state_table[idle_state];

                drv->state_count += 1;
        }

        return 0;
}

/* pseries_idle_devices_uninit(void)
 * unregister cpuidle devices and de-allocate memory
 */
static void pseries_idle_devices_uninit(void)
{
        int i;
        struct cpuidle_device *dev;

        for_each_possible_cpu(i) {
                dev = per_cpu_ptr(pseries_cpuidle_devices, i);
                cpuidle_unregister_device(dev);
        }

        free_percpu(pseries_cpuidle_devices);
}

/* pseries_idle_devices_init()
 * allocate, initialize and register cpuidle device
 */
static int pseries_idle_devices_init(void)
{
        int i;
        struct cpuidle_driver *drv = &pseries_idle_driver;
        struct cpuidle_device *dev;

        pseries_cpuidle_devices = alloc_percpu(struct cpuidle_device);
        if (pseries_cpuidle_devices == NULL)
                return -ENOMEM;

        for_each_possible_cpu(i) {
                dev = per_cpu_ptr(pseries_cpuidle_devices, i);
                dev->state_count = drv->state_count;
                dev->cpu = i;
                if (cpuidle_register_device(dev)) {
                        printk(KERN_DEBUG
                                "cpuidle_register_device %d failed!\n", i);
                        return -EIO;
                }
        }

        return 0;
}

/*
 * pseries_idle_probe()
 * Choose state table for shared versus dedicated partition
 */
static int pseries_idle_probe(void)
{
        if (!firmware_has_feature(FW_FEATURE_SPLPAR))
                return -ENODEV;

        if (cpuidle_disable != IDLE_NO_OVERRIDE)
                return -ENODEV;

        if (max_idle_state == 0) {
                printk(KERN_DEBUG "pseries processor idle disabled.\n");
                return -EPERM;
        }

        if (get_lppaca()->shared_proc)
                cpuidle_state_table = shared_states;
        else
                cpuidle_state_table = dedicated_states;

        return 0;
}

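/*
 * pseries_processor_idle_init()
 * Register the cpuidle driver and the per-cpu devices, then hook CPU
 * hotplug notifications.
 */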
static int __init pseries_processor_idle_init(void)
{
        int retval;

        retval = pseries_idle_probe();
        if (retval)
                return retval;

        pseries_cpuidle_driver_init();
        retval = cpuidle_register_driver(&pseries_idle_driver);
        if (retval) {
                printk(KERN_DEBUG "Registration of pseries driver failed.\n");
                return retval;
        }

        retval = pseries_idle_devices_init();
        if (retval) {
                pseries_idle_devices_uninit();
                cpuidle_unregister_driver(&pseries_idle_driver);
                return retval;
        }

        register_cpu_notifier(&setup_hotplug_notifier);
        printk(KERN_DEBUG "pseries_idle_driver registered\n");

        return 0;
}

static void __exit pseries_processor_idle_exit(void)
{
        unregister_cpu_notifier(&setup_hotplug_notifier);
        pseries_idle_devices_uninit();
        cpuidle_unregister_driver(&pseries_idle_driver);
}

module_init(pseries_processor_idle_init);
module_exit(pseries_processor_idle_exit);

MODULE_AUTHOR("Deepthi Dharwar <[email protected]>");
MODULE_DESCRIPTION("Cpuidle driver for POWER");
MODULE_LICENSE("GPL");
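
/*
 * Once loaded, the exposed states can be inspected through the generic
 * cpuidle sysfs interface (assuming sysfs is mounted at /sys), e.g.
 * /sys/devices/system/cpu/cpu0/cpuidle/state0/name reads "snooze" and
 * state1/name reads "CEDE" on a dedicated-processor partition.
 */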