Linux Kernel 3.7.1
cpuidle44xx.c
/*
 * OMAP4 CPU idle Routines
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Santosh Shilimkar <[email protected]>
 * Rajendra Nayak <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/sched.h>
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/clockchips.h>

#include <asm/proc-fns.h>

#include "common.h"
#include "pm.h"
#include "prm.h"
#include "clockdomain.h"

/* Machine specific information */
struct omap4_idle_statedata {
	u32 cpu_state;
	u32 mpu_logic_state;
	u32 mpu_state;
};

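/*
 * One entry per C-state: C1 (MPU ON), C2 (MPU CSWR) and C3 (MPU OSWR),
 * indexed by the state index passed to the enter callbacks below.
 */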
static struct omap4_idle_statedata omap4_idle_data[] = {
	{
		.cpu_state = PWRDM_POWER_ON,
		.mpu_state = PWRDM_POWER_ON,
		.mpu_logic_state = PWRDM_POWER_RET,
	},
	{
		.cpu_state = PWRDM_POWER_OFF,
		.mpu_state = PWRDM_POWER_RET,
		.mpu_logic_state = PWRDM_POWER_RET,
	},
	{
		.cpu_state = PWRDM_POWER_OFF,
		.mpu_state = PWRDM_POWER_RET,
		.mpu_logic_state = PWRDM_POWER_OFF,
	},
};

static struct powerdomain *mpu_pd, *cpu_pd[NR_CPUS];
static struct clockdomain *cpu_clkdm[NR_CPUS];

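/*
 * Coupled-idle bookkeeping: abort_barrier synchronizes an aborted C2/C3
 * attempt across both CPUs, and cpu_done[] marks a CPU that has finished
 * its low power transition.
 */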
static atomic_t abort_barrier;
static bool cpu_done[NR_CPUS];

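/*
 * CPUidle entry points: omap4_enter_idle_simple() implements the WFI-only
 * C1 state; omap4_enter_idle_coupled() implements the coupled C2/C3 states
 * where both CPUs and the MPU subsystem enter lower power states.
 */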
static int omap4_enter_idle_simple(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	local_fiq_disable();
	omap_do_wfi();
	local_fiq_enable();

	return index;
}

static int omap4_enter_idle_coupled(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	struct omap4_idle_statedata *cx = &omap4_idle_data[index];
	int cpu_id = smp_processor_id();

	local_fiq_disable();

	/*
	 * CPU0 has to wait and stay ON until CPU1 is in OFF state.
	 * This is necessary to honour the hardware recommendation
	 * of triggering all the possible low power modes once CPU1 is
	 * out of coherency and in OFF mode.
	 */
	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
		while (pwrdm_read_pwrst(cpu_pd[1]) != PWRDM_POWER_OFF) {
			cpu_relax();

			/*
			 * CPU1 could have already entered & exited idle
			 * without hitting off because of a wakeup
			 * or a failed attempt to hit off mode. Check for
			 * that here, otherwise we could spin forever
			 * waiting for CPU1 off.
			 */
			if (cpu_done[1])
				goto fail;

		}
	}

	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);

	/*
	 * Call idle CPU PM enter notifier chain so that
	 * VFP and per CPU interrupt context is saved.
	 */
	cpu_pm_enter();

	if (dev->cpu == 0) {
		pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
		omap_set_pwrdm_state(mpu_pd, cx->mpu_state);

		/*
		 * Call idle CPU cluster PM enter notifier chain
		 * to save GIC and wakeupgen context.
		 */
		if ((cx->mpu_state == PWRDM_POWER_RET) &&
			(cx->mpu_logic_state == PWRDM_POWER_OFF))
				cpu_cluster_pm_enter();
	}

	omap4_enter_lowpower(dev->cpu, cx->cpu_state);
	cpu_done[dev->cpu] = true;

	/* Wakeup CPU1 only if it is not offlined */
	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
		clkdm_wakeup(cpu_clkdm[1]);
		clkdm_allow_idle(cpu_clkdm[1]);
	}

	/*
	 * Call idle CPU PM exit notifier chain to restore
	 * VFP and per CPU IRQ context.
	 */
	cpu_pm_exit();

	/*
	 * Call idle CPU cluster PM exit notifier chain
	 * to restore GIC and wakeupgen context.
	 */
	if (omap4_mpuss_read_prev_context_state())
		cpu_cluster_pm_exit();

	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);

fail:
	cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
	cpu_done[dev->cpu] = false;

	local_fiq_enable();

	return index;
}

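/* One cpuidle device per CPU, registered from the init routine below */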
DEFINE_PER_CPU(struct cpuidle_device, omap4_idle_dev);

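/*
 * CPUidle driver: exit latencies and target residencies are in
 * microseconds. C2 and C3 are coupled states entered by both CPUs
 * together via omap4_enter_idle_coupled(); C1 is a plain per-CPU WFI.
 */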
struct cpuidle_driver omap4_idle_driver = {
	.name = "omap4_idle",
	.owner = THIS_MODULE,
	.en_core_tk_irqen = 1,
	.states = {
		{
			/* C1 - CPU0 ON + CPU1 ON + MPU ON */
			.exit_latency = 2 + 2,
			.target_residency = 5,
			.flags = CPUIDLE_FLAG_TIME_VALID,
			.enter = omap4_enter_idle_simple,
			.name = "C1",
			.desc = "MPUSS ON"
		},
		{
			/* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
			.exit_latency = 328 + 440,
			.target_residency = 960,
			.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
			.enter = omap4_enter_idle_coupled,
			.name = "C2",
			.desc = "MPUSS CSWR",
		},
		{
			/* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
			.exit_latency = 460 + 518,
			.target_residency = 1100,
			.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
			.enter = omap4_enter_idle_coupled,
			.name = "C3",
			.desc = "MPUSS OSWR",
		},
	},
	.state_count = ARRAY_SIZE(omap4_idle_data),
	.safe_state_index = 0,
};

/*
 * For each cpu, setup the broadcast timer because local timers
 * stop for the states above C1.
 */
static void omap_setup_broadcast_timer(void *arg)
{
	int cpu = smp_processor_id();
	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
}

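/*
 * Init routine: looks up the MPU/CPU power and clock domains, switches
 * each CPU to the broadcast timer, and registers the driver plus one
 * cpuidle device per online CPU.
 */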
int __init omap4_idle_init(void)
{
	struct cpuidle_device *dev;
	unsigned int cpu_id = 0;

	mpu_pd = pwrdm_lookup("mpu_pwrdm");
	cpu_pd[0] = pwrdm_lookup("cpu0_pwrdm");
	cpu_pd[1] = pwrdm_lookup("cpu1_pwrdm");
	if ((!mpu_pd) || (!cpu_pd[0]) || (!cpu_pd[1]))
		return -ENODEV;

	cpu_clkdm[0] = clkdm_lookup("mpu0_clkdm");
	cpu_clkdm[1] = clkdm_lookup("mpu1_clkdm");
	if (!cpu_clkdm[0] || !cpu_clkdm[1])
		return -ENODEV;

	/* Configure the broadcast timer on each cpu */
	on_each_cpu(omap_setup_broadcast_timer, NULL, 1);

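	/* Register the driver and a cpuidle device for each online CPU */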
	for_each_cpu(cpu_id, cpu_online_mask) {
		dev = &per_cpu(omap4_idle_dev, cpu_id);
		dev->cpu = cpu_id;
#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
		dev->coupled_cpus = *cpu_online_mask;
#endif
		cpuidle_register_driver(&omap4_idle_driver);

		if (cpuidle_register_device(dev)) {
			pr_err("%s: CPUidle register failed\n", __func__);
			return -EIO;
		}
	}

	return 0;
}