Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
smp.c
Go to the documentation of this file.
1 /*
2  * Author: Andy Fleming <[email protected]>
3  * Kumar Gala <[email protected]>
4  *
5  * Copyright 2006-2008, 2011-2012 Freescale Semiconductor Inc.
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms of the GNU General Public License as published by the
9  * Free Software Foundation; either version 2 of the License, or (at your
10  * option) any later version.
11  */
12 
13 #include <linux/stddef.h>
14 #include <linux/kernel.h>
15 #include <linux/init.h>
16 #include <linux/delay.h>
17 #include <linux/of.h>
18 #include <linux/kexec.h>
19 #include <linux/highmem.h>
20 #include <linux/cpu.h>
21 
22 #include <asm/machdep.h>
23 #include <asm/pgtable.h>
24 #include <asm/page.h>
25 #include <asm/mpic.h>
26 #include <asm/cacheflush.h>
27 #include <asm/dbell.h>
28 #include <asm/fsl_guts.h>
29 
30 #include <sysdev/fsl_soc.h>
31 #include <sysdev/mpic.h>
32 #include "smp.h"
33 
41 };
42 
/* "Global utilities" register block, mapped from the device tree in
 * mpc85xx_smp_init(); used to freeze/unfreeze the timebase. */
static struct ccsr_guts __iomem *guts;
/* Timebase hand-off state shared by the give/take protocol below. */
static u64 timebase;	/* value published by the giver while frozen */
static int tb_req;	/* taker sets: "please publish the timebase" */
static int tb_valid;	/* giver sets: `timebase` now holds a valid value */
47 
48 static void mpc85xx_timebase_freeze(int freeze)
49 {
50  uint32_t mask;
51 
52  mask = CCSR_GUTS_DEVDISR_TB0 | CCSR_GUTS_DEVDISR_TB1;
53  if (freeze)
54  setbits32(&guts->devdisr, mask);
55  else
56  clrbits32(&guts->devdisr, mask);
57 
58  in_be32(&guts->devdisr);
59 }
60 
/*
 * Donate this core's timebase to a secondary coming online.
 *
 * Lock-step handshake with mpc85xx_take_timebase(): the taker raises
 * tb_req; we freeze the timebase, publish its value through `timebase`,
 * and raise tb_valid; the taker copies the value and clears tb_valid,
 * after which we unfreeze.  Interrupts are disabled locally so the
 * handshake cannot be interrupted mid-protocol.
 */
static void mpc85xx_give_timebase(void)
{
	unsigned long flags;

	local_irq_save(flags);

	/* Wait until the taker asks for the timebase. */
	while (!tb_req)
		barrier();
	tb_req = 0;

	/* Stop the timebase so both cores end up with the same count. */
	mpc85xx_timebase_freeze(1);
	timebase = get_tb();
	mb();	/* publish `timebase` before raising tb_valid */
	tb_valid = 1;

	/* Wait for the taker to consume the value (it clears tb_valid). */
	while (tb_valid)
		barrier();

	mpc85xx_timebase_freeze(0);

	local_irq_restore(flags);
}
83 
/*
 * Counterpart of mpc85xx_give_timebase(): request the (frozen) timebase
 * value from the boot core and load it into this core's TBU/TBL.
 */
static void mpc85xx_take_timebase(void)
{
	unsigned long flags;

	local_irq_save(flags);

	/* Ask for the timebase, then wait for the giver to publish it. */
	tb_req = 1;
	while (!tb_valid)
		barrier();

	/* set_tb() takes the upper and lower 32-bit halves separately. */
	set_tb(timebase >> 32, timebase & 0xffffffff);
	isync();
	tb_valid = 0;	/* release the giver so it can unfreeze */

	local_irq_restore(flags);
}
100 
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Offline path for a hot-unplugged CPU: mark it dead, flush and disable
 * the L1 cache, and park the core in NAP power-saving mode.  Never
 * returns; the core is released again via smp_85xx_kick_cpu().
 */
static void __cpuinit smp_85xx_mach_cpu_die(void)
{
	unsigned int cpu = smp_processor_id();
	u32 tmp;

	/*
	 * Restored per upstream 3.7 (line lost in this copy): interrupts
	 * must be off before the CPU is marked dead and put to sleep.
	 */
	local_irq_disable();
	idle_task_exit();
	generic_set_cpu_dead(cpu);
	mb();

	/* Stop the decrementer/timers on this core. */
	mtspr(SPRN_TCR, 0);

	/*
	 * Restored per upstream 3.7: flush and disable the L1 before
	 * napping, or dirty lines are lost while the core sleeps.
	 */
	__flush_disable_L1();
	tmp = (mfspr(SPRN_HID0) & ~(HID0_DOZE|HID0_SLEEP)) | HID0_NAP;
	mtspr(SPRN_HID0, tmp);
	isync();

	/* Enter NAP mode. */
	tmp = mfmsr();
	tmp |= MSR_WE;
	mb();
	mtmsr(tmp);
	isync();

	while (1)
		;
}
#endif
130 
131 static int __cpuinit smp_85xx_kick_cpu(int nr)
132 {
133  unsigned long flags;
134  const u64 *cpu_rel_addr;
135  __iomem struct epapr_spin_table *spin_table;
136  struct device_node *np;
137  int hw_cpu = get_hard_smp_processor_id(nr);
138  int ioremappable;
139  int ret = 0;
140 
141  WARN_ON(nr < 0 || nr >= NR_CPUS);
142  WARN_ON(hw_cpu < 0 || hw_cpu >= NR_CPUS);
143 
144  pr_debug("smp_85xx_kick_cpu: kick CPU #%d\n", nr);
145 
146  np = of_get_cpu_node(nr, NULL);
147  cpu_rel_addr = of_get_property(np, "cpu-release-addr", NULL);
148 
149  if (cpu_rel_addr == NULL) {
150  printk(KERN_ERR "No cpu-release-addr for cpu %d\n", nr);
151  return -ENOENT;
152  }
153 
154  /*
155  * A secondary core could be in a spinloop in the bootpage
156  * (0xfffff000), somewhere in highmem, or somewhere in lowmem.
157  * The bootpage and highmem can be accessed via ioremap(), but
158  * we need to directly access the spinloop if its in lowmem.
159  */
160  ioremappable = *cpu_rel_addr > virt_to_phys(high_memory);
161 
162  /* Map the spin table */
163  if (ioremappable)
164  spin_table = ioremap(*cpu_rel_addr,
165  sizeof(struct epapr_spin_table));
166  else
167  spin_table = phys_to_virt(*cpu_rel_addr);
168 
169  local_irq_save(flags);
170 #ifdef CONFIG_PPC32
171 #ifdef CONFIG_HOTPLUG_CPU
172  /* Corresponding to generic_set_cpu_dead() */
173  generic_set_cpu_up(nr);
174 
175  if (system_state == SYSTEM_RUNNING) {
176  out_be32(&spin_table->addr_l, 0);
177 
178  /*
179  * We don't set the BPTR register here since it already points
180  * to the boot page properly.
181  */
182  mpic_reset_core(hw_cpu);
183 
184  /* wait until core is ready... */
185  if (!spin_event_timeout(in_be32(&spin_table->addr_l) == 1,
186  10000, 100)) {
187  pr_err("%s: timeout waiting for core %d to reset\n",
188  __func__, hw_cpu);
189  ret = -ENOENT;
190  goto out;
191  }
192 
193  /* clear the acknowledge status */
194  __secondary_hold_acknowledge = -1;
195  }
196 #endif
197  out_be32(&spin_table->pir, hw_cpu);
198  out_be32(&spin_table->addr_l, __pa(__early_start));
199 
200  if (!ioremappable)
201  flush_dcache_range((ulong)spin_table,
202  (ulong)spin_table + sizeof(struct epapr_spin_table));
203 
204  /* Wait a bit for the CPU to ack. */
205  if (!spin_event_timeout(__secondary_hold_acknowledge == hw_cpu,
206  10000, 100)) {
207  pr_err("%s: timeout waiting for core %d to ack\n",
208  __func__, hw_cpu);
209  ret = -ENOENT;
210  goto out;
211  }
212 out:
213 #else
214  smp_generic_kick_cpu(nr);
215 
216  out_be32(&spin_table->pir, hw_cpu);
217  out_be64((u64 *)(&spin_table->addr_h),
218  __pa((u64)*((unsigned long long *)generic_secondary_smp_init)));
219 
220  if (!ioremappable)
221  flush_dcache_range((ulong)spin_table,
222  (ulong)spin_table + sizeof(struct epapr_spin_table));
223 #endif
224 
225  local_irq_restore(flags);
226 
227  if (ioremappable)
228  iounmap(spin_table);
229 
230  return ret;
231 }
232 
/*
 * SMP operations for 85xx boards.  .probe/.message_pass/.cause_ipi and
 * the give/take_timebase hooks are overridden at runtime by
 * mpc85xx_smp_init() based on the device tree and CPU features.
 */
struct smp_ops_t smp_85xx_ops = {
	.kick_cpu = smp_85xx_kick_cpu,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable	= generic_cpu_disable,
	.cpu_die	= generic_cpu_die,
#endif
#ifdef CONFIG_KEXEC
	.give_timebase	= smp_generic_give_timebase,
	.take_timebase	= smp_generic_take_timebase,
#endif
};
244 
245 #ifdef CONFIG_KEXEC
/* Count of secondary CPUs parked by mpc85xx_smp_kexec_cpu_down(). */
atomic_t kexec_down_cpus = ATOMIC_INIT(0);
247 
248 void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
249 {
251 
252  if (secondary) {
253  atomic_inc(&kexec_down_cpus);
254  /* loop forever */
255  while (1);
256  }
257 }
258 
259 static void mpc85xx_smp_kexec_down(void *arg)
260 {
261  if (ppc_md.kexec_cpu_down)
262  ppc_md.kexec_cpu_down(0,1);
263 }
264 
265 static void map_and_flush(unsigned long paddr)
266 {
267  struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
268  unsigned long kaddr = (unsigned long)kmap(page);
269 
270  flush_dcache_range(kaddr, kaddr + PAGE_SIZE);
271  kunmap(page);
272 }
273 
/*
 * Flush every page the soon-to-run kexec image will consume out to RAM.
 *
 * Normal images: walk the kimage entry list (following IND_INDIRECTION
 * links, stopping at IND_DONE) and flush each source page.  Crash
 * images: their segments are already copied to the crash region, so
 * flush each destination segment page by page.  Finally flush the
 * kimage struct itself, which is handed to the new kernel.
 */
static void mpc85xx_smp_flush_dcache_kexec(struct kimage *image)
{
	kimage_entry_t *ptr, entry;
	unsigned long paddr;
	int i;

	if (image->type == KEXEC_TYPE_DEFAULT) {
		/* normal kexec images are stored in temporary pages */
		for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
		     ptr = (entry & IND_INDIRECTION) ?
				phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
			/* IND_DESTINATION entries are addresses, not data pages */
			if (!(entry & IND_DESTINATION)) {
				map_and_flush(entry);
			}
		}
		/* flush out last IND_DONE page */
		map_and_flush(entry);
	} else {
		/* crash type kexec images are copied to the crash region */
		for (i = 0; i < image->nr_segments; i++) {
			struct kexec_segment *seg = &image->segment[i];
			for (paddr = seg->mem; paddr < seg->mem + seg->memsz;
			     paddr += PAGE_SIZE) {
				map_and_flush(paddr);
			}
		}
	}

	/* also flush the kimage struct to be passed in as well */
	flush_dcache_range((unsigned long)image,
			   (unsigned long)image + sizeof(*image));
}
312 
313 static void mpc85xx_smp_machine_kexec(struct kimage *image)
314 {
315  int timeout = INT_MAX;
316  int i, num_cpus = num_present_cpus();
317 
318  mpc85xx_smp_flush_dcache_kexec(image);
319 
320  if (image->type == KEXEC_TYPE_DEFAULT)
321  smp_call_function(mpc85xx_smp_kexec_down, NULL, 0);
322 
323  while ( (atomic_read(&kexec_down_cpus) != (num_cpus - 1)) &&
324  ( timeout > 0 ) )
325  {
326  timeout--;
327  }
328 
329  if ( !timeout )
330  printk(KERN_ERR "Unable to bring down secondary cpu(s)");
331 
333  {
334  if ( i == smp_processor_id() ) continue;
335  mpic_reset_core(i);
336  }
337 
338  default_machine_kexec(image);
339 }
340 #endif /* CONFIG_KEXEC */
341 
342 static void __cpuinit smp_85xx_setup_cpu(int cpu_nr)
343 {
344  if (smp_85xx_ops.probe == smp_mpic_probe)
346 
349 }
350 
/* SoCs whose "guts" block is used here for timebase freeze/sync. */
static const struct of_device_id mpc85xx_smp_guts_ids[] = {
	{ .compatible = "fsl,mpc8572-guts", },
	{ .compatible = "fsl,p1020-guts", },
	{ .compatible = "fsl,p1021-guts", },
	{ .compatible = "fsl,p1022-guts", },
	{ .compatible = "fsl,p1023-guts", },
	{ .compatible = "fsl,p2020-guts", },
	{},	/* sentinel */
};
360 
362 {
363  struct device_node *np;
364 
365  smp_85xx_ops.setup_cpu = smp_85xx_setup_cpu;
366 
367  np = of_find_node_by_type(NULL, "open-pic");
368  if (np) {
369  smp_85xx_ops.probe = smp_mpic_probe;
370  smp_85xx_ops.message_pass = smp_mpic_message_pass;
371  }
372 
374  /*
375  * If left NULL, .message_pass defaults to
376  * smp_muxed_ipi_message_pass
377  */
378  smp_85xx_ops.message_pass = NULL;
379  smp_85xx_ops.cause_ipi = doorbell_cause_ipi;
380  }
381 
382  np = of_find_matching_node(NULL, mpc85xx_smp_guts_ids);
383  if (np) {
384  guts = of_iomap(np, 0);
385  of_node_put(np);
386  if (!guts) {
387  pr_err("%s: Could not map guts node address\n",
388  __func__);
389  return;
390  }
391  smp_85xx_ops.give_timebase = mpc85xx_give_timebase;
392  smp_85xx_ops.take_timebase = mpc85xx_take_timebase;
393 #ifdef CONFIG_HOTPLUG_CPU
394  ppc_md.cpu_die = smp_85xx_mach_cpu_die;
395 #endif
396  }
397 
399 
400 #ifdef CONFIG_KEXEC
401  ppc_md.kexec_cpu_down = mpc85xx_smp_kexec_cpu_down;
402  ppc_md.machine_kexec = mpc85xx_smp_machine_kexec;
403 #endif
404 }