setup.c - Linux Kernel 3.7.1

/*
 * Copyright 2004-2010 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/delay.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/tty.h>
#include <linux/pfn.h>

#ifdef CONFIG_MTD_UCLINUX
#include <linux/mtd/map.h>
#include <linux/ext2_fs.h>
#include <linux/cramfs_fs.h>
#include <linux/romfs_fs.h>
#endif

#include <asm/cplb.h>
#include <asm/cacheflush.h>
#include <asm/blackfin.h>
#include <asm/cplbinit.h>
#include <asm/clocks.h>
#include <asm/div64.h>
#include <asm/cpu.h>
#include <asm/fixed_code.h>
#include <asm/early_printk.h>
#include <asm/irq_handler.h>
#include <asm/pda.h>
#ifdef CONFIG_BF60x
#include <mach/pm.h>
#endif

u16 _bfin_swrst;
EXPORT_SYMBOL(_bfin_swrst);

unsigned long memory_start, memory_end, physical_mem_end;
unsigned long _rambase, _ramstart, _ramend;
unsigned long reserved_mem_dcache_on;
unsigned long reserved_mem_icache_on;
EXPORT_SYMBOL(memory_start);
EXPORT_SYMBOL(memory_end);
EXPORT_SYMBOL(physical_mem_end);
EXPORT_SYMBOL(_ramend);
EXPORT_SYMBOL(reserved_mem_dcache_on);
EXPORT_SYMBOL(reserved_mem_icache_on);

#ifdef CONFIG_MTD_UCLINUX
extern struct map_info uclinux_ram_map;
unsigned long memory_mtd_end, memory_mtd_start, mtd_size;
EXPORT_SYMBOL(memory_mtd_end);
EXPORT_SYMBOL(memory_mtd_start);
EXPORT_SYMBOL(mtd_size);
#endif

char __initdata command_line[COMMAND_LINE_SIZE];
struct blackfin_initial_pda __initdata initial_pda;

/* boot memmap, for parsing "memmap=" */
#define BFIN_MEMMAP_MAX		128 /* number of entries in bfin_memmap */
#define BFIN_MEMMAP_RAM		1
#define BFIN_MEMMAP_RESERVED	2
static struct bfin_memmap {
	int nr_map;
	struct bfin_memmap_entry {
		unsigned long long addr; /* start of memory segment */
		unsigned long long size;
		unsigned long type;
	} map[BFIN_MEMMAP_MAX];
} bfin_memmap __initdata;
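
/*
 * Editor's illustration (not in the original source): after the user
 * passes "memmap=16M@0x1000000" on the kernel command line,
 * parse_memmap() below would leave one entry in this table:
 *
 *	bfin_memmap.nr_map	= 1
 *	bfin_memmap.map[0].addr	= 0x1000000	(start at 16MiB)
 *	bfin_memmap.map[0].size	= 0x1000000	(16MiB long)
 *	bfin_memmap.map[0].type	= BFIN_MEMMAP_RAM
 */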

/* for memmap sanitization */
struct change_member {
	struct bfin_memmap_entry *pentry; /* pointer to original entry */
	unsigned long long addr; /* address for this change point */
};
static struct change_member change_point_list[2*BFIN_MEMMAP_MAX] __initdata;
static struct change_member *change_point[2*BFIN_MEMMAP_MAX] __initdata;
static struct bfin_memmap_entry *overlap_list[BFIN_MEMMAP_MAX] __initdata;
static struct bfin_memmap_entry new_map[BFIN_MEMMAP_MAX] __initdata;

DEFINE_PER_CPU(struct blackfin_cpudata, cpu_data);

static int early_init_clkin_hz(char *buf);

#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
void __init generate_cplb_tables(void)
{
	unsigned int cpu;

	generate_cplb_tables_all();
	/* Generate per-CPU I&D CPLB tables */
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
		generate_cplb_tables_cpu(cpu);
}
#endif

void __cpuinit bfin_setup_caches(unsigned int cpu)
{
#ifdef CONFIG_BFIN_ICACHE
	bfin_icache_init(icplb_tbl[cpu]);
#endif

#ifdef CONFIG_BFIN_DCACHE
	bfin_dcache_init(dcplb_tbl[cpu]);
#endif

	bfin_setup_cpudata(cpu);

	/*
	 * In cache coherence emulation mode, we need to have the
	 * D-cache enabled before running any atomic operation which
	 * might involve cache invalidation (i.e. spinlock, rwlock).
	 * So printk's are deferred until then.
	 */
#ifdef CONFIG_BFIN_ICACHE
	printk(KERN_INFO "Instruction Cache Enabled for CPU%u\n", cpu);
	printk(KERN_INFO "  External memory:"
# ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
	       " cacheable"
# else
	       " uncacheable"
# endif
	       " in instruction cache\n");
	if (L2_LENGTH)
		printk(KERN_INFO "  L2 SRAM        :"
# ifdef CONFIG_BFIN_L2_ICACHEABLE
		       " cacheable"
# else
		       " uncacheable"
# endif
		       " in instruction cache\n");

#else
	printk(KERN_INFO "Instruction Cache Disabled for CPU%u\n", cpu);
#endif

#ifdef CONFIG_BFIN_DCACHE
	printk(KERN_INFO "Data Cache Enabled for CPU%u\n", cpu);
	printk(KERN_INFO "  External memory:"
# if defined CONFIG_BFIN_EXTMEM_WRITEBACK
	       " cacheable (write-back)"
# elif defined CONFIG_BFIN_EXTMEM_WRITETHROUGH
	       " cacheable (write-through)"
# else
	       " uncacheable"
# endif
	       " in data cache\n");
	if (L2_LENGTH)
		printk(KERN_INFO "  L2 SRAM        :"
# if defined CONFIG_BFIN_L2_WRITEBACK
		       " cacheable (write-back)"
# elif defined CONFIG_BFIN_L2_WRITETHROUGH
		       " cacheable (write-through)"
# else
		       " uncacheable"
# endif
		       " in data cache\n");
#else
	printk(KERN_INFO "Data Cache Disabled for CPU%u\n", cpu);
#endif
}

void __cpuinit bfin_setup_cpudata(unsigned int cpu)
{
	struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu);

	cpudata->imemctl = bfin_read_IMEM_CONTROL();
	cpudata->dmemctl = bfin_read_DMEM_CONTROL();
}

void __init bfin_cache_init(void)
{
#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
	generate_cplb_tables();
#endif
	bfin_setup_caches(0);
}

void __init bfin_relocate_l1_mem(void)
{
	unsigned long text_l1_len = (unsigned long)_text_l1_len;
	unsigned long data_l1_len = (unsigned long)_data_l1_len;
	unsigned long data_b_l1_len = (unsigned long)_data_b_l1_len;
	unsigned long l2_len = (unsigned long)_l2_len;

	early_shadow_stamp();

	/*
	 * Due to the ALIGN(4) in arch/blackfin/kernel/vmlinux.lds.S,
	 * we know that everything about L1 text/data is nicely aligned,
	 * so copy in 4-byte chunks and don't worry about overlapping
	 * src/dest.
	 *
	 * We can't use the dma_memcpy functions, since they can call
	 * scheduler functions which might be in L1 :( and core writes
	 * into L1 instruction memory cause bad access errors, so we are
	 * stuck: we are required to use DMA, but can't use the common
	 * dma functions.  We can't use memcpy either, since that might
	 * itself end up in the relocated L1.
	 */

	blackfin_dma_early_init();

	/* if necessary, copy L1 text to L1 instruction SRAM */
	if (L1_CODE_LENGTH && text_l1_len)
		early_dma_memcpy(_stext_l1, _text_l1_lma, text_l1_len);

	/* if necessary, copy L1 data to L1 data bank A SRAM */
	if (L1_DATA_A_LENGTH && data_l1_len)
		early_dma_memcpy(_sdata_l1, _data_l1_lma, data_l1_len);

	/* if necessary, copy L1 data B to L1 data bank B SRAM */
	if (L1_DATA_B_LENGTH && data_b_l1_len)
		early_dma_memcpy(_sdata_b_l1, _data_b_l1_lma, data_b_l1_len);

	early_dma_memcpy_done();

#if defined(CONFIG_SMP) && defined(CONFIG_ICACHE_FLUSH_L1)
	blackfin_iflush_l1_entry[0] = (unsigned long)blackfin_icache_flush_range_l1;
#endif

	/* if necessary, copy L2 text/data to L2 SRAM */
	if (L2_LENGTH && l2_len)
		memcpy(_stext_l2, _l2_lma, l2_len);
}

#ifdef CONFIG_SMP
void __init bfin_relocate_coreb_l1_mem(void)
{
	unsigned long text_l1_len = (unsigned long)_text_l1_len;
	unsigned long data_l1_len = (unsigned long)_data_l1_len;
	unsigned long data_b_l1_len = (unsigned long)_data_b_l1_len;

	blackfin_dma_early_init();

	/* if necessary, copy L1 text to L1 instruction SRAM */
	if (L1_CODE_LENGTH && text_l1_len)
		early_dma_memcpy((void *)COREB_L1_CODE_START, _text_l1_lma,
				 text_l1_len);

	/* if necessary, copy L1 data to L1 data bank A SRAM */
	if (L1_DATA_A_LENGTH && data_l1_len)
		early_dma_memcpy((void *)COREB_L1_DATA_A_START, _data_l1_lma,
				 data_l1_len);

	/* if necessary, copy L1 data B to L1 data bank B SRAM */
	if (L1_DATA_B_LENGTH && data_b_l1_len)
		early_dma_memcpy((void *)COREB_L1_DATA_B_START, _data_b_l1_lma,
				 data_b_l1_len);

	early_dma_memcpy_done();

#ifdef CONFIG_ICACHE_FLUSH_L1
	blackfin_iflush_l1_entry[1] = (unsigned long)blackfin_icache_flush_range_l1 -
			(unsigned long)_stext_l1 + COREB_L1_CODE_START;
#endif
}
#endif

#ifdef CONFIG_ROMKERNEL
void __init bfin_relocate_xip_data(void)
{
	early_shadow_stamp();

	memcpy(_sdata, _data_lma, (unsigned long)_data_len - THREAD_SIZE + sizeof(struct thread_info));
	memcpy(_sinitdata, _init_data_lma, (unsigned long)_init_data_len);
}
#endif

/* add_memory_region to memmap */
static void __init add_memory_region(unsigned long long start,
				     unsigned long long size, int type)
{
	int i;

	i = bfin_memmap.nr_map;

	if (i == BFIN_MEMMAP_MAX) {
		printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
		return;
	}

	bfin_memmap.map[i].addr = start;
	bfin_memmap.map[i].size = size;
	bfin_memmap.map[i].type = type;
	bfin_memmap.nr_map++;
}

/*
 * Sanitize the boot memmap, removing overlaps.
 */
static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map)
{
	struct change_member *change_tmp;
	unsigned long current_type, last_type;
	unsigned long long last_addr;
	int chgidx, still_changing;
	int overlap_entries;
	int new_entry;
	int old_nr, new_nr, chg_nr;
	int i;

	/*
		Visually we're performing the following (1,2,3,4 = memory types)

		Sample memory map (w/overlaps):
		   ____22__________________
		   ______________________4_
		   ____1111________________
		   _44_____________________
		   11111111________________
		   ____________________33__
		   ___________44___________
		   __________33333_________
		   ______________22________
		   ___________________2222_
		   _________111111111______
		   _____________________11_
		   _________________4______

		Sanitized equivalent (no overlap):
		   1_______________________
		   _44_____________________
		   ___1____________________
		   ____22__________________
		   ______11________________
		   _________1______________
		   __________3_____________
		   ___________44___________
		   _____________33_________
		   _______________2________
		   ________________1_______
		   _________________4______
		   ___________________2____
		   ____________________33__
		   ______________________4_
	*/
	/* if there's only one memory region, don't bother */
	if (*pnr_map < 2)
		return -1;

	old_nr = *pnr_map;

	/* bail out if we find any unreasonable addresses in memmap */
	for (i = 0; i < old_nr; i++)
		if (map[i].addr + map[i].size < map[i].addr)
			return -1;

	/* create pointers for initial change-point information (for sorting) */
	for (i = 0; i < 2*old_nr; i++)
		change_point[i] = &change_point_list[i];

	/* record all known change-points (starting and ending addresses),
	   omitting those that are for empty memory regions */
	chgidx = 0;
	for (i = 0; i < old_nr; i++) {
		if (map[i].size != 0) {
			change_point[chgidx]->addr = map[i].addr;
			change_point[chgidx++]->pentry = &map[i];
			change_point[chgidx]->addr = map[i].addr + map[i].size;
			change_point[chgidx++]->pentry = &map[i];
		}
	}
	chg_nr = chgidx;	/* true number of change-points */

	/* sort change-point list by memory addresses (low -> high) */
	still_changing = 1;
	while (still_changing) {
		still_changing = 0;
		for (i = 1; i < chg_nr; i++) {
			/* if <current_addr> > <last_addr>, swap */
			/* or, if current=<start_addr> & last=<end_addr>, swap */
			if ((change_point[i]->addr < change_point[i-1]->addr) ||
				((change_point[i]->addr == change_point[i-1]->addr) &&
				 (change_point[i]->addr == change_point[i]->pentry->addr) &&
				 (change_point[i-1]->addr != change_point[i-1]->pentry->addr))
			   ) {
				change_tmp = change_point[i];
				change_point[i] = change_point[i-1];
				change_point[i-1] = change_tmp;
				still_changing = 1;
			}
		}
	}

	/* create a new memmap, removing overlaps */
	overlap_entries = 0;	/* number of entries in the overlap table */
	new_entry = 0;		/* index for creating new memmap entries */
	last_type = 0;		/* start with undefined memory type */
	last_addr = 0;		/* start with 0 as last starting address */
	/* loop through change-points, determining effect on the new memmap */
	for (chgidx = 0; chgidx < chg_nr; chgidx++) {
		/* keep track of all overlapping memmap entries */
		if (change_point[chgidx]->addr == change_point[chgidx]->pentry->addr) {
			/* add map entry to overlap list (> 1 entry implies an overlap) */
			overlap_list[overlap_entries++] = change_point[chgidx]->pentry;
		} else {
			/* remove entry from list (order independent, so swap with last) */
			for (i = 0; i < overlap_entries; i++) {
				if (overlap_list[i] == change_point[chgidx]->pentry)
					overlap_list[i] = overlap_list[overlap_entries-1];
			}
			overlap_entries--;
		}
		/* if there are overlapping entries, decide which "type" to use */
		/* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
		current_type = 0;
		for (i = 0; i < overlap_entries; i++)
			if (overlap_list[i]->type > current_type)
				current_type = overlap_list[i]->type;
		/* continue building up new memmap based on this information */
		if (current_type != last_type) {
			if (last_type != 0) {
				new_map[new_entry].size =
					change_point[chgidx]->addr - last_addr;
				/* move forward only if the new size was non-zero */
				if (new_map[new_entry].size != 0)
					if (++new_entry >= BFIN_MEMMAP_MAX)
						break;	/* no more space left for new entries */
			}
			if (current_type != 0) {
				new_map[new_entry].addr = change_point[chgidx]->addr;
				new_map[new_entry].type = current_type;
				last_addr = change_point[chgidx]->addr;
			}
			last_type = current_type;
		}
	}
	new_nr = new_entry;	/* retain count for new entries */

	/* copy new mapping into original location */
	memcpy(map, new_map, new_nr*sizeof(struct bfin_memmap_entry));
	*pnr_map = new_nr;

	return 0;
}
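
/*
 * Worked example (editor's addition): given two overlapping input entries
 *
 *	[0x0000000, 0x2000000)  type 1 (RAM)
 *	[0x1000000, 0x1800000)  type 2 (RESERVED)
 *
 * the change points are 0x0, 0x1000000, 0x1800000 and 0x2000000.  Where
 * the regions overlap the larger type value wins, so the sanitized map is
 *
 *	[0x0000000, 0x1000000)  type 1
 *	[0x1000000, 0x1800000)  type 2
 *	[0x1800000, 0x2000000)  type 1
 */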

static void __init print_memory_map(char *who)
{
	int i;

	for (i = 0; i < bfin_memmap.nr_map; i++) {
		printk(KERN_DEBUG " %s: %016Lx - %016Lx ", who,
			bfin_memmap.map[i].addr,
			bfin_memmap.map[i].addr + bfin_memmap.map[i].size);
		switch (bfin_memmap.map[i].type) {
		case BFIN_MEMMAP_RAM:
			printk(KERN_CONT "(usable)\n");
			break;
		case BFIN_MEMMAP_RESERVED:
			printk(KERN_CONT "(reserved)\n");
			break;
		default:
			printk(KERN_CONT "type %lu\n", bfin_memmap.map[i].type);
			break;
		}
	}
}

static __init int parse_memmap(char *arg)
{
	unsigned long long start_at, mem_size;

	if (!arg)
		return -EINVAL;

	mem_size = memparse(arg, &arg);
	if (*arg == '@') {
		start_at = memparse(arg+1, &arg);
		add_memory_region(start_at, mem_size, BFIN_MEMMAP_RAM);
	} else if (*arg == '$') {
		start_at = memparse(arg+1, &arg);
		add_memory_region(start_at, mem_size, BFIN_MEMMAP_RESERVED);
	}

	return 0;
}

/*
 * Initial parsing of the command line.  Currently, we support:
 *  - Controlling the linux memory size: mem=xxx[KMG]
 *  - Controlling the physical memory size: max_mem=xxx[KMG][$][#]
 *       $ -> reserved memory is dcacheable
 *       # -> reserved memory is icacheable
 *  - "memmap=XXX[KkmM][@][$]XXX[KkmM]" defines a memory region
 *       @ from <start> to <start>+<mem>, type RAM
 *       $ from <start> to <start>+<mem>, type RESERVED
 */
static __init void parse_cmdline_early(char *cmdline_p)
{
	char c = ' ', *to = cmdline_p;
	unsigned int memsize;
	for (;;) {
		if (c == ' ') {
			if (!memcmp(to, "mem=", 4)) {
				to += 4;
				memsize = memparse(to, &to);
				if (memsize)
					_ramend = memsize;

			} else if (!memcmp(to, "max_mem=", 8)) {
				to += 8;
				memsize = memparse(to, &to);
				if (memsize) {
					physical_mem_end = memsize;
					if (*to != ' ') {
						if (*to == '$'
						    || *(to + 1) == '$')
							reserved_mem_dcache_on = 1;
						if (*to == '#'
						    || *(to + 1) == '#')
							reserved_mem_icache_on = 1;
					}
				}
			} else if (!memcmp(to, "clkin_hz=", 9)) {
				to += 9;
				early_init_clkin_hz(to);
#ifdef CONFIG_EARLY_PRINTK
			} else if (!memcmp(to, "earlyprintk=", 12)) {
				to += 12;
				setup_early_printk(to);
#endif
			} else if (!memcmp(to, "memmap=", 7)) {
				to += 7;
				parse_memmap(to);
			}
		}
		c = *(to++);
		if (!c)
			break;
	}
}
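
/*
 * Example (editor's addition): a boot line such as
 *
 *	mem=32M max_mem=64M$ clkin_hz=25000000 memmap=16M@0x1000000
 *
 * caps kernel-managed RAM at 32MiB (_ramend), records 64MiB of physical
 * memory with the reserved 32-64MiB tail marked dcacheable, stores a
 * 25MHz CLKIN, and adds a 16MiB RAM region at the 16MiB mark to the
 * boot memmap.
 */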

/*
 * Setup memory defaults from user config.
 * The physical memory layout looks like:
 *
 *  [_rambase, _ramstart]:		kernel image
 *  [memory_start, memory_end]:		dynamic memory managed by kernel
 *  [memory_end, _ramend]:		reserved memory
 *	[memory_mtd_start(memory_end),
 *	memory_mtd_start + mtd_size]:	rootfs (if any)
 *  [_ramend - DMA_UNCACHED_REGION,
 *	_ramend]:			uncached DMA region
 *  [_ramend, physical_mem_end]:	memory not managed by kernel
 */
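
/*
 * Concrete example (editor's sketch, hypothetical numbers): on a 64MiB
 * part with a 1MiB uncached DMA region and no attached rootfs, the
 * layout above works out to roughly:
 *
 *	_rambase  .. _ramstart	kernel image
 *	_ramstart .. 63MiB	memory handed to the page allocator
 *	63MiB     .. 64MiB	uncached DMA region
 *	64MiB = _ramend = physical_mem_end (nothing reserved beyond)
 */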
static __init void memory_setup(void)
{
#ifdef CONFIG_MTD_UCLINUX
	unsigned long mtd_phys = 0;
#endif
	unsigned long max_mem;

	_rambase = CONFIG_BOOT_LOAD;
	_ramstart = (unsigned long)_end;

	if (DMA_UNCACHED_REGION > (_ramend - _ramstart)) {
		console_init();
		panic("DMA region exceeds memory limit: %lu.",
			_ramend - _ramstart);
	}
	max_mem = memory_end = _ramend - DMA_UNCACHED_REGION;

#if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263)
	/* Due to a Hardware Anomaly we need to limit the size of usable
	 * instruction memory to max 60MB, 56 if HUNT_FOR_ZERO is on
	 * 05000263 - Hardware loop corrupted when taking an ICPLB exception
	 */
# if (defined(CONFIG_DEBUG_HUNT_FOR_ZERO))
	if (max_mem >= 56 * 1024 * 1024)
		max_mem = 56 * 1024 * 1024;
# else
	if (max_mem >= 60 * 1024 * 1024)
		max_mem = 60 * 1024 * 1024;
# endif				/* CONFIG_DEBUG_HUNT_FOR_ZERO */
#endif				/* ANOMALY_05000263 */


#ifdef CONFIG_MPU
	/* Round up to multiple of 4MB */
	memory_start = (_ramstart + 0x3fffff) & ~0x3fffff;
#else
	memory_start = PAGE_ALIGN(_ramstart);
#endif

#if defined(CONFIG_MTD_UCLINUX)
	/* generic memory mapped MTD driver */
	memory_mtd_end = memory_end;

	mtd_phys = _ramstart;
	mtd_size = PAGE_ALIGN(*((unsigned long *)(mtd_phys + 8)));

# if defined(CONFIG_EXT2_FS) || defined(CONFIG_EXT3_FS)
	if (*((unsigned short *)(mtd_phys + 0x438)) == EXT2_SUPER_MAGIC)
		mtd_size =
		    PAGE_ALIGN(*((unsigned long *)(mtd_phys + 0x404)) << 10);
# endif

# if defined(CONFIG_CRAMFS)
	if (*((unsigned long *)(mtd_phys)) == CRAMFS_MAGIC)
		mtd_size = PAGE_ALIGN(*((unsigned long *)(mtd_phys + 0x4)));
# endif

# if defined(CONFIG_ROMFS_FS)
	if (((unsigned long *)mtd_phys)[0] == ROMSB_WORD0
	    && ((unsigned long *)mtd_phys)[1] == ROMSB_WORD1) {
		mtd_size =
		    PAGE_ALIGN(be32_to_cpu(((unsigned long *)mtd_phys)[2]));

		/* ROM_FS is XIP, so if we found it, we need to limit memory */
		if (memory_end > max_mem) {
			pr_info("Limiting kernel memory to %liMB due to anomaly 05000263\n",
				(max_mem - CONFIG_PHY_RAM_BASE_ADDRESS) >> 20);
			memory_end = max_mem;
		}
	}
# endif				/* CONFIG_ROMFS_FS */

	/* Since the default MTD_UCLINUX has no magic number, we just blindly
	 * read 8 past the end of the kernel's image, and look at it.
	 * When no image is attached, mtd_size is set to a random number.
	 * Do some basic sanity checks before operating on things.
	 */
	if (mtd_size == 0 || memory_end <= mtd_size) {
		pr_emerg("Could not find valid ram mtd attached.\n");
	} else {
		memory_end -= mtd_size;

		/* Relocate MTD image to the top of memory after the uncached memory area */
		uclinux_ram_map.phys = memory_mtd_start = memory_end;
		uclinux_ram_map.size = mtd_size;
		pr_info("Found mtd partition at 0x%p, (len=0x%lx), moving to 0x%p\n",
			_end, mtd_size, (void *)memory_mtd_start);
		dma_memcpy((void *)uclinux_ram_map.phys, _end, uclinux_ram_map.size);
	}
#endif				/* CONFIG_MTD_UCLINUX */

	/* We need to limit memory, since everything could have a text section
	 * of userspace in it, and expose anomaly 05000263.  If the anomaly
	 * doesn't exist, or we don't need to - then don't.
	 */
	if (memory_end > max_mem) {
		pr_info("Limiting kernel memory to %liMB due to anomaly 05000263\n",
			(max_mem - CONFIG_PHY_RAM_BASE_ADDRESS) >> 20);
		memory_end = max_mem;
	}

#ifdef CONFIG_MPU
#if defined(CONFIG_ROMFS_ON_MTD) && defined(CONFIG_MTD_ROM)
	page_mask_nelts = (((_ramend + ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE -
		ASYNC_BANK0_BASE) >> PAGE_SHIFT) + 31) / 32;
#else
	page_mask_nelts = ((_ramend >> PAGE_SHIFT) + 31) / 32;
#endif
	page_mask_order = get_order(3 * page_mask_nelts * sizeof(long));
#endif

	init_mm.start_code = (unsigned long)_stext;
	init_mm.end_code = (unsigned long)_etext;
	init_mm.end_data = (unsigned long)_edata;
	init_mm.brk = (unsigned long)0;

	printk(KERN_INFO "Board Memory: %ldMB\n", (physical_mem_end - CONFIG_PHY_RAM_BASE_ADDRESS) >> 20);
	printk(KERN_INFO "Kernel Managed Memory: %ldMB\n", (_ramend - CONFIG_PHY_RAM_BASE_ADDRESS) >> 20);

	printk(KERN_INFO "Memory map:\n"
	       "  fixedcode = 0x%p-0x%p\n"
	       "  text      = 0x%p-0x%p\n"
	       "  rodata    = 0x%p-0x%p\n"
	       "  bss       = 0x%p-0x%p\n"
	       "  data      = 0x%p-0x%p\n"
	       "    stack   = 0x%p-0x%p\n"
	       "  init      = 0x%p-0x%p\n"
	       "  available = 0x%p-0x%p\n"
#ifdef CONFIG_MTD_UCLINUX
	       "  rootfs    = 0x%p-0x%p\n"
#endif
#if DMA_UNCACHED_REGION > 0
	       "  DMA Zone  = 0x%p-0x%p\n"
#endif
		, (void *)FIXED_CODE_START, (void *)FIXED_CODE_END,
		_stext, _etext,
		__start_rodata, __end_rodata,
		__bss_start, __bss_stop,
		_sdata, _edata,
		(void *)&init_thread_union,
		(void *)((int)(&init_thread_union) + THREAD_SIZE),
		__init_begin, __init_end,
		(void *)_ramstart, (void *)memory_end
#ifdef CONFIG_MTD_UCLINUX
		, (void *)memory_mtd_start, (void *)(memory_mtd_start + mtd_size)
#endif
#if DMA_UNCACHED_REGION > 0
		, (void *)(_ramend - DMA_UNCACHED_REGION), (void *)(_ramend)
#endif
		);
}

/*
 * Find the lowest and highest page frame numbers we have available
 */
static __init void find_min_max_pfn(void)
{
	int i;

	max_pfn = 0;
	min_low_pfn = PFN_DOWN(memory_end);

	for (i = 0; i < bfin_memmap.nr_map; i++) {
		unsigned long start, end;
		/* RAM? */
		if (bfin_memmap.map[i].type != BFIN_MEMMAP_RAM)
			continue;
		start = PFN_UP(bfin_memmap.map[i].addr);
		end = PFN_DOWN(bfin_memmap.map[i].addr +
				bfin_memmap.map[i].size);
		if (start >= end)
			continue;
		if (end > max_pfn)
			max_pfn = end;
		if (start < min_low_pfn)
			min_low_pfn = start;
	}
}
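
/*
 * Editor's note: PFN_UP() rounds an address up to the next page frame and
 * PFN_DOWN() rounds down, so partial pages at the edges of a region are
 * excluded.  With 4KiB pages, for example, PFN_UP(0x1001) == 2 while
 * PFN_DOWN(0x2fff) == 2, so the region [0x1001, 0x2fff) contains no
 * fully usable page frame.
 */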

static __init void setup_bootmem_allocator(void)
{
	int bootmap_size;
	int i;
	unsigned long start_pfn, end_pfn;
	unsigned long curr_pfn, last_pfn, size;

	/* mark memory between memory_start and memory_end usable */
	add_memory_region(memory_start,
		memory_end - memory_start, BFIN_MEMMAP_RAM);
	/* sanity check for overlap */
	sanitize_memmap(bfin_memmap.map, &bfin_memmap.nr_map);
	print_memory_map("boot memmap");

	/* initialize globals in linux/bootmem.h */
	find_min_max_pfn();
	/* pfn of the last usable page frame */
	if (max_pfn > memory_end >> PAGE_SHIFT)
		max_pfn = memory_end >> PAGE_SHIFT;
	/* pfn of last page frame directly mapped by kernel */
	max_low_pfn = max_pfn;
	/* pfn of the first usable page frame after kernel image */
	if (min_low_pfn < memory_start >> PAGE_SHIFT)
		min_low_pfn = memory_start >> PAGE_SHIFT;
	start_pfn = CONFIG_PHY_RAM_BASE_ADDRESS >> PAGE_SHIFT;
	end_pfn = memory_end >> PAGE_SHIFT;

	/*
	 * give all the memory to the bootmap allocator, tell it to put the
	 * boot mem_map at the start of memory.
	 */
	bootmap_size = init_bootmem_node(NODE_DATA(0),
			memory_start >> PAGE_SHIFT,	/* map goes here */
			start_pfn, end_pfn);

	/* register the memmap regions with the bootmem allocator */
	for (i = 0; i < bfin_memmap.nr_map; i++) {
		/*
		 * Reserve usable memory
		 */
		if (bfin_memmap.map[i].type != BFIN_MEMMAP_RAM)
			continue;
		/*
		 * We are rounding up the start address of usable memory:
		 */
		curr_pfn = PFN_UP(bfin_memmap.map[i].addr);
		if (curr_pfn >= end_pfn)
			continue;
		/*
		 * ... and at the end of the usable range downwards:
		 */
		last_pfn = PFN_DOWN(bfin_memmap.map[i].addr +
				bfin_memmap.map[i].size);

		if (last_pfn > end_pfn)
			last_pfn = end_pfn;

		/*
		 * .. finally, did all the rounding and playing
		 * around just make the area go away?
		 */
		if (last_pfn <= curr_pfn)
			continue;

		size = last_pfn - curr_pfn;
		free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
	}

	/* reserve memory before memory_start, including bootmap */
	reserve_bootmem(CONFIG_PHY_RAM_BASE_ADDRESS,
		memory_start + bootmap_size + PAGE_SIZE - 1 - CONFIG_PHY_RAM_BASE_ADDRESS,
		BOOTMEM_DEFAULT);
}

#define EBSZ_TO_MEG(ebsz) \
({ \
	int meg = 0; \
	switch (ebsz & 0xf) { \
		case 0x1: meg =  16; break; \
		case 0x3: meg =  32; break; \
		case 0x5: meg =  64; break; \
		case 0x7: meg = 128; break; \
		case 0x9: meg = 256; break; \
		case 0xb: meg = 512; break; \
	} \
	meg; \
})
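/*
 * Worked example (editor's addition, hypothetical register value): on a
 * BF561 with all four SDRAM banks programmed for 64MiB, EBIU_SDBCTL might
 * read 0x05050505.  Each byte's low nibble is 0x5, so EBSZ_TO_MEG()
 * evaluates to 64 for every bank and get_mem_size() below sums them
 * to 256 (MiB).
 */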
static inline int __init get_mem_size(void)
{
#if defined(EBIU_SDBCTL)
# if defined(BF561_FAMILY)
	int ret = 0;
	u32 sdbctl = bfin_read_EBIU_SDBCTL();
	ret += EBSZ_TO_MEG(sdbctl >>  0);
	ret += EBSZ_TO_MEG(sdbctl >>  8);
	ret += EBSZ_TO_MEG(sdbctl >> 16);
	ret += EBSZ_TO_MEG(sdbctl >> 24);
	return ret;
# else
	return EBSZ_TO_MEG(bfin_read_EBIU_SDBCTL());
# endif
#elif defined(EBIU_DDRCTL1)
	u32 ddrctl = bfin_read_EBIU_DDRCTL1();
	int ret = 0;
	switch (ddrctl & 0xc0000) {
	case DEVSZ_64:
		ret = 64 / 8;
		break;
	case DEVSZ_128:
		ret = 128 / 8;
		break;
	case DEVSZ_256:
		ret = 256 / 8;
		break;
	case DEVSZ_512:
		ret = 512 / 8;
		break;
	}
	switch (ddrctl & 0x30000) {
	case DEVWD_4:
		ret *= 2;
		/* fall through */
	case DEVWD_8:
		ret *= 2;
		/* fall through */
	case DEVWD_16:
		break;
	}
	if ((ddrctl & 0xc000) == 0x4000)
		ret *= 2;
	return ret;
#elif defined(CONFIG_BF60x)
	u32 ddrctl = bfin_read_DMC0_CFG();
	int ret;
	switch (ddrctl & 0xf00) {
	case DEVSZ_64:
		ret = 64 / 8;
		break;
	case DEVSZ_128:
		ret = 128 / 8;
		break;
	case DEVSZ_256:
		ret = 256 / 8;
		break;
	case DEVSZ_512:
		ret = 512 / 8;
		break;
	case DEVSZ_1G:
		ret = 1024 / 8;
		break;
	case DEVSZ_2G:
		ret = 2048 / 8;
		break;
	}
	return ret;
#endif
	BUG();
}

__attribute__((weak))
void __init native_machine_early_platform_add_devices(void)
{
}

#ifdef CONFIG_BF60x
static inline u_long bfin_get_clk(char *name)
{
	struct clk *clk;
	u_long clk_rate;

	clk = clk_get(NULL, name);
	if (IS_ERR(clk))
		return 0;

	clk_rate = clk_get_rate(clk);
	clk_put(clk);
	return clk_rate;
}
#endif

void __init setup_arch(char **cmdline_p)
{
	u32 mmr;
	unsigned long sclk, cclk;

	native_machine_early_platform_add_devices();

	enable_shadow_console();

	/* Check to make sure we are running on the right processor */
	mmr = bfin_cpuid();
	if (unlikely(CPUID != bfin_cpuid()))
		printk(KERN_ERR "ERROR: Not running on ADSP-%s: unknown CPUID 0x%04x Rev 0.%d\n",
			CPU, bfin_cpuid(), bfin_revid());

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif

#if defined(CONFIG_CMDLINE_BOOL)
	strncpy(&command_line[0], CONFIG_CMDLINE, sizeof(command_line));
	command_line[sizeof(command_line) - 1] = 0;
#endif

	/* Keep a copy of command line */
	*cmdline_p = &command_line[0];
	memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
	boot_command_line[COMMAND_LINE_SIZE - 1] = '\0';

	memset(&bfin_memmap, 0, sizeof(bfin_memmap));

#ifdef CONFIG_BF60x
	/* Should init clock device before parse command early */
	clk_init();
#endif
	/* If the user does not specify things on the command line, use
	 * what the bootloader set things up as
	 */
	physical_mem_end = 0;
	parse_cmdline_early(&command_line[0]);

	if (_ramend == 0)
		_ramend = get_mem_size() * 1024 * 1024;

	if (physical_mem_end == 0)
		physical_mem_end = _ramend;

	memory_setup();

#ifndef CONFIG_BF60x
	/* Initialize Async memory banks */
	bfin_write_EBIU_AMBCTL0(AMBCTL0VAL);
	bfin_write_EBIU_AMBCTL1(AMBCTL1VAL);
	bfin_write_EBIU_AMGCTL(AMGCTLVAL);
#ifdef CONFIG_EBIU_MBSCTLVAL
	bfin_write_EBIU_MBSCTL(CONFIG_EBIU_MBSCTLVAL);
	bfin_write_EBIU_MODE(CONFIG_EBIU_MODEVAL);
	bfin_write_EBIU_FCTL(CONFIG_EBIU_FCTLVAL);
#endif
#endif
#ifdef CONFIG_BFIN_HYSTERESIS_CONTROL
	bfin_write_PORTF_HYSTERESIS(HYST_PORTF_0_15);
	bfin_write_PORTG_HYSTERESIS(HYST_PORTG_0_15);
	bfin_write_PORTH_HYSTERESIS(HYST_PORTH_0_15);
	bfin_write_MISCPORT_HYSTERESIS((bfin_read_MISCPORT_HYSTERESIS() &
					~HYST_NONEGPIO_MASK) | HYST_NONEGPIO);
#endif

	cclk = get_cclk();
	sclk = get_sclk();

	if ((ANOMALY_05000273 || ANOMALY_05000274) && (cclk >> 1) < sclk)
		panic("ANOMALY 05000273 or 05000274: CCLK must be >= 2*SCLK");

#ifdef BF561_FAMILY
	if (ANOMALY_05000266) {
		bfin_read_IMDMA_D0_IRQ_STATUS();
		bfin_read_IMDMA_D1_IRQ_STATUS();
	}
#endif

	mmr = bfin_read_TBUFCTL();
	printk(KERN_INFO "Hardware Trace %s and %sabled\n",
		(mmr & 0x1) ? "active" : "off",
		(mmr & 0x2) ? "en" : "dis");
#ifndef CONFIG_BF60x
	mmr = bfin_read_SYSCR();
	printk(KERN_INFO "Boot Mode: %i\n", mmr & 0xF);

	/* Newer parts mirror SWRST bits in SYSCR */
#if defined(CONFIG_BF53x) || defined(CONFIG_BF561) || \
    defined(CONFIG_BF538) || defined(CONFIG_BF539)
	_bfin_swrst = bfin_read_SWRST();
#else
	/* Clear boot mode field */
	_bfin_swrst = mmr & ~0xf;
#endif

#ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT
	bfin_write_SWRST(_bfin_swrst & ~DOUBLE_FAULT);
#endif
#ifdef CONFIG_DEBUG_DOUBLEFAULT_RESET
	bfin_write_SWRST(_bfin_swrst | DOUBLE_FAULT);
#endif

#ifdef CONFIG_SMP
	if (_bfin_swrst & SWRST_DBL_FAULT_A) {
#else
	if (_bfin_swrst & RESET_DOUBLE) {
#endif
		printk(KERN_EMERG "Recovering from DOUBLE FAULT event\n");
#ifdef CONFIG_DEBUG_DOUBLEFAULT
		/* We assume the crashing kernel, and the current symbol table match */
		printk(KERN_EMERG " While handling exception (EXCAUSE = %#x) at %pF\n",
			initial_pda.seqstat_doublefault & SEQSTAT_EXCAUSE,
			initial_pda.retx_doublefault);
		printk(KERN_NOTICE "   DCPLB_FAULT_ADDR: %pF\n",
			initial_pda.dcplb_doublefault_addr);
		printk(KERN_NOTICE "   ICPLB_FAULT_ADDR: %pF\n",
			initial_pda.icplb_doublefault_addr);
#endif
		printk(KERN_NOTICE " The instruction at %pF caused a double exception\n",
			initial_pda.retx);
	} else if (_bfin_swrst & RESET_WDOG)
		printk(KERN_INFO "Recovering from Watchdog event\n");
	else if (_bfin_swrst & RESET_SOFTWARE)
		printk(KERN_NOTICE "Reset caused by Software reset\n");
#endif
	printk(KERN_INFO "Blackfin support (C) 2004-2010 Analog Devices, Inc.\n");
	if (bfin_compiled_revid() == 0xffff)
		printk(KERN_INFO "Compiled for ADSP-%s Rev any, running on 0.%d\n", CPU, bfin_revid());
	else if (bfin_compiled_revid() == -1)
		printk(KERN_INFO "Compiled for ADSP-%s Rev none\n", CPU);
	else
		printk(KERN_INFO "Compiled for ADSP-%s Rev 0.%d\n", CPU, bfin_compiled_revid());

	if (likely(CPUID == bfin_cpuid())) {
		if (bfin_revid() != bfin_compiled_revid()) {
			if (bfin_compiled_revid() == -1)
				printk(KERN_ERR "Warning: Compiled for Rev none, but running on Rev %d\n",
					bfin_revid());
			else if (bfin_compiled_revid() != 0xffff) {
				printk(KERN_ERR "Warning: Compiled for Rev %d, but running on Rev %d\n",
					bfin_compiled_revid(), bfin_revid());
				if (bfin_compiled_revid() > bfin_revid())
					panic("Error: you are missing anomaly workarounds for this rev");
			}
		}
		if (bfin_revid() < CONFIG_BF_REV_MIN || bfin_revid() > CONFIG_BF_REV_MAX)
			printk(KERN_ERR "Warning: Unsupported Chip Revision ADSP-%s Rev 0.%d detected\n",
				CPU, bfin_revid());
	}

	printk(KERN_INFO "Blackfin Linux support by http://blackfin.uclinux.org/\n");

#ifdef CONFIG_BF60x
	printk(KERN_INFO "Processor Speed: %lu MHz core clock, %lu MHz SCLK, %lu MHz SCLK0, %lu MHz SCLK1 and %lu MHz DCLK\n",
		cclk / 1000000, bfin_get_clk("SYSCLK") / 1000000, get_sclk0() / 1000000, get_sclk1() / 1000000, get_dclk() / 1000000);
#else
	printk(KERN_INFO "Processor Speed: %lu MHz core clock and %lu MHz System Clock\n",
		cclk / 1000000, sclk / 1000000);
#endif

	setup_bootmem_allocator();

	paging_init();

	/* Copy atomic sequences to their fixed location, and sanity check that
	   these locations are the ones that we advertise to userspace.  */
	memcpy((void *)FIXED_CODE_START, &fixed_code_start,
	       FIXED_CODE_END - FIXED_CODE_START);
	BUG_ON((char *)&sigreturn_stub - (char *)&fixed_code_start
	       != SIGRETURN_STUB - FIXED_CODE_START);
	BUG_ON((char *)&atomic_xchg32 - (char *)&fixed_code_start
	       != ATOMIC_XCHG32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_cas32 - (char *)&fixed_code_start
	       != ATOMIC_CAS32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_add32 - (char *)&fixed_code_start
	       != ATOMIC_ADD32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_sub32 - (char *)&fixed_code_start
	       != ATOMIC_SUB32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_ior32 - (char *)&fixed_code_start
	       != ATOMIC_IOR32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_and32 - (char *)&fixed_code_start
	       != ATOMIC_AND32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_xor32 - (char *)&fixed_code_start
	       != ATOMIC_XOR32 - FIXED_CODE_START);
	BUG_ON((char *)&safe_user_instruction - (char *)&fixed_code_start
	       != SAFE_USER_INSTRUCTION - FIXED_CODE_START);

#ifdef CONFIG_SMP
	platform_init_cpus();
#endif
	init_exception_vectors();
	bfin_cache_init();	/* Initialize caches for the boot CPU */
}

static int __init topology_init(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu);
	}

	return 0;
}

subsys_initcall(topology_init);

/* Get the input clock frequency */
static u_long cached_clkin_hz = CONFIG_CLKIN_HZ;
#ifndef CONFIG_BF60x
static u_long get_clkin_hz(void)
{
	return cached_clkin_hz;
}
#endif
static int __init early_init_clkin_hz(char *buf)
{
	cached_clkin_hz = simple_strtoul(buf, NULL, 0);
#ifdef BFIN_KERNEL_CLOCK
	if (cached_clkin_hz != CONFIG_CLKIN_HZ)
		panic("cannot change clkin_hz when reprogramming clocks");
#endif
	return 1;
}
early_param("clkin_hz=", early_init_clkin_hz);

#ifndef CONFIG_BF60x
/* Get the voltage input multiplier */
static u_long get_vco(void)
{
	static u_long cached_vco;
	u_long msel, pll_ctl;

	/* The assumption here is that VCO never changes at runtime.
	 * If, someday, we support that, then we'll have to change this.
	 */
	if (cached_vco)
		return cached_vco;

	pll_ctl = bfin_read_PLL_CTL();
	msel = (pll_ctl >> 9) & 0x3F;
	if (0 == msel)
		msel = 64;

	cached_vco = get_clkin_hz();
	cached_vco >>= (1 & pll_ctl);	/* DF bit */
	cached_vco *= msel;
	return cached_vco;
}
#endif
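
/*
 * Worked example (editor's addition): VCO = (CLKIN >> DF) * MSEL.  With a
 * 25MHz CLKIN, DF = 0 and MSEL = 16, get_vco() returns
 * 25,000,000 * 16 = 400,000,000 (400MHz).
 */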

/* Get the Core clock */
u_long get_cclk(void)
{
#ifdef CONFIG_BF60x
	return bfin_get_clk("CCLK");
#else
	static u_long cached_cclk_pll_div, cached_cclk;
	u_long csel, ssel;

	if (bfin_read_PLL_STAT() & 0x1)
		return get_clkin_hz();

	ssel = bfin_read_PLL_DIV();
	if (ssel == cached_cclk_pll_div)
		return cached_cclk;
	else
		cached_cclk_pll_div = ssel;

	csel = ((ssel >> 4) & 0x03);
	ssel &= 0xf;
	if (ssel && ssel < (1 << csel))	/* SCLK > CCLK */
		cached_cclk = get_vco() / ssel;
	else
		cached_cclk = get_vco() >> csel;
	return cached_cclk;
#endif
}
EXPORT_SYMBOL(get_cclk);
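
/*
 * Worked example (editor's addition): with VCO at 400MHz and PLL_DIV
 * holding CSEL = 1, SSEL = 4, the SSEL value (4) is not below
 * 1 << CSEL (2), so get_cclk() returns 400MHz >> 1 = 200MHz.
 */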

#ifdef CONFIG_BF60x
/* Get the bf60x clock of SCLK0 domain */
u_long get_sclk0(void)
{
	return bfin_get_clk("SCLK0");
}
EXPORT_SYMBOL(get_sclk0);

/* Get the bf60x clock of SCLK1 domain */
u_long get_sclk1(void)
{
	return bfin_get_clk("SCLK1");
}
EXPORT_SYMBOL(get_sclk1);

/* Get the bf60x DRAM clock */
u_long get_dclk(void)
{
	return bfin_get_clk("DCLK");
}
EXPORT_SYMBOL(get_dclk);
#endif

/* Get the default system clock */
u_long get_sclk(void)
{
#ifdef CONFIG_BF60x
	return get_sclk0();
#else
	static u_long cached_sclk;
	u_long ssel;

	/* The assumption here is that SCLK never changes at runtime.
	 * If, someday, we support that, then we'll have to change this.
	 */
	if (cached_sclk)
		return cached_sclk;

	if (bfin_read_PLL_STAT() & 0x1)
		return get_clkin_hz();

	ssel = bfin_read_PLL_DIV() & 0xf;
	if (0 == ssel) {
		printk(KERN_WARNING "Invalid System Clock\n");
		ssel = 1;
	}

	cached_sclk = get_vco() / ssel;
	return cached_sclk;
#endif
}
EXPORT_SYMBOL(get_sclk);

unsigned long sclk_to_usecs(unsigned long sclk)
{
	u64 tmp = USEC_PER_SEC * (u64)sclk;
	do_div(tmp, get_sclk());
	return tmp;
}
EXPORT_SYMBOL(sclk_to_usecs);

unsigned long usecs_to_sclk(unsigned long usecs)
{
	u64 tmp = get_sclk() * (u64)usecs;
	do_div(tmp, USEC_PER_SEC);
	return tmp;
}
EXPORT_SYMBOL(usecs_to_sclk);
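
/*
 * Worked example (editor's addition): with SCLK at 133MHz,
 * usecs_to_sclk(10) = 133,000,000 * 10 / 1,000,000 = 1330 ticks, and
 * sclk_to_usecs(1330) converts back to 10 microseconds.
 */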

/*
 * Get CPU information for use by the procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	char *cpu, *mmu, *fpu, *vendor, *cache;
	uint32_t revid;
	int cpu_num = *(unsigned int *)v;
	u_long sclk, cclk;
	u_int icache_size = BFIN_ICACHESIZE / 1024, dcache_size = 0, dsup_banks = 0;
	struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu_num);

	cpu = CPU;
	mmu = "none";
	fpu = "none";
	revid = bfin_revid();

	sclk = get_sclk();
	cclk = get_cclk();

	switch (bfin_read_CHIPID() & CHIPID_MANUFACTURE) {
	case 0xca:
		vendor = "Analog Devices";
		break;
	default:
		vendor = "unknown";
		break;
	}

	seq_printf(m, "processor\t: %d\n" "vendor_id\t: %s\n", cpu_num, vendor);

	if (CPUID == bfin_cpuid())
		seq_printf(m, "cpu family\t: 0x%04x\n", CPUID);
	else
		seq_printf(m, "cpu family\t: Compiled for:0x%04x, running on:0x%04x\n",
			CPUID, bfin_cpuid());

	seq_printf(m, "model name\t: ADSP-%s %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n"
		"stepping\t: %d ",
		cpu, cclk/1000000, sclk/1000000,
#ifdef CONFIG_MPU
		"mpu on",
#else
		"mpu off",
#endif
		revid);

	if (bfin_revid() != bfin_compiled_revid()) {
		if (bfin_compiled_revid() == -1)
			seq_printf(m, "(Compiled for Rev none)");
		else if (bfin_compiled_revid() == 0xffff)
			seq_printf(m, "(Compiled for Rev any)");
		else
			seq_printf(m, "(Compiled for Rev %d)", bfin_compiled_revid());
	}

	seq_printf(m, "\ncpu MHz\t\t: %lu.%03lu/%lu.%03lu\n",
		cclk/1000000, cclk%1000000,
		sclk/1000000, sclk%1000000);
	seq_printf(m, "bogomips\t: %lu.%02lu\n"
		"Calibration\t: %lu loops\n",
		(loops_per_jiffy * HZ) / 500000,
		((loops_per_jiffy * HZ) / 5000) % 100,
		(loops_per_jiffy * HZ));

	/* Check Cache configuration */
	switch (cpudata->dmemctl & (1 << DMC0_P | 1 << DMC1_P)) {
	case ACACHE_BSRAM:
		cache = "dbank-A/B\t: cache/sram";
		dcache_size = 16;
		dsup_banks = 1;
		break;
	case ACACHE_BCACHE:
		cache = "dbank-A/B\t: cache/cache";
		dcache_size = 32;
		dsup_banks = 2;
		break;
	case ASRAM_BSRAM:
		cache = "dbank-A/B\t: sram/sram";
		dcache_size = 0;
		dsup_banks = 0;
		break;
	default:
		cache = "unknown";
		dcache_size = 0;
		dsup_banks = 0;
		break;
	}

	/* Is it turned on? */
	if ((cpudata->dmemctl & (ENDCPLB | DMC_ENABLE)) != (ENDCPLB | DMC_ENABLE))
		dcache_size = 0;

	if ((cpudata->imemctl & (IMC | ENICPLB)) != (IMC | ENICPLB))
		icache_size = 0;

	seq_printf(m, "cache size\t: %d KB(L1 icache) "
		"%d KB(L1 dcache) %d KB(L2 cache)\n",
		icache_size, dcache_size, 0);
	seq_printf(m, "%s\n", cache);
	seq_printf(m, "external memory\t: "
#if defined(CONFIG_BFIN_EXTMEM_ICACHEABLE)
		   "cacheable"
#else
		   "uncacheable"
#endif
		   " in instruction cache\n");
	seq_printf(m, "external memory\t: "
#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK)
		   "cacheable (write-back)"
#elif defined(CONFIG_BFIN_EXTMEM_WRITETHROUGH)
		   "cacheable (write-through)"
#else
		   "uncacheable"
#endif
		   " in data cache\n");

	if (icache_size)
		seq_printf(m, "icache setup\t: %d Sub-banks/%d Ways, %d Lines/Way\n",
			   BFIN_ISUBBANKS, BFIN_IWAYS, BFIN_ILINES);
	else
		seq_printf(m, "icache setup\t: off\n");

	seq_printf(m,
		   "dcache setup\t: %d Super-banks/%d Sub-banks/%d Ways, %d Lines/Way\n",
		   dsup_banks, BFIN_DSUBBANKS, BFIN_DWAYS,
		   BFIN_DLINES);
#ifdef __ARCH_SYNC_CORE_DCACHE
	seq_printf(m, "dcache flushes\t: %lu\n", dcache_invld_count[cpu_num]);
#endif
#ifdef __ARCH_SYNC_CORE_ICACHE
	seq_printf(m, "icache flushes\t: %lu\n", icache_invld_count[cpu_num]);
#endif

	seq_printf(m, "\n");

	if (cpu_num != num_possible_cpus() - 1)
		return 0;

	if (L2_LENGTH) {
		seq_printf(m, "L2 SRAM\t\t: %dKB\n", L2_LENGTH/0x400);
		seq_printf(m, "L2 SRAM\t\t: "
#if defined(CONFIG_BFIN_L2_ICACHEABLE)
			   "cacheable"
#else
			   "uncacheable"
#endif
			   " in instruction cache\n");
		seq_printf(m, "L2 SRAM\t\t: "
#if defined(CONFIG_BFIN_L2_WRITEBACK)
			   "cacheable (write-back)"
#elif defined(CONFIG_BFIN_L2_WRITETHROUGH)
			   "cacheable (write-through)"
#else
			   "uncacheable"
#endif
			   " in data cache\n");
	}
	seq_printf(m, "board name\t: %s\n", bfin_board_name);
	seq_printf(m, "board memory\t: %ld kB (0x%08lx -> 0x%08lx)\n",
		physical_mem_end >> 10, 0ul, physical_mem_end);
	seq_printf(m, "kernel memory\t: %d kB (0x%08lx -> 0x%08lx)\n",
		((int)memory_end - (int)_rambase) >> 10,
		_rambase, memory_end);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0)
		*pos = cpumask_first(cpu_online_mask);
	if (*pos >= num_online_cpus())
		return NULL;

	return pos;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	*pos = cpumask_next(*pos, cpu_online_mask);

	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = show_cpuinfo,
};

void __init cmdline_init(const char *r0)
{
	early_shadow_stamp();
	if (r0)
		strncpy(command_line, r0, COMMAND_LINE_SIZE);
}