Linux Kernel 3.7.1
cplbinit.c
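Source listing of the Blackfin no-MPU CPLB initialization code; in the 3.7.1 tree this file lives at arch/blackfin/kernel/cplb-nompu/cplbinit.c.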
/*
 * Blackfin CPLB initialization
 *
 * Copyright 2007-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/module.h>

#include <asm/blackfin.h>
#include <asm/cacheflush.h>
#include <asm/cplb.h>
#include <asm/cplbinit.h>
#include <asm/mem_map.h>

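/*
 * CPLB (Cacheability Protection Lookaside Buffer) tables.  The *_tbl
 * arrays hold the per-core entries generated below; the *_bounds arrays
 * describe address ranges together with the CPLB data flags to use for
 * them, and the *_nr_bounds counters record how many entries are valid.
 */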
struct cplb_entry icplb_tbl[NR_CPUS][MAX_CPLBS] PDT_ATTR;
struct cplb_entry dcplb_tbl[NR_CPUS][MAX_CPLBS] PDT_ATTR;

int first_switched_icplb PDT_ATTR;
int first_switched_dcplb PDT_ATTR;

struct cplb_boundary dcplb_bounds[9] PDT_ATTR;
struct cplb_boundary icplb_bounds[9] PDT_ATTR;

int icplb_nr_bounds PDT_ATTR;
int dcplb_nr_bounds PDT_ATTR;

void __init generate_cplb_tables_cpu(unsigned int cpu)
{
	int i_d, i_i;
	unsigned long addr;

	struct cplb_entry *d_tbl = dcplb_tbl[cpu];
	struct cplb_entry *i_tbl = icplb_tbl[cpu];

	printk(KERN_INFO "NOMPU: setting up cplb tables\n");

	i_d = i_i = 0;

#ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
	/* Set up the zero page. */
	d_tbl[i_d].addr = 0;
	d_tbl[i_d++].data = SDRAM_OOPS | PAGE_SIZE_1KB;
	i_tbl[i_i].addr = 0;
	i_tbl[i_i++].data = SDRAM_OOPS | PAGE_SIZE_1KB;
#endif
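	/*
	 * The 1 KB entry at address 0 lets CONFIG_DEBUG_HUNT_FOR_ZERO catch
	 * NULL pointer accesses that would otherwise fall into the generic
	 * SDRAM mapping set up below.
	 */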

	/* Cover kernel memory with 4M pages. */
	addr = 0;

	for (; addr < memory_start; addr += 4 * 1024 * 1024) {
		d_tbl[i_d].addr = addr;
		d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_4MB;
		i_tbl[i_i].addr = addr;
		i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB;
	}
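	/*
	 * Example with illustrative numbers: a memory_start of 56 MB would
	 * make the loop above emit fourteen 4 MB entries in each table.
	 */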

#ifdef CONFIG_ROMKERNEL
	/* Cover kernel XIP flash area */
#ifdef CONFIG_BF60x
	addr = CONFIG_ROM_BASE & ~(16 * 1024 * 1024 - 1);
	d_tbl[i_d].addr = addr;
	d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_16MB;
	i_tbl[i_i].addr = addr;
	i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_16MB;
#else
	addr = CONFIG_ROM_BASE & ~(4 * 1024 * 1024 - 1);
	d_tbl[i_d].addr = addr;
	d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_4MB;
	i_tbl[i_i].addr = addr;
	i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB;
#endif
#endif
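	/*
	 * The masking of CONFIG_ROM_BASE above rounds the XIP flash base
	 * down to the page boundary, e.g. a hypothetical ROM base of
	 * 0x20001000 is covered by a 4 MB (or 16 MB on BF60x) page starting
	 * at 0x20000000.
	 */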

	/* Cover L1 memory.  One 4M area for code and data each is enough. */
	if (cpu == 0) {
		if (L1_DATA_A_LENGTH || L1_DATA_B_LENGTH) {
			d_tbl[i_d].addr = L1_DATA_A_START;
			d_tbl[i_d++].data = L1_DMEMORY | PAGE_SIZE_4MB;
		}
		i_tbl[i_i].addr = L1_CODE_START;
		i_tbl[i_i++].data = L1_IMEMORY | PAGE_SIZE_4MB;
	}
#ifdef CONFIG_SMP
	else {
		if (L1_DATA_A_LENGTH || L1_DATA_B_LENGTH) {
			d_tbl[i_d].addr = COREB_L1_DATA_A_START;
			d_tbl[i_d++].data = L1_DMEMORY | PAGE_SIZE_4MB;
		}
		i_tbl[i_i].addr = COREB_L1_CODE_START;
		i_tbl[i_i++].data = L1_IMEMORY | PAGE_SIZE_4MB;
	}
#endif
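	/*
	 * Record how many fixed entries were installed; the runtime CPLB
	 * miss handler only replaces entries at or above these indices.
	 */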
	first_switched_dcplb = i_d;
	first_switched_icplb = i_i;

	BUG_ON(first_switched_dcplb > MAX_CPLBS);
	BUG_ON(first_switched_icplb > MAX_CPLBS);

	while (i_d < MAX_CPLBS)
		d_tbl[i_d++].data = 0;
	while (i_i < MAX_CPLBS)
		i_tbl[i_i++].data = 0;
}
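
/*
 * Build the boundary tables consulted when a CPLB miss is resolved at run
 * time: each entry gives the end address (exclusive) of a region and the
 * CPLB data flags to use below it, with a data value of 0 marking an
 * unmapped hole.
 */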
void __init generate_cplb_tables_all(void)
{
	unsigned long uncached_end;
	int i_d, i_i;

	i_d = 0;
	/* Normal RAM, including MTD FS. */
#ifdef CONFIG_MTD_UCLINUX
	uncached_end = memory_mtd_start + mtd_size;
#else
	uncached_end = memory_end;
#endif
	/*
	 * If the DMA uncached region is smaller than 1 MB, mark the whole
	 * 1 MB chunk as uncached so that we don't have to use 4 KB pages
	 * and cause CPLB thrashing.
	 */
	if ((DMA_UNCACHED_REGION >= 1 * 1024 * 1024) || !DMA_UNCACHED_REGION ||
	    ((_ramend - uncached_end) >= 1 * 1024 * 1024))
		dcplb_bounds[i_d].eaddr = uncached_end;
	else
		dcplb_bounds[i_d].eaddr = uncached_end & ~(1 * 1024 * 1024 - 1);
	dcplb_bounds[i_d++].data = SDRAM_DGENERIC;
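	/*
	 * Example with illustrative numbers: with _ramend = 0x04000000 and a
	 * 512 KB DMA_UNCACHED_REGION, an uncached_end of 0x03f80000 is
	 * rounded down to 0x03f00000, so the whole last megabyte of SDRAM is
	 * left uncached instead of being split into 4 KB pages.
	 */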
	/* DMA uncached region. */
	if (DMA_UNCACHED_REGION) {
		dcplb_bounds[i_d].eaddr = _ramend;
		dcplb_bounds[i_d++].data = SDRAM_DNON_CHBL;
	}
	if (_ramend != physical_mem_end) {
		/* Reserved memory. */
		dcplb_bounds[i_d].eaddr = physical_mem_end;
		dcplb_bounds[i_d++].data = (reserved_mem_dcache_on ?
					    SDRAM_DGENERIC : SDRAM_DNON_CHBL);
	}
	/* Addressing hole up to the async bank. */
	dcplb_bounds[i_d].eaddr = ASYNC_BANK0_BASE;
	dcplb_bounds[i_d++].data = 0;
	/* ASYNC banks. */
	dcplb_bounds[i_d].eaddr = ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE;
	dcplb_bounds[i_d++].data = SDRAM_EBIU;
	/* Addressing hole up to BootROM. */
	dcplb_bounds[i_d].eaddr = BOOT_ROM_START;
	dcplb_bounds[i_d++].data = 0;
	/* BootROM -- the largest one should be less than 1 MB. */
	dcplb_bounds[i_d].eaddr = BOOT_ROM_START + (1 * 1024 * 1024);
	dcplb_bounds[i_d++].data = SDRAM_DGENERIC;
	if (L2_LENGTH) {
		/* Addressing hole up to L2 SRAM. */
		dcplb_bounds[i_d].eaddr = L2_START;
		dcplb_bounds[i_d++].data = 0;
		/* L2 SRAM. */
		dcplb_bounds[i_d].eaddr = L2_START + L2_LENGTH;
		dcplb_bounds[i_d++].data = L2_DMEMORY;
	}
	dcplb_nr_bounds = i_d;
	BUG_ON(dcplb_nr_bounds > ARRAY_SIZE(dcplb_bounds));

	i_i = 0;
	/* Normal RAM, including MTD FS. */
	icplb_bounds[i_i].eaddr = uncached_end;
	icplb_bounds[i_i++].data = SDRAM_IGENERIC;
	if (_ramend != physical_mem_end) {
		/* DMA uncached region. */
		if (DMA_UNCACHED_REGION) {
			/* Normally this hole is caught by the async below. */
			icplb_bounds[i_i].eaddr = _ramend;
			icplb_bounds[i_i++].data = 0;
		}
		/* Reserved memory. */
		icplb_bounds[i_i].eaddr = physical_mem_end;
		icplb_bounds[i_i++].data = (reserved_mem_icache_on ?
					    SDRAM_IGENERIC : SDRAM_INON_CHBL);
	}
	/* Addressing hole up to the async bank. */
	icplb_bounds[i_i].eaddr = ASYNC_BANK0_BASE;
	icplb_bounds[i_i++].data = 0;
	/* ASYNC banks. */
	icplb_bounds[i_i].eaddr = ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE;
	icplb_bounds[i_i++].data = SDRAM_EBIU;
	/* Addressing hole up to BootROM. */
	icplb_bounds[i_i].eaddr = BOOT_ROM_START;
	icplb_bounds[i_i++].data = 0;
	/* BootROM -- the largest one should be less than 1 MB. */
	icplb_bounds[i_i].eaddr = BOOT_ROM_START + (1 * 1024 * 1024);
	icplb_bounds[i_i++].data = SDRAM_IGENERIC;

	if (L2_LENGTH) {
		/* Addressing hole up to L2 SRAM. */
		icplb_bounds[i_i].eaddr = L2_START;
		icplb_bounds[i_i++].data = 0;
		/* L2 SRAM. */
		icplb_bounds[i_i].eaddr = L2_START + L2_LENGTH;
		icplb_bounds[i_i++].data = L2_IMEMORY;
	}
	icplb_nr_bounds = i_i;
	BUG_ON(icplb_nr_bounds > ARRAY_SIZE(icplb_bounds));
}
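
For context, the boundary tables built above are meant to be scanned at run time when a CPLB miss occurs. The helper below is a minimal illustrative sketch of such a lookup, not the kernel's actual cplbmgr code; it assumes only the eaddr/data layout of struct cplb_boundary that is visible in this file.

/* Illustrative sketch only -- not the kernel's CPLB miss handler. */
static unsigned long cplb_lookup_data(const struct cplb_boundary *bounds,
				      int nr_bounds, unsigned long addr)
{
	int i;

	for (i = 0; i < nr_bounds; i++) {
		/* eaddr is the exclusive end address of this region. */
		if (addr < bounds[i].eaddr)
			return bounds[i].data;	/* 0 means unmapped hole */
	}

	return 0;	/* beyond the last bound: treat as unmapped */
}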