Linux Kernel 3.7.1
cplbmgr.c
/*
 * Blackfin CPLB exception handling for when MPU is on
 *
 * Copyright 2008-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/module.h>
#include <linux/mm.h>

#include <asm/blackfin.h>
#include <asm/cacheflush.h>
#include <asm/cplb.h>
#include <asm/cplbinit.h>
#include <asm/mmu_context.h>

/*
 * WARNING
 *
 * This file is compiled with certain -ffixed-reg options.  We have to
 * make sure not to call any functions here that could clobber these
 * registers.
 */

int page_mask_nelts;
int page_mask_order;
unsigned long *current_rwx_mask[NR_CPUS];

int nr_dcplb_miss[NR_CPUS], nr_icplb_miss[NR_CPUS];
int nr_icplb_supv_miss[NR_CPUS], nr_dcplb_prot[NR_CPUS];
int nr_cplb_flush[NR_CPUS];

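/*
 * current_rwx_mask points at the current process's permission bitmaps:
 * page_mask_nelts 32-bit words of read bits, followed by the same number
 * of write bits, then execute bits.  Each bit covers one PAGE_SIZE page;
 * for page N the word index is N >> 5 and the bit within it is
 * 1 << (N & 31).
 */
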
#ifdef CONFIG_EXCPT_IRQ_SYSC_L1
#define MGR_ATTR __attribute__((l1_text))
#else
#define MGR_ATTR
#endif

/*
 * Given the contents of the status register, return the index of the
 * CPLB that caused the fault.
 */
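/*
 * The low 16 bits of the status register carry a one-hot mask of the
 * faulting entry.  For example, a fault on entry 5 sets bit 5 (0x20);
 * the norm builtin reports 25 redundant sign bits for that value, and
 * 30 - 25 recovers index 5.
 */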
static inline int faulting_cplb_index(int status)
{
	int signbits = __builtin_bfin_norm_fr1x32(status & 0xFFFF);
	return 30 - signbits;
}

/*
 * Given the contents of the status register and the DCPLB_DATA contents,
 * return true if a write access should be permitted.
 */
static inline int write_permitted(int status, unsigned long data)
{
	if (status & FAULT_USERSUPV)
		return !!(data & CPLB_SUPV_WR);
	else
		return !!(data & CPLB_USER_WR);
}

/* Counters to implement round-robin replacement.  */
static int icplb_rr_index[NR_CPUS], dcplb_rr_index[NR_CPUS];

/*
 * Find an ICPLB entry to be evicted and return its index.
 */
MGR_ATTR static int evict_one_icplb(unsigned int cpu)
{
	int i;
	for (i = first_switched_icplb; i < MAX_CPLBS; i++)
		if ((icplb_tbl[cpu][i].data & CPLB_VALID) == 0)
			return i;
	i = first_switched_icplb + icplb_rr_index[cpu];
	if (i >= MAX_CPLBS) {
		i -= MAX_CPLBS - first_switched_icplb;
		icplb_rr_index[cpu] -= MAX_CPLBS - first_switched_icplb;
	}
	icplb_rr_index[cpu]++;
	return i;
}

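/*
 * Find a DCPLB entry to be evicted and return its index.  As with the
 * ICPLB variant, an invalid switched entry is preferred; otherwise the
 * switched entries are recycled round-robin.
 */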
MGR_ATTR static int evict_one_dcplb(unsigned int cpu)
{
	int i;
	for (i = first_switched_dcplb; i < MAX_CPLBS; i++)
		if ((dcplb_tbl[cpu][i].data & CPLB_VALID) == 0)
			return i;
	i = first_switched_dcplb + dcplb_rr_index[cpu];
	if (i >= MAX_CPLBS) {
		i -= MAX_CPLBS - first_switched_dcplb;
		dcplb_rr_index[cpu] -= MAX_CPLBS - first_switched_dcplb;
	}
	dcplb_rr_index[cpu]++;
	return i;
}

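/*
 * Handle a DCPLB miss: build the CPLB data word for the faulting address
 * (page size, cacheability, and user read/write permission from the
 * current rwx bitmaps), evict an entry and install the new mapping with
 * the data CPLBs briefly disabled.
 */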
MGR_ATTR static noinline int dcplb_miss(unsigned int cpu)
{
	unsigned long addr = bfin_read_DCPLB_FAULT_ADDR();
	int status = bfin_read_DCPLB_STATUS();
	unsigned long *mask;
	int idx;
	unsigned long d_data;

	nr_dcplb_miss[cpu]++;

	d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
	if (bfin_addr_dcacheable(addr)) {
		d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
		d_data |= CPLB_L1_AOW | CPLB_WT;
# endif
	}
#endif

	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
		addr = L2_START;
		d_data = L2_DMEMORY;
	} else if (addr >= physical_mem_end) {
		if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
#if defined(CONFIG_ROMFS_ON_MTD) && defined(CONFIG_MTD_ROM)
			mask = current_rwx_mask[cpu];
			if (mask) {
				int page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> PAGE_SHIFT;
				int idx = page >> 5;
				int bit = 1 << (page & 31);

				if (mask[idx] & bit)
					d_data |= CPLB_USER_RD;
			}
#endif
		} else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
		    && (status & (FAULT_RW | FAULT_USERSUPV)) == FAULT_USERSUPV) {
			addr &= ~(1 * 1024 * 1024 - 1);
			d_data &= ~PAGE_SIZE_4KB;
			d_data |= PAGE_SIZE_1MB;
		} else
			return CPLB_PROT_VIOL;
	} else if (addr >= _ramend) {
		d_data |= CPLB_USER_RD | CPLB_USER_WR;
		if (reserved_mem_dcache_on)
			d_data |= CPLB_L1_CHBL;
	} else {
		mask = current_rwx_mask[cpu];
		if (mask) {
			int page = addr >> PAGE_SHIFT;
			int idx = page >> 5;
			int bit = 1 << (page & 31);

			if (mask[idx] & bit)
				d_data |= CPLB_USER_RD;

			mask += page_mask_nelts;
			if (mask[idx] & bit)
				d_data |= CPLB_USER_WR;
		}
	}
	idx = evict_one_dcplb(cpu);

	addr &= PAGE_MASK;
	dcplb_tbl[cpu][idx].addr = addr;
	dcplb_tbl[cpu][idx].data = d_data;

	_disable_dcplb();
	bfin_write32(DCPLB_DATA0 + idx * 4, d_data);
	bfin_write32(DCPLB_ADDR0 + idx * 4, addr);
	_enable_dcplb();

	return 0;
}

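/*
 * Handle an ICPLB miss.  The structure mirrors dcplb_miss, except that
 * user permission comes from the execute bitmap (hence the
 * 2 * page_mask_nelts offset) and supervisor fetches from RAM are
 * granted unconditionally.
 */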
MGR_ATTR static noinline int icplb_miss(unsigned int cpu)
{
	unsigned long addr = bfin_read_ICPLB_FAULT_ADDR();
	int status = bfin_read_ICPLB_STATUS();
	int idx;
	unsigned long i_data;

	nr_icplb_miss[cpu]++;

	/* If inside the uncached DMA region, fault.  */
	if (addr >= _ramend - DMA_UNCACHED_REGION && addr < _ramend)
		return CPLB_PROT_VIOL;

	if (status & FAULT_USERSUPV)
		nr_icplb_supv_miss[cpu]++;

	/*
	 * First, try to find a CPLB that matches this address.  If we
	 * find one, then the fact that we're in the miss handler means
	 * that the instruction crosses a page boundary.
	 */
	for (idx = first_switched_icplb; idx < MAX_CPLBS; idx++) {
		if (icplb_tbl[cpu][idx].data & CPLB_VALID) {
			unsigned long this_addr = icplb_tbl[cpu][idx].addr;
			if (this_addr <= addr && this_addr + PAGE_SIZE > addr) {
				addr += PAGE_SIZE;
				break;
			}
		}
	}

	i_data = CPLB_VALID | CPLB_PORTPRIO | PAGE_SIZE_4KB;

#ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
	/*
	 * Normal RAM, and possibly the reserved memory area, are
	 * cacheable.
	 */
	if (addr < _ramend ||
	    (addr < physical_mem_end && reserved_mem_icache_on))
		i_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
#endif

	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
		addr = L2_START;
		i_data = L2_IMEMORY;
	} else if (addr >= physical_mem_end) {
		if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
			if (!(status & FAULT_USERSUPV)) {
				unsigned long *mask = current_rwx_mask[cpu];

				if (mask) {
					int page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> PAGE_SHIFT;
					int idx = page >> 5;
					int bit = 1 << (page & 31);

					mask += 2 * page_mask_nelts;
					if (mask[idx] & bit)
						i_data |= CPLB_USER_RD;
				}
			}
		} else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
		    && (status & FAULT_USERSUPV)) {
			addr &= ~(1 * 1024 * 1024 - 1);
			i_data &= ~PAGE_SIZE_4KB;
			i_data |= PAGE_SIZE_1MB;
		} else
			return CPLB_PROT_VIOL;
	} else if (addr >= _ramend) {
		i_data |= CPLB_USER_RD;
		if (reserved_mem_icache_on)
			i_data |= CPLB_L1_CHBL;
	} else {
		/*
		 * Two cases to distinguish - a supervisor access must
		 * necessarily be for a module page; we grant it
		 * unconditionally (could do better here in the future).
		 * Otherwise, check the x bitmap of the current process.
		 */
		if (!(status & FAULT_USERSUPV)) {
			unsigned long *mask = current_rwx_mask[cpu];

			if (mask) {
				int page = addr >> PAGE_SHIFT;
				int idx = page >> 5;
				int bit = 1 << (page & 31);

				mask += 2 * page_mask_nelts;
				if (mask[idx] & bit)
					i_data |= CPLB_USER_RD;
			}
		}
	}
	idx = evict_one_icplb(cpu);
	addr &= PAGE_MASK;
	icplb_tbl[cpu][idx].addr = addr;
	icplb_tbl[cpu][idx].data = i_data;

	_disable_icplb();
	bfin_write32(ICPLB_DATA0 + idx * 4, i_data);
	bfin_write32(ICPLB_ADDR0 + idx * 4, addr);
	_enable_icplb();

	return 0;
}

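/*
 * Handle a DCPLB protection fault.  The only case fixed up here is a
 * permitted write to a clean (not yet dirty) write-back page, which just
 * needs CPLB_DIRTY set; anything else is a genuine violation.
 */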
MGR_ATTR static noinline int dcplb_protection_fault(unsigned int cpu)
{
	int status = bfin_read_DCPLB_STATUS();

	nr_dcplb_prot[cpu]++;

	if (status & FAULT_RW) {
		int idx = faulting_cplb_index(status);
		unsigned long data = dcplb_tbl[cpu][idx].data;
		if (!(data & CPLB_WT) && !(data & CPLB_DIRTY) &&
		    write_permitted(status, data)) {
			data |= CPLB_DIRTY;
			dcplb_tbl[cpu][idx].data = data;
			bfin_write32(DCPLB_DATA0 + idx * 4, data);
			return 0;
		}
	}
	return CPLB_PROT_VIOL;
}

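/*
 * Top-level CPLB exception dispatcher.  The low six bits of SEQSTAT hold
 * the exception cause: 0x23 is a DCPLB protection violation, 0x26 a
 * DCPLB miss and 0x2C an ICPLB miss.  Returns 0 if the fault was
 * handled, non-zero for anything it cannot handle.
 */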
MGR_ATTR int cplb_hdr(int seqstat, struct pt_regs *regs)
{
	int cause = seqstat & 0x3f;
	unsigned int cpu = raw_smp_processor_id();
	switch (cause) {
	case 0x23:
		return dcplb_protection_fault(cpu);
	case 0x2C:
		return icplb_miss(cpu);
	case 0x26:
		return dcplb_miss(cpu);
	default:
		return 1;
	}
}

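/*
 * Invalidate all switched (dynamically installed) ICPLB and DCPLB
 * entries on this CPU, with interrupts disabled while the tables are
 * rewritten.
 */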
void flush_switched_cplbs(unsigned int cpu)
{
	int i;
	unsigned long flags;

	nr_cplb_flush[cpu]++;

	flags = hard_local_irq_save();
	_disable_icplb();
	for (i = first_switched_icplb; i < MAX_CPLBS; i++) {
		icplb_tbl[cpu][i].data = 0;
		bfin_write32(ICPLB_DATA0 + i * 4, 0);
	}
	_enable_icplb();

	_disable_dcplb();
	for (i = first_switched_dcplb; i < MAX_CPLBS; i++) {
		dcplb_tbl[cpu][i].data = 0;
		bfin_write32(DCPLB_DATA0 + i * 4, 0);
	}
	_enable_dcplb();
	hard_local_irq_restore(flags);
}

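/*
 * Install the "mask" DCPLB entries so that the new process's rwx bitmaps
 * are mapped when the miss handlers consult them, and record the bitmap
 * pointer in current_rwx_mask.  A NULL mask only clears the pointer.
 */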
void set_mask_dcplbs(unsigned long *masks, unsigned int cpu)
{
	int i;
	unsigned long addr = (unsigned long)masks;
	unsigned long d_data;
	unsigned long flags;

	if (!masks) {
		current_rwx_mask[cpu] = masks;
		return;
	}

	flags = hard_local_irq_save();
	current_rwx_mask[cpu] = masks;

	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
		addr = L2_START;
		d_data = L2_DMEMORY;
	} else {
		d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
		d_data |= CPLB_L1_CHBL;
# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
		d_data |= CPLB_L1_AOW | CPLB_WT;
# endif
#endif
	}

	_disable_dcplb();
	for (i = first_mask_dcplb; i < first_switched_dcplb; i++) {
		dcplb_tbl[cpu][i].addr = addr;
		dcplb_tbl[cpu][i].data = d_data;
		bfin_write32(DCPLB_DATA0 + i * 4, d_data);
		bfin_write32(DCPLB_ADDR0 + i * 4, addr);
		addr += PAGE_SIZE;
	}
	_enable_dcplb();
	hard_local_irq_restore(flags);
}