Linux Kernel 3.7.1
mcfmmu.c
/*
 * Based upon linux/arch/m68k/mm/sun3mmu.c
 * Based upon linux/arch/ppc/mm/mmu_context.c
 *
 * Implementations of mm routines specific to the Coldfire MMU.
 *
 * Copyright (c) 2008 Freescale Semiconductor, Inc.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/bootmem.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/mcf_pgalloc.h>
#include <asm/tlbflush.h>

#define KMAPAREA(x)	((x >= VMALLOC_START) && (x < KMAP_END))
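
/*
 * KMAPAREA(x) is true when the address sits in the kernel mapping
 * window, i.e. the vmalloc/kmap range [VMALLOC_START, KMAP_END).
 * cf_tlb_miss() below uses it to decide whether a miss should be
 * resolved against init_mm instead of the faulting task's mm. The
 * upper bound is exclusive:
 *
 *   KMAPAREA(VMALLOC_START) -> true
 *   KMAPAREA(KMAP_END)      -> false
 */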

/*
 * Context (ASID) bookkeeping used by mmu_context_init() and
 * steal_context() below: next_mmu_context is the next candidate
 * context number, context_map is a bitmap of contexts in use,
 * nr_free_contexts counts how many remain free, and context_mm maps
 * a context number back to the mm_struct that owns it.
 */
mm_context_t next_mmu_context;
unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
atomic_t nr_free_contexts;
struct mm_struct *context_mm[LAST_CONTEXT+1];
extern unsigned long num_pages;

void free_initmem(void)
{
}

/*
 * ColdFire paging_init derived from sun3.
 */
void __init paging_init(void)
{
	pgd_t *pg_dir;
	pte_t *pg_table;
	unsigned long address, size;
	unsigned long next_pgtable, bootmem_end;
	unsigned long zones_size[MAX_NR_ZONES];
	enum zone_type zone;
	int i;

	empty_zero_page = (void *) alloc_bootmem_pages(PAGE_SIZE);
	memset((void *) empty_zero_page, 0, PAGE_SIZE);

	pg_dir = swapper_pg_dir;
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/* One pte per page of RAM, rounded up to a whole bootmem page. */
	size = num_pages * sizeof(pte_t);
	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
	next_pgtable = (unsigned long) alloc_bootmem_pages(size);

	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
	pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;

	address = PAGE_OFFSET;
	while (address < (unsigned long)high_memory) {
		pg_table = (pte_t *) next_pgtable;
		next_pgtable += PTRS_PER_PTE * sizeof(pte_t);
		pgd_val(*pg_dir) = (unsigned long) pg_table;
		pg_dir++;

		/* now change pg_table to kernel virtual addresses */
		for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
			pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
			if (address >= (unsigned long) high_memory)
				pte_val(pte) = 0;

			set_pte(pg_table, pte);
			address += PAGE_SIZE;
		}
	}

	current->mm = NULL;

	for (zone = 0; zone < MAX_NR_ZONES; zone++)
		zones_size[zone] = 0x0;
	zones_size[ZONE_DMA] = num_pages;
	free_area_init(zones_size);
}
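
/*
 * A worked example of the sizing arithmetic in paging_init(), assuming
 * the ColdFire's 8 KiB pages (PAGE_SHIFT == 13) and 4-byte ptes, with
 * a hypothetical 64 MiB of RAM; the real figures depend on the board:
 *
 *   num_pages = 64 MiB / 8 KiB         = 8192
 *   size      = 8192 * sizeof(pte_t)   = 32768 bytes
 *   rounded   = (32768 + 8192) & ~8191 = 40960 bytes of bootmem
 *
 * Because the rounding adds a full PAGE_SIZE before masking, an
 * already-aligned size grows by one extra page; the slack is harmless
 * boot-time overhead.
 */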

/*
 * Handle a TLB miss: walk the page tables for the faulting address
 * and, when a valid mapping exists, load it into the ITLB or DTLB.
 * Returns 0 on success, -1 if the fault must be handled elsewhere.
 */
int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
{
	unsigned long flags, mmuar, mmutr;
	struct mm_struct *mm;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int asid;

	local_irq_save(flags);

	mmuar = (dtlb) ? mmu_read(MMUAR) :
		regs->pc + (extension_word * sizeof(long));

	mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm;
	if (!mm) {
		local_irq_restore(flags);
		return -1;
	}

	pgd = pgd_offset(mm, mmuar);
	if (pgd_none(*pgd)) {
		local_irq_restore(flags);
		return -1;
	}

	pmd = pmd_offset(pgd, mmuar);
	if (pmd_none(*pmd)) {
		local_irq_restore(flags);
		return -1;
	}

	pte = (KMAPAREA(mmuar)) ? pte_offset_kernel(pmd, mmuar)
				: pte_offset_map(pmd, mmuar);
	if (pte_none(*pte) || !pte_present(*pte)) {
		local_irq_restore(flags);
		return -1;
	}

	if (write) {
		if (!pte_write(*pte)) {
			local_irq_restore(flags);
			return -1;
		}
		set_pte(pte, pte_mkdirty(*pte));
	}

	set_pte(pte, pte_mkyoung(*pte));
	asid = mm->context & 0xff;
	if (!pte_dirty(*pte) && !KMAPAREA(mmuar))
		set_pte(pte, pte_wrprotect(*pte));

	mmutr = (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) | MMUTR_V;
	if ((mmuar < TASK_UNMAPPED_BASE) || (mmuar >= TASK_SIZE))
		mmutr |= (pte->pte & CF_PAGE_MMUTR_MASK) >> CF_PAGE_MMUTR_SHIFT;
	mmu_write(MMUTR, mmutr);

	mmu_write(MMUDR, (pte_val(*pte) & PAGE_MASK) |
		((pte->pte) & CF_PAGE_MMUDR_MASK) | MMUDR_SZ_8KB | MMUDR_X);

	if (dtlb)
		mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);
	else
		mmu_write(MMUOR, MMUOR_ITLB | MMUOR_ACC | MMUOR_UAA);

	local_irq_restore(flags);
	return 0;
}
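
/*
 * A hypothetical caller of cf_tlb_miss(), for illustration only: the
 * kernel's real dispatch happens in the exception entry code, and the
 * helper name and the do_page_fault() arguments below are assumptions,
 * not part of this file. The point is the contract: 0 means the TLB
 * was loaded and the access can simply be retried, -1 means the fault
 * must be escalated to the generic page-fault path.
 */
#if 0	/* illustrative sketch, not built */
static void cf_tlb_miss_or_fault(struct pt_regs *regs, int write,
				 int dtlb, int extension_word)
{
	if (cf_tlb_miss(regs, write, dtlb, extension_word) == 0)
		return;		/* TLB entry loaded; retry the access */

	/* No usable mapping; error-code argument is illustrative. */
	do_page_fault(regs, mmu_read(MMUAR), write);
}
#endif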

/*
 * Initialize the context management stuff.
 * The following was taken from arch/ppc/mmu_context.c
 */
void __init mmu_context_init(void)
{
	/*
	 * Some processors have too few contexts to reserve one for
	 * init_mm, and require using context 0 for a normal task.
	 * Other processors reserve the use of context zero for the kernel.
	 * This code assumes FIRST_CONTEXT < 32.
	 */
	context_map[0] = (1 << FIRST_CONTEXT) - 1;
	next_mmu_context = FIRST_CONTEXT;
	atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
}
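
/*
 * Worked example of the initialization above, assuming the usual m68k
 * values FIRST_CONTEXT == 1 and LAST_CONTEXT == 255 (the real
 * constants come from asm/mmu_context.h): context_map[0] becomes
 * (1 << 1) - 1 == 0x1, permanently reserving context 0 for the
 * kernel, and nr_free_contexts starts at 255, one for each
 * allocatable ASID in 1..255.
 */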

/*
 * Steal a context from a task that has one at the moment.
 * This is only used on 8xx and 4xx and we presently assume that
 * they don't do SMP. If they do then this will have to check
 * whether the MM we steal is in use.
 * We also assume that this is only used on systems that don't
 * use an MMU hash table - this is true for 8xx and 4xx.
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :). This would be the
 * place to implement an LRU scheme if anyone was motivated to do it.
 * -- paulus
 */
void steal_context(void)
{
	struct mm_struct *mm;
	/*
	 * free up context `next_mmu_context'
	 * if we shouldn't free context 0, don't...
	 */
	if (next_mmu_context < FIRST_CONTEXT)
		next_mmu_context = FIRST_CONTEXT;
	mm = context_mm[next_mmu_context];
	flush_tlb_mm(mm);
	destroy_context(mm);
}
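
/*
 * For context, a simplified sketch of the allocating side that pairs
 * with steal_context(). The real allocator lives in asm/mmu_context.h;
 * the helper name and the details below are illustrative assumptions,
 * not the actual implementation. When every context is taken, the
 * round-robin victim's context is recycled for the requesting mm.
 */
#if 0	/* illustrative sketch, not built */
static void get_context_sketch(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT)
		return;			/* mm already owns an ASID */

	/* Out of contexts: steal one; destroy_context() frees it. */
	while (atomic_read(&nr_free_contexts) <= 0)
		steal_context();

	atomic_dec(&nr_free_contexts);

	/*
	 * Scan the bitmap round-robin for a free context number
	 * (assumes LAST_CONTEXT is a power of two minus one).
	 */
	while (test_and_set_bit(next_mmu_context, context_map))
		next_mmu_context = (next_mmu_context + 1) & LAST_CONTEXT;

	mm->context = next_mmu_context;
	context_mm[next_mmu_context] = mm;
}
#endif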