mmu_context.h
/*
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#ifndef __BLACKFIN_MMU_CONTEXT_H__
#define __BLACKFIN_MMU_CONTEXT_H__

#include <linux/slab.h>
#include <linux/sched.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/cplbinit.h>
#include <asm/sections.h>

/* Note: L1 stacks are CPU-private things, so we bluntly disable this
   feature in SMP mode, and use the per-CPU scratch SRAM bank only to
   store the PDA instead. */

extern void *current_l1_stack_save;
extern int nr_l1stack_tasks;
extern void *l1_stack_base;
extern unsigned long l1_stack_len;

extern int l1sram_free(const void*);
extern void *l1sram_alloc_max(void*);

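/*
 * A single L1 scratchpad stack region is shared by every task that uses
 * an L1 stack: nr_l1stack_tasks counts the users, and the region itself
 * (l1_stack_base/l1_stack_len) is only handed back to the L1 SRAM
 * allocator once the last user goes away.
 */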
static inline void free_l1stack(void)
{
	nr_l1stack_tasks--;
	if (nr_l1stack_tasks == 0) {
		l1sram_free(l1_stack_base);
		l1_stack_base = NULL;
		l1_stack_len = 0;
	}
}

static inline unsigned long
alloc_l1stack(unsigned long length, unsigned long *stack_base)
{
	if (nr_l1stack_tasks == 0) {
		l1_stack_base = l1sram_alloc_max(&l1_stack_len);
		if (!l1_stack_base)
			return 0;
	}

	if (l1_stack_len < length) {
		if (nr_l1stack_tasks == 0)
			l1sram_free(l1_stack_base);
		return 0;
	}
	*stack_base = (unsigned long)l1_stack_base;
	nr_l1stack_tasks++;
	return l1_stack_len;
}

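/*
 * Install a task's L1 stack image: spill whatever is currently in the L1
 * region back to its owner's save area, then record sp_base as the new
 * owner and copy its saved image into L1.
 */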
static inline int
activate_l1stack(struct mm_struct *mm, unsigned long sp_base)
{
	if (current_l1_stack_save)
		memcpy(current_l1_stack_save, l1_stack_base, l1_stack_len);
	mm->context.l1_stack_save = current_l1_stack_save = (void*)sp_base;
	memcpy(l1_stack_base, current_l1_stack_save, l1_stack_len);
	return 1;
}

#define deactivate_mm(tsk,mm)	do { } while (0)

#define activate_mm(prev, next) switch_mm(prev, next, NULL)

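/*
 * Core of the context switch.  With CONFIG_MPU, load the next mm's
 * page-protection masks into the data CPLBs when the previous mm's masks
 * are the ones currently installed; with CONFIG_APP_STACK_L1, also swap
 * the shared L1 stack region over to the next mm's saved image.
 */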
static inline void __switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
			       struct task_struct *tsk)
{
#ifdef CONFIG_MPU
	unsigned int cpu = smp_processor_id();
#endif
	if (prev_mm == next_mm)
		return;
#ifdef CONFIG_MPU
	if (prev_mm->context.page_rwx_mask == current_rwx_mask[cpu]) {
		flush_switched_cplbs(cpu);
		set_mask_dcplbs(next_mm->context.page_rwx_mask, cpu);
	}
#endif

#ifdef CONFIG_APP_STACK_L1
	/* L1 stack switching. */
	if (!next_mm->context.l1_stack_save)
		return;
	if (next_mm->context.l1_stack_save == current_l1_stack_save)
		return;
	if (current_l1_stack_save) {
		memcpy(current_l1_stack_save, l1_stack_base, l1_stack_len);
	}
	current_l1_stack_save = next_mm->context.l1_stack_save;
	memcpy(l1_stack_base, current_l1_stack_save, l1_stack_len);
#endif
}

#ifdef CONFIG_IPIPE
#define lock_mm_switch(flags)	flags = hard_local_irq_save_cond()
#define unlock_mm_switch(flags)	hard_local_irq_restore_cond(flags)
#else
#define lock_mm_switch(flags)	do { (void)(flags); } while (0)
#define unlock_mm_switch(flags)	do { (void)(flags); } while (0)
#endif /* CONFIG_IPIPE */

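/*
 * On MPU kernels the switch is bracketed by the lock/unlock helpers
 * above (hard IRQs off when CONFIG_IPIPE is enabled), presumably so the
 * CPLB mask reload in __switch_mm cannot be torn by an interrupt.
 */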
#ifdef CONFIG_MPU
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long flags;
	lock_mm_switch(flags);
	__switch_mm(prev, next, tsk);
	unlock_mm_switch(flags);
}

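/*
 * The MPU bookkeeping uses three consecutive bitmaps of page_mask_nelts
 * words each (read, write, execute), one bit per 4 KiB page.  Addresses
 * in the async memory banks are folded into the bitmaps just past the
 * end of SDRAM (_ramend), as the index calculation below shows.
 */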
static inline void protect_page(struct mm_struct *mm, unsigned long addr,
				unsigned long flags)
{
	unsigned long *mask = mm->context.page_rwx_mask;
	unsigned long page;
	unsigned long idx;
	unsigned long bit;

	if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE)
		page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> 12;
	else
		page = addr >> 12;
	idx = page >> 5;
	bit = 1 << (page & 31);

	if (flags & VM_READ)
		mask[idx] |= bit;
	else
		mask[idx] &= ~bit;
	mask += page_mask_nelts;
	if (flags & VM_WRITE)
		mask[idx] |= bit;
	else
		mask[idx] &= ~bit;
	mask += page_mask_nelts;
	if (flags & VM_EXEC)
		mask[idx] |= bit;
	else
		mask[idx] &= ~bit;
}

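/*
 * Push updated protection bits out to the hardware: if this mm's masks
 * are the ones currently loaded on this CPU, flush the switched CPLBs
 * and reinstall the data CPLB masks.
 */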
static inline void update_protections(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();
	if (mm->context.page_rwx_mask == current_rwx_mask[cpu]) {
		flush_switched_cplbs(cpu);
		set_mask_dcplbs(mm->context.page_rwx_mask, cpu);
	}
}
#else /* !CONFIG_MPU */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	__switch_mm(prev, next, tsk);
}
#endif

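/* No MMU, hence no TLB state to defer: entering lazy TLB mode is a no-op. */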
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/* Called when creating a new context during fork() or execve(). */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
#ifdef CONFIG_MPU
	unsigned long p = __get_free_pages(GFP_KERNEL, page_mask_order);
	mm->context.page_rwx_mask = (unsigned long *)p;
	memset(mm->context.page_rwx_mask, 0,
	       page_mask_nelts * 3 * sizeof(long));
#endif
	return 0;
}

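/*
 * Tear down a dying mm: drop its claim on the shared L1 stack, release
 * any per-process SRAM allocations on context.sram_list, and free the
 * MPU protection bitmaps (clearing current_rwx_mask if they are still
 * the active set on this CPU).
 */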
static inline void destroy_context(struct mm_struct *mm)
{
	struct sram_list_struct *tmp;
#ifdef CONFIG_MPU
	unsigned int cpu = smp_processor_id();
#endif

#ifdef CONFIG_APP_STACK_L1
	if (current_l1_stack_save == mm->context.l1_stack_save)
		current_l1_stack_save = 0;
	if (mm->context.l1_stack_save)
		free_l1stack();
#endif

	while ((tmp = mm->context.sram_list)) {
		mm->context.sram_list = tmp->next;
		sram_free(tmp->addr);
		kfree(tmp);
	}
#ifdef CONFIG_MPU
	if (current_rwx_mask[cpu] == mm->context.page_rwx_mask)
		current_rwx_mask[cpu] = NULL;
	free_pages((unsigned long)mm->context.page_rwx_mask, page_mask_order);
#endif
}

#define ipipe_mm_switch_protect(flags)		\
	flags = hard_local_irq_save_cond()

#define ipipe_mm_switch_unprotect(flags)	\
	hard_local_irq_restore_cond(flags)

#endif