Linux Kernel 3.7.1
vmem.c
/*
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <[email protected]>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

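/*
 * Allocate 2^order contiguous pages: from the buddy allocator once the
 * core page allocator is up, from bootmem during early boot.
 */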
static void __ref *vmem_alloc_pages(unsigned int order)
{
	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}

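/*
 * On 64-bit s390 a region-third (pud) or segment (pmd) table occupies
 * four pages (2048 entries of 8 bytes each), hence the order-2
 * allocation and the PAGE_SIZE * 4 clear below. On 31-bit, both levels
 * fold into the pgd and NULL is returned.
 */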
static inline pud_t *vmem_pud_alloc(void)
{
	pud_t *pud = NULL;

#ifdef CONFIG_64BIT
	pud = vmem_alloc_pages(2);
	if (!pud)
		return NULL;
	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pud;
}

static inline pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

#ifdef CONFIG_64BIT
	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pmd;
}

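/*
 * Page table (pte) allocation likewise has a boot-time fallback; either
 * way the new table is initialized with empty (invalid) entries.
 */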
static pte_t __ref *vmem_pte_alloc(unsigned long address)
{
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm, address);
	else
		pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
		    PTRS_PER_PTE * sizeof(pte_t));
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
{
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t pte;
	int ret = -ENOMEM;

	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
		pm_dir = pmd_offset(pu_dir, address);

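		/*
		 * With EDAT1 a whole 1 MB segment can be mapped by a single
		 * large pmd entry: the address must be non-zero, segment
		 * aligned (!(address & ~PMD_MASK)), and a full segment must
		 * still fit below end. Disabled under DEBUG_PAGEALLOC, which
		 * needs 4 KB granularity to unmap individual pages.
		 */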
#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
		if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
			pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
			pmd_val(*pm_dir) = pte_val(pte);
			address += PMD_SIZE;
			continue;
		}
#endif
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	flush_tlb_kernel_range(start, end);
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t pte;

	pte_val(pte) = _PAGE_TYPE_EMPTY;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			address += PGDIR_SIZE;
			continue;
		}
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			address += PUD_SIZE;
			continue;
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			address += PMD_SIZE;
			continue;
		}
		if (pmd_large(*pm_dir)) {
			pmd_clear(pm_dir);
			address += PMD_SIZE;
			continue;
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
		address += PAGE_SIZE;
	}
	flush_tlb_kernel_range(start, end);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
{
	unsigned long address, start_addr, end_addr;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t pte;
	int ret = -ENOMEM;

	start_addr = (unsigned long) start;
	end_addr = (unsigned long) (start + nr);

	for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			unsigned long new_page;

			new_page = __pa(vmem_alloc_pages(0));
			if (!new_page)
				goto out;
			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
			*pt_dir = pte;
		}
	}
	memset(start, 0, nr * sizeof(struct page));
	ret = 0;
out:
	flush_tlb_kernel_range(start_addr, end_addr);
	return ret;
}

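/*
 * Note: vmemmap_populate() backs the virtual struct page array on demand,
 * one 4 KB page at a time, so pfn_to_page() stays a simple array lookup
 * even for sparse or hotplugged memory.
 */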
/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

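/*
 * The two "continue" checks above encode disjointness: the new segment
 * may lie entirely above (seg->start >= tmp->start + tmp->size) or
 * entirely below (seg->start + seg->size <= tmp->start) an existing one;
 * any other relation is an overlap and is rejected with -ENOSPC.
 */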
/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size, 0);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

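/*
 * Illustrative usage sketch (added for this listing, not in the original
 * source): a caller such as the DCSS segment driver maps an externally
 * defined physical range into the kernel 1:1 mapping and tears it down
 * again. The values of addr and size are hypothetical.
 *
 *	unsigned long addr = 0x20000000UL;
 *	unsigned long size = 0x00100000UL;
 *	int rc;
 *
 *	rc = vmem_add_mapping(addr, size);
 *	if (rc)
 *		return rc;
 *	... access the range through the identity mapping ...
 *	vmem_remove_mapping(addr, size);
 */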
/*
 * map whole physical memory to virtual memory (identity mapping)
 * we reserve enough space in the vmalloc area for vmemmap to hotplug
 * additional memory segments.
 */
void __init vmem_map_init(void)
{
	unsigned long ro_start, ro_end;
	unsigned long start, end;
	int i;

	ro_start = PFN_ALIGN((unsigned long)&_stext);
	ro_end = (unsigned long)&_eshared & PAGE_MASK;
	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		if (memory_chunk[i].type == CHUNK_CRASHK ||
		    memory_chunk[i].type == CHUNK_OLDMEM)
			continue;
		start = memory_chunk[i].addr;
		end = memory_chunk[i].addr + memory_chunk[i].size;
		if (start >= ro_end || end <= ro_start)
			vmem_add_mem(start, end - start, 0);
		else if (start >= ro_start && end <= ro_end)
			vmem_add_mem(start, end - start, 1);
		else if (start >= ro_start) {
			vmem_add_mem(start, ro_end - start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		} else if (end < ro_end) {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, end - ro_start, 1);
		} else {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, ro_end - ro_start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		}
	}
}

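/*
 * Worked example (hypothetical numbers, added for this listing): with
 * ro_start = 0x100000 and ro_end = 0x500000, a chunk covering
 * [0x0, 0x800000) takes the final else branch above and is mapped as
 * three ranges: [0x0, 0x100000) r/w, [0x100000, 0x500000) read-only,
 * and [0x500000, 0x800000) r/w.
 */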
/*
 * Convert memory chunk array to a memory segment list so there is a single
 * list that contains both r/w memory and shared memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memory_segment *seg;
	int i;

	mutex_lock(&vmem_mutex);
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (!memory_chunk[i].size)
			continue;
		if (memory_chunk[i].type == CHUNK_CRASHK ||
		    memory_chunk[i].type == CHUNK_OLDMEM)
			continue;
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = memory_chunk[i].addr;
		seg->size = memory_chunk[i].size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);