Linux Kernel  3.7.1
mmap.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2011 Wind River Systems,
 *   written by Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/sched.h>

unsigned long shm_align_mask = PAGE_SIZE - 1;        /* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
#define MAX_GAP ((TASK_SIZE)/6*5)

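/*
 * Use the legacy bottom-up layout for ADDR_COMPAT_LAYOUT personalities,
 * for unlimited stack rlimits, or when the legacy_va_layout sysctl is set.
 */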
static int mmap_is_legacy(void)
{
        if (current->personality & ADDR_COMPAT_LAYOUT)
                return 1;

        if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
                return 1;

        return sysctl_legacy_va_layout;
}

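/*
 * Base for top-down allocations: leave a stack-sized gap (clamped to
 * [MIN_GAP, MAX_GAP]) below TASK_SIZE, shifted down by the random offset
 * chosen in arch_pick_mmap_layout().
 */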
static unsigned long mmap_base(unsigned long rnd)
{
        unsigned long gap = rlimit(RLIMIT_STACK);

        if (gap < MIN_GAP)
                gap = MIN_GAP;
        else if (gap > MAX_GAP)
                gap = MAX_GAP;

        return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}

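/*
 * Round addr down to the nearest address that has the same cache colour
 * as the given file offset, so shared mappings do not alias in a
 * virtually indexed cache.
 */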
static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
                                              unsigned long pgoff)
{
        unsigned long base = addr & ~shm_align_mask;
        unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

        if (base + off <= addr)
                return base + off;

        return base - off;
}

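/* As above, but round addr up to the next correctly coloured address. */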
#define COLOUR_ALIGN(addr, pgoff)                                \
        ((((addr) + shm_align_mask) & ~shm_align_mask) +        \
         (((pgoff) << PAGE_SHIFT) & shm_align_mask))

enum mmap_allocation_direction {UP, DOWN};

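/*
 * Common search routine for both layouts: validate MAP_FIXED requests,
 * honour an address hint if the range is free, then scan for a hole
 * bottom-up (dir == UP) or top-down (dir == DOWN), keeping shared
 * mappings cache-coloured.
 */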
static unsigned long arch_get_unmapped_area_common(struct file *filp,
        unsigned long addr0, unsigned long len, unsigned long pgoff,
        unsigned long flags, enum mmap_allocation_direction dir)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long addr = addr0;
        int do_color_align;

        if (unlikely(len > TASK_SIZE))
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                /* Even MAP_FIXED mappings must reside within TASK_SIZE */
                if (TASK_SIZE - len < addr)
                        return -EINVAL;

                /*
                 * We do not accept a shared mapping if it would violate
                 * cache aliasing constraints.
                 */
                if ((flags & MAP_SHARED) &&
                    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
                        return -EINVAL;
                return addr;
        }

        do_color_align = 0;
        if (filp || (flags & MAP_SHARED))
                do_color_align = 1;

        /* requesting a specific address */
        if (addr) {
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

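        /* No usable hint: scan for a free range in the requested direction. */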
        if (dir == UP) {
                addr = mm->mmap_base;
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
                        /* At this point:  (!vma || addr < vma->vm_end). */
                        if (TASK_SIZE - len < addr)
                                return -ENOMEM;
                        if (!vma || addr + len <= vma->vm_start)
                                return addr;
                        addr = vma->vm_end;
                        if (do_color_align)
                                addr = COLOUR_ALIGN(addr, pgoff);
                }
        } else {
                /* check if free_area_cache is useful for us */
                if (len <= mm->cached_hole_size) {
                        mm->cached_hole_size = 0;
                        mm->free_area_cache = mm->mmap_base;
                }

                /*
                 * either no address requested, or the mapping can't fit into
                 * the requested address hole
                 */
                addr = mm->free_area_cache;
                if (do_color_align) {
                        unsigned long base =
                                COLOUR_ALIGN_DOWN(addr - len, pgoff);
                        addr = base + len;
                }

                /* make sure it can fit in the remaining address space */
                if (likely(addr > len)) {
                        vma = find_vma(mm, addr - len);
                        if (!vma || addr <= vma->vm_start) {
                                /* cache the address as a hint for next time */
                                return mm->free_area_cache = addr - len;
                        }
                }

                if (unlikely(mm->mmap_base < len))
                        goto bottomup;

                addr = mm->mmap_base - len;
                if (do_color_align)
                        addr = COLOUR_ALIGN_DOWN(addr, pgoff);

                do {
                        /*
                         * Lookup failure means no vma is above this address,
                         * else if new region fits below vma->vm_start,
                         * return with success:
                         */
                        vma = find_vma(mm, addr);
                        if (likely(!vma || addr + len <= vma->vm_start)) {
                                /* cache the address as a hint for next time */
                                return mm->free_area_cache = addr;
                        }

                        /* remember the largest hole we saw so far */
                        if (addr + mm->cached_hole_size < vma->vm_start)
                                mm->cached_hole_size = vma->vm_start - addr;

                        /* try just below the current vma->vm_start */
                        addr = vma->vm_start - len;
                        if (do_color_align)
                                addr = COLOUR_ALIGN_DOWN(addr, pgoff);
                } while (likely(len < vma->vm_start));

bottomup:
                /*
                 * A failed mmap() very likely causes application failure,
                 * so fall back to the bottom-up function here. This scenario
                 * can happen with large stack limits and large mmap()
                 * allocations.
                 */
                mm->cached_hole_size = ~0UL;
                mm->free_area_cache = TASK_UNMAPPED_BASE;
                addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
                /*
                 * Restore the topdown base:
                 */
                mm->free_area_cache = mm->mmap_base;
                mm->cached_hole_size = ~0UL;

                return addr;
        }
}

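/* Bottom-up allocator, used directly by the legacy mmap layout. */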
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
        unsigned long len, unsigned long pgoff, unsigned long flags)
{
        return arch_get_unmapped_area_common(filp,
                        addr0, len, pgoff, flags, UP);
}

/*
 * There is no need to export this but sched.h declares the function as
 * extern so making it static here results in an error.
 */
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
        unsigned long addr0, unsigned long len, unsigned long pgoff,
        unsigned long flags)
{
        return arch_get_unmapped_area_common(filp,
                        addr0, len, pgoff, flags, DOWN);
}

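/*
 * Choose the mmap layout for a new mm: bottom-up from TASK_UNMAPPED_BASE
 * in the legacy case, otherwise top-down from a randomized base below
 * the stack gap.
 */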
void arch_pick_mmap_layout(struct mm_struct *mm)
{
        unsigned long random_factor = 0UL;

        if (current->flags & PF_RANDOMIZE) {
                random_factor = get_random_int();
                random_factor = random_factor << PAGE_SHIFT;
                if (TASK_IS_32BIT_ADDR)
                        random_factor &= 0xfffffful;
                else
                        random_factor &= 0xffffffful;
        }

        if (mmap_is_legacy()) {
                mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
                mm->get_unmapped_area = arch_get_unmapped_area;
                mm->unmap_area = arch_unmap_area;
        } else {
                mm->mmap_base = mmap_base(random_factor);
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
                mm->unmap_area = arch_unmap_area_topdown;
        }
}

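/* Pick a random, page-aligned offset to add to the initial heap break. */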
static inline unsigned long brk_rnd(void)
{
        unsigned long rnd = get_random_int();

        rnd = rnd << PAGE_SHIFT;
        /* 8MB for 32bit, 256MB for 64bit */
        if (TASK_IS_32BIT_ADDR)
                rnd = rnd & 0x7ffffful;
        else
                rnd = rnd & 0xffffffful;

        return rnd;
}

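/*
 * Move the initial brk to a randomized, page-aligned address at or
 * above the current one.
 */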
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        unsigned long base = mm->brk;
        unsigned long ret;

        ret = PAGE_ALIGN(base + brk_rnd());

        if (ret < mm->brk)
                return mm->brk;

        return ret;
}