Linux Kernel 3.7.1
util.c
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <asm/uaccess.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>
char *kstrdup(const char *s, gfp_t gfp)
{
        size_t len;
        char *buf;

        if (!s)
                return NULL;

        len = strlen(s) + 1;
        buf = kmalloc_track_caller(len, gfp);
        if (buf)
                memcpy(buf, s, len);
        return buf;
}
EXPORT_SYMBOL(kstrdup);

char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
        size_t len;
        char *buf;

        if (!s)
                return NULL;

        len = strnlen(s, max);
        buf = kmalloc_track_caller(len + 1, gfp);
        if (buf) {
                memcpy(buf, s, len);
                buf[len] = '\0';
        }
        return buf;
}
EXPORT_SYMBOL(kstrndup);
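/*
 * Illustrative sketch (not part of the original util.c): a typical caller
 * duplicates a string it must keep beyond the lifetime of the source
 * buffer. GFP_KERNEL is assumed because the caller may sleep; the function
 * name and the 64-byte cap below are hypothetical.
 */
static char *example_copy_option(const char *opt)
{
        /* kstrndup() bounds the copy when @opt is not trusted to be short */
        char *copy = kstrndup(opt, 64, GFP_KERNEL);

        if (!copy)
                return NULL;    /* allocation failed */
        /* the caller owns @copy and must kfree() it when done */
        return copy;
}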
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
        void *p;

        p = kmalloc_track_caller(len, gfp);
        if (p)
                memcpy(p, src, len);
        return p;
}
EXPORT_SYMBOL(kmemdup);

void *memdup_user(const void __user *src, size_t len)
{
        void *p;

        /*
         * Always use GFP_KERNEL, since copy_from_user() can sleep and
         * cause a page fault, which makes it pointless to use GFP_NOFS
         * or GFP_ATOMIC.
         */
        p = kmalloc_track_caller(len, GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kfree(p);
                return ERR_PTR(-EFAULT);
        }

        return p;
}
EXPORT_SYMBOL(memdup_user);
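/*
 * Illustrative sketch (not part of the original util.c): memdup_user() is
 * commonly used in ioctl-style paths to pull an argument block out of user
 * space. It never returns NULL; failures come back as ERR_PTR() values, so
 * the result must be checked with IS_ERR(). Names are hypothetical.
 */
static long example_copy_arg(const void __user *uarg, size_t size)
{
        void *karg = memdup_user(uarg, size);

        if (IS_ERR(karg))
                return PTR_ERR(karg);   /* -ENOMEM or -EFAULT */
        /* ... operate on the kernel-space copy ... */
        kfree(karg);
        return 0;
}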
static __always_inline void *__do_krealloc(const void *p, size_t new_size,
                                           gfp_t flags)
{
        void *ret;
        size_t ks = 0;

        if (p)
                ks = ksize(p);

        if (ks >= new_size)
                return (void *)p;

        ret = kmalloc_track_caller(new_size, flags);
        if (ret && p)
                memcpy(ret, p, ks);

        return ret;
}

void *__krealloc(const void *p, size_t new_size, gfp_t flags)
{
        if (unlikely(!new_size))
                return ZERO_SIZE_PTR;

        return __do_krealloc(p, new_size, flags);
}
EXPORT_SYMBOL(__krealloc);

void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
        void *ret;

        if (unlikely(!new_size)) {
                kfree(p);
                return ZERO_SIZE_PTR;
        }

        ret = __do_krealloc(p, new_size, flags);
        if (ret && p != ret)
                kfree(p);

        return ret;
}
EXPORT_SYMBOL(krealloc);
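/*
 * Illustrative sketch (not part of the original util.c): growing a buffer
 * with krealloc(). On failure the old allocation is left intact, so the
 * return value must be checked before overwriting the original pointer.
 * Names are hypothetical.
 */
static int example_grow_buf(char **bufp, size_t new_size)
{
        char *tmp = krealloc(*bufp, new_size, GFP_KERNEL);

        if (!tmp)
                return -ENOMEM; /* *bufp is still valid and still owned */
        *bufp = tmp;            /* the old buffer was freed by krealloc() if it moved */
        return 0;
}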
void kzfree(const void *p)
{
        size_t ks;
        void *mem = (void *)p;

        if (unlikely(ZERO_OR_NULL_PTR(mem)))
                return;
        ks = ksize(mem);
        memset(mem, 0, ks);
        kfree(mem);
}
EXPORT_SYMBOL(kzfree);
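/*
 * Illustrative sketch (not part of the original util.c): kzfree() suits
 * buffers that held sensitive data, since the bytes are cleared before the
 * memory goes back to the allocator. The name and the 32-byte size are
 * hypothetical.
 */
static int example_handle_secret(void)
{
        u8 *secret = kmalloc(32, GFP_KERNEL);

        if (!secret)
                return -ENOMEM;
        /* ... fill and use the secret ... */
        kzfree(secret);         /* zeroes the buffer, then kfree()s it */
        return 0;
}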
/*
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 */
char *strndup_user(const char __user *s, long n)
{
        char *p;
        long length;

        length = strnlen_user(s, n);

        if (!length)
                return ERR_PTR(-EFAULT);

        if (length > n)
                return ERR_PTR(-EINVAL);

        p = memdup_user(s, length);

        if (IS_ERR(p))
                return p;

        p[length - 1] = '\0';

        return p;
}
EXPORT_SYMBOL(strndup_user);
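/*
 * Illustrative sketch (not part of the original util.c): a syscall-like
 * path copying a NUL-terminated name from user space. strndup_user()
 * returns ERR_PTR(-EFAULT) for a bad pointer and ERR_PTR(-EINVAL) when the
 * string does not fit in @n bytes. The name and the 256-byte limit are
 * hypothetical.
 */
static int example_set_label(const char __user *uname)
{
        char *name = strndup_user(uname, 256);

        if (IS_ERR(name))
                return PTR_ERR(name);
        /* ... use @name, which is guaranteed to be NUL-terminated ... */
        kfree(name);
        return 0;
}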
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev, struct rb_node *rb_parent)
{
        struct vm_area_struct *next;

        vma->vm_prev = prev;
        if (prev) {
                next = prev->vm_next;
                prev->vm_next = vma;
        } else {
                mm->mmap = vma;
                if (rb_parent)
                        next = rb_entry(rb_parent,
                                        struct vm_area_struct, vm_rb);
                else
                        next = NULL;
        }
        vma->vm_next = next;
        if (next)
                next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
static int vm_is_stack_for_task(struct task_struct *t,
                                struct vm_area_struct *vma)
{
        return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

/*
 * Check if the vma is being used as a stack.
 * If in_group is non-zero, check in the entire thread group or else
 * just check in the current task. Returns the pid of the task that
 * the vma is stack for.
 */
pid_t vm_is_stack(struct task_struct *task,
                  struct vm_area_struct *vma, int in_group)
{
        pid_t ret = 0;

        if (vm_is_stack_for_task(task, vma))
                return task->pid;

        if (in_group) {
                struct task_struct *t;

                rcu_read_lock();
                if (!pid_alive(task))
                        goto done;

                t = task;
                do {
                        if (vm_is_stack_for_task(t, vma)) {
                                ret = t->pid;
                                goto done;
                        }
                } while_each_thread(task, t);
done:
                rcu_read_unlock();
        }

        return ret;
}
#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
        mm->mmap_base = TASK_UNMAPPED_BASE;
        mm->get_unmapped_area = arch_get_unmapped_area;
        mm->unmap_area = arch_unmap_area;
}
#endif

/*
 * Like get_user_pages_fast() except it is IRQ-safe in that it won't fall
 * back to the regular GUP.
 * If the architecture does not support this function, simply return with no
 * page pinned.
 */
int __attribute__((weak)) __get_user_pages_fast(unsigned long start,
                                                int nr_pages, int write,
                                                struct page **pages)
{
        return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

int __attribute__((weak)) get_user_pages_fast(unsigned long start,
                                              int nr_pages, int write,
                                              struct page **pages)
{
        struct mm_struct *mm = current->mm;
        int ret;

        down_read(&mm->mmap_sem);
        ret = get_user_pages(current, mm, start, nr_pages,
                             write, 0, pages, NULL);
        up_read(&mm->mmap_sem);

        return ret;
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
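/*
 * Illustrative sketch (not part of the original util.c): pinning one user
 * page for a short read-only access and dropping the reference afterwards
 * with put_page(). Names are hypothetical.
 */
static int example_pin_user_page(unsigned long uaddr)
{
        struct page *page;
        int ret;

        ret = get_user_pages_fast(uaddr, 1, 0, &page);  /* 0 == read-only */
        if (ret != 1)
                return ret < 0 ? ret : -EFAULT;
        /* ... access the page contents, e.g. via kmap_atomic() ... */
        put_page(page);         /* release the pin taken above */
        return 0;
}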
unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
        unsigned long flag, unsigned long pgoff)
{
        unsigned long ret;
        struct mm_struct *mm = current->mm;

        ret = security_mmap_file(file, prot, flag);
        if (!ret) {
                down_write(&mm->mmap_sem);
                ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff);
                up_write(&mm->mmap_sem);
        }
        return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
        unsigned long flag, unsigned long offset)
{
        if (unlikely(offset + PAGE_ALIGN(len) < offset))
                return -EINVAL;
        if (unlikely(offset & ~PAGE_MASK))
                return -EINVAL;

        return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);
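/*
 * Illustrative sketch (not part of the original util.c): mapping the first
 * page of an already-opened file read-only into the current task, the way
 * in-kernel users such as binary loaders call vm_mmap(). PROT_READ and
 * MAP_PRIVATE are assumed to come from <linux/mman.h>, which this file does
 * not include; the function name is hypothetical.
 */
static unsigned long example_map_first_page(struct file *filp)
{
        /* offset 0 is page aligned, so the -EINVAL checks above are satisfied */
        unsigned long addr = vm_mmap(filp, 0, PAGE_SIZE, PROT_READ,
                                     MAP_PRIVATE, 0);

        /* on failure @addr holds a negative errno encoded as unsigned long */
        return addr;
}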
/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);