Linux Kernel 3.7.1
process_vm_access.c
/*
 * linux/mm/process_vm_access.c
 *
 * Copyright (C) 2010-2011 Christopher Yeoh <[email protected]>, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/syscalls.h>

#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif

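/*
 * Call chain: the process_vm_readv()/process_vm_writev() syscalls (and
 * their compat variants) enter process_vm_rw(), which validates the
 * iovecs and calls process_vm_rw_core().  That routine resolves the
 * target task and mm, then walks each remote iovec via
 * process_vm_rw_single_vec(), which pins and copies batches of pages
 * with process_vm_rw_pages().
 */
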
static int process_vm_rw_pages(struct task_struct *task,
			       struct mm_struct *mm,
			       struct page **process_pages,
			       unsigned long pa,
			       unsigned long start_offset,
			       unsigned long len,
			       const struct iovec *lvec,
			       unsigned long lvec_cnt,
			       unsigned long *lvec_current,
			       size_t *lvec_offset,
			       int vm_write,
			       unsigned int nr_pages_to_copy,
			       ssize_t *bytes_copied)
{
	int pages_pinned;
	void *target_kaddr;
	int pgs_copied = 0;
	int j;
	int ret;
	ssize_t bytes_to_copy;
	ssize_t rc = 0;

	*bytes_copied = 0;

	/* Get the pages we're interested in */
	down_read(&mm->mmap_sem);
	pages_pinned = get_user_pages(task, mm, pa,
				      nr_pages_to_copy,
				      vm_write, 0, process_pages, NULL);
	up_read(&mm->mmap_sem);

	if (pages_pinned != nr_pages_to_copy) {
		rc = -EFAULT;
		goto end;
	}

	/* Do the copy for each page */
	for (pgs_copied = 0;
	     (pgs_copied < nr_pages_to_copy) && (*lvec_current < lvec_cnt);
	     pgs_copied++) {
		/* Make sure we have a non-zero-length iovec */
		while (*lvec_current < lvec_cnt
		       && lvec[*lvec_current].iov_len == 0)
			(*lvec_current)++;
		if (*lvec_current == lvec_cnt)
			break;

		/*
		 * Will copy smallest of:
		 * - bytes remaining in page
		 * - bytes remaining in the overall request (len)
		 * - bytes remaining in destination iovec
		 */
		bytes_to_copy = min_t(ssize_t, PAGE_SIZE - start_offset,
				      len - *bytes_copied);
		bytes_to_copy = min_t(ssize_t, bytes_to_copy,
				      lvec[*lvec_current].iov_len
				      - *lvec_offset);

		target_kaddr = kmap(process_pages[pgs_copied]) + start_offset;

		if (vm_write)
			ret = copy_from_user(target_kaddr,
					     lvec[*lvec_current].iov_base
					     + *lvec_offset,
					     bytes_to_copy);
		else
			ret = copy_to_user(lvec[*lvec_current].iov_base
					   + *lvec_offset,
					   target_kaddr, bytes_to_copy);
		kunmap(process_pages[pgs_copied]);
		if (ret) {
			*bytes_copied += bytes_to_copy - ret;
			pgs_copied++;
			rc = -EFAULT;
			goto end;
		}
		*bytes_copied += bytes_to_copy;
		*lvec_offset += bytes_to_copy;
		if (*lvec_offset == lvec[*lvec_current].iov_len) {
			/*
			 * This iovec is exhausted: move on to the next
			 * one.  If bytes are left in the current page,
			 * decrement pgs_copied so the loop revisits the
			 * same page for the next iovec.
			 */
			(*lvec_current)++;
			*lvec_offset = 0;
			start_offset = (start_offset + bytes_to_copy)
				% PAGE_SIZE;
			if (start_offset)
				pgs_copied--;
		} else {
			start_offset = 0;
		}
	}

end:
	if (vm_write) {
		for (j = 0; j < pages_pinned; j++) {
			if (j < pgs_copied)
				set_page_dirty_lock(process_pages[j]);
			put_page(process_pages[j]);
		}
	} else {
		for (j = 0; j < pages_pinned; j++)
			put_page(process_pages[j]);
	}

	return rc;
}
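
/*
 * Note on the error path above: a short copy returns -EFAULT but still
 * reports partial progress through *bytes_copied; process_vm_rw_core()
 * uses that to return the byte count instead of the error code when
 * some data was transferred.
 */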

/* Maximum number of pages kmalloc'd to hold struct page pointers during copy */
#define PVM_MAX_KMALLOC_PAGES (PAGE_SIZE * 2)
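/*
 * With 4 KiB pages and 8-byte pointers (e.g. x86-64), this allows
 * PVM_MAX_KMALLOC_PAGES / sizeof(struct page *) = 8192 / 8 = 1024
 * pinned pages per get_user_pages() batch, i.e. up to 4 MiB of the
 * remote address space per loop in process_vm_rw_single_vec().
 */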

static int process_vm_rw_single_vec(unsigned long addr,
				    unsigned long len,
				    const struct iovec *lvec,
				    unsigned long lvec_cnt,
				    unsigned long *lvec_current,
				    size_t *lvec_offset,
				    struct page **process_pages,
				    struct mm_struct *mm,
				    struct task_struct *task,
				    int vm_write,
				    ssize_t *bytes_copied)
{
	unsigned long pa = addr & PAGE_MASK;
	unsigned long start_offset = addr - pa;
	unsigned long nr_pages;
	ssize_t bytes_copied_loop;
	ssize_t rc = 0;
	unsigned long nr_pages_copied = 0;
	unsigned long nr_pages_to_copy;
	unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
		/ sizeof(struct page *);

	*bytes_copied = 0;

	/* Work out address and page range required */
	if (len == 0)
		return 0;
	nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
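	/*
	 * Worked example (assuming 4 KiB pages): addr = 0x1ff0, len = 0x20
	 * touches bytes 0x1ff0..0x200f, so nr_pages = 0x200f/0x1000 -
	 * 0x1ff0/0x1000 + 1 = 2 - 1 + 1 = 2.
	 */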

	while ((nr_pages_copied < nr_pages) && (*lvec_current < lvec_cnt)) {
		nr_pages_to_copy = min(nr_pages - nr_pages_copied,
				       max_pages_per_loop);

		rc = process_vm_rw_pages(task, mm, process_pages, pa,
					 start_offset, len,
					 lvec, lvec_cnt,
					 lvec_current, lvec_offset,
					 vm_write, nr_pages_to_copy,
					 &bytes_copied_loop);
		start_offset = 0;
		*bytes_copied += bytes_copied_loop;

		if (rc < 0) {
			return rc;
		} else {
			len -= bytes_copied_loop;
			nr_pages_copied += nr_pages_to_copy;
			pa += nr_pages_to_copy * PAGE_SIZE;
		}
	}

	return rc;
}

/* Maximum number of entries for process pages array which lives on stack */
#define PVM_MAX_PP_ARRAY_COUNT 16
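/*
 * When no single remote iovec spans more than PVM_MAX_PP_ARRAY_COUNT
 * pages, process_vm_rw_core() uses its on-stack pp_stack array and
 * skips the kmalloc() on the common small-copy path.
 */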

static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
				  unsigned long liovcnt,
				  const struct iovec *rvec,
				  unsigned long riovcnt,
				  unsigned long flags, int vm_write)
{
	struct task_struct *task;
	struct page *pp_stack[PVM_MAX_PP_ARRAY_COUNT];
	struct page **process_pages = pp_stack;
	struct mm_struct *mm;
	unsigned long i;
	ssize_t rc = 0;
	ssize_t bytes_copied_loop;
	ssize_t bytes_copied = 0;
	unsigned long nr_pages = 0;
	unsigned long nr_pages_iov;
	unsigned long iov_l_curr_idx = 0;
	size_t iov_l_curr_offset = 0;
	ssize_t iov_len;

	/*
	 * Work out how many struct page pointers we're going to need
	 * when eventually calling get_user_pages
	 */
	for (i = 0; i < riovcnt; i++) {
		iov_len = rvec[i].iov_len;
		if (iov_len > 0) {
			nr_pages_iov = ((unsigned long)rvec[i].iov_base
					+ iov_len)
				/ PAGE_SIZE - (unsigned long)rvec[i].iov_base
				/ PAGE_SIZE + 1;
			nr_pages = max(nr_pages, nr_pages_iov);
		}
	}

	if (nr_pages == 0)
		return 0;

	if (nr_pages > PVM_MAX_PP_ARRAY_COUNT) {
		/* For reliability don't try to kmalloc more than
		   2 pages worth */
		process_pages = kmalloc(min_t(size_t, PVM_MAX_KMALLOC_PAGES,
					      sizeof(struct page *) * nr_pages),
					GFP_KERNEL);

		if (!process_pages)
			return -ENOMEM;
	}

	/* Get process information */
	rcu_read_lock();
	task = find_task_by_vpid(pid);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();
	if (!task) {
		rc = -ESRCH;
		goto free_proc_pages;
	}

	mm = mm_access(task, PTRACE_MODE_ATTACH);
	if (!mm || IS_ERR(mm)) {
		rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
		/*
		 * Explicitly map EACCES to EPERM as EPERM is a more
		 * appropriate error code for process_vm_readv/writev
		 */
		if (rc == -EACCES)
			rc = -EPERM;
		goto put_task_struct;
	}

	for (i = 0; i < riovcnt && iov_l_curr_idx < liovcnt; i++) {
		rc = process_vm_rw_single_vec(
			(unsigned long)rvec[i].iov_base, rvec[i].iov_len,
			lvec, liovcnt, &iov_l_curr_idx, &iov_l_curr_offset,
			process_pages, mm, task, vm_write, &bytes_copied_loop);
		bytes_copied += bytes_copied_loop;
		if (rc != 0) {
			/* If we have managed to copy any data at all then
			   we return the number of bytes copied. Otherwise
			   we return the error code */
			if (bytes_copied)
				rc = bytes_copied;
			goto put_mm;
		}
	}

	rc = bytes_copied;
put_mm:
	mmput(mm);

put_task_struct:
	put_task_struct(task);

free_proc_pages:
	if (process_pages != pp_stack)
		kfree(process_pages);
	return rc;
}

static ssize_t process_vm_rw(pid_t pid,
			     const struct iovec __user *lvec,
			     unsigned long liovcnt,
			     const struct iovec __user *rvec,
			     unsigned long riovcnt,
			     unsigned long flags, int vm_write)
{
	struct iovec iovstack_l[UIO_FASTIOV];
	struct iovec iovstack_r[UIO_FASTIOV];
	struct iovec *iov_l = iovstack_l;
	struct iovec *iov_r = iovstack_r;
	ssize_t rc;

	if (flags != 0)
		return -EINVAL;

	/* Check iovecs */
	if (vm_write)
		rc = rw_copy_check_uvector(WRITE, lvec, liovcnt, UIO_FASTIOV,
					   iovstack_l, &iov_l);
	else
		rc = rw_copy_check_uvector(READ, lvec, liovcnt, UIO_FASTIOV,
					   iovstack_l, &iov_l);
	if (rc <= 0)
		goto free_iovecs;

	rc = rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt, UIO_FASTIOV,
				   iovstack_r, &iov_r);
	if (rc <= 0)
		goto free_iovecs;

	rc = process_vm_rw_core(pid, iov_l, liovcnt, iov_r, riovcnt, flags,
				vm_write);

free_iovecs:
	if (iov_r != iovstack_r)
		kfree(iov_r);
	if (iov_l != iovstack_l)
		kfree(iov_l);

	return rc;
}

SYSCALL_DEFINE6(process_vm_readv, pid_t, pid, const struct iovec __user *, lvec,
		unsigned long, liovcnt, const struct iovec __user *, rvec,
		unsigned long, riovcnt, unsigned long, flags)
{
	return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 0);
}

SYSCALL_DEFINE6(process_vm_writev, pid_t, pid,
		const struct iovec __user *, lvec,
		unsigned long, liovcnt, const struct iovec __user *, rvec,
		unsigned long, riovcnt, unsigned long, flags)
{
	return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 1);
}
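
/*
 * A minimal userspace sketch, assuming the glibc process_vm_readv()
 * wrapper (target_pid and remote_addr are placeholders supplied by the
 * caller); it reads one remote buffer into a local one:
 *
 *	#include <sys/uio.h>
 *
 *	char buf[4096];
 *	struct iovec local  = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct iovec remote = { .iov_base = remote_addr,
 *				.iov_len = sizeof(buf) };
 *	ssize_t n = process_vm_readv(target_pid, &local, 1,
 *				     &remote, 1, 0);
 *
 * The caller needs ptrace-attach permission on the target (see the
 * PTRACE_MODE_ATTACH check in process_vm_rw_core() above), and flags
 * must be 0, otherwise the call fails with EINVAL.
 */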

#ifdef CONFIG_COMPAT

asmlinkage ssize_t
compat_process_vm_rw(compat_pid_t pid,
		     const struct compat_iovec __user *lvec,
		     unsigned long liovcnt,
		     const struct compat_iovec __user *rvec,
		     unsigned long riovcnt,
		     unsigned long flags, int vm_write)
{
	struct iovec iovstack_l[UIO_FASTIOV];
	struct iovec iovstack_r[UIO_FASTIOV];
	struct iovec *iov_l = iovstack_l;
	struct iovec *iov_r = iovstack_r;
	ssize_t rc = -EFAULT;

	if (flags != 0)
		return -EINVAL;

	if (!access_ok(VERIFY_READ, lvec, liovcnt * sizeof(*lvec)))
		goto out;

	if (!access_ok(VERIFY_READ, rvec, riovcnt * sizeof(*rvec)))
		goto out;

	if (vm_write)
		rc = compat_rw_copy_check_uvector(WRITE, lvec, liovcnt,
						  UIO_FASTIOV, iovstack_l,
						  &iov_l);
	else
		rc = compat_rw_copy_check_uvector(READ, lvec, liovcnt,
						  UIO_FASTIOV, iovstack_l,
						  &iov_l);
	if (rc <= 0)
		goto free_iovecs;
	rc = compat_rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt,
					  UIO_FASTIOV, iovstack_r,
					  &iov_r);
	if (rc <= 0)
		goto free_iovecs;

	rc = process_vm_rw_core(pid, iov_l, liovcnt, iov_r, riovcnt, flags,
				vm_write);

free_iovecs:
	if (iov_r != iovstack_r)
		kfree(iov_r);
	if (iov_l != iovstack_l)
		kfree(iov_l);

out:
	return rc;
}

asmlinkage ssize_t
compat_sys_process_vm_readv(compat_pid_t pid,
			    const struct compat_iovec __user *lvec,
			    unsigned long liovcnt,
			    const struct compat_iovec __user *rvec,
			    unsigned long riovcnt,
			    unsigned long flags)
{
	return compat_process_vm_rw(pid, lvec, liovcnt, rvec,
				    riovcnt, flags, 0);
}

asmlinkage ssize_t
compat_sys_process_vm_writev(compat_pid_t pid,
			     const struct compat_iovec __user *lvec,
			     unsigned long liovcnt,
			     const struct compat_iovec __user *rvec,
			     unsigned long riovcnt,
			     unsigned long flags)
{
	return compat_process_vm_rw(pid, lvec, liovcnt, rvec,
				    riovcnt, flags, 1);
}

#endif