#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/syscalls.h>

#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif
static int process_vm_rw_pages(struct task_struct *task, struct mm_struct *mm,
                               struct page **process_pages, unsigned long pa,
                               unsigned long start_offset, unsigned long len,
                               const struct iovec *lvec, unsigned long lvec_cnt,
                               unsigned long *lvec_current, size_t *lvec_offset,
                               int vm_write, unsigned int nr_pages_to_copy,
                               ssize_t *bytes_copied)
{
        int pages_pinned;
        int pgs_copied = 0, j, ret;
        void *target_kaddr;
        ssize_t bytes_to_copy;
        ssize_t rc = 0;

        *bytes_copied = 0;

        /* Pin the pages of the target task we are about to copy to/from */
        down_read(&mm->mmap_sem);
        pages_pinned = get_user_pages(task, mm, pa, nr_pages_to_copy,
                                      vm_write, 0, process_pages, NULL);
        up_read(&mm->mmap_sem);

        if (pages_pinned != nr_pages_to_copy) {
                rc = -EFAULT;
                goto end;
        }
        /* Copy each pinned page to (or from) the local iovecs */
        for (pgs_copied = 0;
             (pgs_copied < nr_pages_to_copy) && (*lvec_current < lvec_cnt);
             pgs_copied++) {
                /* Skip over any zero-length local iovecs */
                while (*lvec_current < lvec_cnt
                       && lvec[*lvec_current].iov_len == 0)
                        (*lvec_current)++;
                if (*lvec_current == lvec_cnt)
                        break;

                /* Copy no more than remains of the page and of the iovec */
                bytes_to_copy = min_t(ssize_t, PAGE_SIZE - start_offset,
                                      len - *bytes_copied);
                bytes_to_copy = min_t(ssize_t, bytes_to_copy,
                                      lvec[*lvec_current].iov_len
                                      - *lvec_offset);

                target_kaddr = kmap(process_pages[pgs_copied]) + start_offset;

                if (vm_write)
                        ret = copy_from_user(target_kaddr,
                                             lvec[*lvec_current].iov_base
                                             + *lvec_offset, bytes_to_copy);
                else
                        ret = copy_to_user(lvec[*lvec_current].iov_base
                                           + *lvec_offset,
                                           target_kaddr, bytes_to_copy);
                kunmap(process_pages[pgs_copied]);
                if (ret) {
                        /* Faulted part way: count the bytes that made it */
                        *bytes_copied += bytes_to_copy - ret;
                        pgs_copied++;
                        rc = -EFAULT;
                        goto end;
                }
                *bytes_copied += bytes_to_copy;
                *lvec_offset += bytes_to_copy;
                if (*lvec_offset == lvec[*lvec_current].iov_len) {
                        /* Local iovec filled: move to the next one and, if
                           bytes remain in this page, revisit the page */
                        (*lvec_current)++;
                        *lvec_offset = 0;
                        start_offset = (start_offset + bytes_to_copy)
                                % PAGE_SIZE;
                        if (start_offset)
                                pgs_copied--;
                } else {
                        start_offset = 0;
                }
        }

end:
        /* Unpin the pages, marking those we wrote to as dirty */
        if (vm_write) {
                for (j = 0; j < pages_pinned; j++) {
                        if (j < pgs_copied)
                                set_page_dirty_lock(process_pages[j]);
                        put_page(process_pages[j]);
                }
        } else {
                for (j = 0; j < pages_pinned; j++)
                        put_page(process_pages[j]);
        }

        return rc;
}
/* Upper bound, in bytes, on the kmalloc'd array of struct page pointers */
#define PVM_MAX_KMALLOC_PAGES (PAGE_SIZE * 2)
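/*
 * Illustrative sizing sketch, assuming 4 KiB pages and 8-byte pointers:
 * the cap above allows 8192 / 8 = 1024 struct page pointers per batch,
 * i.e. at most about 4 MiB of the remote address space is pinned per
 * process_vm_rw_pages() call made from process_vm_rw_single_vec() below.
 */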
static int process_vm_rw_single_vec(unsigned long addr,
                                    unsigned long len,
                                    const struct iovec *lvec,
                                    unsigned long lvec_cnt,
                                    unsigned long *lvec_current,
                                    size_t *lvec_offset,
                                    struct page **process_pages,
                                    struct mm_struct *mm,
                                    struct task_struct *task,
                                    int vm_write,
                                    ssize_t *bytes_copied)
{
        unsigned long pa = addr & PAGE_MASK;
        unsigned long start_offset = addr - pa;
        unsigned long nr_pages;
        ssize_t bytes_copied_loop;
        ssize_t rc = 0;
        unsigned long nr_pages_copied = 0;
        unsigned long nr_pages_to_copy;
        unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
                / sizeof(struct page *);
        *bytes_copied = 0;

        if (len == 0)
                return 0;
        /* Number of pages spanned by the remote range [addr, addr + len) */
        nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;

        while ((nr_pages_copied < nr_pages) && (*lvec_current < lvec_cnt)) {
                /* Pin and copy at most max_pages_per_loop pages per pass */
                nr_pages_to_copy = min(nr_pages - nr_pages_copied,
                                       max_pages_per_loop);

                rc = process_vm_rw_pages(task, mm, process_pages, pa,
                                         start_offset, len,
                                         lvec, lvec_cnt,
                                         lvec_current, lvec_offset,
                                         vm_write, nr_pages_to_copy,
                                         &bytes_copied_loop);
                start_offset = 0;
                *bytes_copied += bytes_copied_loop;

                if (rc < 0)
                        return rc;

                len -= bytes_copied_loop;
                nr_pages_copied += nr_pages_to_copy;
                pa += nr_pages_to_copy * PAGE_SIZE;
        }

        return rc;
}
/* Maximum number of struct page pointers kept on the stack during a copy */
#define PVM_MAX_PP_ARRAY_COUNT 16
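/*
 * Another illustrative sizing note, again assuming 4 KiB pages and
 * 8-byte pointers: the 16-entry stack array costs 128 bytes and covers
 * remote iovecs spanning up to 64 KiB; larger requests fall back to the
 * kmalloc() in process_vm_rw_core() below.
 */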
static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
                                  unsigned long liovcnt,
                                  const struct iovec *rvec,
                                  unsigned long riovcnt,
                                  unsigned long flags, int vm_write)
{
        struct task_struct *task;
        struct page *pp_stack[PVM_MAX_PP_ARRAY_COUNT];
        struct page **process_pages = pp_stack;
        struct mm_struct *mm;
        unsigned long i;
        ssize_t rc = 0;
        ssize_t bytes_copied_loop;
        ssize_t bytes_copied = 0;
        unsigned long nr_pages = 0;
        unsigned long nr_pages_iov;
        unsigned long iov_l_curr_idx = 0;
        size_t iov_l_curr_offset = 0;
        ssize_t iov_len;
        /* Work out the largest number of pages any single remote iovec spans,
           as that is how many struct page pointers we will need at once */
        for (i = 0; i < riovcnt; i++) {
                iov_len = rvec[i].iov_len;
                if (iov_len > 0) {
                        nr_pages_iov = ((unsigned long)rvec[i].iov_base
                                        + iov_len)
                                / PAGE_SIZE - (unsigned long)rvec[i].iov_base
                                / PAGE_SIZE + 1;
                        nr_pages = max(nr_pages, nr_pages_iov);
                }
        }

        if (nr_pages == 0)
                return 0;

        if (nr_pages > PVM_MAX_PP_ARRAY_COUNT) {
                /* Stack array too small: kmalloc one, but cap it at
                   PVM_MAX_KMALLOC_PAGES bytes for reliability */
                process_pages = kmalloc(min_t(size_t, PVM_MAX_KMALLOC_PAGES,
                                              sizeof(struct page *) * nr_pages),
                                        GFP_KERNEL);
                if (!process_pages)
                        return -ENOMEM;
        }

        /* Look up and pin the target task */
        rcu_read_lock();
        task = find_task_by_vpid(pid);
        if (task)
                get_task_struct(task);
        rcu_read_unlock();
        if (!task) {
                rc = -ESRCH;
                goto free_proc_pages;
        }
        mm = mm_access(task, PTRACE_MODE_ATTACH);
        if (!mm || IS_ERR(mm)) {
                rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
                /* EPERM is the more natural error for this syscall */
                if (rc == -EACCES)
                        rc = -EPERM;
                goto put_task_struct;
        }
        for (i = 0; i < riovcnt && iov_l_curr_idx < liovcnt; i++) {
                rc = process_vm_rw_single_vec(
                        (unsigned long)rvec[i].iov_base, rvec[i].iov_len,
                        lvec, liovcnt, &iov_l_curr_idx, &iov_l_curr_offset,
                        process_pages, mm, task, vm_write, &bytes_copied_loop);
                bytes_copied += bytes_copied_loop;
                if (rc != 0) {
                        /* Return the byte count for a partial transfer,
                           the error code if nothing was copied at all */
                        if (bytes_copied)
                                break;
                        goto put_mm;
                }
        }

        rc = bytes_copied;

put_mm:
        mmput(mm);

put_task_struct:
        put_task_struct(task);

free_proc_pages:
        if (process_pages != pp_stack)
                kfree(process_pages);
        return rc;
}
static ssize_t process_vm_rw(pid_t pid,
                             const struct iovec __user *lvec,
                             unsigned long liovcnt,
                             const struct iovec __user *rvec,
                             unsigned long riovcnt,
                             unsigned long flags, int vm_write)
{
        struct iovec iovstack_l[UIO_FASTIOV];
        struct iovec iovstack_r[UIO_FASTIOV];
        struct iovec *iov_l = iovstack_l;
        struct iovec *iov_r = iovstack_r;
        ssize_t rc;

        if (flags != 0)
                return -EINVAL;

        /* Both user iovec arrays are validated and copied in first: they
           land in the stack arrays, or in kmalloc'd copies when they
           exceed UIO_FASTIOV entries, which is what the frees below
           check for. */

        rc = process_vm_rw_core(pid, iov_l, liovcnt, iov_r, riovcnt, flags,
                                vm_write);

free_iovecs:
        if (iov_r != iovstack_r)
                kfree(iov_r);
        if (iov_l != iovstack_l)
                kfree(iov_l);

        return rc;
}
SYSCALL_DEFINE6(process_vm_readv, pid_t, pid, const struct iovec __user *, lvec,
                unsigned long, liovcnt, const struct iovec __user *, rvec,
                unsigned long, riovcnt, unsigned long, flags)
{
        return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 0);
}
SYSCALL_DEFINE6(process_vm_writev, pid_t, pid,
                const struct iovec __user *, lvec,
                unsigned long, liovcnt, const struct iovec __user *, rvec,
                unsigned long, riovcnt, unsigned long, flags)
{
        return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 1);
}
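/*
 * For reference, a minimal userspace sketch of driving these syscalls
 * (with a sufficiently recent glibc the wrappers are declared in
 * <sys/uio.h> under _GNU_SOURCE; child_pid, remote_addr and the
 * 4096-byte buffer are illustrative assumptions):
 *
 *      char buf[4096];
 *      struct iovec local  = { .iov_base = buf,
 *                              .iov_len  = sizeof(buf) };
 *      struct iovec remote = { .iov_base = (void *)remote_addr,
 *                              .iov_len  = sizeof(buf) };
 *      ssize_t n = process_vm_readv(child_pid, &local, 1, &remote, 1, 0);
 *      if (n < 0)
 *              perror("process_vm_readv");
 *
 * The local iovecs describe memory in the calling process and the
 * remote iovecs memory in the target process; flags must be zero.
 */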
#ifdef CONFIG_COMPAT

asmlinkage ssize_t
compat_process_vm_rw(compat_pid_t pid,
                     const struct compat_iovec __user *lvec,
                     unsigned long liovcnt,
                     const struct compat_iovec __user *rvec,
                     unsigned long riovcnt,
                     unsigned long flags, int vm_write)
{
        struct iovec iovstack_l[UIO_FASTIOV];
        struct iovec iovstack_r[UIO_FASTIOV];
        struct iovec *iov_l = iovstack_l;
        struct iovec *iov_r = iovstack_r;
        ssize_t rc = -EFAULT;

        if (flags != 0)
                return -EINVAL;

        /* The compat iovec arrays are checked and converted to native
           struct iovec copies first (stack arrays, or kmalloc'd ones
           past UIO_FASTIOV entries) before calling the common core. */

        rc = process_vm_rw_core(pid, iov_l, liovcnt, iov_r, riovcnt, flags,
                                vm_write);

free_iovecs:
        if (iov_r != iovstack_r)
                kfree(iov_r);
        if (iov_l != iovstack_l)
                kfree(iov_l);
        return rc;
}
asmlinkage ssize_t
compat_sys_process_vm_readv(compat_pid_t pid,
                            const struct compat_iovec __user *lvec,
                            unsigned long liovcnt,
                            const struct compat_iovec __user *rvec,
                            unsigned long riovcnt,
                            unsigned long flags)
{
        return compat_process_vm_rw(pid, lvec, liovcnt, rvec,
                                    riovcnt, flags, 0);
}
asmlinkage ssize_t
compat_sys_process_vm_writev(compat_pid_t pid,
                             const struct compat_iovec __user *lvec,
                             unsigned long liovcnt,
                             const struct compat_iovec __user *rvec,
                             unsigned long riovcnt,
                             unsigned long flags)
{
        return compat_process_vm_rw(pid, lvec, liovcnt, rvec,
                                    riovcnt, flags, 1);
}

#endif /* CONFIG_COMPAT */