#include <linux/module.h>
#include <linux/sched.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
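
/*
 * Changes to a process address space have to be mirrored into the
 * host's address space.  Pending host mmap, munmap and mprotect
 * requests are collected as host_vm_ops in a struct host_vm_change
 * and replayed in batches by do_ops().
 */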
#define INIT_HVC(mm, force) \
	((struct host_vm_change) \
	 { .ops		= { { .type = NONE } }, \
	   .id		= &mm->context.id, \
	   .data	= NULL, \
	   .index	= 0, \
	   .force	= force })
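
/*
 * do_ops() replays the first "end" queued operations against the host:
 * MMAP entries through map(), MUNMAP through unmap() and MPROTECT
 * through protect().  "finished" is forwarded so the host side can
 * tell when the last batch of a flush has been sent.
 */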
static int do_ops(struct host_vm_change *hvc, int end,
		  int finished)
{
	struct host_vm_op *op;
	int i, ret = 0;

	for (i = 0; i < end && !ret; i++) {
		op = &hvc->ops[i];
		switch (op->type) {
		case MMAP:
			ret = map(hvc->id, op->u.mmap.addr, op->u.mmap.len,
				  op->u.mmap.prot, op->u.mmap.fd,
				  op->u.mmap.offset, finished, &hvc->data);
			break;
		case MUNMAP:
			ret = unmap(hvc->id, op->u.munmap.addr,
				    op->u.munmap.len, finished, &hvc->data);
			break;
		case MPROTECT:
			ret = protect(hvc->id, op->u.mprotect.addr,
				      op->u.mprotect.len, op->u.mprotect.prot,
				      finished, &hvc->data);
			break;
		}
	}

	return ret;
}
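
/*
 * add_mmap() queues a host mmap of "len" bytes at virtual address
 * "virt"; "fd" and "offset", derived from "phys" in the full function,
 * name the host file region backing the page.  A request that simply
 * extends the previously queued mmap (same protection, same fd,
 * contiguous in address and file offset) is merged into it instead of
 * taking another slot.
 */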
static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
		    unsigned int prot, struct host_vm_change *hvc)
{
	struct host_vm_op *last;

	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MMAP) &&
		    (last->u.mmap.addr + last->u.mmap.len == virt) &&
		    (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
		    (last->u.mmap.offset + last->u.mmap.len == offset)) {
			last->u.mmap.len += len;
			return 0;
		}
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type = MMAP,
				    .u = { .mmap = { .addr = virt, .len = len,
						     .prot = prot, .fd = fd,
						     .offset = offset } } });
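
/*
 * add_munmap() queues a host munmap; a request contiguous with the
 * previously queued munmap is merged into it.
 */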
static int add_munmap(unsigned long addr, unsigned long len,
		      struct host_vm_change *hvc)
{
	struct host_vm_op *last;

	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MUNMAP) &&
		    (last->u.munmap.addr + last->u.munmap.len == addr)) {
			last->u.munmap.len += len;
			return 0;
		}
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type = MUNMAP,
				    .u = { .munmap = { .addr = addr,
						       .len = len } } });
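
/*
 * add_mprotect() queues a host mprotect, merging with the previous
 * entry when the ranges are adjacent and the protection matches.
 */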
static int add_mprotect(unsigned long addr, unsigned long len,
			unsigned int prot, struct host_vm_change *hvc)
{
	struct host_vm_op *last;

	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MPROTECT) &&
		    (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
		    (last->u.mprotect.prot == prot)) {
			last->u.mprotect.len += len;
			return 0;
		}
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type = MPROTECT,
				    .u = { .mprotect = { .addr = addr,
							 .len = len,
							 .prot = prot } } });
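
/* ADD_ROUND(n, inc): the next inc-aligned boundary above n (inc must be a power of two) */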
#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))
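
/*
 * update_pte_range() scans the PTEs covered by one pmd entry and
 * queues what each one needs on the host: a forced flush or a freshly
 * instantiated page gets its mapping redone, a protection-only change
 * becomes an mprotect, and the PTE is then marked up to date.
 */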
static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pte_t *pte;
	int r, w, x, prot, ret = 0;

	pte = pte_offset_kernel(pmd, addr);
	do {
		r = pte_read(*pte);
		w = pte_write(*pte);
		x = pte_exec(*pte);
		/* prot reflects the PTE's permissions, downgraded for pages
		 * that are not young or not dirty */
		prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
			(x ? UM_PROT_EXEC : 0));
		if (hvc->force || pte_newpage(*pte)) {
			ret = add_munmap(addr, PAGE_SIZE, hvc);
		}
		else if (pte_newprot(*pte))
			ret = add_mprotect(addr, PAGE_SIZE, prot, hvc);
		*pte = pte_mkuptodate(*pte);
	} while (pte++, addr += PAGE_SIZE, ((addr < end) && !ret));
	return ret;
}
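
/*
 * update_pmd_range() and update_pud_range() walk the middle levels of
 * the page table.  An entry that is not present has the whole range it
 * covers unmapped on the host (when forced or newly changed); a
 * present entry sends the walk down one level.
 */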
static inline int update_pmd_range(pud_t *pud, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_present(*pmd)) {
			if (hvc->force || pmd_newpage(*pmd))
				ret = add_munmap(addr, next - addr, hvc);
		}
		else ret = update_pte_range(pmd, addr, next, hvc);
	} while (pmd++, addr = next, ((addr < end) && !ret));
static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	do {
		next = pud_addr_end(addr, end);
		if (!pud_present(*pud)) {
			if (hvc->force || pud_newpage(*pud))
				ret = add_munmap(addr, next - addr, hvc);
		}
		else ret = update_pmd_range(pud, addr, next, hvc);
	} while (pud++, addr = next, ((addr < end) && !ret));
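
/*
 * fix_range_common() drives the walk above over one range of a
 * process address space: missing pgd entries are unmapped wholesale,
 * present ones are descended into via update_pud_range(), and the
 * queued operations are finally flushed to the host with do_ops().
 */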
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force)
{
	struct host_vm_change hvc = INIT_HVC(mm, force);

	do {
		next = pgd_addr_end(addr, end_addr);
		if (!pgd_present(*pgd)) {
			if (force || pgd_newpage(*pgd)) {
				ret = add_munmap(addr, next - addr, &hvc);
				pgd_mkuptodate(*pgd);
			}
		}
		else ret = update_pud_range(pgd, addr, next, &hvc);
	} while (pgd++, addr = next, ((addr < end_addr) && !ret));

	ret = do_ops(&hvc, hvc.index, 1);
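
/*
 * flush_tlb_kernel_range_common() handles kernel mappings.  It walks
 * the kernel page tables directly and applies each change to the host
 * immediately rather than queueing it, panicking if the host refuses
 * to drop a mapping.
 */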
static int flush_tlb_kernel_range_common(unsigned long start,
					 unsigned long end)
{
	unsigned long addr, last;
	int updated = 0, err;

	for (addr = start; addr < end;) {
		if (pgd_newpage(*pgd)) {
			/* each level tears its range down on the host and
			 * panics if the munmap is refused: */
			panic("munmap failed, errno = %d\n", -err);
			panic("munmap failed, errno = %d\n", -err);
			panic("munmap failed, errno = %d\n", -err);
			panic("munmap failed, errno = %d\n", -err);
		/* a protection-only change at the pte level becomes a host
		 * mprotect: */
		else if (pte_newprot(*pte)) {
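
/*
 * flush_tlb_page() repairs the host mapping for a single address:
 * depending on the PTE it re-mmaps, unmaps or re-protects the page on
 * the host, and logs an error if that fails.
 */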
	int r, w, x, prot, err = 0;

	prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
		(x ? UM_PROT_EXEC : 0));
	if (pte_newpage(*pte)) {
		/* a new page is re-mmapped from its backing file */
		unsigned long long offset;

	else if (pte_newprot(*pte))
		/* only the protection changed: a host mprotect is enough */

	*pte = pte_mkuptodate(*pte);

	printk(KERN_ERR "Failed to flush page for address 0x%lx\n", address);
	flush_tlb_kernel_range_common(start, end);

	flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);

static void fix_range(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force)

		flush_tlb_kernel_range_common(start, end);
	else fix_range(vma->vm_mm, start, end, 0);

	fix_range(mm, start, end, 0);
	/* flush_tlb_mm(): walk every vma of the mm and fix its range */
	while (vma != NULL) {

	/* force_flush_all(): the same walk, with force set */
	while (vma != NULL) {