#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/types.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
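/*
 * Compile-time default for the checker: 0 = disabled, 1 = enabled,
 * 2 = one-shot mode, where kmemcheck turns itself off again after the
 * first error report. The kmemcheck= boot parameter (below) overrides
 * whichever default was configured.
 */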
#ifdef CONFIG_KMEMCHECK_DISABLED_BY_DEFAULT
# define KMEMCHECK_ENABLED 0
#endif

#ifdef CONFIG_KMEMCHECK_ENABLED_BY_DEFAULT
# define KMEMCHECK_ENABLED 1
#endif

#ifdef CONFIG_KMEMCHECK_ONESHOT_BY_DEFAULT
# define KMEMCHECK_ENABLED 2
#endif
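/*
 * kmemcheck hides tracked pages (marks them non-present) and single-steps
 * whatever instruction faults on them; that machinery is strictly
 * per-CPU, so kmemcheck_init() caps the machine at one CPU:
 */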
59 "kmemcheck: Limiting number of CPUs to 1.\n");
static int __init param_kmemcheck(char *str)
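/*
 * Every address that faults on a tracked page is recorded in a per-CPU
 * list, so that after the faulting instruction has been single-stepped,
 * each page it touched can be hidden again.
 */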
static void kmemcheck_save_addr(unsigned long addr)
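/*
 * kmemcheck_show_all()/kmemcheck_hide_all() walk the saved addresses and
 * make the corresponding pages present/non-present; the return value is
 * the number of saved addresses that actually belonged to tracked pages.
 */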
static unsigned int kmemcheck_show_all(void)

static unsigned int kmemcheck_hide_all(void)
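/*
 * kmemcheck_show() runs from the page-fault handler: it unhides the
 * saved pages and sets the TF flag so that the CPU single-steps the
 * faulting instruction; kmemcheck_hide() then runs from the resulting
 * debug trap and reverses the operation:
 */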
		kmemcheck_show_all();

	if (kmemcheck_show_all() == 0)
		return;

		kmemcheck_show_all();
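	/*
	 * In the debug-trap path, the touched pages are hidden again only
	 * if checking is still enabled; if kmemcheck was switched off in
	 * the meantime (e.g. by one-shot mode), the pages stay visible:
	 */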
	if (kmemcheck_enabled)
		n = kmemcheck_hide_all();
	else
		n = kmemcheck_show_all();

	for (i = 0; i < n; ++i) {
		/* ... */
	}

	for (i = 0; i < n; ++i) {
		/* ... */
	}
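/*
 * The "strict" variants require the access to fall entirely within one
 * page. For reads, the shadow bytes are looked up and tested; touching
 * uninitialized shadow produces an error report (and, in one-shot mode,
 * disables further checking).
 */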
static void kmemcheck_read_strict(struct pt_regs *regs,
	unsigned long addr, unsigned int size)
{
	/* ... */
	kmemcheck_save_addr(addr);
	/* ... */
}
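/*
 * A read may straddle a page boundary, and the two pages have separate
 * shadow mappings, so the access is split in two. Worked example with
 * 4 KiB pages: an 8-byte read at addr = 0x1ffe gives next_addr = 0x2005
 * and next_page = 0x2000, so the pieces are next_page - addr = 2 bytes
 * at 0x1ffe and next_addr - next_page + 1 = 6 bytes at 0x2000.
 */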
static void kmemcheck_read(struct pt_regs *regs,
	unsigned long addr, unsigned int size)
{
	unsigned long page = addr & PAGE_MASK;
	unsigned long next_addr = addr + size - 1;
	unsigned long next_page = next_addr & PAGE_MASK;

	if (likely(page == next_page)) {
		kmemcheck_read_strict(regs, addr, size);
		return;
	}

	/*
	 * Split the access at the page boundary; each part is checked
	 * separately (so a single straddling read can yield two reports).
	 */
	kmemcheck_read_strict(regs, addr, next_page - addr);
	kmemcheck_read_strict(regs, next_page, next_addr - next_page + 1);
}
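/*
 * Writes need no validity test: the shadow bytes covering the written
 * range are simply marked initialized. The address is saved first so the
 * page can be hidden again after the single-step.
 */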
static void kmemcheck_write_strict(struct pt_regs *regs,
	unsigned long addr, unsigned int size)
{
	/* ... */
	kmemcheck_save_addr(addr);
	/* ... */
}

static void kmemcheck_write(struct pt_regs *regs,
	unsigned long addr, unsigned int size)
{
	unsigned long page = addr & PAGE_MASK;
	unsigned long next_addr = addr + size - 1;
	unsigned long next_page = next_addr & PAGE_MASK;

	if (likely(page == next_page)) {
		kmemcheck_write_strict(regs, addr, size);
		return;
	}

	/* See the comment in kmemcheck_read(). */
	kmemcheck_write_strict(regs, addr, next_page - addr);
	kmemcheck_write_strict(regs, next_page, next_addr - next_page + 1);
}
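/*
 * Copying is the hard case: there are two addresses, and each may
 * independently cross a page boundary (with a different shadow mapping
 * on either side). The source's shadow bytes are first staged into the
 * small on-stack shadow[] buffer, then applied to the destination.
 */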
static void kmemcheck_copy(struct pt_regs *regs,
	unsigned long src_addr, unsigned long dst_addr, unsigned int size)
{
	uint8_t shadow[8];

	unsigned long page;
	unsigned long next_addr;
	unsigned long next_page;

	uint8_t *x;
	unsigned int i;
	unsigned int n;

	BUG_ON(size > sizeof(shadow));

	page = src_addr & PAGE_MASK;
	next_addr = src_addr + size - 1;
	next_page = next_addr & PAGE_MASK;

	if (likely(page == next_page)) {
		/* Same page */
		x = kmemcheck_shadow_lookup(src_addr);
		if (x) {
			kmemcheck_save_addr(src_addr);
			for (i = 0; i < size; ++i)
				shadow[i] = x[i];
		} else {
			for (i = 0; i < size; ++i)
				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
		}
	} else {
		n = next_page - src_addr;
		BUG_ON(n > sizeof(shadow));

		/* First page */
		x = kmemcheck_shadow_lookup(src_addr);
		if (x) {
			kmemcheck_save_addr(src_addr);
			for (i = 0; i < n; ++i)
				shadow[i] = x[i];
		} else {
			/* Not tracked */
			for (i = 0; i < n; ++i)
				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
		}

		/* Second page */
		x = kmemcheck_shadow_lookup(next_page);
		if (x) {
			kmemcheck_save_addr(next_page);
			for (i = n; i < size; ++i)
				shadow[i] = x[i - n];
		} else {
			/* Not tracked */
			for (i = n; i < size; ++i)
				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
		}
	}

	page = dst_addr & PAGE_MASK;
	next_addr = dst_addr + size - 1;
	next_page = next_addr & PAGE_MASK;

	if (likely(page == next_page)) {
		/* Same page */
		x = kmemcheck_shadow_lookup(dst_addr);
		if (x) {
			kmemcheck_save_addr(dst_addr);
			for (i = 0; i < size; ++i) {
				x[i] = shadow[i];
				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
			}
		}
	} else {
		n = next_page - dst_addr;
		BUG_ON(n > sizeof(shadow));

		/* First page */
		x = kmemcheck_shadow_lookup(dst_addr);
		if (x) {
			kmemcheck_save_addr(dst_addr);
			for (i = 0; i < n; ++i) {
				x[i] = shadow[i];
				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
			}
		}

		/* Second page */
		x = kmemcheck_shadow_lookup(next_page);
		if (x) {
			kmemcheck_save_addr(next_page);
			for (i = n; i < size; ++i) {
				x[i - n] = shadow[i];
				shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
			}
		}
	}

	/* ... */
}

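/*
 * Decode the instruction at regs->ip to find the access size and any
 * special semantics. Most opcodes are handled using the address and
 * direction already reported by the page-fault handler (the "fallback");
 * the string instructions below are special because they touch two
 * addresses but raise only a single fault.
 */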
static void kmemcheck_access(struct pt_regs *regs,
	unsigned long fallback_address, enum kmemcheck_method fallback_method)
{
	const uint8_t *insn;
	const uint8_t *insn_primary;
	unsigned int size;

	insn = (const uint8_t *) regs->ip;
	insn_primary = kmemcheck_opcode_get_primary(insn);
	kmemcheck_opcode_decode(insn, &size);

	switch (insn_primary[0]) {
#ifdef CONFIG_KMEMCHECK_BITOPS_OK
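	/*
	 * With CONFIG_KMEMCHECK_BITOPS_OK, the immediate-group opcodes
	 * 0x80-0x83 (whose case labels are elided here) are treated as
	 * plain writes, so partially initialized bitfields do not produce
	 * bogus reports. The ModRM reg field, (insn_primary[1] >> 3) & 7,
	 * selects the operation: 1 = OR, 4 = AND, 6 = XOR.
	 */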
		switch ((insn_primary[1] >> 3) & 7) {
			kmemcheck_write(regs, fallback_address, size);
			goto out;
			/* ... */
		}
		break;
#endif

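		/*
		 * The string instructions MOVS (0xa4/0xa5) and CMPS
		 * (0xa6/0xa7) take two addresses (%si and %di) but generate
		 * only one page fault, so both operands are handled
		 * explicitly: MOVS propagates shadow state from source to
		 * destination, while CMPS reads both.
		 */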
		kmemcheck_copy(regs, regs->si, regs->di, size);
		goto out;

		kmemcheck_read(regs, regs->si, size);
		kmemcheck_read(regs, regs->di, size);
		goto out;
	}
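	/*
	 * If the opcode isn't special in any way, use the data from the
	 * page-fault handler to determine the address and type of access:
	 */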
	switch (fallback_method) {
	case KMEMCHECK_READ:
		kmemcheck_read(regs, fallback_address, size);
		goto out;
	case KMEMCHECK_WRITE:
		kmemcheck_write(regs, fallback_address, size);
		goto out;
	}
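/*
 * In the fault-handler entry path, accesses made in virtual-8086 mode
 * (e.g. PnP BIOS calls) are assumed never to touch tracked kernel memory
 * and are skipped:
 */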
	if (regs->flags & X86_VM_MASK)
		return false;