#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <asm/sections.h>
#include <asm/processor.h>
#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */

#define BYTES_PER_POINTER	sizeof(void *)
/* GFP bits allowed when kmemleak allocates its own metadata */
#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
				 __GFP_NOWARN)
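/*
 * Illustration (added, not from the original file): metadata allocations
 * pass the caller's gfp flags through this mask so that tracking an
 * allocation never triggers allocation behaviour the caller did not ask
 * for, e.g.:
 *
 *	object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
 *
 * Only the GFP_KERNEL/GFP_ATOMIC bits survive; the added flags forbid
 * retries, use of emergency reserves and allocation-failure warnings.
 */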
/* special color values stored in object->min_count by the paint functions */
#define KMEMLEAK_GREY	0
#define KMEMLEAK_BLACK	-1
/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)
/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE		16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE		1
/* maximum number of lines to be printed */
#define HEX_MAX_LINES		2
/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* set via the "kmemleak=on" boot parameter to skip the default disabling */
static int kmemleak_skip_disable;
static void kmemleak_disable(void);
/*
 * Print a warning and dump the stack trace; also set kmemleak_warning so
 * that the condition is reflected in the final scan results.
 */
#define kmemleak_warn(x...)	do {		\
	pr_warning(x);				\
	dump_stack();				\
	atomic_set(&kmemleak_warning, 1);	\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing no longer available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)
/*
 * Print the object's hex dump to the seq file. The number of lines printed
 * is limited to HEX_MAX_LINES to prevent seq file spamming. Must be called
 * with object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
			    struct kmemleak_object *object)
{
	int i, len, remaining;
/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not a false positive (min_count == 0) or it has
 *	     sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 */
static bool color_white(const struct kmemleak_object *object)
{
	return object->count != KMEMLEAK_BLACK &&
		object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != KMEMLEAK_BLACK &&
		object->count >= object->min_count;
}
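/*
 * Example of the coloring rules above (added for illustration): a freshly
 * created object starts with count == 0; with the usual min_count of 1 it
 * is white (a leak candidate) until the scan finds at least one pointer to
 * it and increments count, which turns it gray. A min_count of 0 makes the
 * object gray from the start, i.e. never reported.
 */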
/*
 * Print the information for an unreferenced object to the seq file.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;
	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);

	seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
		   object->pointer, object->size);
	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
		   object->comm, object->pid, object->jiffies,
		   msecs_age / 1000, msecs_age % 1000);
	hex_dump_object(seq, object);
	seq_printf(seq, "  backtrace:\n");

	for (i = 0; i < object->trace_len; i++) {
		void *ptr = (void *)object->trace[i];
		seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
	}
}
/*
 * Print the kmemleak_object information. Must be called with object->lock
 * held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	struct stack_trace trace;

	trace.nr_entries = object->trace_len;
	trace.entries = object->trace;

	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->pointer, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
		else if (object->pointer == ptr || alias)
			return object;
		else {
			kmemleak_warn("Found object by alias at 0x%08lx\n",
				      ptr);
			dump_object_info(object);
			break;
		}
/*
 * Look up an object in the search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	struct kmemleak_object *object = NULL;
	unsigned long flags;

	rcu_read_lock();
	read_lock_irqsave(&kmemleak_lock, flags);
	if (ptr >= min_addr && ptr < max_addr)
		object = lookup_object(ptr, alias);
	read_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}
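/*
 * Typical caller pattern (sketch added for illustration; it mirrors
 * paint_ptr() and object_no_scan() below): every successful
 * find_and_get_object() must be paired with a put_object() once the
 * reference is no longer needed:
 *
 *	object = find_and_get_object(ptr, 0);
 *	if (!object)
 *		return;
 *	spin_lock_irqsave(&object->lock, flags);
 *	... inspect or modify the object ...
 *	spin_unlock_irqrestore(&object->lock, flags);
 *	put_object(object);
 */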
/*
 * Save a stack trace to the given array of MAX_TRACE size.
 */
static int __save_stack_trace(unsigned long *trace)
{
	struct stack_trace stack_trace;

	stack_trace.max_entries = MAX_TRACE;
	stack_trace.nr_entries = 0;
	stack_trace.entries = trace;
	stack_trace.skip = 2;
	save_stack_trace(&stack_trace);

	return stack_trace.nr_entries;
}
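/*
 * Note (added for clarity): this uses the legacy struct stack_trace API,
 * where the caller supplies the entries array and save_stack_trace() fills
 * it in. The skip of 2 drops the two innermost frames so the recorded
 * trace starts at the allocation call site rather than inside kmemleak.
 */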
	object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
	if (!object) {
		pr_warning("Cannot allocate a kmemleak_object structure\n");
		kmemleak_disable();
		return NULL;
	}

	object->pointer = ptr;
	object->size = size;
	object->min_count = min_count;
	object->count = 0;			/* white color initially */
	object->jiffies = jiffies;
	object->checksum = 0;

	/* kernel backtrace */
	object->trace_len = __save_stack_trace(object->trace);
	min_addr = min(min_addr, ptr);
	max_addr = max(max_addr, ptr + size);
	link = &object_tree_root.rb_node;
	rb_parent = NULL;
	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
		if (ptr + size <= parent->pointer)
			link = &parent->rb_node.rb_left;
		else if (parent->pointer + parent->size <= ptr)
			link = &parent->rb_node.rb_right;
		else {
			kmemleak_stop("Cannot insert 0x%lx into the object "
				      "search tree (overlaps existing)\n",
				      ptr);
			kmem_cache_free(object_cache, object);
			object = parent;
			spin_lock(&object->lock);
			dump_object_info(object);
			spin_unlock(&object->lock);
			goto out;
		}
	}
	rb_link_node(&object->rb_node, rb_parent, link);
	rb_insert_color(&object->rb_node, &object_tree_root);

	list_add_tail_rcu(&object->object_list, &object_list);
	spin_unlock_irqrestore(&object->lock, flags);
/*
 * Look up the metadata corresponding to ptr and delete it.
 */
static void delete_object_full(unsigned long ptr)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object)
		return;
	__delete_object(object);
	put_object(object);
}
static void delete_object_part(unsigned long ptr, size_t size)
{
	struct kmemleak_object *object;
	unsigned long start, end;

	object = find_and_get_object(ptr, 1);
	if (!object) {
		kmemleak_warn("Partially freeing unknown object at 0x%08lx "
			      "(size %zu)\n", ptr, size);
		return;
	}
	__delete_object(object);
	/*
	 * Create one or two objects that may result from the memory block
	 * split.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if (ptr > start)
		create_object(start, ptr - start, object->min_count,
			      GFP_KERNEL);
	if (ptr + size < end)
		create_object(ptr + size, end - ptr - size, object->min_count,
			      GFP_KERNEL);

	put_object(object);
}
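/*
 * Worked example (added for illustration): for a tracked block covering
 * [0x1000, 0x1400), a partial free of 0x100 bytes at 0x1200 deletes the
 * original object and recreates the two surviving pieces, [0x1000, 0x1200)
 * and [0x1300, 0x1400), both inheriting the original min_count.
 */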
	object->min_count = color;
	if (color == KMEMLEAK_BLACK)
		object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	__paint_it(object, color);
	spin_unlock_irqrestore(&object->lock, flags);
}
static void paint_ptr(unsigned long ptr, int color)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Trying to color unknown object "
			      "at 0x%08lx as %s\n", ptr,
			      (color == KMEMLEAK_GREY) ? "Grey" :
			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
		return;
	}
	paint_it(object, color);
	put_object(object);
}
/* gray objects are scanned but never reported as leaks (false positives) */
static void make_gray_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_GREY);
}

/* black objects are neither scanned nor reported */
static void make_black_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_BLACK);
}
/*
 * Add a scan area to the object. If at least one such area is added to an
 * object, only these areas will be scanned during the kernel scanning.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area;

	object = find_and_get_object(ptr, 1);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
	if (!area) {
		pr_warning("Cannot allocate a scan area\n");
		goto out;
	}

	spin_lock_irqsave(&object->lock, flags);
	if (ptr + size > object->pointer + object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->start = ptr;
	area->size = size;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	spin_unlock_irqrestore(&object->lock, flags);
out:
	put_object(object);
}
/* set OBJECT_NO_SCAN: the block is not scanned but may still be referenced */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}
/*
 * Log an early kmemleak_* call to the early_log buffer. These calls are
 * replayed later, once kmemleak is fully initialized.
 */
static void __init log_early(int op_type, const void *ptr, size_t size,
			     int min_count)
/*
 * Pass an early-log allocation on to create_object() once kmemleak is ready.
 */
static void early_alloc(struct early_log *log)
{
	struct kmemleak_object *object;
	unsigned long flags;

	object = create_object((unsigned long)log->ptr, log->size,
			       log->min_count, GFP_ATOMIC);
	/* ... the recorded early stack trace is copied under object->lock: */
	spin_unlock_irqrestore(&object->lock, flags);
static void early_alloc_percpu(struct early_log *log)
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
	pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);

	/*
	 * Percpu allocations are only scanned and not reported as leaks
	 * (min_count is set to 0).
	 */
	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
				      size, 0, GFP_KERNEL);
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr);
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size);
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			delete_object_full((unsigned long)per_cpu_ptr(ptr,
								      cpu));
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr);
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && size && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, size, gfp);
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
}
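/*
 * Usage sketch (added for illustration; my_dev_setup and my_buf are
 * hypothetical names): a driver that hands a buffer to hardware and keeps
 * no pointer to it in scanned memory would otherwise be flagged, so it
 * tells kmemleak directly via the hooks above.
 */
#if 0	/* example only, not part of kmemleak itself */
#include <linux/slab.h>
#include <linux/kmemleak.h>

static int my_dev_setup(void)
{
	void *my_buf = kmalloc(4096, GFP_KERNEL);

	if (!my_buf)
		return -ENOMEM;
	/* the only reference goes into a device register, never scanned */
	kmemleak_not_leak(my_buf);	/* gray: scanned, never reported */
	return 0;
}
#endif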
static bool update_checksum(struct kmemleak_object *object)
{
	u32 old_csum = object->checksum;

	object->checksum = crc32(0, (void *)object->pointer, object->size);
	return object->checksum != old_csum;
}
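/*
 * Note (added for clarity): the checksum lets the scanner catch objects
 * whose contents changed since the last scan; such an object may have held
 * a transient pointer, so kmemleak_scan() below re-colors it gray for one
 * more pass instead of reporting it immediately.
 */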
/*
 * Memory scanning is a long process and it needs to be interruptible.
 */
static int scan_should_stop(void)
{
	if (!atomic_read(&kmemleak_enabled))
		return 1;
	/* in process context, a pending signal interrupts the scan */
	if (current->mm)
		return signal_pending(current);
	return kthread_should_stop();
}
/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned, int allow_resched)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);

	for (ptr = start; ptr < end; ptr++) {
		struct kmemleak_object *object;
		unsigned long flags;
		unsigned long pointer = *ptr;

		if (allow_resched)
			cond_resched();
		if (scan_should_stop())
			break;

		object = find_and_get_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned) {
			/* self referenced, ignore */
			put_object(object);
			continue;
		}

		spin_lock_irqsave(&object->lock, flags);
		if (!color_white(object)) {
			/* already referenced enough or ignored */
			spin_unlock_irqrestore(&object->lock, flags);
			put_object(object);
			continue;
		}

		object->count++;
		if (color_gray(object)) {
			/* newly gray: queue it so it gets scanned itself */
			list_add_tail(&object->gray_list, &gray_list);
			spin_unlock_irqrestore(&object->lock, flags);
			continue;
		}

		spin_unlock_irqrestore(&object->lock, flags);
		put_object(object);
	}
}
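/*
 * Note (added for clarity): scanning is conservative, in the style of a
 * tracing garbage collector. Every pointer-aligned word in the block is
 * treated as a potential address; find_and_get_object(pointer, 1) accepts
 * aliases, i.e. values pointing anywhere inside a tracked object, not just
 * at its start. An integer that happens to look like a valid heap address
 * therefore keeps an object alive (a false negative), which is why the
 * reports speak of "suspected" leaks.
 */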
/*
 * Scan the memory block corresponding to a kmemleak_object: either the
 * whole block or, if present, only the registered scan areas.
 */
static void scan_object(struct kmemleak_object *object)
{
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	if (hlist_empty(&object->area_list)) {
		void *start = (void *)object->pointer;
		void *end = (void *)(object->pointer + object->size);

		/* ... [start, end) is scanned in MAX_SCAN_SIZE chunks,
		 * dropping object->lock between chunks to allow
		 * rescheduling ... */
		spin_unlock_irqrestore(&object->lock, flags);
	} else {
		/* ... for each registered scan area: */
		scan_block((void *)area->start,
			   (void *)(area->start + area->size),
			   object, 0);
		spin_unlock_irqrestore(&object->lock, flags);
	}
}
/*
 * Scan the objects already referenced (gray objects). More objects may be
 * added to the tail of the list while it is being walked.
 */
static void scan_gray_list(void)
{
	struct kmemleak_object *object, *tmp;

	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);
		list_del(&object->gray_list);
		put_object(object);
		object = tmp;
	}
}
/*
 * Scan the data sections and all the referenced memory blocks allocated via
 * the kernel's standard allocators. Must be called with scan_mutex held.
 */
static void kmemleak_scan(void)
{
	unsigned long flags;
	struct kmemleak_object *object;
	int i;
	int new_leaks = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object's: whiten everything first */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif
		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/* data/bss scanning */
	scan_block(_sdata, _edata, NULL, 1);
	scan_block(__bss_start, __bss_stop, NULL, 1);

	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_block(__per_cpu_start + per_cpu_offset(i),
			   __per_cpu_end + per_cpu_offset(i), NULL, 1);

	/* struct page scanning (pfn range determined per online node) */
	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		struct page *page;

		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		/* only scan pages that are in use */
		if (page_count(page) == 0)
			continue;
		scan_block(page, page + 1, NULL, 1);
	}
	/* scanning the task stacks (may introduce false negatives) */
	if (kmemleak_stack_scan) {
		/* ... each task's kernel stack is scanned as a root ... */
	}

	/* scan the gray objects found so far */
	scan_gray_list();

	/*
	 * Check for new or unreferenced objects modified since the previous
	 * scan and color them gray until the next scan.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
		    && update_checksum(object) && get_object(object)) {
			/* color it gray temporarily */
			object->count = object->min_count;
			list_add_tail(&object->gray_list, &gray_list);
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();
	/* if the scan was interrupted, do not report new leaks */
	if (scan_should_stop())
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;
			new_leaks++;
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	if (new_leaks)
		pr_info("%d new suspected memory leaks (see "
			"/sys/kernel/debug/kmemleak)\n", new_leaks);
/*
 * Thread function performing automatic memory scanning.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = 1;

	pr_info("Automatic memory scanning thread started\n");

	/* wait before the first scan to let the system fully initialize */
	if (first_run) {
		first_run = 0;
		ssleep(SECS_FIRST_SCAN);
	}

	while (!kthread_should_stop()) {
		signed long timeout = jiffies_scan_wait;

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);
		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}
/*
 * Start the automatic memory scanning thread.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warning("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

static void stop_scan_thread(void)
/*
 * Iterate over the object_list and return the first valid object at or
 * after the required position with its use_count incremented.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;
	int err;

	err = mutex_lock_interruptible(&scan_mutex);
	if (err < 0)
		return ERR_PTR(err);

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			return object;
	}
	return NULL;
}

/*
 * Return the next object, dropping the previous object's reference and
 * taking one on the next.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v, *next_obj = NULL, *obj = v;

	++(*pos);
	list_for_each_entry_continue_rcu(obj, &object_list, object_list)
		if (get_object(obj)) {
			next_obj = obj;
			break;
		}
	put_object(prev_obj);
	return next_obj;
}

static void kmemleak_seq_stop(struct seq_file *seq, void *v)
/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
	spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};

static int kmemleak_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &kmemleak_seq_ops);
}
static int dump_str_object_info(const char *str)
{
	unsigned long flags;
	struct kmemleak_object *object;
	unsigned long addr;

	addr = simple_strtoul(str, NULL, 0);
	object = find_and_get_object(addr, 0);
	if (!object) {
		pr_info("Unknown object at 0x%08lx\n", addr);
		return -EINVAL;
	}

	spin_lock_irqsave(&object->lock, flags);
	dump_object_info(object);
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);

	return 0;
}
/*
 * Mark all reported-but-still-unreferenced objects grey so that a future
 * scan only reports leaks that appeared after this point.
 */
static void kmemleak_clear(void)
{
	struct kmemleak_object *object;
	unsigned long flags;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if ((object->flags & OBJECT_REPORTED) &&
		    unreferenced_object(object))
			__paint_it(object, KMEMLEAK_GREY);
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();
}
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;
	int ret;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;
	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned long secs;

		/* ... parse the scan period in seconds into secs ... */
		stop_scan_thread();
		if (secs) {
			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else if (strncmp(buf, "clear", 5) == 0)
		kmemleak_clear();
	else if (strncmp(buf, "dump=", 5) == 0)
		ret = dump_str_object_info(buf + 5);
	else
		ret = -EINVAL;
static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= kmemleak_release,
};
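/*
 * Usage from userspace (added for reference; these match the commands
 * parsed in kmemleak_write() above, and 0x... stands for a real object
 * address):
 *
 *	# cat /sys/kernel/debug/kmemleak              - list suspected leaks
 *	# echo scan > /sys/kernel/debug/kmemleak      - trigger a scan now
 *	# echo clear > /sys/kernel/debug/kmemleak     - suppress current leaks
 *	# echo scan=600 > /sys/kernel/debug/kmemleak  - rescan every 600s
 *	# echo dump=0x... > /sys/kernel/debug/kmemleak - dump one object
 *	# echo off > /sys/kernel/debug/kmemleak       - disable kmemleak
 */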
	struct kmemleak_object *object;
	bool cleanup = scan_thread == NULL;

	/* ... with scanning stopped, drop every remaining tracked object: */
	if (cleanup)
		list_for_each_entry_rcu(object, &object_list, object_list)
			delete_object_full(object->pointer);
static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* ... atomically mark kmemleak as disabled and schedule
	 * cleanup_work; tracing stops from this point on ... */
	pr_info("Kernel memory leak detector disabled\n");
}
/*
 * Allow boot-time kmemleak enabling/disabling via the kernel command line.
 */
static int kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") == 0)
		kmemleak_skip_disable = 1;
	else
		return -EINVAL;
	return 0;
}

early_param("kmemleak", kmemleak_boot_config);
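/*
 * Example (added for reference): booting with "kmemleak=off" disables the
 * detector entirely, while "kmemleak=on" overrides
 * CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF (see kmemleak_init() below).
 */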
static void __init print_log_trace(struct early_log *log)
{
	struct stack_trace trace;

	trace.nr_entries = log->trace_len;
	trace.entries = log->trace;
	pr_notice("Early log backtrace:\n");
	print_stack_trace(&trace, 2);
}

void __init kmemleak_init(void)
{
	int i;
	unsigned long flags;

#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
	if (!kmemleak_skip_disable) {
		kmemleak_disable();
		return;
	}
#endif
	/* ... object caches and tracking state are set up here ... */
	if (crt_early_log >= ARRAY_SIZE(early_log))
		pr_warning("Early log buffer exceeded (%d), please increase "
			   "DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n", crt_early_log);

	/* replay the early log now that kmemleak is operational */
	for (i = 0; i < crt_early_log; i++) {
		struct early_log *log = &early_log[i];

		/* ... dispatch on log->op_type, e.g. percpu allocations: */
		early_alloc_percpu(log);
		/* ... entries that cannot be replayed have their recorded
		 * trace printed: */
		print_log_trace(log);
	}
}
/*
 * Late initialization: create the debugfs entry and start the scan thread.
 */
static int __init kmemleak_late_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
				     &kmemleak_fops);
	if (!dentry)
		pr_warning("Failed to create the debugfs kmemleak file\n");
	start_scan_thread();

	pr_info("Kernel memory leak detector initialized\n");

	return 0;
}
late_initcall(kmemleak_late_init);