diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 1eddc0132f7f56..34134e5e8253fe 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -102,6 +102,8 @@
 #include <linux/kasan.h>
 #include <linux/kfence.h>
 #include <linux/kmemleak.h>
+#include <linux/jhash.h>
+#include <linux/random.h>
 
 /*
  * Kmemleak configuration and common defines.
@@ -143,6 +145,7 @@ struct kmemleak_object {
 	unsigned int flags;		/* object status flags */
 	struct list_head object_list;
 	struct list_head gray_list;
+	struct list_head report_list;	/* node in a kmemleak_report bucket */
 	struct rb_node rb_node;
 	struct rcu_head rcu;		/* object_list lockless traversal */
 	/* object usage count; object freed when use_count == 0 */
@@ -161,11 +164,18 @@ struct kmemleak_object {
 	struct hlist_head area_list;
 	unsigned long trace[MAX_TRACE];
 	unsigned int trace_len;
+	u32 trace_hash;			/* jhash2 of trace[], key into report tree */
 	unsigned long jiffies;		/* creation timestamp */
 	pid_t pid;			/* pid of the current task */
 	char comm[TASK_COMM_LEN];	/* executable name */
 };
 
+/* one node per distinct allocation backtrace; groups objects sharing a trace */
+struct kmemleak_report {
+	u32 trace_hash;				/* hash shared by all listed objects */
+	struct list_head report_list_head;	/* objects with this trace_hash */
+	struct rb_node rb_node_report;		/* node in object_report_tree_root */
+	struct list_head *report_pos;		/* iteration cursor */
+};
 /* flag representing the memory block allocation status */
 #define OBJECT_ALLOCATED	(1 << 0)
 /* flag set after the first reporting of an unreference object */
@@ -195,10 +205,13 @@ static LIST_HEAD(gray_list);
 static struct kmemleak_object mem_pool[CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE];
 static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
 static LIST_HEAD(mem_pool_free_list);
+/* static pool of report nodes; NOTE(review): never replenished once drained */
+static struct kmemleak_report kr_pool[CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE];
+static int kr_pool_free_count = ARRAY_SIZE(kr_pool);
 /* search tree for object boundaries */
 static struct rb_root object_tree_root = RB_ROOT;
 /* search tree for object (with OBJECT_PHYS flag) boundaries */
 static struct rb_root object_phys_tree_root = RB_ROOT;
+/* search tree keyed by trace_hash, grouping objects by allocation backtrace */
+static struct rb_root object_report_tree_root = RB_ROOT;
 
 /* protecting the access to object_list, object_tree_root (or object_phys_tree_root) */
 static DEFINE_RAW_SPINLOCK(kmemleak_lock);
@@ -241,6 +254,7 @@ module_param_named(verbose, kmemleak_verbose, bool, 0600);
 
 static
void kmemleak_disable(void);
 
+/* seed for hashing allocation backtraces; randomized in kmemleak_init() */
+static u32 trace_seed;
 
 /*
  * Print a warning and dump the stack trace.
  */
@@ -423,6 +437,53 @@ static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
 	return __lookup_object(ptr, alias, false);
 }
 
+/*
+ * Print every reported unreferenced object, grouped by allocation backtrace:
+ * all objects in one report bucket share a single printed backtrace.  A NULL
+ * seq prints through pr_warn() (see warn_or_seq_printf()).
+ */
+void print_unreferenced_object_list(struct seq_file *seq)
+{
+	unsigned long flags;
+	struct kmemleak_object *object;
+	struct kmemleak_object *last;
+	struct kmemleak_report *report;
+	struct rb_node *rb;
+	int i;
+
+	rcu_read_lock();
+	/*
+	 * Hold kmemleak_lock across the whole walk: dropping and re-taking it
+	 * per list entry (as a naive implementation might) would let both the
+	 * report tree and the per-report lists mutate under the iterators.
+	 */
+	raw_spin_lock_irqsave(&kmemleak_lock, flags);
+	for (rb = rb_first(&object_report_tree_root); rb; rb = rb_next(rb)) {
+		report = rb_entry(rb, struct kmemleak_report, rb_node_report);
+		last = NULL;
+		list_for_each_entry_rcu(object, &report->report_list_head, report_list) {
+			if (unreferenced_object(object) &&
+			    (object->flags & OBJECT_REPORTED)) {
+				warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
+						   object->pointer, object->size);
+				warn_or_seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu\n",
+						   object->comm, object->pid, object->jiffies);
+				/* keep a valid cursor instead of reaching past the loop end */
+				last = object;
+			}
+		}
+		if (last) {
+			/* every object in this bucket shares one backtrace */
+			warn_or_seq_printf(seq, "  backtrace:\n");
+			for (i = 0; i < last->trace_len; i++) {
+				void *ptr = (void *)last->trace[i];
+				warn_or_seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
+			}
+		}
+	}
+	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
+	rcu_read_unlock();
+}
 
 /*
  * Increment the object use_count. Return 1 if successful or 0 otherwise.
Note
 * that once an object's use_count reached 0, the RCU freeing was already
@@ -568,6 +622,7 @@ static void __remove_object(struct kmemleak_object *object)
 			       &object_phys_tree_root :
 			       &object_tree_root);
 	list_del_rcu(&object->object_list);
+	list_del_rcu(&object->report_list);
 }
 
 /*
@@ -610,9 +665,11 @@ static struct kmemleak_object *__create_object(unsigned long ptr, size_t size,
 {
 	unsigned long flags;
 	struct kmemleak_object *object, *parent;
+	struct kmemleak_report *report, *report_parent;
 	struct rb_node **link, *rb_parent;
 	unsigned long untagged_ptr;
 	unsigned long untagged_objp;
+	u32 trace_hash;
 
 	object = mem_pool_alloc(gfp);
 	if (!object) {
@@ -623,6 +680,7 @@ static struct kmemleak_object *__create_object(unsigned long ptr, size_t size,
 
 	INIT_LIST_HEAD(&object->object_list);
 	INIT_LIST_HEAD(&object->gray_list);
+	INIT_LIST_HEAD(&object->report_list);
 	INIT_HLIST_HEAD(&object->area_list);
 	raw_spin_lock_init(&object->lock);
 	atomic_set(&object->use_count, 1);
@@ -655,6 +713,7 @@ static struct kmemleak_object *__create_object(unsigned long ptr, size_t size,
 
 	/* kernel backtrace */
 	object->trace_len = __save_stack_trace(object->trace);
+	object->trace_hash = jhash2((const u32 *)object->trace, object->trace_len * sizeof(object->trace[0]) / sizeof(u32), trace_seed); /* trace_len counts entries, not bytes: convert to the u32-word length jhash2() expects */
 
 	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 
@@ -694,8 +753,41 @@ static struct kmemleak_object *__create_object(unsigned long ptr, size_t size,
 	rb_link_node(&object->rb_node, rb_parent, link);
 	rb_insert_color(&object->rb_node, is_phys ?
&object_phys_tree_root : &object_tree_root);
 	list_add_tail_rcu(&object->object_list, &object_list);
+
+	/*
+	 * Index the object by its allocation backtrace hash.  kmemleak_lock
+	 * is already held at this point in __create_object(): do NOT
+	 * re-acquire it (raw spinlock self-deadlock) and do not drop it
+	 * mid-descent, which would let the tree change under us.
+	 */
+	link = &object_report_tree_root.rb_node;
+	rb_parent = NULL;
+	while (*link) {
+		rb_parent = *link;
+		report_parent = rb_entry(rb_parent, struct kmemleak_report, rb_node_report);
+		trace_hash = report_parent->trace_hash;
+		if (object->trace_hash < trace_hash)
+			link = &report_parent->rb_node_report.rb_left;
+		else if (trace_hash < object->trace_hash)
+			link = &report_parent->rb_node_report.rb_right;
+		else {
+			/* existing bucket for this backtrace */
+			list_add_tail_rcu(&object->report_list, &report_parent->report_list_head);
+			goto out;
+		}
+	}
+	/* first object with this backtrace: take a report node from the pool */
+	report = kr_pool_free_count ? &kr_pool[--kr_pool_free_count] : NULL;
+	if (!report)
+		goto out;	/* pool exhausted: object is simply not indexed */
+	report->trace_hash = object->trace_hash;
+	INIT_LIST_HEAD(&report->report_list_head);
+	/* record the first object on the list */
+	list_add_tail_rcu(&object->report_list, &report->report_list_head);
+	/* iteration cursor starts at the list head */
+	report->report_pos = &report->report_list_head;
+	/* add the node to the rb tree */
+	rb_link_node(&report->rb_node_report, rb_parent, link);
+	rb_insert_color(&report->rb_node_report, &object_report_tree_root);
 out:
 	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 	return object;
@@ -1947,6 +2040,8 @@ static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
 		kmemleak_scan();
 	else if (strncmp(buf, "dump=", 5) == 0)
 		ret = dump_str_object_info(buf + 5);
+	else if (strncmp(buf, "report", 6) == 0)
+		print_unreferenced_object_list(NULL);
 	else
 		ret = -EINVAL;
 
@@ -2080,6 +2175,7 @@ void __init kmemleak_init(void)
 	create_object((unsigned long)__start_ro_after_init,
 		      __end_ro_after_init - __start_ro_after_init,
 		      KMEMLEAK_GREY, GFP_ATOMIC);
+
get_random_bytes(&trace_seed, sizeof(trace_seed));	/* NOTE(review): runs after the create_object() calls above, so boot-time objects hash with seed 0 — confirm intended */
 }
 
 /*