/*
 * kernel/lockdep.c
 *
 * Runtime locking correctness validator
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * this code maps all the lock dependencies as they occur in a live kernel
 * and will warn about the following classes of locking bugs:
 *
 * - lock inversion scenarios
 * - circular lock dependencies
 * - hardirq/softirq safe/unsafe locking bugs
 *
 * Bugs are reported even if the current locking scenario does not cause
 * any deadlock at this point.
 *
 * I.e. if at any time in the past two locks were taken in a different order,
 * even if it happened for another task, even if those were different
 * locks (but of the same class as this lock), this code will detect it.
 *
 * Thanks to Arjan van de Ven for coming up with the initial idea of
 * mapping lock dependencies at runtime.
 */
#define DISABLE_BRANCH_PROFILING
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/stacktrace.h>
#include <linux/debug_locks.h>
#include <linux/irqflags.h>
#include <linux/utsname.h>
#include <linux/hash.h>
#include <linux/ftrace.h>
#include <linux/stringify.h>
#include <linux/bitops.h>
#include <linux/gfp.h>

#include <asm/sections.h>

#include "lockdep_internals.h"

#define CREATE_TRACE_POINTS
#include <trace/events/lock.h>

#ifdef CONFIG_PROVE_LOCKING
int prove_locking = 1;
module_param(prove_locking, int, 0644);
#else
#define prove_locking 0
#endif

#ifdef CONFIG_LOCK_STAT
int lock_stat = 1;
module_param(lock_stat, int, 0644);
#else
#define lock_stat 0
#endif

/*
 * lockdep_lock: protects the lockdep graph, the hashes and the
 * class/list/hash allocators.
 *
 * This is one of the rare exceptions where it's justified
 * to use a raw spinlock - we really don't want the spinlock
 * code to recurse back into the lockdep code...
 */
static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static int graph_lock(void)
{
	arch_spin_lock(&lockdep_lock);
	/*
	 * Make sure that if another CPU detected a bug while
	 * walking the graph we don't change it (while the other
	 * CPU is busy printing out stuff with the graph lock
	 * dropped already)
	 */
	if (!debug_locks) {
		arch_spin_unlock(&lockdep_lock);
		return 0;
	}
	/* prevent any recursions within lockdep from causing deadlocks */
	current->lockdep_recursion++;
	return 1;
}

static inline int graph_unlock(void)
{
	if (debug_locks && !arch_spin_is_locked(&lockdep_lock))
		return DEBUG_LOCKS_WARN_ON(1);

	current->lockdep_recursion--;
	arch_spin_unlock(&lockdep_lock);
	return 0;
}

/*
 * Turn lock debugging off and return with 0 if it was off already,
 * and also release the graph lock:
 */
static inline int debug_locks_off_graph_unlock(void)
{
	int ret = debug_locks_off();

	arch_spin_unlock(&lockdep_lock);

	return ret;
}
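
/*
 * A minimal usage sketch (illustrative only, not called anywhere): the
 * pattern callers such as register_lock_class() below use around graph
 * mutations. graph_lock() returning 0 means another CPU already turned
 * debugging off, so the caller backs out without touching the graph.
 */
#if 0
static int example_graph_update(void)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	if (!graph_lock()) {
		raw_local_irq_restore(flags);
		return 0;
	}
	/* ... modify the dependency graph here, under lockdep_lock ... */
	graph_unlock();
	raw_local_irq_restore(flags);
	return 1;
}
#endif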
static int lockdep_initialized;

unsigned long nr_list_entries;
static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];

/*
 * All data structures here are protected by the global debug_lock.
 *
 * Mutex key structs only get allocated once, during bootup, and never
 * get freed - this significantly simplifies the debugging code.
 */
unsigned long nr_lock_classes;
static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];

static inline struct lock_class *hlock_class(struct held_lock *hlock)
{
	if (!hlock->class_idx) {
		DEBUG_LOCKS_WARN_ON(1);
		return NULL;
	}
	return lock_classes + hlock->class_idx - 1;
}

#ifdef CONFIG_LOCK_STAT
static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS],
		      cpu_lock_stats);

static inline u64 lockstat_clock(void)
{
	return local_clock();
}

static int lock_point(unsigned long points[], unsigned long ip)
{
	int i;

	for (i = 0; i < LOCKSTAT_POINTS; i++) {
		if (points[i] == 0) {
			points[i] = ip;
			break;
		}
		if (points[i] == ip)
			break;
	}

	return i;
}

static void lock_time_inc(struct lock_time *lt, u64 time)
{
	if (time > lt->max)
		lt->max = time;

	if (time < lt->min || !lt->nr)
		lt->min = time;

	lt->total += time;
	lt->nr++;
}

static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
{
	if (!src->nr)
		return;

	if (src->max > dst->max)
		dst->max = src->max;

	if (src->min < dst->min || !dst->nr)
		dst->min = src->min;

	dst->total += src->total;
	dst->nr += src->nr;
}

struct lock_class_stats lock_stats(struct lock_class *class)
{
	struct lock_class_stats stats;
	int cpu, i;

	memset(&stats, 0, sizeof(struct lock_class_stats));
	for_each_possible_cpu(cpu) {
		struct lock_class_stats *pcs =
			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];

		for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
			stats.contention_point[i] += pcs->contention_point[i];

		for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
			stats.contending_point[i] += pcs->contending_point[i];

		lock_time_add(&pcs->read_waittime, &stats.read_waittime);
		lock_time_add(&pcs->write_waittime, &stats.write_waittime);

		lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
		lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);

		for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
			stats.bounces[i] += pcs->bounces[i];
	}

	return stats;
}

void clear_lock_stats(struct lock_class *class)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct lock_class_stats *cpu_stats =
			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];

		memset(cpu_stats, 0, sizeof(struct lock_class_stats));
	}
	memset(class->contention_point, 0, sizeof(class->contention_point));
	memset(class->contending_point, 0, sizeof(class->contending_point));
}

static struct lock_class_stats *get_lock_stats(struct lock_class *class)
{
	return &get_cpu_var(cpu_lock_stats)[class - lock_classes];
}

static void put_lock_stats(struct lock_class_stats *stats)
{
	put_cpu_var(cpu_lock_stats);
}

static void lock_release_holdtime(struct held_lock *hlock)
{
	struct lock_class_stats *stats;
	u64 holdtime;

	if (!lock_stat)
		return;

	holdtime = lockstat_clock() - hlock->holdtime_stamp;

	stats = get_lock_stats(hlock_class(hlock));
	if (hlock->read)
		lock_time_inc(&stats->read_holdtime, holdtime);
	else
		lock_time_inc(&stats->write_holdtime, holdtime);
	put_lock_stats(stats);
}
#else
static inline void lock_release_holdtime(struct held_lock *hlock)
{
}
#endif

/*
 * We keep a global list of all lock classes. The list only grows,
 * never shrinks. The list is only accessed with the lockdep
 * spinlock lock held.
 */
LIST_HEAD(all_lock_classes);

/*
 * The lockdep classes are in a hash-table as well, for fast lookup:
 */
#define CLASSHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
#define CLASSHASH_SIZE		(1UL << CLASSHASH_BITS)
#define __classhashfn(key)	hash_long((unsigned long)key, CLASSHASH_BITS)
#define classhashentry(key)	(classhash_table + __classhashfn((key)))

static struct list_head classhash_table[CLASSHASH_SIZE];

/*
 * We put the lock dependency chains into a hash-table as well, to cache
 * their existence:
 */
#define CHAINHASH_BITS		(MAX_LOCKDEP_CHAINS_BITS-1)
#define CHAINHASH_SIZE		(1UL << CHAINHASH_BITS)
#define __chainhashfn(chain)	hash_long(chain, CHAINHASH_BITS)
#define chainhashentry(chain)	(chainhash_table + __chainhashfn((chain)))

static struct list_head chainhash_table[CHAINHASH_SIZE];

/*
 * The hash key of the lock dependency chains is a hash itself too:
 * it's a hash of all locks taken up to that lock, including that lock.
 * It's a 64-bit hash, because it's important for the keys to be
 * unique.
 */
#define iterate_chain_key(key1, key2) \
	(((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \
	((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \
	(key2))
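
/*
 * For example (an illustrative sketch, not part of this file): the chain
 * key for a stack of three held locks is built by folding each lock's
 * class index into the running 64-bit key. The idx* names below are
 * hypothetical class indices:
 */
#if 0
static u64 example_chain_key(u64 idx1, u64 idx2, u64 idx3)
{
	u64 chain_key = 0;

	chain_key = iterate_chain_key(chain_key, idx1);
	chain_key = iterate_chain_key(chain_key, idx2);
	chain_key = iterate_chain_key(chain_key, idx3);

	return chain_key;
}
#endif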
void lockdep_off(void)
{
	current->lockdep_recursion++;
}
EXPORT_SYMBOL(lockdep_off);

void lockdep_on(void)
{
	current->lockdep_recursion--;
}
EXPORT_SYMBOL(lockdep_on);
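
/*
 * A minimal usage sketch (assumed caller, not from this file): while the
 * recursion counter is non-zero, lockdep ignores all lock operations of
 * the current task, so a code section can opt out of validation:
 */
#if 0
static void example_untracked_section(void)
{
	lockdep_off();
	/* lock operations here are invisible to the validator */
	lockdep_on();
}
#endif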
/*
 * Debugging switches:
 */

#define VERBOSE			0
#define VERY_VERBOSE		0

#if VERBOSE
# define HARDIRQ_VERBOSE	1
# define SOFTIRQ_VERBOSE	1
# define RECLAIM_VERBOSE	1
#else
# define HARDIRQ_VERBOSE	0
# define SOFTIRQ_VERBOSE	0
# define RECLAIM_VERBOSE	0
#endif

#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE || RECLAIM_VERBOSE
/*
 * Quick filtering for interesting events:
 */
static int class_filter(struct lock_class *class)
{
#if 0
	/* Example */
	if (class->name_version == 1 &&
			!strcmp(class->name, "lockname"))
		return 1;
	if (class->name_version == 1 &&
			!strcmp(class->name, "&struct->lockfield"))
		return 1;
#endif
	/* Filter everything else. Returning 1 here would allow everything. */
	return 0;
}
#endif

static int verbose(struct lock_class *class)
{
#if VERBOSE
	return class_filter(class);
#endif
	return 0;
}

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the graph_lock.
 */
unsigned long nr_stack_trace_entries;
static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];

static int save_trace(struct stack_trace *trace)
{
	trace->nr_entries = 0;
	trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
	trace->entries = stack_trace + nr_stack_trace_entries;

	trace->skip = 3;

	save_stack_trace(trace);

	/*
	 * Some daft arches put -1 at the end to indicate it's a full trace.
	 *
	 * <rant> this is buggy anyway, since it takes a whole extra entry so a
	 * complete trace that maxes out the entries provided will be reported
	 * as incomplete, friggin useless </rant>
	 */
	if (trace->nr_entries != 0 &&
	    trace->entries[trace->nr_entries-1] == ULONG_MAX)
		trace->nr_entries--;

	trace->max_entries = trace->nr_entries;

	nr_stack_trace_entries += trace->nr_entries;

	if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
		if (!debug_locks_off_graph_unlock())
			return 0;

		printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
		printk("turning off the locking correctness validator.\n");
		dump_stack();

		return 0;
	}

	return 1;
}

unsigned int nr_hardirq_chains;
unsigned int nr_softirq_chains;
unsigned int nr_process_chains;
unsigned int max_lockdep_depth;

#ifdef CONFIG_DEBUG_LOCKDEP
/*
 * We cannot printk in early bootup code. Not even early_printk()
 * might work. So we mark any initialization errors and printk
 * about it later on, in lockdep_info().
 */
static int lockdep_init_error;
static unsigned long lockdep_init_trace_data[20];
static struct stack_trace lockdep_init_trace = {
	.max_entries = ARRAY_SIZE(lockdep_init_trace_data),
	.entries = lockdep_init_trace_data,
};

/*
 * Various lockdep statistics:
 */
DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
#endif

/*
 * Locking printouts:
 */

#define __USAGE(__STATE)						\
	[LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W",	\
	[LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W",	\
	[LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
	[LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",

static const char *usage_str[] =
{
#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	[LOCK_USED] = "INITIAL USE",
};

const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
{
	return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
}

static inline unsigned long lock_flag(enum lock_usage_bit bit)
{
	return 1UL << bit;
}

static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
{
	char c = '.';

	if (class->usage_mask & lock_flag(bit + 2))
		c = '+';
	if (class->usage_mask & lock_flag(bit)) {
		c = '-';
		if (class->usage_mask & lock_flag(bit + 2))
			c = '?';
	}

	return c;
}
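
/*
 * Summarizing the mapping above, for a given state (and its READ variant):
 *
 *   '.'  the lock was never used in that irq context and never acquired
 *        with that irq type enabled
 *   '+'  acquired with that irq type enabled (LOCK_ENABLED_*)
 *   '-'  acquired in that irq context (LOCK_USED_IN_*)
 *   '?'  both of the above - the dangerous combination
 */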
void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
{
	int i = 0;

#define LOCKDEP_STATE(__STATE) 						\
	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE);	\
	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
#include "lockdep_states.h"
#undef LOCKDEP_STATE

	usage[i] = '\0';
}

static int __print_lock_name(struct lock_class *class)
{
	char str[KSYM_NAME_LEN];
	const char *name;

	name = class->name;
	if (!name)
		name = __get_key_name(class->key, str);

	return printk("%s", name);
}

static void print_lock_name(struct lock_class *class)
{
	char str[KSYM_NAME_LEN], usage[LOCK_USAGE_CHARS];
	const char *name;

	get_usage_chars(class, usage);

	name = class->name;
	if (!name) {
		name = __get_key_name(class->key, str);
		printk(" (%s", name);
	} else {
		printk(" (%s", name);
		if (class->name_version > 1)
			printk("#%d", class->name_version);
		if (class->subclass)
			printk("/%d", class->subclass);
	}
	printk("){%s}", usage);
}

static void print_lockdep_cache(struct lockdep_map *lock)
{
	const char *name;
	char str[KSYM_NAME_LEN];

	name = lock->name;
	if (!name)
		name = __get_key_name(lock->key->subkeys, str);

	printk("%s", name);
}

static void print_lock(struct held_lock *hlock)
{
	print_lock_name(hlock_class(hlock));
	printk(", at: ");
	print_ip_sym(hlock->acquire_ip);
}

static void lockdep_print_held_locks(struct task_struct *curr)
{
	int i, depth = curr->lockdep_depth;

	if (!depth) {
		printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr));
		return;
	}
	printk("%d lock%s held by %s/%d:\n",
		depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr));

	for (i = 0; i < depth; i++) {
		printk(" #%d: ", i);
		print_lock(curr->held_locks + i);
	}
}

static void print_kernel_version(void)
{
	printk("%s %.*s\n", init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
}

static int very_verbose(struct lock_class *class)
{
#if VERY_VERBOSE
	return class_filter(class);
#endif
	return 0;
}

/*
 * Is this the address of a static object:
 */
static int static_obj(void *obj)
{
	unsigned long start = (unsigned long) &_stext,
		      end   = (unsigned long) &_end,
		      addr  = (unsigned long) obj;

	/*
	 * static variable?
	 */
	if ((addr >= start) && (addr < end))
		return 1;

	if (arch_is_kernel_data(addr))
		return 1;

	/*
	 * in-kernel percpu var?
	 */
	if (is_kernel_percpu_address(addr))
		return 1;

	/*
	 * module static or percpu var?
	 */
	return is_module_address(addr) || is_module_percpu_address(addr);
}
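
/*
 * For instance (illustrative only): each of the following would pass
 * static_obj(), while a lock embedded in kmalloc()ed memory would not:
 */
#if 0
static DEFINE_SPINLOCK(example_image_lock);	       /* kernel image data */
static DEFINE_PER_CPU(spinlock_t, example_pcpu_lock); /* percpu variable */
#endif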
/*
 * To make lock name printouts unique, we calculate a unique
 * class->name_version generation counter:
 */
static int count_matching_names(struct lock_class *new_class)
{
	struct lock_class *class;
	int count = 0;

	if (!new_class->name)
		return 0;

	list_for_each_entry(class, &all_lock_classes, lock_entry) {
		if (new_class->key - new_class->subclass == class->key)
			return class->name_version;
		if (class->name && !strcmp(class->name, new_class->name))
			count = max(count, class->name_version);
	}

	return count + 1;
}
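
/*
 * E.g. if two unrelated classes both carry the name "&dev->lock", the
 * first one registered gets name_version 1 and prints as "&dev->lock",
 * the second gets name_version 2 and prints as "&dev->lock#2"
 * (see print_lock_name() above).
 */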
/*
 * Register a lock's class in the hash-table, if the class is not present
 * yet. Otherwise we look it up. We cache the result in the lock object
 * itself, so actual lookup of the hash should be once per lock object.
 */
static inline struct lock_class *
look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
{
	struct lockdep_subclass_key *key;
	struct list_head *hash_head;
	struct lock_class *class;

#ifdef CONFIG_DEBUG_LOCKDEP
	/*
	 * If the architecture calls into lockdep before initializing
	 * the hashes then we'll warn about it later. (we cannot printk
	 * right now)
	 */
	if (unlikely(!lockdep_initialized)) {
		lockdep_init();
		lockdep_init_error = 1;
		save_stack_trace(&lockdep_init_trace);
	}
#endif

	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
		debug_locks_off();
		printk(KERN_ERR
			"BUG: looking up invalid subclass: %u\n", subclass);
		printk(KERN_ERR
			"turning off the locking correctness validator.\n");
		dump_stack();
		return NULL;
	}

	/*
	 * Static locks do not have their class-keys yet - for them the key
	 * is the lock object itself:
	 */
	if (unlikely(!lock->key))
		lock->key = (void *)lock;

	/*
	 * NOTE: the class-key must be unique. For dynamic locks, a static
	 * lock_class_key variable is passed in through the mutex_init()
	 * (or spin_lock_init()) call - which acts as the key. For static
	 * locks we use the lock object itself as the key.
	 */
	BUILD_BUG_ON(sizeof(struct lock_class_key) >
			sizeof(struct lockdep_map));

	key = lock->key->subkeys + subclass;

	hash_head = classhashentry(key);

	/*
	 * We can walk the hash lockfree, because the hash only
	 * grows, and we are careful when adding entries to the end:
	 */
	list_for_each_entry(class, hash_head, hash_entry) {
		if (class->key == key) {
			WARN_ON_ONCE(class->name != lock->name);
			return class;
		}
	}

	return NULL;
}
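
/*
 * A sketch of the two key cases described in the NOTE above (assumed call
 * sites, not part of this file; the example_* names are hypothetical):
 */
#if 0
static void example_dynamic_key(void)
{
	struct mutex *m = kmalloc(sizeof(*m), GFP_KERNEL);

	mutex_init(m);	/* the init macro supplies a static lock_class_key */
}

static DEFINE_MUTEX(example_static_mutex); /* key: the lock object itself */
#endif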
/*
 * Register a lock's class in the hash-table, if the class is not present
 * yet. Otherwise we look it up. We cache the result in the lock object
 * itself, so actual lookup of the hash should be once per lock object.
 */
static inline struct lock_class *
register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
{
	struct lockdep_subclass_key *key;
	struct list_head *hash_head;
	struct lock_class *class;
	unsigned long flags;

	class = look_up_lock_class(lock, subclass);
	if (likely(class))
		return class;

	/*
	 * Debug-check: all keys must be persistent!
	 */
	if (!static_obj(lock->key)) {
		debug_locks_off();
		printk("INFO: trying to register non-static key.\n");
		printk("the code is fine but needs lockdep annotation.\n");
		printk("turning off the locking correctness validator.\n");
		dump_stack();

		return NULL;
	}

	key = lock->key->subkeys + subclass;
	hash_head = classhashentry(key);

	raw_local_irq_save(flags);
	if (!graph_lock()) {
		raw_local_irq_restore(flags);
		return NULL;
	}
	/*
	 * We have to do the hash-walk again, to avoid races
	 * with another CPU:
	 */
	list_for_each_entry(class, hash_head, hash_entry)
		if (class->key == key)
			goto out_unlock_set;
	/*
	 * Allocate a new key from the static array, and add it to
	 * the hash:
	 */
	if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
		if (!debug_locks_off_graph_unlock()) {
			raw_local_irq_restore(flags);
			return NULL;
		}
		raw_local_irq_restore(flags);

		printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
		printk("turning off the locking correctness validator.\n");
		dump_stack();
		return NULL;
	}
	class = lock_classes + nr_lock_classes++;
	debug_atomic_inc(nr_unused_locks);
	class->key = key;
	class->name = lock->name;
	class->subclass = subclass;
	INIT_LIST_HEAD(&class->lock_entry);
	INIT_LIST_HEAD(&class->locks_before);
	INIT_LIST_HEAD(&class->locks_after);
	class->name_version = count_matching_names(class);
	/*
	 * We use RCU's safe list-add method to make
	 * parallel walking of the hash-list safe:
	 */
	list_add_tail_rcu(&class->hash_entry, hash_head);
	/*
	 * Add it to the global list of classes:
	 */
	list_add_tail_rcu(&class->lock_entry, &all_lock_classes);

	if (verbose(class)) {
		graph_unlock();
		raw_local_irq_restore(flags);

		printk("\nnew class %p: %s", class->key, class->name);
		if (class->name_version > 1)
			printk("#%d", class->name_version);
		printk("\n");
		dump_stack();

		raw_local_irq_save(flags);
		if (!graph_lock()) {
			raw_local_irq_restore(flags);
			return NULL;
		}
	}
out_unlock_set:
	graph_unlock();
	raw_local_irq_restore(flags);

	if (!subclass || force)
		lock->class_cache[0] = class;
	else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
		lock->class_cache[subclass] = class;

	if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
		return NULL;

	return class;
}

#ifdef CONFIG_PROVE_LOCKING
/*
 * Allocate a lockdep entry. (assumes the graph_lock is held; returns
 * NULL on failure)
 */
static struct lock_list *alloc_list_entry(void)
{
	if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
		if (!debug_locks_off_graph_unlock())
			return NULL;

		printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
		printk("turning off the locking correctness validator.\n");
		dump_stack();
		return NULL;
	}
	return list_entries + nr_list_entries++;
}

/*
 * Add a new dependency to the head of the list:
 */
static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
			    struct list_head *head, unsigned long ip,
			    int distance, struct stack_trace *trace)
{
	struct lock_list *entry;
	/*
	 * Lock not present yet - get a new dependency struct and
	 * add it to the list:
	 */
	entry = alloc_list_entry();
	if (!entry)
		return 0;

	entry->class = this;
	entry->distance = distance;
	entry->trace = *trace;
	/*
	 * Since we never remove from the dependency list, the list can
	 * be walked lockless by other CPUs, it's only allocation
	 * that must be protected by the spinlock. But this also means
	 * we must make new entries visible only once writes to the
	 * entry become visible - hence the RCU op:
	 */
	list_add_tail_rcu(&entry->entry, head);

	return 1;
}

/*
 * For good efficiency of the modular index arithmetic, we use a power of 2.
 */
#define MAX_CIRCULAR_QUEUE_SIZE		4096UL
#define CQ_MASK				(MAX_CIRCULAR_QUEUE_SIZE-1)

/*
 * The circular_queue and its helpers are used to implement the
 * breadth-first search (BFS) algorithm, by which we can determine
 * the shortest path from the next lock to be acquired to a previously
 * held lock, if there is a circular dependency between them.
 */
struct circular_queue {
	unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
	unsigned int  front, rear;
};

static struct circular_queue lock_cq;

unsigned int max_bfs_queue_depth;

static unsigned int lockdep_dependency_gen_id;

static inline void __cq_init(struct circular_queue *cq)
{
	cq->front = cq->rear = 0;
	lockdep_dependency_gen_id++;
}

static inline int __cq_empty(struct circular_queue *cq)
{
	return (cq->front == cq->rear);
}

static inline int __cq_full(struct circular_queue *cq)
{
	return ((cq->rear + 1) & CQ_MASK) == cq->front;
}

static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
{
	if (__cq_full(cq))
		return -1;

	cq->element[cq->rear] = elem;
	cq->rear = (cq->rear + 1) & CQ_MASK;
	return 0;
}

static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
{
	if (__cq_empty(cq))
		return -1;

	*elem = cq->element[cq->front];
	cq->front = (cq->front + 1) & CQ_MASK;
	return 0;
}

static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
{
	return (cq->rear - cq->front) & CQ_MASK;
}
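
/*
 * A minimal sketch (illustrative only) of how __bfs() below drives this
 * queue - initialize, seed with the root, then expand nodes in FIFO order:
 */
#if 0
static void example_cq_usage(struct lock_list *root)
{
	struct circular_queue *cq = &lock_cq;
	unsigned long elem;

	__cq_init(cq);
	__cq_enqueue(cq, (unsigned long)root);
	while (!__cq_empty(cq)) {
		__cq_dequeue(cq, &elem);
		/* visit ((struct lock_list *)elem) and enqueue its children */
	}
}
#endif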
static inline void mark_lock_accessed(struct lock_list *lock,
					struct lock_list *parent)
{
	unsigned long nr;

	nr = lock - list_entries;
	WARN_ON(nr >= nr_list_entries);
	lock->parent = parent;
	lock->class->dep_gen_id = lockdep_dependency_gen_id;
}

static inline unsigned long lock_accessed(struct lock_list *lock)
{
	unsigned long nr;

	nr = lock - list_entries;
	WARN_ON(nr >= nr_list_entries);
	return lock->class->dep_gen_id == lockdep_dependency_gen_id;
}

static inline struct lock_list *get_lock_parent(struct lock_list *child)
{
	return child->parent;
}

static inline int get_lock_depth(struct lock_list *child)
{
	int depth = 0;
	struct lock_list *parent;

	while ((parent = get_lock_parent(child))) {
		child = parent;
		depth++;
	}
	return depth;
}

static int __bfs(struct lock_list *source_entry,
		 void *data,
		 int (*match)(struct lock_list *entry, void *data),
		 struct lock_list **target_entry,
		 int forward)
{
	struct lock_list *entry;
	struct list_head *head;
	struct circular_queue *cq = &lock_cq;
	int ret = 1;

	if (match(source_entry, data)) {
		*target_entry = source_entry;
		ret = 0;
		goto exit;
	}

	if (forward)
		head = &source_entry->class->locks_after;
	else
		head = &source_entry->class->locks_before;

	if (list_empty(head))
		goto exit;

	__cq_init(cq);
	__cq_enqueue(cq, (unsigned long)source_entry);

	while (!__cq_empty(cq)) {
		struct lock_list *lock;

		__cq_dequeue(cq, (unsigned long *)&lock);

		if (!lock->class) {
			ret = -2;
			goto exit;
		}

		if (forward)
			head = &lock->class->locks_after;
		else
			head = &lock->class->locks_before;

		list_for_each_entry(entry, head, entry) {
			if (!lock_accessed(entry)) {
				unsigned int cq_depth;

				mark_lock_accessed(entry, lock);
				if (match(entry, data)) {
					*target_entry = entry;
					ret = 0;
					goto exit;
				}

				if (__cq_enqueue(cq, (unsigned long)entry)) {
					ret = -1;
					goto exit;
				}
				cq_depth = __cq_get_elem_count(cq);
				if (max_bfs_queue_depth < cq_depth)
					max_bfs_queue_depth = cq_depth;
			}
		}
	}
exit:
	return ret;
}

static inline int __bfs_forwards(struct lock_list *src_entry,
			void *data,
			int (*match)(struct lock_list *entry, void *data),
			struct lock_list **target_entry)
{
	return __bfs(src_entry, data, match, target_entry, 1);
}

static inline int __bfs_backwards(struct lock_list *src_entry,
			void *data,
			int (*match)(struct lock_list *entry, void *data),
			struct lock_list **target_entry)
{
	return __bfs(src_entry, data, match, target_entry, 0);
}
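
/*
 * An illustrative caller (a sketch only, in the style of check_noncircular()
 * further below, which uses the class_equal() match function defined there):
 * a return value of 0 from __bfs_forwards() means the match was found,
 * i.e. @target is reachable forwards from @root.
 */
#if 0
static int example_reachable(struct lock_list *root, struct lock_class *target)
{
	struct lock_list *target_entry;

	return __bfs_forwards(root, target, class_equal, &target_entry) == 0;
}
#endif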
/*
 * Recursive, forwards-direction lock-dependency checking, used for
 * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
 * checking.
 */

/*
 * Print a dependency chain entry (this is only done when a deadlock
 * has been detected):
 */
static noinline int
print_circular_bug_entry(struct lock_list *target, int depth)
{
	if (debug_locks_silent)
		return 0;
	printk("\n-> #%u", depth);
	print_lock_name(target->class);
	printk(":\n");
	print_stack_trace(&target->trace, 6);

	return 0;
}

static void
print_circular_lock_scenario(struct held_lock *src,
			     struct held_lock *tgt,
			     struct lock_list *prt)
{
	struct lock_class *source = hlock_class(src);
	struct lock_class *target = hlock_class(tgt);
	struct lock_class *parent = prt->class;

	/*
	 * A direct locking problem is one where the source lock is
	 * taken directly while the target lock is held; then all we
	 * need to show is the deadlock scenario, as it is obvious
	 * how the cycle closes.
	 *
	 * But if there is a chain instead, where the target lock takes
	 * an intermediate lock (the parent in the BFS tree) that is
	 * not the same as the source lock, then the lock chain is
	 * used to describe the problem. Otherwise we would need
	 * to show a different CPU case for each link in the chain
	 * from the target lock to the source lock.
	 */
	if (parent != source) {
		printk("Chain exists of:\n  ");
		__print_lock_name(source);
		printk(" --> ");
		__print_lock_name(parent);
		printk(" --> ");
		__print_lock_name(target);
		printk("\n\n");
	}

	printk(" Possible unsafe locking scenario:\n\n");
	printk("       CPU0                    CPU1\n");
	printk("       ----                    ----\n");
	printk("  lock(");
	__print_lock_name(target);
	printk(");\n");
	printk("                               lock(");
	__print_lock_name(parent);
	printk(");\n");
	printk("                               lock(");
	__print_lock_name(target);
	printk(");\n");
	printk("  lock(");
	__print_lock_name(source);
	printk(");\n");
	printk("\n *** DEADLOCK ***\n\n");
}
/*
 * When a circular dependency is detected, print the
 * header first:
 */
static noinline int
print_circular_bug_header(struct lock_list *entry, unsigned int depth,
			struct held_lock *check_src,
			struct held_lock *check_tgt)
{
	struct task_struct *curr = current;

	if (debug_locks_silent)
		return 0;

	printk("\n=======================================================\n");
	printk(  "[ INFO: possible circular locking dependency detected ]\n");
	print_kernel_version();
	printk(  "-------------------------------------------------------\n");
	printk("%s/%d is trying to acquire lock:\n",
		curr->comm, task_pid_nr(curr));
	print_lock(check_src);
	printk("\nbut task is already holding lock:\n");
	print_lock(check_tgt);
	printk("\nwhich lock already depends on the new lock.\n\n");
	printk("\nthe existing dependency chain (in reverse order) is:\n");

	print_circular_bug_entry(entry, depth);

	return 0;
}

static inline int class_equal(struct lock_list *entry, void *data)
{
	return entry->class == data;
}

static noinline int print_circular_bug(struct lock_list *this,
				struct lock_list *target,
				struct held_lock *check_src,
				struct held_lock *check_tgt)
{
	struct task_struct *curr = current;
	struct lock_list *parent;
	struct lock_list *first_parent;
	int depth;

	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
		return 0;

	if (!save_trace(&this->trace))
		return 0;

	depth = get_lock_depth(target);

	print_circular_bug_header(target, depth, check_src, check_tgt);

	parent = get_lock_parent(target);
	first_parent = parent;

	while (parent) {
		print_circular_bug_entry(parent, --depth);
		parent = get_lock_parent(parent);
	}

	printk("\nother info that might help us debug this:\n\n");
	print_circular_lock_scenario(check_src, check_tgt,
				     first_parent);

	lockdep_print_held_locks(curr);

	printk("\nstack backtrace:\n");
	dump_stack();

	return 0;
}
static noinline int print_bfs_bug(int ret)
{
	if (!debug_locks_off_graph_unlock())
		return 0;

	WARN(1, "lockdep bfs error:%d\n", ret);

	return 0;
}

static int noop_count(struct lock_list *entry, void *data)
{
	(*(unsigned long *)data)++;
	return 0;
}

unsigned long __lockdep_count_forward_deps(struct lock_list *this)
{
	unsigned long count = 0;
	struct lock_list *uninitialized_var(target_entry);

	__bfs_forwards(this, (void *)&count, noop_count, &target_entry);

	return count;
}

unsigned long lockdep_count_forward_deps(struct lock_class *class)
{
	unsigned long ret, flags;
	struct lock_list this;

	this.parent = NULL;
	this.class = class;

	local_irq_save(flags);
	arch_spin_lock(&lockdep_lock);
	ret = __lockdep_count_forward_deps(&this);
	arch_spin_unlock(&lockdep_lock);
	local_irq_restore(flags);

	return ret;
}

unsigned long __lockdep_count_backward_deps(struct lock_list *this)
{
	unsigned long count = 0;
	struct lock_list *uninitialized_var(target_entry);

	__bfs_backwards(this, (void *)&count, noop_count, &target_entry);

	return count;
}

unsigned long lockdep_count_backward_deps(struct lock_class *class)
{
	unsigned long ret, flags;
	struct lock_list this;

	this.parent = NULL;
	this.class = class;

	local_irq_save(flags);
	arch_spin_lock(&lockdep_lock);
	ret = __lockdep_count_backward_deps(&this);
	arch_spin_unlock(&lockdep_lock);
	local_irq_restore(flags);

	return ret;
}

/*
 * Prove that the dependency graph starting at <entry> cannot
 * lead to <target>. Print an error and return 0 if it does.
 */
static noinline int
check_noncircular(struct lock_list *root, struct lock_class *target,
		struct lock_list **target_entry)
{
	int result;

	debug_atomic_inc(nr_cyclic_checks);

	result = __bfs_forwards(root, target, class_equal, target_entry);

	return result;
}
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
/*
 * Forwards and backwards subgraph searching, for the purposes of
 * proving that two subgraphs can be connected by a new dependency
 * without creating any illegal irq-safe -> irq-unsafe lock dependency.
 */

static inline int usage_match(struct lock_list *entry, void *bit)
{
	return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
}

/*
 * Find a node in the forwards-direction dependency sub-graph starting
 * at @root->class that matches @bit.
 *
 * Return 0 if such a node exists in the subgraph, and put that node
 * into *@target_entry.
 *
 * Return 1 otherwise and keep *@target_entry unchanged.
 * Return <0 on error.
 */
static int
find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
			struct lock_list **target_entry)
{
	int result;

	debug_atomic_inc(nr_find_usage_forwards_checks);

	result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);

	return result;
}

/*
 * Find a node in the backwards-direction dependency sub-graph starting
 * at @root->class that matches @bit.
 *
 * Return 0 if such a node exists in the subgraph, and put that node
 * into *@target_entry.
 *
 * Return 1 otherwise and keep *@target_entry unchanged.
 * Return <0 on error.
 */
static int
find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
			struct lock_list **target_entry)
{
	int result;

	debug_atomic_inc(nr_find_usage_backwards_checks);

	result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);

	return result;
}
static void print_lock_class_header(struct lock_class *class, int depth)
{
	int bit;

	printk("%*s->", depth, "");
	print_lock_name(class);
	printk(" ops: %lu", class->ops);
	printk(" {\n");

	for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
		if (class->usage_mask & (1 << bit)) {
			int len = depth;

			len += printk("%*s   %s", depth, "", usage_str[bit]);
			len += printk(" at:\n");
			print_stack_trace(class->usage_traces + bit, len);
		}
	}
	printk("%*s }\n", depth, "");

	printk("%*s ... key      at: ", depth, "");
	print_ip_sym((unsigned long)class->key);
}

/*
 * printk the shortest lock dependencies from @leaf to @root, in reverse order:
 */
static void __used
print_shortest_lock_dependencies(struct lock_list *leaf,
				struct lock_list *root)
{
	struct lock_list *entry = leaf;
	int depth;

	/* compute the depth from the tree generated by the BFS */
	depth = get_lock_depth(leaf);

	do {
		print_lock_class_header(entry->class, depth);
		printk("%*s ... acquired at:\n", depth, "");
		print_stack_trace(&entry->trace, 2);
		printk("\n");

		if (depth == 0 && (entry != root)) {
			printk("lockdep:%s bad path found in chain graph\n", __func__);
			break;
		}

		entry = get_lock_parent(entry);
		depth--;
	} while (entry && (depth >= 0));

	return;
}
static void
print_irq_lock_scenario(struct lock_list *safe_entry,
			struct lock_list *unsafe_entry,
			struct lock_class *prev_class,
			struct lock_class *next_class)
{
	struct lock_class *safe_class = safe_entry->class;
	struct lock_class *unsafe_class = unsafe_entry->class;
	struct lock_class *middle_class = prev_class;

	if (middle_class == safe_class)
		middle_class = next_class;

	/*
	 * A direct locking problem where unsafe_class lock is taken
	 * directly by safe_class lock, then all we need to show
	 * is the deadlock scenario, as it is obvious that the
	 * unsafe lock is taken under the safe lock.
	 *
	 * But if there is a chain instead, where the safe lock takes
	 * an intermediate lock (middle_class) where this lock is
	 * not the same as the safe lock, then the lock chain is
	 * used to describe the problem. Otherwise we would need
	 * to show a different CPU case for each link in the chain
	 * from the safe_class lock to the unsafe_class lock.
	 */
	if (middle_class != unsafe_class) {
		printk("Chain exists of:\n  ");
		__print_lock_name(safe_class);
		printk(" --> ");
		__print_lock_name(middle_class);
		printk(" --> ");
		__print_lock_name(unsafe_class);
		printk("\n\n");
	}

	printk(" Possible interrupt unsafe locking scenario:\n\n");
	printk("       CPU0                    CPU1\n");
	printk("       ----                    ----\n");
	printk("  lock(");
	__print_lock_name(unsafe_class);
	printk(");\n");
	printk("                               local_irq_disable();\n");
	printk("                               lock(");
	__print_lock_name(safe_class);
	printk(");\n");
	printk("                               lock(");
	__print_lock_name(middle_class);
	printk(");\n");
	printk("  <Interrupt>\n");
	printk("    lock(");
	__print_lock_name(safe_class);
	printk(");\n");
	printk("\n *** DEADLOCK ***\n\n");
}
static int
print_bad_irq_dependency(struct task_struct *curr,
			 struct lock_list *prev_root,
			 struct lock_list *next_root,
			 struct lock_list *backwards_entry,
			 struct lock_list *forwards_entry,
			 struct held_lock *prev,
			 struct held_lock *next,
			 enum lock_usage_bit bit1,
			 enum lock_usage_bit bit2,
			 const char *irqclass)
{
	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
		return 0;

	printk("\n======================================================\n");
	printk(  "[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
		irqclass, irqclass);
	print_kernel_version();
	printk(  "------------------------------------------------------\n");
	printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
		curr->comm, task_pid_nr(curr),
		curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
		curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
		curr->hardirqs_enabled,
		curr->softirqs_enabled);
	print_lock(next);
	printk("\nand this task is already holding:\n");
	print_lock(prev);
	printk("which would create a new lock dependency:\n");
	print_lock_name(hlock_class(prev));
	printk(" ->");
	print_lock_name(hlock_class(next));
	printk("\n");

	printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
		irqclass);
	print_lock_name(backwards_entry->class);
	printk("\n... which became %s-irq-safe at:\n", irqclass);

	print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);

	printk("\nto a %s-irq-unsafe lock:\n", irqclass);
	print_lock_name(forwards_entry->class);
	printk("\n... which became %s-irq-unsafe at:\n", irqclass);
	printk("...");

	print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);

	printk("\nother info that might help us debug this:\n\n");
	print_irq_lock_scenario(backwards_entry, forwards_entry,
				hlock_class(prev), hlock_class(next));

	lockdep_print_held_locks(curr);

	printk("\nthe dependencies between %s-irq-safe lock", irqclass);
	printk(" and the holding lock:\n");
	if (!save_trace(&prev_root->trace))
		return 0;
	print_shortest_lock_dependencies(backwards_entry, prev_root);

	printk("\nthe dependencies between the lock to be acquired");
	printk(" and %s-irq-unsafe lock:\n", irqclass);
	if (!save_trace(&next_root->trace))
		return 0;
	print_shortest_lock_dependencies(forwards_entry, next_root);

	printk("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

static int
check_usage(struct task_struct *curr, struct held_lock *prev,
	    struct held_lock *next, enum lock_usage_bit bit_backwards,
	    enum lock_usage_bit bit_forwards, const char *irqclass)
{
	int ret;
	struct lock_list this, that;
	struct lock_list *uninitialized_var(target_entry);
	struct lock_list *uninitialized_var(target_entry1);

	this.parent = NULL;

	this.class = hlock_class(prev);
	ret = find_usage_backwards(&this, bit_backwards, &target_entry);
	if (ret < 0)
		return print_bfs_bug(ret);
	if (ret == 1)
		return ret;

	that.parent = NULL;
	that.class = hlock_class(next);
	ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
	if (ret < 0)
		return print_bfs_bug(ret);
	if (ret == 1)
		return ret;

	return print_bad_irq_dependency(curr, &this, &that,
			target_entry, target_entry1,
			prev, next,
			bit_backwards, bit_forwards, irqclass);
}
static const char *state_names[] = {
#define LOCKDEP_STATE(__STATE) \
	__stringify(__STATE),
#include "lockdep_states.h"
#undef LOCKDEP_STATE
};

static const char *state_rnames[] = {
#define LOCKDEP_STATE(__STATE) \
	__stringify(__STATE)"-READ",
#include "lockdep_states.h"
#undef LOCKDEP_STATE
};

static inline const char *state_name(enum lock_usage_bit bit)
{
	return (bit & 1) ? state_rnames[bit >> 2] : state_names[bit >> 2];
}

static int exclusive_bit(int new_bit)
{
	/*
	 * USED_IN
	 * USED_IN_READ
	 * ENABLED
	 * ENABLED_READ
	 *
	 * bit 0 - write/read
	 * bit 1 - used_in/enabled
	 * bit 2+  state
	 */

	int state = new_bit & ~3;
	int dir = new_bit & 2;

	/*
	 * keep state, bit flip the direction and strip read.
	 */
	return state | (dir ^ 2);
}
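
/*
 * A worked example of the bit arithmetic above (with the enum layout from
 * lockdep_states.h: USED_IN, USED_IN_READ, ENABLED, ENABLED_READ per state):
 *
 *   exclusive_bit(LOCK_USED_IN_HARDIRQ)      == LOCK_ENABLED_HARDIRQ
 *   exclusive_bit(LOCK_USED_IN_HARDIRQ_READ) == LOCK_ENABLED_HARDIRQ
 *   exclusive_bit(LOCK_ENABLED_HARDIRQ)      == LOCK_USED_IN_HARDIRQ
 *
 * i.e. the state is kept, the used_in/enabled direction is flipped, and
 * the read bit is stripped.
 */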
static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
			   struct held_lock *next, enum lock_usage_bit bit)
{
	/*
	 * Prove that the new dependency does not connect a hardirq-safe
	 * lock with a hardirq-unsafe lock - to achieve this we search
	 * the backwards-subgraph starting at <prev>, and the
	 * forwards-subgraph starting at <next>:
	 */
	if (!check_usage(curr, prev, next, bit,
			   exclusive_bit(bit), state_name(bit)))
		return 0;

	bit++; /* _READ */

	/*
	 * Prove that the new dependency does not connect a hardirq-safe-read
	 * lock with a hardirq-unsafe lock - to achieve this we search
	 * the backwards-subgraph starting at <prev>, and the
	 * forwards-subgraph starting at <next>:
	 */
	if (!check_usage(curr, prev, next, bit,
			   exclusive_bit(bit), state_name(bit)))
		return 0;

	return 1;
}

static int
check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
		struct held_lock *next)
{
#define LOCKDEP_STATE(__STATE)						\
	if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE))	\
		return 0;
#include "lockdep_states.h"
#undef LOCKDEP_STATE

	return 1;
}

static void inc_chains(void)
{
	if (current->hardirq_context)
		nr_hardirq_chains++;
	else {
		if (current->softirq_context)
			nr_softirq_chains++;
		else
			nr_process_chains++;
	}
}

#else

static inline int
check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
		struct held_lock *next)
{
	return 1;
}

static inline void inc_chains(void)
{
	nr_process_chains++;
}

#endif

static void
print_deadlock_scenario(struct held_lock *nxt,
			struct held_lock *prv)
{
	struct lock_class *next = hlock_class(nxt);
	struct lock_class *prev = hlock_class(prv);

	printk(" Possible unsafe locking scenario:\n\n");
	printk("       CPU0\n");
	printk("       ----\n");
	printk("  lock(");
	__print_lock_name(prev);
	printk(");\n");
	printk("  lock(");
	__print_lock_name(next);
	printk(");\n");
	printk("\n *** DEADLOCK ***\n\n");
	printk(" May be due to missing lock nesting notation\n\n");
}

static int
print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
		   struct held_lock *next)
{
	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
		return 0;

	printk("\n=============================================\n");
	printk(  "[ INFO: possible recursive locking detected ]\n");
	print_kernel_version();
	printk(  "---------------------------------------------\n");
	printk("%s/%d is trying to acquire lock:\n",
		curr->comm, task_pid_nr(curr));
	print_lock(next);
	printk("\nbut task is already holding lock:\n");
	print_lock(prev);

	printk("\nother info that might help us debug this:\n");
	print_deadlock_scenario(next, prev);
	lockdep_print_held_locks(curr);

	printk("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

/*
 * Check whether we are holding such a class already.
 *
 * (Note that this has to be done separately, because the graph cannot
 * detect such classes of deadlocks.)
 *
 * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
 */
static int
check_deadlock(struct task_struct *curr, struct held_lock *next,
	       struct lockdep_map *next_instance, int read)
{
	struct held_lock *prev;
	struct held_lock *nest = NULL;
	int i;

	for (i = 0; i < curr->lockdep_depth; i++) {
		prev = curr->held_locks + i;

		if (prev->instance == next->nest_lock)
			nest = prev;

		if (hlock_class(prev) != hlock_class(next))
			continue;

		/*
		 * Allow read-after-read recursion of the same
		 * lock class (i.e. read_lock(lock)+read_lock(lock)):
		 */
		if ((read == 2) && prev->read)
			return 2;

		/*
		 * We're holding the nest_lock, which serializes this lock's
		 * nesting behaviour.
		 */
		if (nest)
			return 2;

		return print_deadlock_bug(curr, prev, next);
	}
	return 1;
}
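
/*
 * Illustrative examples (assumed call sites, not from this file; the rw,
 * a1 and a2 locks are hypothetical) of what check_deadlock() accepts:
 */
#if 0
static void example_deadlock_checks(void)
{
	read_lock(&rw);
	read_lock(&rw);		/* OK: read-after-read recursion, returns 2 */

	spin_lock(&a1);
	spin_lock(&a2);		/* reported: a1 and a2 share a lock class and
				 * no nest_lock is held */
}
#endif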
/*
 * There was a chain-cache miss, and we are about to add a new dependency
 * to a previous lock. We recursively validate the following rules:
 *
 *  - would the adding of the <prev> -> <next> dependency create a
 *    circular dependency in the graph? [== circular deadlock]
 *
 *  - does the new prev->next dependency connect any hardirq-safe lock
 *    (in the full backwards-subgraph starting at <prev>) with any
 *    hardirq-unsafe lock (in the full forwards-subgraph starting at
 *    <next>)? [== illegal lock inversion with hardirq contexts]
 *
 *  - does the new prev->next dependency connect any softirq-safe lock
 *    (in the full backwards-subgraph starting at <prev>) with any
 *    softirq-unsafe lock (in the full forwards-subgraph starting at
 *    <next>)? [== illegal lock inversion with softirq contexts]
 *
 * Any of these scenarios could lead to a deadlock.
 *
 * Then if all the validations pass, we add the forwards and backwards
 * dependency.
 */
static int
check_prev_add(struct task_struct *curr, struct held_lock *prev,
	       struct held_lock *next, int distance, int trylock_loop)
{
	struct lock_list *entry;
	int ret;
	struct lock_list this;
	struct lock_list *uninitialized_var(target_entry);
	/*
	 * Static variable, serialized by the graph_lock().
	 *
	 * We use this static variable to save the stack trace in case
	 * we call into this function multiple times due to encountering
	 * trylocks in the held lock stack.
	 */
	static struct stack_trace trace;

	/*
	 * Prove that the new <prev> -> <next> dependency would not
	 * create a circular dependency in the graph. (We do this by
	 * forward-recursing into the graph starting at <next>, and
	 * checking whether we can reach <prev>.)
	 *
	 * We are using global variables to control the recursion, to
	 * keep the stackframe size of the recursive functions low:
	 */
	this.class = hlock_class(next);
	this.parent = NULL;
	ret = check_noncircular(&this, hlock_class(prev), &target_entry);
	if (unlikely(!ret))
		return print_circular_bug(&this, target_entry, next, prev);
	else if (unlikely(ret < 0))
		return print_bfs_bug(ret);

	if (!check_prev_add_irq(curr, prev, next))
		return 0;

	/*
	 * For recursive read-locks we do all the dependency checks,
	 * but we don't store read-triggered dependencies (only
	 * write-triggered dependencies). This ensures that only the
	 * write-side dependencies matter, and that if for example a
	 * write-lock never takes any other locks, then the reads are
	 * equivalent to a NOP.
	 */
	if (next->read == 2 || prev->read == 2)
		return 1;
	/*
	 * Is the <prev> -> <next> dependency already present?
	 *
	 * (this may occur even though this is a new chain: consider
	 * e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
	 * chains - the second one will be new, but L1 already has
	 * L2 added to its dependency list, due to the first chain.)
	 */
	list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
		if (entry->class == hlock_class(next)) {
			if (distance == 1)
				entry->distance = 1;
			return 2;
		}
	}

	if (!trylock_loop && !save_trace(&trace))
		return 0;

	/*
	 * Ok, all validations passed, add the new lock
	 * to the previous lock's dependency list:
	 */
	ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
			       &hlock_class(prev)->locks_after,
			       next->acquire_ip, distance, &trace);

	if (!ret)
		return 0;

	ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
			       &hlock_class(next)->locks_before,
			       next->acquire_ip, distance, &trace);
	if (!ret)
		return 0;

	/*
	 * Debugging printouts:
	 */
	if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
		graph_unlock();
		printk("\n new dependency: ");
		print_lock_name(hlock_class(prev));
		printk(" => ");
		print_lock_name(hlock_class(next));
		printk("\n");
		dump_stack();
		return graph_lock();
	}
	return 1;
}

/*
 * Add the dependency to all directly-previous locks that are 'relevant'.
 * The ones that are relevant are (in increasing distance from curr):
 * all consecutive trylock entries and the final non-trylock entry - or
 * the end of this context's lock-chain - whichever comes first.
 */
static int
check_prevs_add(struct task_struct *curr, struct held_lock *next)
{
	int depth = curr->lockdep_depth;
	int trylock_loop = 0;
	struct held_lock *hlock;

	/*
	 * Debugging checks.
	 *
	 * Depth must not be zero for a non-head lock:
	 */
	if (!depth)
		goto out_bug;
	/*
	 * At least two relevant locks must exist for this
	 * to be a head:
	 */
	if (curr->held_locks[depth].irq_context !=
			curr->held_locks[depth-1].irq_context)
		goto out_bug;

	for (;;) {
		int distance = curr->lockdep_depth - depth + 1;
		hlock = curr->held_locks + depth-1;
		/*
		 * Only non-recursive-read entries get new dependencies
		 * added:
		 */
		if (hlock->read != 2) {
			if (!check_prev_add(curr, hlock, next,
						distance, trylock_loop))
				return 0;
			/*
			 * Stop after the first non-trylock entry,
			 * as non-trylock entries have added their
			 * own direct dependencies already, so this
			 * lock is connected to them indirectly:
			 */
			if (!hlock->trylock)
				break;
		}
		depth--;
		/*
		 * End of lock-stack?
		 */
		if (!depth)
			break;
		/*
		 * Stop the search if we cross into another context:
		 */
		if (curr->held_locks[depth].irq_context !=
				curr->held_locks[depth-1].irq_context)
			break;
		trylock_loop = 1;
	}
	return 1;
out_bug:
	if (!debug_locks_off_graph_unlock())
		return 0;

	WARN_ON(1);

	return 0;
}

unsigned long nr_lock_chains;
struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
int nr_chain_hlocks;
static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
{
	return lock_classes + chain_hlocks[chain->base + i];
}
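
/*
 * I.e. a chain's class indices live back-to-back in chain_hlocks[],
 * starting at chain->base (an illustration; a depth-3 chain shown):
 *
 *   chain_hlocks[]: ... | c0 | c1 | c2 | ...
 *                         ^
 *                         chain->base
 */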
/*
 * Look up a dependency chain. If the key is not present yet then
 * add it and return 1 - in this case the new dependency chain is
 * validated. If the key is already hashed, return 0.
 * (On return with 1 graph_lock is held.)
 */
static inline int lookup_chain_cache(struct task_struct *curr,
				     struct held_lock *hlock,
				     u64 chain_key)
{
	struct lock_class *class = hlock_class(hlock);
	struct list_head *hash_head = chainhashentry(chain_key);
	struct lock_chain *chain;
	struct held_lock *hlock_curr, *hlock_next;
	int i, j;

	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return 0;
	/*
	 * We can walk it lock-free, because entries only get added
	 * to the hash:
	 */
	list_for_each_entry(chain, hash_head, entry) {
		if (chain->chain_key == chain_key) {
cache_hit:
			debug_atomic_inc(chain_lookup_hits);
			if (very_verbose(class))
				printk("\nhash chain already cached, key: "
					"%016Lx tail class: [%p] %s\n",
					(unsigned long long)chain_key,
					class->key, class->name);
			return 0;
		}
	}
	if (very_verbose(class))
		printk("\nnew hash chain, k