-rw-r--r--  include/linux/lockdep.h    |  17
-rw-r--r--  include/linux/sched.h      |   1
-rw-r--r--  kernel/lockdep.c           | 229
-rw-r--r--  kernel/lockdep_internals.h |   3
-rw-r--r--  kernel/lockdep_proc.c      |   6
-rw-r--r--  mm/page_alloc.c            |   5
-rw-r--r--  mm/slab.c                  |   4
-rw-r--r--  mm/slob.c                  |   2
-rw-r--r--  mm/slub.c                  |   1
-rw-r--r--  mm/vmscan.c                |   3
10 files changed, 254 insertions(+), 17 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 23bf02fb124f..cc97bdbc7969 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -27,12 +27,16 @@ enum lock_usage_bit
 	LOCK_USED = 0,
 	LOCK_USED_IN_HARDIRQ,
 	LOCK_USED_IN_SOFTIRQ,
+	LOCK_USED_IN_RECLAIM_FS,
 	LOCK_ENABLED_SOFTIRQS,
 	LOCK_ENABLED_HARDIRQS,
+	LOCK_HELD_OVER_RECLAIM_FS,
 	LOCK_USED_IN_HARDIRQ_READ,
 	LOCK_USED_IN_SOFTIRQ_READ,
+	LOCK_USED_IN_RECLAIM_FS_READ,
 	LOCK_ENABLED_SOFTIRQS_READ,
 	LOCK_ENABLED_HARDIRQS_READ,
+	LOCK_HELD_OVER_RECLAIM_FS_READ,
 	LOCK_USAGE_STATES
 };
@@ -42,16 +46,20 @@ enum lock_usage_bit
 #define LOCKF_USED			(1 << LOCK_USED)
 #define LOCKF_USED_IN_HARDIRQ		(1 << LOCK_USED_IN_HARDIRQ)
 #define LOCKF_USED_IN_SOFTIRQ		(1 << LOCK_USED_IN_SOFTIRQ)
+#define LOCKF_USED_IN_RECLAIM_FS	(1 << LOCK_USED_IN_RECLAIM_FS)
 #define LOCKF_ENABLED_HARDIRQS		(1 << LOCK_ENABLED_HARDIRQS)
 #define LOCKF_ENABLED_SOFTIRQS		(1 << LOCK_ENABLED_SOFTIRQS)
+#define LOCKF_HELD_OVER_RECLAIM_FS	(1 << LOCK_HELD_OVER_RECLAIM_FS)
 
 #define LOCKF_ENABLED_IRQS (LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS)
 #define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)
 
 #define LOCKF_USED_IN_HARDIRQ_READ	(1 << LOCK_USED_IN_HARDIRQ_READ)
 #define LOCKF_USED_IN_SOFTIRQ_READ	(1 << LOCK_USED_IN_SOFTIRQ_READ)
+#define LOCKF_USED_IN_RECLAIM_FS_READ	(1 << LOCK_USED_IN_RECLAIM_FS_READ)
 #define LOCKF_ENABLED_HARDIRQS_READ	(1 << LOCK_ENABLED_HARDIRQS_READ)
 #define LOCKF_ENABLED_SOFTIRQS_READ	(1 << LOCK_ENABLED_SOFTIRQS_READ)
+#define LOCKF_HELD_OVER_RECLAIM_FS_READ	(1 << LOCK_HELD_OVER_RECLAIM_FS_READ)
 
 #define LOCKF_ENABLED_IRQS_READ \
 		(LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ)
@@ -324,7 +332,11 @@ static inline void lock_set_subclass(struct lockdep_map *lock,
 	lock_set_class(lock, lock->name, lock->key, subclass, ip);
 }
 
-# define INIT_LOCKDEP				.lockdep_recursion = 0,
+extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
+extern void lockdep_clear_current_reclaim_state(void);
+extern void lockdep_trace_alloc(gfp_t mask);
+
+# define INIT_LOCKDEP	.lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
 
 #define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)
@@ -342,6 +354,9 @@ static inline void lockdep_on(void)
 # define lock_release(l, n, i)			do { } while (0)
 # define lock_set_class(l, n, k, s, i)		do { } while (0)
 # define lock_set_subclass(l, s, i)		do { } while (0)
+# define lockdep_set_current_reclaim_state(g)	do { } while (0)
+# define lockdep_clear_current_reclaim_state()	do { } while (0)
+# define lockdep_trace_alloc(g)			do { } while (0)
 # define lockdep_init()				do { } while (0)
 # define lockdep_info()				do { } while (0)
 # define lockdep_init_map(lock, name, key, sub) \
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4efb552aca47..b00a77f4999e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1313,6 +1313,7 @@ struct task_struct {
 	int lockdep_depth;
 	unsigned int lockdep_recursion;
 	struct held_lock held_locks[MAX_LOCK_DEPTH];
+	gfp_t lockdep_reclaim_gfp;
 #endif
 
 /* journalling filesystem info */
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 06b0c3568f0b..977f940fd562 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -310,12 +310,14 @@ EXPORT_SYMBOL(lockdep_on);
 #if VERBOSE
 # define HARDIRQ_VERBOSE	1
 # define SOFTIRQ_VERBOSE	1
+# define RECLAIM_VERBOSE	1
 #else
 # define HARDIRQ_VERBOSE	0
 # define SOFTIRQ_VERBOSE	0
+# define RECLAIM_VERBOSE	0
 #endif
 
-#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
+#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE || RECLAIM_VERBOSE
 /*
  * Quick filtering for interesting events:
  */
@@ -454,6 +456,10 @@ static const char *usage_str[] =
 	[LOCK_USED_IN_SOFTIRQ_READ] =	"in-softirq-R",
 	[LOCK_ENABLED_SOFTIRQS_READ] =	"softirq-on-R",
 	[LOCK_ENABLED_HARDIRQS_READ] =	"hardirq-on-R",
+	[LOCK_USED_IN_RECLAIM_FS] =	"in-reclaim-W",
+	[LOCK_USED_IN_RECLAIM_FS_READ] = "in-reclaim-R",
+	[LOCK_HELD_OVER_RECLAIM_FS] =	"ov-reclaim-W",
+	[LOCK_HELD_OVER_RECLAIM_FS_READ] = "ov-reclaim-R",
 };
 
 const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
@@ -462,9 +468,10 @@ const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
 }
 
 void
-get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4)
+get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3,
+		char *c4, char *c5, char *c6)
 {
-	*c1 = '.', *c2 = '.', *c3 = '.', *c4 = '.';
+	*c1 = '.', *c2 = '.', *c3 = '.', *c4 = '.', *c5 = '.', *c6 = '.';
 
 	if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
 		*c1 = '+';
@@ -493,14 +500,29 @@ get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4
 		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
 			*c4 = '?';
 	}
+
+	if (class->usage_mask & LOCKF_USED_IN_RECLAIM_FS)
+		*c5 = '+';
+	else
+		if (class->usage_mask & LOCKF_HELD_OVER_RECLAIM_FS)
+			*c5 = '-';
+
+	if (class->usage_mask & LOCKF_HELD_OVER_RECLAIM_FS_READ)
+		*c6 = '-';
+	if (class->usage_mask & LOCKF_USED_IN_RECLAIM_FS_READ) {
+		*c6 = '+';
+		if (class->usage_mask & LOCKF_HELD_OVER_RECLAIM_FS_READ)
+			*c6 = '?';
+	}
+
 }
 
 static void print_lock_name(struct lock_class *class)
 {
-	char str[KSYM_NAME_LEN], c1, c2, c3, c4;
+	char str[KSYM_NAME_LEN], c1, c2, c3, c4, c5, c6;
 	const char *name;
 
-	get_usage_chars(class, &c1, &c2, &c3, &c4);
+	get_usage_chars(class, &c1, &c2, &c3, &c4, &c5, &c6);
 
 	name = class->name;
 	if (!name) {
@@ -513,7 +535,7 @@ static void print_lock_name(struct lock_class *class)
 		if (class->subclass)
 			printk("/%d", class->subclass);
 	}
-	printk("){%c%c%c%c}", c1, c2, c3, c4);
+	printk("){%c%c%c%c%c%c}", c1, c2, c3, c4, c5, c6);
 }
 
 static void print_lockdep_cache(struct lockdep_map *lock)
@@ -1306,6 +1328,26 @@ check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
 			LOCK_ENABLED_SOFTIRQS, "soft"))
 		return 0;
 
+	/*
+	 * Prove that the new dependency does not connect a reclaim-fs-safe
+	 * lock with a reclaim-fs-unsafe lock - to achieve this we search
+	 * the backwards-subgraph starting at <prev>, and the
+	 * forwards-subgraph starting at <next>:
+	 */
+	if (!check_usage(curr, prev, next, LOCK_USED_IN_RECLAIM_FS,
+			LOCK_HELD_OVER_RECLAIM_FS, "reclaim-fs"))
+		return 0;
+
+	/*
+	 * Prove that the new dependency does not connect a reclaim-fs-safe-read
+	 * lock with a reclaim-fs-unsafe lock - to achieve this we search
+	 * the backwards-subgraph starting at <prev>, and the
+	 * forwards-subgraph starting at <next>:
+	 */
+	if (!check_usage(curr, prev, next, LOCK_USED_IN_RECLAIM_FS_READ,
+			LOCK_HELD_OVER_RECLAIM_FS, "reclaim-fs-read"))
+		return 0;
+
 	return 1;
 }
@@ -1949,6 +1991,14 @@ static int softirq_verbose(struct lock_class *class)
 	return 0;
 }
 
+static int reclaim_verbose(struct lock_class *class)
+{
+#if RECLAIM_VERBOSE
+	return class_filter(class);
+#endif
+	return 0;
+}
+
 #define STRICT_READ_CHECKS	1
 
 static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
@@ -2007,6 +2057,31 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
 		if (softirq_verbose(hlock_class(this)))
 			ret = 2;
 		break;
+	case LOCK_USED_IN_RECLAIM_FS:
+		if (!valid_state(curr, this, new_bit, LOCK_HELD_OVER_RECLAIM_FS))
+			return 0;
+		if (!valid_state(curr, this, new_bit,
+				 LOCK_HELD_OVER_RECLAIM_FS_READ))
+			return 0;
+		/*
+		 * just marked it reclaim-fs-safe, check that this lock
+		 * took no reclaim-fs-unsafe lock in the past:
+		 */
+		if (!check_usage_forwards(curr, this,
+					  LOCK_HELD_OVER_RECLAIM_FS, "reclaim-fs"))
+			return 0;
+#if STRICT_READ_CHECKS
+		/*
+		 * just marked it reclaim-fs-safe, check that this lock
+		 * took no reclaim-fs-unsafe-read lock in the past:
+		 */
+		if (!check_usage_forwards(curr, this,
+				LOCK_HELD_OVER_RECLAIM_FS_READ, "reclaim-fs-read"))
+			return 0;
+#endif
+		if (reclaim_verbose(hlock_class(this)))
+			ret = 2;
+		break;
 	case LOCK_USED_IN_HARDIRQ_READ:
 		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
 			return 0;
@@ -2033,6 +2108,19 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
 		if (softirq_verbose(hlock_class(this)))
 			ret = 2;
 		break;
+	case LOCK_USED_IN_RECLAIM_FS_READ:
+		if (!valid_state(curr, this, new_bit, LOCK_HELD_OVER_RECLAIM_FS))
+			return 0;
+		/*
+		 * just marked it reclaim-fs-read-safe, check that this lock
+		 * took no reclaim-fs-unsafe lock in the past:
+		 */
+		if (!check_usage_forwards(curr, this,
+					  LOCK_HELD_OVER_RECLAIM_FS, "reclaim-fs"))
+			return 0;
+		if (reclaim_verbose(hlock_class(this)))
+			ret = 2;
+		break;
 	case LOCK_ENABLED_HARDIRQS:
 		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
 			return 0;
@@ -2085,6 +2173,32 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
 		if (softirq_verbose(hlock_class(this)))
 			ret = 2;
 		break;
+	case LOCK_HELD_OVER_RECLAIM_FS:
+		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_RECLAIM_FS))
+			return 0;
+		if (!valid_state(curr, this, new_bit,
+				 LOCK_USED_IN_RECLAIM_FS_READ))
+			return 0;
+		/*
+		 * just marked it reclaim-fs-unsafe, check that no reclaim-fs-safe
+		 * lock in the system ever took it in the past:
+		 */
+		if (!check_usage_backwards(curr, this,
+					   LOCK_USED_IN_RECLAIM_FS, "reclaim-fs"))
+			return 0;
+#if STRICT_READ_CHECKS
+		/*
+		 * just marked it reclaim-fs-unsafe, check that no
+		 * reclaim-fs-safe-read lock in the system ever took
+		 * it in the past:
+		 */
+		if (!check_usage_backwards(curr, this,
+				LOCK_USED_IN_RECLAIM_FS_READ, "reclaim-fs-read"))
+			return 0;
+#endif
+		if (reclaim_verbose(hlock_class(this)))
+			ret = 2;
+		break;
 	case LOCK_ENABLED_HARDIRQS_READ:
 		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
 			return 0;
@@ -2115,6 +2229,21 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
 		if (softirq_verbose(hlock_class(this)))
 			ret = 2;
 		break;
+	case LOCK_HELD_OVER_RECLAIM_FS_READ:
+		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_RECLAIM_FS))
+			return 0;
+#if STRICT_READ_CHECKS
+		/*
+		 * just marked it reclaim-fs-read-unsafe, check that no
+		 * reclaim-fs-safe lock in the system ever took it in the past:
+		 */
+		if (!check_usage_backwards(curr, this,
+					   LOCK_USED_IN_RECLAIM_FS, "reclaim-fs"))
+			return 0;
+#endif
+		if (reclaim_verbose(hlock_class(this)))
+			ret = 2;
+		break;
 	default:
 		WARN_ON(1);
 		break;
@@ -2123,11 +2252,17 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
 	return ret;
 }
 
+enum mark_type {
+	HARDIRQ,
+	SOFTIRQ,
+	RECLAIM_FS,
+};
+
 /*
  * Mark all held locks with a usage bit:
  */
 static int
-mark_held_locks(struct task_struct *curr, int hardirq)
+mark_held_locks(struct task_struct *curr, enum mark_type mark)
 {
 	enum lock_usage_bit usage_bit;
 	struct held_lock *hlock;
@@ -2136,17 +2271,32 @@ mark_held_locks(struct task_struct *curr, int hardirq)
 	for (i = 0; i < curr->lockdep_depth; i++) {
 		hlock = curr->held_locks + i;
 
-		if (hardirq) {
+		switch (mark) {
+		case HARDIRQ:
 			if (hlock->read)
 				usage_bit = LOCK_ENABLED_HARDIRQS_READ;
 			else
 				usage_bit = LOCK_ENABLED_HARDIRQS;
-		} else {
+			break;
+
+		case SOFTIRQ:
 			if (hlock->read)
 				usage_bit = LOCK_ENABLED_SOFTIRQS_READ;
 			else
 				usage_bit = LOCK_ENABLED_SOFTIRQS;
+			break;
+
+		case RECLAIM_FS:
+			if (hlock->read)
+				usage_bit = LOCK_HELD_OVER_RECLAIM_FS_READ;
+			else
+				usage_bit = LOCK_HELD_OVER_RECLAIM_FS;
+			break;
+
+		default:
+			BUG();
 		}
+
 		if (!mark_lock(curr, hlock, usage_bit))
 			return 0;
 	}
@@ -2200,7 +2350,7 @@ void trace_hardirqs_on_caller(unsigned long ip)
 	 * We are going to turn hardirqs on, so set the
 	 * usage bit for all held locks:
 	 */
-	if (!mark_held_locks(curr, 1))
+	if (!mark_held_locks(curr, HARDIRQ))
 		return;
 	/*
 	 * If we have softirqs enabled, then set the usage
@@ -2208,7 +2358,7 @@ void trace_hardirqs_on_caller(unsigned long ip)
 	 * this bit from being set before)
 	 */
 	if (curr->softirqs_enabled)
-		if (!mark_held_locks(curr, 0))
+		if (!mark_held_locks(curr, SOFTIRQ))
 			return;
 
 	curr->hardirq_enable_ip = ip;
@@ -2288,7 +2438,7 @@ void trace_softirqs_on(unsigned long ip)
 	 * enabled too:
 	 */
 	if (curr->hardirqs_enabled)
-		mark_held_locks(curr, 0);
+		mark_held_locks(curr, SOFTIRQ);
 }
 
 /*
@@ -2317,6 +2467,31 @@ void trace_softirqs_off(unsigned long ip)
 		debug_atomic_inc(&redundant_softirqs_off);
 }
 
+void lockdep_trace_alloc(gfp_t gfp_mask)
+{
+	struct task_struct *curr = current;
+
+	if (unlikely(!debug_locks))
+		return;
+
+	/* no reclaim without waiting on it */
+	if (!(gfp_mask & __GFP_WAIT))
+		return;
+
+	/* this guy won't enter reclaim */
+	if ((curr->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC))
+		return;
+
+	/* We're only interested in __GFP_FS allocations for now */
+	if (!(gfp_mask & __GFP_FS))
+		return;
+
+	if (DEBUG_LOCKS_WARN_ON(irqs_disabled()))
+		return;
+
+	mark_held_locks(curr, RECLAIM_FS);
+}
+
 static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
 {
 	/*
@@ -2362,6 +2537,22 @@ static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
 		}
 	}
 
+	/*
+	 * We reuse the irq context infrastructure more broadly as general
+	 * context checking code. This tests GFP_FS recursion (a lock taken
+	 * during reclaim for a GFP_FS allocation is held over a GFP_FS
+	 * allocation).
+	 */
+	if (!hlock->trylock && (curr->lockdep_reclaim_gfp & __GFP_FS)) {
+		if (hlock->read) {
+			if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS_READ))
+				return 0;
+		} else {
+			if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS))
+				return 0;
+		}
+	}
+
 	return 1;
 }
@@ -2453,6 +2644,10 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 	case LOCK_ENABLED_SOFTIRQS:
 	case LOCK_ENABLED_HARDIRQS_READ:
 	case LOCK_ENABLED_SOFTIRQS_READ:
+	case LOCK_USED_IN_RECLAIM_FS:
+	case LOCK_USED_IN_RECLAIM_FS_READ:
+	case LOCK_HELD_OVER_RECLAIM_FS:
+	case LOCK_HELD_OVER_RECLAIM_FS_READ:
 		ret = mark_lock_irq(curr, this, new_bit);
 		if (!ret)
 			return 0;
@@ -2966,6 +3161,16 @@ void lock_release(struct lockdep_map *lock, int nested,
 }
 EXPORT_SYMBOL_GPL(lock_release);
 
+void lockdep_set_current_reclaim_state(gfp_t gfp_mask)
+{
+	current->lockdep_reclaim_gfp = gfp_mask;
+}
+
+void lockdep_clear_current_reclaim_state(void)
+{
+	current->lockdep_reclaim_gfp = 0;
+}
+
 #ifdef CONFIG_LOCK_STAT
 static int print_lock_contention_bug(struct task_struct *curr,
 				     struct lockdep_map *lock,
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index 56b196932c08..e887b783244f 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -32,7 +32,8 @@ extern struct list_head all_lock_classes;
 extern struct lock_chain lock_chains[];
 
 extern void
-get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4);
+get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3,
+		char *c4, char *c5, char *c6);
 
 extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str);
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index 13716b813896..b84a1dfa9077 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -84,7 +84,7 @@ static int l_show(struct seq_file *m, void *v)
 {
 	struct lock_class *class = v;
 	struct lock_list *entry;
-	char c1, c2, c3, c4;
+	char c1, c2, c3, c4, c5, c6;
 
 	if (v == SEQ_START_TOKEN) {
 		seq_printf(m, "all lock classes:\n");
@@ -100,8 +100,8 @@ static int l_show(struct seq_file *m, void *v)
 	seq_printf(m, " BD:%5ld", lockdep_count_backward_deps(class));
 #endif
 
-	get_usage_chars(class, &c1, &c2, &c3, &c4);
-	seq_printf(m, " %c%c%c%c", c1, c2, c3, c4);
+	get_usage_chars(class, &c1, &c2, &c3, &c4, &c5, &c6);
+	seq_printf(m, " %c%c%c%c%c%c", c1, c2, c3, c4, c5, c6);
 
 	seq_printf(m, ": ");
 	print_name(m, class);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5675b3073854..22b15a4cde8a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1479,6 +1479,8 @@ __alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
 	unsigned long did_some_progress;
 	unsigned long pages_reclaimed = 0;
 
+	lockdep_trace_alloc(gfp_mask);
+
 	might_sleep_if(wait);
 
 	if (should_fail_alloc_page(gfp_mask, order))
@@ -1578,12 +1580,15 @@ nofail_alloc:
 	 */
 	cpuset_update_task_memory_state();
 	p->flags |= PF_MEMALLOC;
+
+	lockdep_set_current_reclaim_state(gfp_mask);
 	reclaim_state.reclaimed_slab = 0;
 	p->reclaim_state = &reclaim_state;
 
 	did_some_progress = try_to_free_pages(zonelist, order, gfp_mask);
 
 	p->reclaim_state = NULL;
+	lockdep_clear_current_reclaim_state();
 	p->flags &= ~PF_MEMALLOC;
 
 	cond_resched();
diff --git a/mm/slab.c b/mm/slab.c
index ddc41f337d58..6b61de8543ec 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3318,6 +3318,8 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	unsigned long save_flags;
 	void *ptr;
 
+	lockdep_trace_alloc(flags);
+
 	if (slab_should_failslab(cachep, flags))
 		return NULL;
@@ -3394,6 +3396,8 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 	unsigned long save_flags;
 	void *objp;
 
+	lockdep_trace_alloc(flags);
+
 	if (slab_should_failslab(cachep, flags))
 		return NULL;
diff --git a/mm/slob.c b/mm/slob.c
index bf7e8fc3aed8..1264799df5d1 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -464,6 +464,8 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 	unsigned int *m;
 	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 
+	lockdep_trace_alloc(gfp);
+
 	if (size < PAGE_SIZE - align) {
 		if (!size)
 			return ZERO_SIZE_PTR;
diff --git a/mm/slub.c b/mm/slub.c
index bdc9abb08a23..214eb207c513 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1596,6 +1596,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	unsigned long flags;
 	unsigned int objsize;
 
+	lockdep_trace_alloc(gfpflags);
 	might_sleep_if(gfpflags & __GFP_WAIT);
 
 	if (should_failslab(s->objsize, gfpflags))
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9a27c44aa327..303eb658b50b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1963,6 +1963,9 @@ static int kswapd(void *p)
 	struct reclaim_state reclaim_state = {
 		.reclaimed_slab = 0,
 	};
+
+	lockdep_set_current_reclaim_state(GFP_KERNEL);
+
 	node_to_cpumask_ptr(cpumask, pgdat->node_id);
 
 	if (!cpumask_empty(cpumask))
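
Minimal sketch of how the new annotations come together for a caller (not part of the commit; my_lock, my_shrink_cache() and my_alloc_object() are hypothetical, only the lockdep_* hooks and GFP flags are real):

/*
 * Sketch only: a lock taken from the reclaim path and also held over a
 * GFP_FS allocation is now a reportable deadlock scenario.
 */
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/slab.h>

static DEFINE_MUTEX(my_lock);

/*
 * Runs from kswapd or direct reclaim, where lockdep_set_current_reclaim_state()
 * is in effect: taking my_lock here marks it used-in-reclaim-fs.
 */
static void my_shrink_cache(void)
{
	mutex_lock(&my_lock);
	/* ... drop cached objects ... */
	mutex_unlock(&my_lock);
}

/*
 * Ordinary allocation path: kmalloc() now calls lockdep_trace_alloc(), which
 * marks my_lock as held over a __GFP_FS allocation. Combined with the usage
 * above, lockdep reports the potential recursion into reclaim; allocating
 * with GFP_NOFS here would be the usual fix.
 */
static void *my_alloc_object(void)
{
	void *p;

	mutex_lock(&my_lock);
	p = kmalloc(128, GFP_KERNEL);	/* GFP_KERNEL includes __GFP_FS */
	mutex_unlock(&my_lock);
	return p;
}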