
Commit 8a5fd56

dvyukov authored and Ingo Molnar committed
locking/lockdep: Fix stack trace caching logic
check_prev_add() caches the saved stack trace in a static trace variable to avoid duplicate save_trace() calls in dependencies involving trylocks. But that caching logic contains a bug: we may not save the trace on the first iteration due to an early return from check_prev_add(). Then on the second iteration, when we actually need the trace, we don't save it because we think we already have. Let check_prev_add() itself control when the stack is saved.

There is another bug: the trace variable is protected by the graph lock, but we can temporarily release the graph lock during printing. Fix this by invalidating the cached stack trace when we release the graph lock.

Signed-off-by: Dmitry Vyukov <dvyukov@google.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: glider@google.com
Cc: kcc@google.com
Cc: peter@hurleysoftware.com
Cc: sasha.levin@oracle.com
Link: http://lkml.kernel.org/r/1454593240-121647-1-git-send-email-dvyukov@google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
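To illustrate the first problem the message describes, here is a minimal, standalone C sketch (not the kernel code; save_expensive(), check_one() and check_all() are made-up names standing in for save_trace(), check_prev_add() and check_prevs_add()): the caller owns an "already saved" flag and the callee sets it only after a successful save, so an early return on one iteration cannot mislead later iterations.

#include <stdio.h>

/* Stand-in for save_trace(): an expensive save that can fail. */
static int save_expensive(void)
{
	printf("saving expensive state\n");
	return 1;
}

/* Stand-in for check_prev_add(): may return early before saving. */
static int check_one(int item, int *state_saved)
{
	if (item < 0)
		return 1;	/* early return: flag deliberately left untouched */

	if (!*state_saved) {
		if (!save_expensive())
			return 0;
		*state_saved = 1;	/* set only after the save succeeded */
	}

	/* ... use the saved state to record a dependency ... */
	return 1;
}

/* Stand-in for check_prevs_add(): the caller owns the flag for the whole loop. */
static int check_all(const int *items, int n)
{
	int state_saved = 0;

	for (int i = 0; i < n; i++)
		if (!check_one(items[i], &state_saved))
			return 0;
	return 1;
}

int main(void)
{
	/* The first item triggers the early return; the save happens on the second. */
	int items[] = { -1, 2, 3 };

	return check_all(items, 3) ? 0 : 1;
}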
1 parent 765bdb4 commit 8a5fd56


kernel/locking/lockdep.c

Lines changed: 10 additions & 6 deletions
@@ -1822,7 +1822,7 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
  */
 static int
 check_prev_add(struct task_struct *curr, struct held_lock *prev,
-	       struct held_lock *next, int distance, int trylock_loop)
+	       struct held_lock *next, int distance, int *stack_saved)
 {
 	struct lock_list *entry;
 	int ret;
@@ -1883,8 +1883,11 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 		}
 	}
 
-	if (!trylock_loop && !save_trace(&trace))
-		return 0;
+	if (!*stack_saved) {
+		if (!save_trace(&trace))
+			return 0;
+		*stack_saved = 1;
+	}
 
 	/*
 	 * Ok, all validations passed, add the new lock
@@ -1907,6 +1910,8 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	 * Debugging printouts:
 	 */
 	if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
+		/* We drop graph lock, so another thread can overwrite trace. */
+		*stack_saved = 0;
 		graph_unlock();
 		printk("\n new dependency: ");
 		print_lock_name(hlock_class(prev));
@@ -1929,7 +1934,7 @@ static int
 check_prevs_add(struct task_struct *curr, struct held_lock *next)
 {
 	int depth = curr->lockdep_depth;
-	int trylock_loop = 0;
+	int stack_saved = 0;
 	struct held_lock *hlock;
 
 	/*
@@ -1956,7 +1961,7 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
 		 */
 		if (hlock->read != 2 && hlock->check) {
 			if (!check_prev_add(curr, hlock, next,
-						distance, trylock_loop))
+						distance, &stack_saved))
 				return 0;
 			/*
 			 * Stop after the first non-trylock entry,
@@ -1979,7 +1984,6 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
 		if (curr->held_locks[depth].irq_context !=
 		    curr->held_locks[depth-1].irq_context)
 			break;
-		trylock_loop = 1;
 	}
 	return 1;
 out_bug:
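The third hunk above clears *stack_saved before graph_unlock(). A minimal standalone sketch of that pattern follows (a pthread mutex stands in for the lockdep graph lock; all names are made up): cached state that lives in shared storage guarded by a lock must be treated as invalid by the caller once the lock has been dropped, even briefly, because another thread may overwrite it in the meantime.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t graph_lock = PTHREAD_MUTEX_INITIALIZER;
static char trace_buf[64];	/* shared cache, guarded by graph_lock */

/* Stand-in for save_trace(): caller must hold graph_lock. */
static void save_trace_cache(const char *msg)
{
	snprintf(trace_buf, sizeof(trace_buf), "%s", msg);
}

static void add_dependency(const char *msg, int verbose, int *stack_saved)
{
	pthread_mutex_lock(&graph_lock);

	if (!*stack_saved) {
		save_trace_cache(msg);
		*stack_saved = 1;
	}

	/* ... record the dependency using trace_buf ... */

	if (verbose) {
		/*
		 * We are about to drop the lock to print; another thread may
		 * overwrite trace_buf while it is released, so the cached copy
		 * can no longer be trusted on later iterations.
		 */
		*stack_saved = 0;
		pthread_mutex_unlock(&graph_lock);
		printf("new dependency: %s\n", trace_buf);
		pthread_mutex_lock(&graph_lock);
	}

	pthread_mutex_unlock(&graph_lock);
}

int main(void)
{
	int stack_saved = 0;

	add_dependency("A -> B", 1, &stack_saved);
	add_dependency("A -> C", 0, &stack_saved);	/* re-saves: flag was cleared */
	return 0;
}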
