
Commit 0fa7271

Merge branch 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking fix from Ingo Molnar:
 "A module unload lockdep race fix"

* 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  lockdep: Fix the module unload key range freeing logic
2 parents: 713d25d + 35a9393

2 files changed: 59 additions & 30 deletions
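
The core of the fix: lockdep's class hash and dependency lists are now walked with list_for_each_entry_rcu() under disabled interrupts, while lockdep_free_key_range() (run on module unload) unlinks the affected classes under the graph lock and then waits for a sched-RCU grace period before the module's memory may be reused. What follows is a minimal, self-contained sketch of that pattern, not the lockdep code itself; the names my_class, my_hash, my_hash_lock, my_lookup() and my_free_key_range() are invented for illustration, and an ordinary spinlock stands in for lockdep's graph lock.

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/irqflags.h>
#include <linux/bug.h>

/* Hypothetical stand-in for struct lock_class. */
struct my_class {
        struct list_head hash_entry;
        const void *key;
};

static LIST_HEAD(my_hash);              /* single bucket, for brevity   */
static DEFINE_SPINLOCK(my_hash_lock);   /* stands in for the graph lock */

/*
 * Reader: runs with IRQs disabled, which is the sched-RCU read-side
 * critical section that the writer's synchronize_sched() waits for.
 */
static struct my_class *my_lookup(const void *key)
{
        struct my_class *class;

        WARN_ON_ONCE(!irqs_disabled());

        list_for_each_entry_rcu(class, &my_hash, hash_entry) {
                if (class->key == key)
                        return class;
        }
        return NULL;
}

/*
 * Writer (module unload): unlink every class whose key lives in the
 * range being freed, then wait out all IRQ-disabled readers before the
 * range may be reused.  The unlinked entries are deliberately not
 * recycled here, mirroring the "we leak them" note in the real code.
 */
static void my_free_key_range(void *start, unsigned long size)
{
        struct my_class *class;
        unsigned long flags;

        spin_lock_irqsave(&my_hash_lock, flags);
        list_for_each_entry_rcu(class, &my_hash, hash_entry) {
                unsigned long addr = (unsigned long)class->key;

                if (addr >= (unsigned long)start &&
                    addr < (unsigned long)start + size)
                        list_del_rcu(&class->hash_entry);
        }
        spin_unlock_irqrestore(&my_hash_lock, flags);

        /*
         * synchronize_sched() (folded into synchronize_rcu() in later
         * kernels) returns only once every CPU has passed through a
         * preemptible, IRQ-enabled state, so no my_lookup() can still
         * be traversing entries that point into the freed range.
         */
        synchronize_sched();
}

Note the asymmetry this buys: readers take no lock at all; their only obligation is to keep interrupts off for the duration of the walk, which the new DEBUG_LOCKS_WARN_ON(!irqs_disabled()) checks in the diff below enforce.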

kernel/locking/lockdep.c

Lines changed: 55 additions & 26 deletions

@@ -633,7 +633,7 @@ static int count_matching_names(struct lock_class *new_class)
         if (!new_class->name)
                 return 0;
 
-        list_for_each_entry(class, &all_lock_classes, lock_entry) {
+        list_for_each_entry_rcu(class, &all_lock_classes, lock_entry) {
                 if (new_class->key - new_class->subclass == class->key)
                         return class->name_version;
                 if (class->name && !strcmp(class->name, new_class->name))
@@ -700,10 +700,12 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
         hash_head = classhashentry(key);
 
         /*
-         * We can walk the hash lockfree, because the hash only
-         * grows, and we are careful when adding entries to the end:
+         * We do an RCU walk of the hash, see lockdep_free_key_range().
         */
-        list_for_each_entry(class, hash_head, hash_entry) {
+        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+                return NULL;
+
+        list_for_each_entry_rcu(class, hash_head, hash_entry) {
                 if (class->key == key) {
                         /*
                          * Huh! same key, different name? Did someone trample
@@ -728,7 +730,8 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
         struct lockdep_subclass_key *key;
         struct list_head *hash_head;
         struct lock_class *class;
-        unsigned long flags;
+
+        DEBUG_LOCKS_WARN_ON(!irqs_disabled());
 
         class = look_up_lock_class(lock, subclass);
         if (likely(class))
@@ -750,28 +753,26 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
         key = lock->key->subkeys + subclass;
         hash_head = classhashentry(key);
 
-        raw_local_irq_save(flags);
         if (!graph_lock()) {
-                raw_local_irq_restore(flags);
                 return NULL;
         }
         /*
          * We have to do the hash-walk again, to avoid races
          * with another CPU:
         */
-        list_for_each_entry(class, hash_head, hash_entry)
+        list_for_each_entry_rcu(class, hash_head, hash_entry) {
                 if (class->key == key)
                         goto out_unlock_set;
+        }
+
         /*
          * Allocate a new key from the static array, and add it to
          * the hash:
         */
         if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
                 if (!debug_locks_off_graph_unlock()) {
-                        raw_local_irq_restore(flags);
                         return NULL;
                 }
-                raw_local_irq_restore(flags);
 
                 print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
                 dump_stack();
@@ -798,23 +799,19 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 
         if (verbose(class)) {
                 graph_unlock();
-                raw_local_irq_restore(flags);
 
                 printk("\nnew class %p: %s", class->key, class->name);
                 if (class->name_version > 1)
                         printk("#%d", class->name_version);
                 printk("\n");
                 dump_stack();
 
-                raw_local_irq_save(flags);
                 if (!graph_lock()) {
-                        raw_local_irq_restore(flags);
                         return NULL;
                 }
         }
 out_unlock_set:
         graph_unlock();
-        raw_local_irq_restore(flags);
 
 out_set_class_cache:
         if (!subclass || force)
@@ -870,11 +867,9 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
         entry->distance = distance;
         entry->trace = *trace;
         /*
-         * Since we never remove from the dependency list, the list can
-         * be walked lockless by other CPUs, it's only allocation
-         * that must be protected by the spinlock. But this also means
-         * we must make new entries visible only once writes to the
-         * entry become visible - hence the RCU op:
+         * Both allocation and removal are done under the graph lock; but
+         * iteration is under RCU-sched; see look_up_lock_class() and
+         * lockdep_free_key_range().
         */
         list_add_tail_rcu(&entry->entry, head);
 
@@ -1025,7 +1020,9 @@ static int __bfs(struct lock_list *source_entry,
         else
                 head = &lock->class->locks_before;
 
-        list_for_each_entry(entry, head, entry) {
+        DEBUG_LOCKS_WARN_ON(!irqs_disabled());
+
+        list_for_each_entry_rcu(entry, head, entry) {
                 if (!lock_accessed(entry)) {
                         unsigned int cq_depth;
                         mark_lock_accessed(entry, lock);
@@ -2022,7 +2019,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
          * We can walk it lock-free, because entries only get added
          * to the hash:
         */
-        list_for_each_entry(chain, hash_head, entry) {
+        list_for_each_entry_rcu(chain, hash_head, entry) {
                 if (chain->chain_key == chain_key) {
 cache_hit:
                         debug_atomic_inc(chain_lookup_hits);
@@ -2996,8 +2993,18 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
         if (unlikely(!debug_locks))
                 return;
 
-        if (subclass)
+        if (subclass) {
+                unsigned long flags;
+
+                if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
+                        return;
+
+                raw_local_irq_save(flags);
+                current->lockdep_recursion = 1;
                 register_lock_class(lock, subclass, 1);
+                current->lockdep_recursion = 0;
+                raw_local_irq_restore(flags);
+        }
 }
 EXPORT_SYMBOL_GPL(lockdep_init_map);
 
@@ -3887,9 +3894,17 @@ static inline int within(const void *addr, void *start, unsigned long size)
         return addr >= start && addr < start + size;
 }
 
+/*
+ * Used in module.c to remove lock classes from memory that is going to be
+ * freed; and possibly re-used by other modules.
+ *
+ * We will have had one sync_sched() before getting here, so we're guaranteed
+ * nobody will look up these exact classes -- they're properly dead but still
+ * allocated.
+ */
 void lockdep_free_key_range(void *start, unsigned long size)
 {
-        struct lock_class *class, *next;
+        struct lock_class *class;
         struct list_head *head;
         unsigned long flags;
         int i;
@@ -3905,7 +3920,7 @@ void lockdep_free_key_range(void *start, unsigned long size)
                 head = classhash_table + i;
                 if (list_empty(head))
                         continue;
-                list_for_each_entry_safe(class, next, head, hash_entry) {
+                list_for_each_entry_rcu(class, head, hash_entry) {
                         if (within(class->key, start, size))
                                 zap_class(class);
                         else if (within(class->name, start, size))
@@ -3916,11 +3931,25 @@ void lockdep_free_key_range(void *start, unsigned long size)
         if (locked)
                 graph_unlock();
         raw_local_irq_restore(flags);
+
+        /*
+         * Wait for any possible iterators from look_up_lock_class() to pass
+         * before continuing to free the memory they refer to.
+         *
+         * sync_sched() is sufficient because the read-side is IRQ disable.
+         */
+        synchronize_sched();
+
+        /*
+         * XXX at this point we could return the resources to the pool;
+         * instead we leak them. We would need to change to bitmap allocators
+         * instead of the linear allocators we have now.
+         */
 }
 
 void lockdep_reset_lock(struct lockdep_map *lock)
 {
-        struct lock_class *class, *next;
+        struct lock_class *class;
         struct list_head *head;
         unsigned long flags;
         int i, j;
@@ -3948,7 +3977,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
                 head = classhash_table + i;
                 if (list_empty(head))
                         continue;
-                list_for_each_entry_safe(class, next, head, hash_entry) {
+                list_for_each_entry_rcu(class, head, hash_entry) {
                         int match = 0;
 
                         for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
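
In short, the lockdep.c side converts every walk of the class hash, the dependency lists and the chain hash to list_for_each_entry_rcu(), and the lookup paths (look_up_lock_class(), register_lock_class(), __bfs()) now warn unless interrupts are disabled: the IRQ-disabled region is the read-side critical section that pairs with the synchronize_sched() added to lockdep_free_key_range(). Since callers now reach register_lock_class() with interrupts already off, its raw_local_irq_save()/restore() pairs are dropped, and lockdep_init_map() instead disables interrupts and sets current->lockdep_recursion around its own register_lock_class() call. The zap loops in lockdep_free_key_range() and lockdep_reset_lock() likewise move from list_for_each_entry_safe() to list_for_each_entry_rcu(); that is safe because the unlinked classes are leaked rather than recycled, so an iterator that has already reached one of them can still follow its links.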

kernel/module.c

Lines changed: 4 additions & 4 deletions

@@ -1865,7 +1865,7 @@ static void free_module(struct module *mod)
         kfree(mod->args);
         percpu_modfree(mod);
 
-        /* Free lock-classes: */
+        /* Free lock-classes; relies on the preceding sync_rcu(). */
         lockdep_free_key_range(mod->module_core, mod->core_size);
 
         /* Finally, free the core (containing the module structure) */
@@ -3349,9 +3349,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
         module_bug_cleanup(mod);
         mutex_unlock(&module_mutex);
 
-        /* Free lock-classes: */
-        lockdep_free_key_range(mod->module_core, mod->core_size);
-
         /* we can't deallocate the module until we clear memory protection */
         unset_module_init_ro_nx(mod);
         unset_module_core_ro_nx(mod);
@@ -3375,6 +3372,9 @@ static int load_module(struct load_info *info, const char __user *uargs,
         synchronize_rcu();
         mutex_unlock(&module_mutex);
  free_module:
+        /* Free lock-classes; relies on the preceding sync_rcu() */
+        lockdep_free_key_range(mod->module_core, mod->core_size);
+
         module_deallocate(mod, info);
  free_copy:
         free_copy(info);
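
On the module.c side the change is about ordering: lockdep_free_key_range() may only tear down a module's key range after an RCU grace period, which is what the updated "relies on the preceding sync_rcu()" comments record. free_module() already satisfied that, so only its comment changes; in load_module()'s error unwind the call is removed from right after module_bug_cleanup() and reissued at the free_module: label, which is reached only after the synchronize_rcu() visible in the last hunk.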
