@@ -633,7 +633,7 @@ static int count_matching_names(struct lock_class *new_class)
 	if (!new_class->name)
 		return 0;
 
-	list_for_each_entry(class, &all_lock_classes, lock_entry) {
+	list_for_each_entry_rcu(class, &all_lock_classes, lock_entry) {
 		if (new_class->key - new_class->subclass == class->key)
 			return class->name_version;
 		if (class->name && !strcmp(class->name, new_class->name))
@@ -700,10 +700,12 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 	hash_head = classhashentry(key);
 
 	/*
-	 * We can walk the hash lockfree, because the hash only
-	 * grows, and we are careful when adding entries to the end:
+	 * We do an RCU walk of the hash, see lockdep_free_key_range().
 	 */
-	list_for_each_entry(class, hash_head, hash_entry) {
+	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+		return NULL;
+
+	list_for_each_entry_rcu(class, hash_head, hash_entry) {
 		if (class->key == key) {
 			/*
 			 * Huh! same key, different name? Did someone trample
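
The hunk above replaces the lock-free hash walk with an RCU iteration and asserts that interrupts are disabled: with IRQs off, the CPU cannot pass through a scheduler quiescent state, so the walk is covered by the synchronize_sched() grace period used on the teardown side (see lockdep_free_key_range() below). A minimal sketch of that read-side pattern, assuming kernel context; the struct and helper names are illustrative, not from the patch:

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/irqflags.h>
#include <linux/debug_locks.h>

/* Hypothetical hash entry, standing in for struct lock_class. */
struct example_class {
	const void		*key;
	struct list_head	hash_entry;
};

/*
 * Read-side lookup: no lock is taken.  Interrupts being disabled is what
 * makes this an RCU-sched read-side critical section, which is why the
 * lookup warns and bails out if they are not.
 */
static struct example_class *example_lookup(struct list_head *hash_head,
					    const void *key)
{
	struct example_class *class;

	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return NULL;

	list_for_each_entry_rcu(class, hash_head, hash_entry) {
		if (class->key == key)
			return class;
	}
	return NULL;
}
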
@@ -728,7 +730,8 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	struct lockdep_subclass_key *key;
 	struct list_head *hash_head;
 	struct lock_class *class;
-	unsigned long flags;
+
+	DEBUG_LOCKS_WARN_ON(!irqs_disabled());
 
 	class = look_up_lock_class(lock, subclass);
 	if (likely(class))
@@ -750,28 +753,26 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	key = lock->key->subkeys + subclass;
 	hash_head = classhashentry(key);
 
-	raw_local_irq_save(flags);
 	if (!graph_lock()) {
-		raw_local_irq_restore(flags);
 		return NULL;
 	}
 	/*
 	 * We have to do the hash-walk again, to avoid races
 	 * with another CPU:
 	 */
-	list_for_each_entry(class, hash_head, hash_entry)
+	list_for_each_entry_rcu(class, hash_head, hash_entry) {
 		if (class->key == key)
 			goto out_unlock_set;
+	}
+
 	/*
 	 * Allocate a new key from the static array, and add it to
 	 * the hash:
 	 */
 	if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
 		if (!debug_locks_off_graph_unlock()) {
-			raw_local_irq_restore(flags);
 			return NULL;
 		}
-		raw_local_irq_restore(flags);
 
 		print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
 		dump_stack();
@@ -798,23 +799,19 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 
 	if (verbose(class)) {
 		graph_unlock();
-		raw_local_irq_restore(flags);
 
 		printk("\nnew class %p: %s", class->key, class->name);
 		if (class->name_version > 1)
 			printk("#%d", class->name_version);
 		printk("\n");
 		dump_stack();
 
-		raw_local_irq_save(flags);
 		if (!graph_lock()) {
-			raw_local_irq_restore(flags);
 			return NULL;
 		}
 	}
 out_unlock_set:
 	graph_unlock();
-	raw_local_irq_restore(flags);
 
 out_set_class_cache:
 	if (!subclass || force)
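
With the register_lock_class() hunks above, the function stops managing the IRQ state itself (the flags save/restore pairs disappear from every exit path) and instead relies on its callers entering with interrupts disabled, as asserted at the top of the function. A hedged sketch of what such a caller looks like; it mirrors the lockdep_init_map() hunk further down, and example_caller() itself is hypothetical:

/*
 * Hypothetical caller inside kernel/locking/lockdep.c: disable IRQs and
 * set the recursion guard before entering register_lock_class(), exactly
 * as the lockdep_init_map() change below does.
 */
static void example_caller(struct lockdep_map *lock, unsigned int subclass)
{
	unsigned long flags;

	if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
		return;

	raw_local_irq_save(flags);
	current->lockdep_recursion = 1;
	register_lock_class(lock, subclass, 1);	/* runs with IRQs off */
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);
}
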
@@ -870,11 +867,9 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
 	entry->distance = distance;
 	entry->trace = *trace;
 	/*
-	 * Since we never remove from the dependency list, the list can
-	 * be walked lockless by other CPUs, it's only allocation
-	 * that must be protected by the spinlock. But this also means
-	 * we must make new entries visible only once writes to the
-	 * entry become visible - hence the RCU op:
+	 * Both allocation and removal are done under the graph lock; but
+	 * iteration is under RCU-sched; see look_up_lock_class() and
+	 * lockdep_free_key_range().
 	 */
 	list_add_tail_rcu(&entry->entry, head);
 
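
The rewritten comment above captures the new discipline: dependency lists are modified only under the graph lock, while readers iterate them under RCU-sched. A rough sketch of that update pattern, using a plain spinlock and hypothetical names in place of lockdep's internal graph lock:

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>

/* Hypothetical stand-ins for the graph lock and a dependency list. */
static DEFINE_SPINLOCK(example_graph_lock);
static LIST_HEAD(example_deps);

struct example_dep {
	struct list_head entry;
};

static void example_add_dep(struct example_dep *dep)
{
	spin_lock(&example_graph_lock);
	/* Publish the fully initialised entry to RCU readers. */
	list_add_tail_rcu(&dep->entry, &example_deps);
	spin_unlock(&example_graph_lock);
}

static void example_del_dep(struct example_dep *dep)
{
	spin_lock(&example_graph_lock);
	/* Unlink; concurrent RCU walkers may still be looking at it. */
	list_del_rcu(&dep->entry);
	spin_unlock(&example_graph_lock);
	/*
	 * The entry's memory must not be reused until a grace period has
	 * elapsed -- see the synchronize_sched() call added to
	 * lockdep_free_key_range() below.
	 */
}
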
@@ -1025,7 +1020,9 @@ static int __bfs(struct lock_list *source_entry,
 		else
 			head = &lock->class->locks_before;
 
-		list_for_each_entry(entry, head, entry) {
+		DEBUG_LOCKS_WARN_ON(!irqs_disabled());
+
+		list_for_each_entry_rcu(entry, head, entry) {
 			if (!lock_accessed(entry)) {
 				unsigned int cq_depth;
 				mark_lock_accessed(entry, lock);
@@ -2022,7 +2019,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
 	 * We can walk it lock-free, because entries only get added
 	 * to the hash:
 	 */
-	list_for_each_entry(chain, hash_head, entry) {
+	list_for_each_entry_rcu(chain, hash_head, entry) {
 		if (chain->chain_key == chain_key) {
 cache_hit:
 			debug_atomic_inc(chain_lookup_hits);
@@ -2996,8 +2993,18 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
 	if (unlikely(!debug_locks))
 		return;
 
-	if (subclass)
+	if (subclass) {
+		unsigned long flags;
+
+		if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
+			return;
+
+		raw_local_irq_save(flags);
+		current->lockdep_recursion = 1;
 		register_lock_class(lock, subclass, 1);
+		current->lockdep_recursion = 0;
+		raw_local_irq_restore(flags);
+	}
 }
 EXPORT_SYMBOL_GPL(lockdep_init_map);
 
@@ -3887,9 +3894,17 @@ static inline int within(const void *addr, void *start, unsigned long size)
 	return addr >= start && addr < start + size;
 }
 
+/*
+ * Used in module.c to remove lock classes from memory that is going to be
+ * freed; and possibly re-used by other modules.
+ *
+ * We will have had one sync_sched() before getting here, so we're guaranteed
+ * nobody will look up these exact classes -- they're properly dead but still
+ * allocated.
+ */
 void lockdep_free_key_range(void *start, unsigned long size)
 {
-	struct lock_class *class, *next;
+	struct lock_class *class;
 	struct list_head *head;
 	unsigned long flags;
 	int i;
@@ -3905,7 +3920,7 @@ void lockdep_free_key_range(void *start, unsigned long size)
 		head = classhash_table + i;
 		if (list_empty(head))
 			continue;
-		list_for_each_entry_safe(class, next, head, hash_entry) {
+		list_for_each_entry_rcu(class, head, hash_entry) {
 			if (within(class->key, start, size))
 				zap_class(class);
 			else if (within(class->name, start, size))
@@ -3916,11 +3931,25 @@ void lockdep_free_key_range(void *start, unsigned long size)
 	if (locked)
 		graph_unlock();
 	raw_local_irq_restore(flags);
+
+	/*
+	 * Wait for any possible iterators from look_up_lock_class() to pass
+	 * before continuing to free the memory they refer to.
+	 *
+	 * sync_sched() is sufficient because the read-side is IRQ disable.
+	 */
+	synchronize_sched();
+
+	/*
+	 * XXX at this point we could return the resources to the pool;
+	 * instead we leak them. We would need to change to bitmap allocators
+	 * instead of the linear allocators we have now.
+	 */
 }
 
 void lockdep_reset_lock(struct lockdep_map *lock)
 {
-	struct lock_class *class, *next;
+	struct lock_class *class;
 	struct list_head *head;
 	unsigned long flags;
 	int i, j;
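
The synchronize_sched() added above is the write-side half of the scheme: classes are unlinked under the graph lock with interrupts disabled, and only after a scheduler-RCU grace period has elapsed is it safe for the module's memory to be reused. A condensed sketch of that ordering, with a hypothetical lock variable standing in for lockdep's internal one (note that synchronize_sched() was later folded into synchronize_rcu() in newer kernels):

#include <linux/rcupdate.h>
#include <linux/irqflags.h>
#include <linux/spinlock.h>

static arch_spinlock_t example_lockdep_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static void example_free_key_range(void *start, unsigned long size)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	arch_spin_lock(&example_lockdep_lock);
	/* ... unlink every class whose key or name lies in [start, start + size) ... */
	arch_spin_unlock(&example_lockdep_lock);
	raw_local_irq_restore(flags);

	/*
	 * Every lookup runs with IRQs disabled, i.e. inside an RCU-sched
	 * read-side section, so this waits for all of them to finish.
	 */
	synchronize_sched();

	/* Only now may the freed range be handed back or reused. */
}
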
@@ -3948,7 +3977,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
 		head = classhash_table + i;
 		if (list_empty(head))
 			continue;
-		list_for_each_entry_safe(class, next, head, hash_entry) {
+		list_for_each_entry_rcu(class, head, hash_entry) {
 			int match = 0;
 
 			for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)