@@ -410,7 +410,7 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock,
 static __always_inline int __sched
 __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                     struct lockdep_map *nest_lock, unsigned long ip,
-                    struct ww_acquire_ctx *ww_ctx)
+                    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
 {
         struct task_struct *task = current;
         struct mutex_waiter waiter;
@@ -450,7 +450,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                 struct task_struct *owner;
                 struct mspin_node  node;
 
-                if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
+                if (use_ww_ctx && ww_ctx->acquired > 0) {
                         struct ww_mutex *ww;
 
                         ww = container_of(lock, struct ww_mutex, base);
@@ -480,7 +480,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                 if ((atomic_read(&lock->count) == 1) &&
                     (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
                         lock_acquired(&lock->dep_map, ip);
-                        if (!__builtin_constant_p(ww_ctx == NULL)) {
+                        if (use_ww_ctx) {
                                 struct ww_mutex *ww;
                                 ww = container_of(lock, struct ww_mutex, base);
 
@@ -551,7 +551,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                         goto err;
                 }
 
-                if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
+                if (use_ww_ctx && ww_ctx->acquired > 0) {
                         ret = __mutex_lock_check_stamp(lock, ww_ctx);
                         if (ret)
                                 goto err;
@@ -575,7 +575,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
         lock_acquired(&lock->dep_map, ip);
         mutex_set_owner(lock);
 
-        if (!__builtin_constant_p(ww_ctx == NULL)) {
+        if (use_ww_ctx) {
                 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
                 struct mutex_waiter *cur;
 
@@ -615,7 +615,7 @@ mutex_lock_nested(struct mutex *lock, unsigned int subclass)
 {
         might_sleep();
         __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
-                            subclass, NULL, _RET_IP_, NULL);
+                            subclass, NULL, _RET_IP_, NULL, 0);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_nested);
@@ -625,7 +625,7 @@ _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
 {
         might_sleep();
         __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
-                            0, nest, _RET_IP_, NULL);
+                            0, nest, _RET_IP_, NULL, 0);
 }
 
 EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
@@ -635,7 +635,7 @@ mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
 {
         might_sleep();
         return __mutex_lock_common(lock, TASK_KILLABLE,
-                                   subclass, NULL, _RET_IP_, NULL);
+                                   subclass, NULL, _RET_IP_, NULL, 0);
 }
 EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
 
@@ -644,7 +644,7 @@ mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 {
         might_sleep();
         return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
-                                   subclass, NULL, _RET_IP_, NULL);
+                                   subclass, NULL, _RET_IP_, NULL, 0);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
@@ -682,7 +682,7 @@ __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 
         might_sleep();
         ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
-                                  0, &ctx->dep_map, _RET_IP_, ctx);
+                                  0, &ctx->dep_map, _RET_IP_, ctx, 1);
         if (!ret && ctx->acquired > 1)
                 return ww_mutex_deadlock_injection(lock, ctx);
 
@@ -697,7 +697,7 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 
         might_sleep();
         ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
-                                  0, &ctx->dep_map, _RET_IP_, ctx);
+                                  0, &ctx->dep_map, _RET_IP_, ctx, 1);
 
         if (!ret && ctx->acquired > 1)
                 return ww_mutex_deadlock_injection(lock, ctx);
@@ -809,36 +809,36 @@ __mutex_lock_slowpath(atomic_t *lock_count)
         struct mutex *lock = container_of(lock_count, struct mutex, count);
 
         __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
-                            NULL, _RET_IP_, NULL);
+                            NULL, _RET_IP_, NULL, 0);
 }
 
 static noinline int __sched
 __mutex_lock_killable_slowpath(struct mutex *lock)
 {
         return __mutex_lock_common(lock, TASK_KILLABLE, 0,
-                                   NULL, _RET_IP_, NULL);
+                                   NULL, _RET_IP_, NULL, 0);
 }
 
 static noinline int __sched
 __mutex_lock_interruptible_slowpath(struct mutex *lock)
 {
         return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
-                                   NULL, _RET_IP_, NULL);
+                                   NULL, _RET_IP_, NULL, 0);
 }
 
 static noinline int __sched
 __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
         return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
-                                   NULL, _RET_IP_, ctx);
+                                   NULL, _RET_IP_, ctx, 1);
 }
 
 static noinline int __sched
 __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
                                        struct ww_acquire_ctx *ctx)
 {
         return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
-                                   NULL, _RET_IP_, ctx);
+                                   NULL, _RET_IP_, ctx, 1);
 }
 
 #endif
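
The change replaces the !__builtin_constant_p(ww_ctx == NULL) tests with an explicit use_ww_ctx flag that every caller passes as a literal 0 or 1; because __mutex_lock_common() is __always_inline, the compiler can still discard the unused ww_ctx branches at each call site after inlining. Below is a minimal standalone sketch of that constant-flag pattern, not the kernel code: lock_common(), do_lock(), do_lock_with_ctx() and struct ctx are hypothetical names used only for illustration.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct ctx { int acquired; };

/*
 * Always-inlined helper: 'use_ctx' is a compile-time constant at every
 * call site, so after inlining the compiler can drop the dead branch,
 * without relying on __builtin_constant_p() folding.
 */
static inline __attribute__((always_inline)) void
lock_common(struct ctx *ctx, const bool use_ctx)
{
        if (use_ctx && ctx->acquired > 0)
                printf("ctx path, acquired=%d\n", ctx->acquired);
        else
                printf("plain path\n");
}

void do_lock(void)
{
        lock_common(NULL, 0);           /* ctx branch folded away */
}

void do_lock_with_ctx(struct ctx *ctx)
{
        lock_common(ctx, 1);            /* plain branch folded away */
}

int main(void)
{
        struct ctx c = { .acquired = 1 };

        do_lock();
        do_lock_with_ctx(&c);
        return 0;
}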