@@ -410,7 +410,7 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock,
 static __always_inline int __sched
 __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                     struct lockdep_map *nest_lock, unsigned long ip,
-                    struct ww_acquire_ctx *ww_ctx)
+                    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
 {
         struct task_struct *task = current;
         struct mutex_waiter waiter;
@@ -450,7 +450,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                 struct task_struct *owner;
                 struct mspin_node node;
 
-                if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
+                if (use_ww_ctx && ww_ctx->acquired > 0) {
                         struct ww_mutex *ww;
 
                         ww = container_of(lock, struct ww_mutex, base);
@@ -480,7 +480,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                 if ((atomic_read(&lock->count) == 1) &&
                     (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
                         lock_acquired(&lock->dep_map, ip);
-                        if (!__builtin_constant_p(ww_ctx == NULL)) {
+                        if (use_ww_ctx) {
                                 struct ww_mutex *ww;
                                 ww = container_of(lock, struct ww_mutex, base);
 
@@ -551,7 +551,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                         goto err;
                 }
 
-                if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
+                if (use_ww_ctx && ww_ctx->acquired > 0) {
                         ret = __mutex_lock_check_stamp(lock, ww_ctx);
                         if (ret)
                                 goto err;
@@ -575,7 +575,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
         lock_acquired(&lock->dep_map, ip);
         mutex_set_owner(lock);
 
-        if (!__builtin_constant_p(ww_ctx == NULL)) {
+        if (use_ww_ctx) {
                 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
                 struct mutex_waiter *cur;
 
@@ -615,7 +615,7 @@ mutex_lock_nested(struct mutex *lock, unsigned int subclass)
 {
         might_sleep();
         __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
-                            subclass, NULL, _RET_IP_, NULL);
+                            subclass, NULL, _RET_IP_, NULL, 0);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_nested);
@@ -625,7 +625,7 @@ _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
 {
         might_sleep();
         __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
-                            0, nest, _RET_IP_, NULL);
+                            0, nest, _RET_IP_, NULL, 0);
 }
 
 EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
@@ -635,7 +635,7 @@ mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
 {
         might_sleep();
         return __mutex_lock_common(lock, TASK_KILLABLE,
-                                   subclass, NULL, _RET_IP_, NULL);
+                                   subclass, NULL, _RET_IP_, NULL, 0);
 }
 EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
 
@@ -644,7 +644,7 @@ mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 {
         might_sleep();
         return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
-                                   subclass, NULL, _RET_IP_, NULL);
+                                   subclass, NULL, _RET_IP_, NULL, 0);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
@@ -682,7 +682,7 @@ __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 
         might_sleep();
         ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
-                                  0, &ctx->dep_map, _RET_IP_, ctx);
+                                  0, &ctx->dep_map, _RET_IP_, ctx, 1);
         if (!ret && ctx->acquired > 1)
                 return ww_mutex_deadlock_injection(lock, ctx);
 
@@ -697,7 +697,7 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 
         might_sleep();
         ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
-                                  0, &ctx->dep_map, _RET_IP_, ctx);
+                                  0, &ctx->dep_map, _RET_IP_, ctx, 1);
 
         if (!ret && ctx->acquired > 1)
                 return ww_mutex_deadlock_injection(lock, ctx);
@@ -809,36 +809,36 @@ __mutex_lock_slowpath(atomic_t *lock_count)
         struct mutex *lock = container_of(lock_count, struct mutex, count);
 
         __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
-                            NULL, _RET_IP_, NULL);
+                            NULL, _RET_IP_, NULL, 0);
 }
 
 static noinline int __sched
 __mutex_lock_killable_slowpath(struct mutex *lock)
 {
         return __mutex_lock_common(lock, TASK_KILLABLE, 0,
-                                   NULL, _RET_IP_, NULL);
+                                   NULL, _RET_IP_, NULL, 0);
 }
 
 static noinline int __sched
 __mutex_lock_interruptible_slowpath(struct mutex *lock)
 {
         return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
-                                   NULL, _RET_IP_, NULL);
+                                   NULL, _RET_IP_, NULL, 0);
 }
 
 static noinline int __sched
 __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
         return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
-                                   NULL, _RET_IP_, ctx);
+                                   NULL, _RET_IP_, ctx, 1);
 }
 
 static noinline int __sched
 __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
                                        struct ww_acquire_ctx *ctx)
 {
         return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
-                                   NULL, _RET_IP_, ctx);
+                                   NULL, _RET_IP_, ctx, 1);
 }
 
 #endif
0 commit comments