
Commit cfafcd1

Peter Zijlstra authored and KAGA-KOKO committed
futex: Rework futex_lock_pi() to use rt_mutex_*_proxy_lock()
By changing futex_lock_pi() to use rt_mutex_*_proxy_lock(), all wait_list
modifications are done under both hb->lock and wait_lock.

This closes the obvious interleave pattern between futex_lock_pi() and
futex_unlock_pi(), but not entirely so. See below:

Before:

futex_lock_pi()                 futex_unlock_pi()
  unlock hb->lock

                                  lock hb->lock
                                  unlock hb->lock

                                  lock rt_mutex->wait_lock
                                  unlock rt_mutex->wait_lock
                                    -EAGAIN

  lock rt_mutex->wait_lock
  list_add
  unlock rt_mutex->wait_lock

  schedule()

  lock rt_mutex->wait_lock
  list_del
  unlock rt_mutex->wait_lock

                                  <idem>
                                  -EAGAIN

  lock hb->lock

After:

futex_lock_pi()                 futex_unlock_pi()

  lock hb->lock
  lock rt_mutex->wait_lock
  list_add
  unlock rt_mutex->wait_lock
  unlock hb->lock

  schedule()
                                  lock hb->lock
                                  unlock hb->lock
  lock hb->lock
  lock rt_mutex->wait_lock
  list_del
  unlock rt_mutex->wait_lock

                                  lock rt_mutex->wait_lock
                                  unlock rt_mutex->wait_lock
                                    -EAGAIN

  unlock hb->lock

It does, however, solve the earlier starvation/live-lock scenario that the
-EAGAIN introduced: in the before scenario the -EAGAIN happens while
futex_unlock_pi() holds no locks, whereas in the after scenario it happens
while futex_unlock_pi() actually holds a lock, so the retry is serialized
on that lock.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: juri.lelli@arm.com
Cc: bigeasy@linutronix.de
Cc: xlpang@redhat.com
Cc: rostedt@goodmis.org
Cc: mathieu.desnoyers@efficios.com
Cc: jdesfossez@efficios.com
Cc: dvhart@infradead.org
Cc: bristot@redhat.com
Link: http://lkml.kernel.org/r/20170322104152.062785528@infradead.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
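[Editor's note] As context for the two kernel paths named above, here is a minimal user-space sketch of how they are entered. Only FUTEX_LOCK_PI and FUTEX_UNLOCK_PI (which land in futex_lock_pi() and futex_unlock_pi() respectively) are real kernel ABI; the wrapper names pi_lock()/pi_unlock() and the stripped-down error handling are this sketch's own assumptions, not part of the commit.

#include <linux/futex.h>
#include <stdatomic.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static atomic_uint futex_word;	/* 0 = unlocked, else owner TID */

static void pi_lock(void)
{
	unsigned int zero = 0;
	unsigned int tid = (unsigned int)syscall(SYS_gettid);

	/* Uncontended fast path: 0 -> TID, done entirely in user space. */
	if (atomic_compare_exchange_strong(&futex_word, &zero, tid))
		return;
	/* Contended slow path: the kernel queues us in futex_lock_pi(). */
	syscall(SYS_futex, &futex_word, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
}

static void pi_unlock(void)
{
	unsigned int tid = (unsigned int)syscall(SYS_gettid);

	/* Fast path: TID -> 0, valid only while no waiter bits are set. */
	if (atomic_compare_exchange_strong(&futex_word, &tid, 0))
		return;
	/* Waiters queued: the kernel hands off in futex_unlock_pi(). */
	syscall(SYS_futex, &futex_word, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
}

int main(void)
{
	pi_lock();
	puts("PI futex held");
	pi_unlock();
	return 0;
}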
1 parent 38d589f commit cfafcd1

3 files changed: +62 −42 lines changed


kernel/futex.c

Lines changed: 55 additions & 22 deletions
@@ -2099,20 +2099,7 @@ queue_unlock(struct futex_hash_bucket *hb)
 	hb_waiters_dec(hb);
 }
 
-/**
- * queue_me() - Enqueue the futex_q on the futex_hash_bucket
- * @q:	The futex_q to enqueue
- * @hb:	The destination hash bucket
- *
- * The hb->lock must be held by the caller, and is released here. A call to
- * queue_me() is typically paired with exactly one call to unqueue_me(). The
- * exceptions involve the PI related operations, which may use unqueue_me_pi()
- * or nothing if the unqueue is done as part of the wake process and the unqueue
- * state is implicit in the state of woken task (see futex_wait_requeue_pi() for
- * an example).
- */
-static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
-	__releases(&hb->lock)
+static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
 {
 	int prio;
 
@@ -2129,6 +2116,24 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
 	plist_node_init(&q->list, prio);
 	plist_add(&q->list, &hb->chain);
 	q->task = current;
+}
+
+/**
+ * queue_me() - Enqueue the futex_q on the futex_hash_bucket
+ * @q:	The futex_q to enqueue
+ * @hb:	The destination hash bucket
+ *
+ * The hb->lock must be held by the caller, and is released here. A call to
+ * queue_me() is typically paired with exactly one call to unqueue_me(). The
+ * exceptions involve the PI related operations, which may use unqueue_me_pi()
+ * or nothing if the unqueue is done as part of the wake process and the unqueue
+ * state is implicit in the state of woken task (see futex_wait_requeue_pi() for
+ * an example).
+ */
+static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
+	__releases(&hb->lock)
+{
+	__queue_me(q, hb);
 	spin_unlock(&hb->lock);
 }
 
@@ -2587,6 +2592,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
 {
 	struct hrtimer_sleeper timeout, *to = NULL;
 	struct futex_pi_state *pi_state = NULL;
+	struct rt_mutex_waiter rt_waiter;
 	struct futex_hash_bucket *hb;
 	struct futex_q q = futex_q_init;
 	int res, ret;
@@ -2639,24 +2645,51 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
 		}
 	}
 
+	WARN_ON(!q.pi_state);
+
 	/*
 	 * Only actually queue now that the atomic ops are done:
 	 */
-	queue_me(&q, hb);
+	__queue_me(&q, hb);
 
-	WARN_ON(!q.pi_state);
-	/*
-	 * Block on the PI mutex:
-	 */
-	if (!trylock) {
-		ret = rt_mutex_timed_futex_lock(&q.pi_state->pi_mutex, to);
-	} else {
+	if (trylock) {
 		ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex);
 		/* Fixup the trylock return value: */
 		ret = ret ? 0 : -EWOULDBLOCK;
+		goto no_block;
+	}
+
+	/*
+	 * We must add ourselves to the rt_mutex waitlist while holding hb->lock
+	 * such that the hb and rt_mutex wait lists match.
+	 */
+	rt_mutex_init_waiter(&rt_waiter);
+	ret = rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
+	if (ret) {
+		if (ret == 1)
+			ret = 0;
+
+		goto no_block;
 	}
 
+	spin_unlock(q.lock_ptr);
+
+	if (unlikely(to))
+		hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS);
+
+	ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);
+
 	spin_lock(q.lock_ptr);
+	/*
+	 * If we failed to acquire the lock (signal/timeout), we must
+	 * first acquire the hb->lock before removing the lock from the
+	 * rt_mutex waitqueue, such that we can keep the hb and rt_mutex
+	 * wait lists consistent.
+	 */
+	if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter))
+		ret = 0;
+
+no_block:
 	/*
 	 * Fixup the pi_state owner and possibly acquire the lock if we
 	 * haven't already.
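[Editor's note] To make the two new comments above concrete, here is a toy user-space model of the discipline futex_lock_pi() now follows: the wait list is only ever modified while both locks are held, and the failure path re-takes the hash-bucket lock first. This is a hedged illustration only; pthread mutexes stand in for the kernel locks and every name in it is invented, not kernel API.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hb_lock = PTHREAD_MUTEX_INITIALIZER;   /* models hb->lock */
static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER; /* models rt_mutex->wait_lock */
static int nr_waiters; /* stands in for the two wait lists kept in sync */

static void enqueue_waiter(void)
{
	/* list_add only with BOTH locks held, so the lists cannot diverge. */
	pthread_mutex_lock(&hb_lock);
	pthread_mutex_lock(&wait_lock);
	nr_waiters++;
	pthread_mutex_unlock(&wait_lock);
	pthread_mutex_unlock(&hb_lock);	/* only now block, cf. schedule() */
}

static void dequeue_waiter_on_failure(void)
{
	/*
	 * After a signal/timeout, hb_lock is re-taken FIRST, then
	 * wait_lock, and only then does list_del run -- the same order
	 * under which rt_mutex_cleanup_proxy_lock() is called above.
	 */
	pthread_mutex_lock(&hb_lock);
	pthread_mutex_lock(&wait_lock);
	nr_waiters--;
	pthread_mutex_unlock(&wait_lock);
	pthread_mutex_unlock(&hb_lock);
}

int main(void)
{
	enqueue_waiter();
	dequeue_waiter_on_failure();
	printf("waiters: %d\n", nr_waiters);
	return 0;
}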

kernel/locking/rtmutex.c

Lines changed: 7 additions & 19 deletions
@@ -1492,19 +1492,6 @@ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
 
-/*
- * Futex variant with full deadlock detection.
- * Futex variants must not use the fast-path, see __rt_mutex_futex_unlock().
- */
-int __sched rt_mutex_timed_futex_lock(struct rt_mutex *lock,
-			       struct hrtimer_sleeper *timeout)
-{
-	might_sleep();
-
-	return rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE,
-				 timeout, RT_MUTEX_FULL_CHAINWALK);
-}
-
 /*
  * Futex variant, must not use fastpath.
  */
@@ -1782,12 +1769,6 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
 	/* sleep on the mutex */
 	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
 
-	/*
-	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
-	 * have to fix that up.
-	 */
-	fixup_rt_mutex_waiters(lock);
-
 	raw_spin_unlock_irq(&lock->wait_lock);
 
 	return ret;
@@ -1827,6 +1808,13 @@ bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
 		fixup_rt_mutex_waiters(lock);
 		cleanup = true;
 	}
+
+	/*
+	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
+	 * have to fix that up.
+	 */
+	fixup_rt_mutex_waiters(lock);
+
 	raw_spin_unlock_irq(&lock->wait_lock);
 
 	return cleanup;
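[Editor's note] The net effect of moving fixup_rt_mutex_waiters() is that the "did we win the lock anyway?" decision now happens in rt_mutex_cleanup_proxy_lock(), under the caller's locking. Below is a hedged toy model of that contract, with a plain atomic flag instead of a real rt_mutex; all names are invented for illustration.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool lock_acquired;	/* set by a racing "unlock with handoff" */

/* Returns true if the waiter genuinely had to be removed (real failure). */
static bool cleanup_proxy_lock(void)
{
	if (atomic_load(&lock_acquired))
		return false;	/* we own the lock after all */
	/* ... otherwise remove ourselves from the wait list here ... */
	return true;
}

int main(void)
{
	int ret = -1;	/* pretend the wait timed out */

	atomic_store(&lock_acquired, true);	/* a racing handoff happened */

	/* Mirrors futex_lock_pi(): failure turns into success on handoff. */
	if (ret && !cleanup_proxy_lock())
		ret = 0;
	printf("ret = %d\n", ret);	/* prints 0 */
	return 0;
}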

kernel/locking/rtmutex_common.h

Lines changed: 0 additions & 1 deletion
@@ -113,7 +113,6 @@ extern int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
 extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
 				 struct rt_mutex_waiter *waiter);
 
-extern int rt_mutex_timed_futex_lock(struct rt_mutex *l, struct hrtimer_sleeper *to);
 extern int rt_mutex_futex_trylock(struct rt_mutex *l);
 
 extern void rt_mutex_futex_unlock(struct rt_mutex *lock);
