Commit d3132b3
xen: fix xen_qlock_wait()
Commit a856531 ("xen: make xen_qlock_wait() nestable") introduced a regression for Xen guests running fully virtualized (HVM or PVH mode). The Xen hypervisor wouldn't return from the poll hypercall with interrupts disabled in case of an interrupt (for PV guests it does). So instead of disabling interrupts in xen_qlock_wait() use a nesting counter to avoid calling xen_clear_irq_pending() in case xen_qlock_wait() is nested. Fixes: a856531 ("xen: make xen_qlock_wait() nestable") Cc: stable@vger.kernel.org Reported-by: Sander Eikelenboom <linux@eikelenboom.it> Signed-off-by: Juergen Gross <jgross@suse.com> Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com> Tested-by: Sander Eikelenboom <linux@eikelenboom.it> Signed-off-by: Juergen Gross <jgross@suse.com>
1 parent 1457d8c commit d3132b3

arch/x86/xen/spinlock.c

Lines changed: 8 additions & 6 deletions
@@ -9,6 +9,7 @@
 #include <linux/log2.h>
 #include <linux/gfp.h>
 #include <linux/slab.h>
+#include <linux/atomic.h>
 
 #include <asm/paravirt.h>
 #include <asm/qspinlock.h>
@@ -21,6 +22,7 @@
 
 static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
 static DEFINE_PER_CPU(char *, irq_name);
+static DEFINE_PER_CPU(atomic_t, xen_qlock_wait_nest);
 static bool xen_pvspin = true;
 
 static void xen_qlock_kick(int cpu)
@@ -39,25 +41,25 @@ static void xen_qlock_kick(int cpu)
  */
 static void xen_qlock_wait(u8 *byte, u8 val)
 {
-	unsigned long flags;
 	int irq = __this_cpu_read(lock_kicker_irq);
+	atomic_t *nest_cnt = this_cpu_ptr(&xen_qlock_wait_nest);
 
 	/* If kicker interrupts not initialized yet, just spin */
 	if (irq == -1 || in_nmi())
 		return;
 
-	/* Guard against reentry. */
-	local_irq_save(flags);
+	/* Detect reentry. */
+	atomic_inc(nest_cnt);
 
-	/* If irq pending already clear it. */
-	if (xen_test_irq_pending(irq)) {
+	/* If irq pending already and no nested call clear it. */
+	if (atomic_read(nest_cnt) == 1 && xen_test_irq_pending(irq)) {
 		xen_clear_irq_pending(irq);
 	} else if (READ_ONCE(*byte) == val) {
 		/* Block until irq becomes pending (or a spurious wakeup) */
 		xen_poll_irq(irq);
 	}
 
-	local_irq_restore(flags);
+	atomic_dec(nest_cnt);
 }
 
 static irqreturn_t dummy_handler(int irq, void *dev_id)
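
For readability, the sketch below shows how xen_qlock_wait() reads once the patch is applied; it is reconstructed directly from the hunks above and assumes the per-CPU xen_qlock_wait_nest counter added in the second hunk.

static void xen_qlock_wait(u8 *byte, u8 val)
{
	int irq = __this_cpu_read(lock_kicker_irq);
	atomic_t *nest_cnt = this_cpu_ptr(&xen_qlock_wait_nest);

	/* If kicker interrupts not initialized yet, just spin */
	if (irq == -1 || in_nmi())
		return;

	/* Detect reentry. */
	atomic_inc(nest_cnt);

	/* If irq pending already and no nested call clear it. */
	if (atomic_read(nest_cnt) == 1 && xen_test_irq_pending(irq)) {
		xen_clear_irq_pending(irq);
	} else if (READ_ONCE(*byte) == val) {
		/* Block until irq becomes pending (or a spurious wakeup) */
		xen_poll_irq(irq);
	}

	atomic_dec(nest_cnt);
}

The nesting counter replaces the local_irq_save()/local_irq_restore() pair: only the outermost call (nest_cnt == 1) may consume a pending kicker irq, so a nested invocation from interrupt context leaves it alone. This achieves what disabling interrupts was meant to guarantee, without relying on the poll hypercall returning with interrupts disabled, which HVM/PVH guests do not get.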
