Skip to content

Commit 2c5b7e1

Browse files
aagit authored and torvalds committed
userfaultfd: avoid missing wakeups during refile in userfaultfd_read
During the refile in userfaultfd_read both waitqueues could look empty to the lockless wake_userfault(). Use a seqcount to prevent this false negative that could leave an userfault blocked. Signed-off-by: Andrea Arcangeli <aarcange@redhat.com> Cc: Pavel Emelyanov <xemul@parallels.com> Cc: Dave Hansen <dave.hansen@intel.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 230c92a commit 2c5b7e1

File tree

1 file changed

+24
-2
lines changed

1 file changed

+24
-2
lines changed

fs/userfaultfd.c

Lines changed: 24 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -45,6 +45,8 @@ struct userfaultfd_ctx {
4545
wait_queue_head_t fault_wqh;
4646
/* waitqueue head for the pseudo fd to wakeup poll/read */
4747
wait_queue_head_t fd_wqh;
48+
/* a refile sequence protected by fault_pending_wqh lock */
49+
struct seqcount refile_seq;
4850
/* pseudo fd refcounting */
4951
atomic_t refcount;
5052
/* userfaultfd syscall flags */
@@ -546,6 +548,15 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
546548
spin_lock(&ctx->fault_pending_wqh.lock);
547549
uwq = find_userfault(ctx);
548550
if (uwq) {
551+
/*
552+
* Use a seqcount to repeat the lockless check
553+
* in wake_userfault() to avoid missing
554+
* wakeups because during the refile both
555+
* waitqueue could become empty if this is the
556+
* only userfault.
557+
*/
558+
write_seqcount_begin(&ctx->refile_seq);
559+
549560
/*
550561
* The fault_pending_wqh.lock prevents the uwq
551562
* to disappear from under us.
@@ -570,6 +581,8 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
570581
list_del(&uwq->wq.task_list);
571582
__add_wait_queue(&ctx->fault_wqh, &uwq->wq);
572583

584+
write_seqcount_end(&ctx->refile_seq);
585+
573586
/* careful to always initialize msg if ret == 0 */
574587
*msg = uwq->msg;
575588
spin_unlock(&ctx->fault_pending_wqh.lock);
@@ -647,6 +660,9 @@ static void __wake_userfault(struct userfaultfd_ctx *ctx,
647660
static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
648661
struct userfaultfd_wake_range *range)
649662
{
663+
unsigned seq;
664+
bool need_wakeup;
665+
650666
/*
651667
* To be sure waitqueue_active() is not reordered by the CPU
652668
* before the pagetable update, use an explicit SMP memory
@@ -662,8 +678,13 @@ static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
662678
* userfaults yet. So we take the spinlock only when we're
663679
* sure we've userfaults to wake.
664680
*/
665-
if (waitqueue_active(&ctx->fault_pending_wqh) ||
666-
waitqueue_active(&ctx->fault_wqh))
681+
do {
682+
seq = read_seqcount_begin(&ctx->refile_seq);
683+
need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) ||
684+
waitqueue_active(&ctx->fault_wqh);
685+
cond_resched();
686+
} while (read_seqcount_retry(&ctx->refile_seq, seq));
687+
if (need_wakeup)
667688
__wake_userfault(ctx, range);
668689
}
669690

@@ -1219,6 +1240,7 @@ static void init_once_userfaultfd_ctx(void *mem)
12191240
init_waitqueue_head(&ctx->fault_pending_wqh);
12201241
init_waitqueue_head(&ctx->fault_wqh);
12211242
init_waitqueue_head(&ctx->fd_wqh);
1243+
seqcount_init(&ctx->refile_seq);
12221244
}
12231245

12241246
/**

0 commit comments

Comments
 (0)