Skip to content

Commit cc4f11e

Browse files
jankara authored and torvalds committed
mm: migrate: lock buffers before migrate_page_move_mapping()
Lock buffers before calling into migrate_page_move_mapping() so that that function doesn't have to know about buffers (which is somewhat unexpected anyway) and all the buffer head logic is in buffer_migrate_page().

Link: http://lkml.kernel.org/r/20181211172143.7358-3-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 0b3901b commit cc4f11e

File tree

1 file changed

+13
-26
lines changed

1 file changed

+13
-26
lines changed

mm/migrate.c

Lines changed: 13 additions & 26 deletions
Original file line number | Diff line number | Diff line change
@@ -486,20 +486,6 @@ int migrate_page_move_mapping(struct address_space *mapping,
486486
return -EAGAIN;
487487
}
488488

489-
/*
490-
* In the async migration case of moving a page with buffers, lock the
491-
* buffers using trylock before the mapping is moved. If the mapping
492-
* was moved, we later failed to lock the buffers and could not move
493-
* the mapping back due to an elevated page count, we would have to
494-
* block waiting on other references to be dropped.
495-
*/
496-
if (mode == MIGRATE_ASYNC && head &&
497-
!buffer_migrate_lock_buffers(head, mode)) {
498-
page_ref_unfreeze(page, expected_count);
499-
xas_unlock_irq(&xas);
500-
return -EAGAIN;
501-
}
502-
503489
/*
504490
* Now we know that no one else is looking at the page:
505491
* no turning back from here.
@@ -775,24 +761,23 @@ int buffer_migrate_page(struct address_space *mapping,
775761
{
776762
struct buffer_head *bh, *head;
777763
int rc;
764+
int expected_count;
778765

779766
if (!page_has_buffers(page))
780767
return migrate_page(mapping, newpage, page, mode);
781768

782-
head = page_buffers(page);
769+
/* Check whether page does not have extra refs before we do more work */
770+
expected_count = expected_page_refs(page);
771+
if (page_count(page) != expected_count)
772+
return -EAGAIN;
783773

784-
rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0);
774+
head = page_buffers(page);
775+
if (!buffer_migrate_lock_buffers(head, mode))
776+
return -EAGAIN;
785777

778+
rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
786779
if (rc != MIGRATEPAGE_SUCCESS)
787-
return rc;
788-
789-
/*
790-
* In the async case, migrate_page_move_mapping locked the buffers
791-
* with an IRQ-safe spinlock held. In the sync case, the buffers
792-
* need to be locked now
793-
*/
794-
if (mode != MIGRATE_ASYNC)
795-
BUG_ON(!buffer_migrate_lock_buffers(head, mode));
780+
goto unlock_buffers;
796781

797782
ClearPagePrivate(page);
798783
set_page_private(newpage, page_private(page));
@@ -814,6 +799,8 @@ int buffer_migrate_page(struct address_space *mapping,
814799
else
815800
migrate_page_states(newpage, page);
816801

802+
rc = MIGRATEPAGE_SUCCESS;
803+
unlock_buffers:
817804
bh = head;
818805
do {
819806
unlock_buffer(bh);
@@ -822,7 +809,7 @@ int buffer_migrate_page(struct address_space *mapping,
822809

823810
} while (bh != head);
824811

825-
return MIGRATEPAGE_SUCCESS;
812+
return rc;
826813
}
827814
EXPORT_SYMBOL(buffer_migrate_page);
828815
#endif

0 commit comments

Comments
 (0)