@@ -17,14 +17,15 @@
 #include <linux/prefetch.h>
 #include <linux/buffer_head.h>	/* for inode_has_buffers */
 #include <linux/ratelimit.h>
+#include <linux/list_lru.h>
 #include "internal.h"
 
 /*
  * Inode locking rules:
  *
  * inode->i_lock protects:
  *   inode->i_state, inode->i_hash, __iget()
- * inode->i_sb->s_inode_lru_lock protects:
+ * Inode LRU list locks protect:
  *   inode->i_sb->s_inode_lru, inode->i_lru
  * inode_sb_list_lock protects:
  *   sb->s_inodes, inode->i_sb_list
@@ -37,7 +38,7 @@
  *
  * inode_sb_list_lock
  *   inode->i_lock
- *     inode->i_sb->s_inode_lru_lock
+ *     Inode LRU list locks
 *
 * bdi->wb.list_lock
 *   inode->i_lock
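The new <linux/list_lru.h> include and the reworded locking comment reflect that the per-superblock s_inode_lru_lock is gone: the LRU's locking now lives inside the generic list_lru structure. The super_block side of the conversion (include/linux/fs.h and fs/super.c) is not part of this excerpt; as a rough, hypothetical sketch of what it implies, assuming the list_lru API added earlier in this series:

#include <linux/list_lru.h>

/* hypothetical stand-in for the relevant super_block fields, illustration only */
struct sb_lru_fields {
	struct list_lru	s_inode_lru;	/* replaces the open-coded list, s_inode_lru_lock and s_nr_inodes_unused */
};

static int sb_lru_setup(struct sb_lru_fields *s)
{
	/* list_lru_init() sets up the LRU's internal list(s) and locking */
	return list_lru_init(&s->s_inode_lru);
}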
@@ -401,13 +402,8 @@ EXPORT_SYMBOL(ihold);
 
 static void inode_lru_list_add(struct inode *inode)
 {
-	spin_lock(&inode->i_sb->s_inode_lru_lock);
-	if (list_empty(&inode->i_lru)) {
-		list_add(&inode->i_lru, &inode->i_sb->s_inode_lru);
-		inode->i_sb->s_nr_inodes_unused++;
+	if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru))
 		this_cpu_inc(nr_unused);
-	}
-	spin_unlock(&inode->i_sb->s_inode_lru_lock);
 }
 
 /*
@@ -425,13 +421,9 @@ void inode_add_lru(struct inode *inode)
 
 static void inode_lru_list_del(struct inode *inode)
 {
-	spin_lock(&inode->i_sb->s_inode_lru_lock);
-	if (!list_empty(&inode->i_lru)) {
-		list_del_init(&inode->i_lru);
-		inode->i_sb->s_nr_inodes_unused--;
+
+	if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru))
 		this_cpu_dec(nr_unused);
-	}
-	spin_unlock(&inode->i_sb->s_inode_lru_lock);
 }
 
 /**
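Both helpers used to open-code the lock/list_empty/counter sequence. list_lru_add() and list_lru_del() take the LRU's internal lock themselves and return true only when the item's membership actually changed, which is why the nr_unused accounting collapses to a single conditional. A minimal sketch of that idiom, using a hypothetical cached_object type rather than inodes:

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/list_lru.h>

/* hypothetical cached object with an LRU hook, for illustration only */
struct cached_object {
	struct list_head lru;
};

static atomic_long_t nr_cached_unused = ATOMIC_LONG_INIT(0);

static void cache_mark_unused(struct list_lru *lru, struct cached_object *obj)
{
	/* returns false if the object was already on the LRU */
	if (list_lru_add(lru, &obj->lru))
		atomic_long_inc(&nr_cached_unused);
}

static void cache_mark_used(struct list_lru *lru, struct cached_object *obj)
{
	/* returns false if the object was not on the LRU */
	if (list_lru_del(lru, &obj->lru))
		atomic_long_dec(&nr_cached_unused);
}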
@@ -675,24 +667,8 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty)
 	return busy;
 }
 
-static int can_unuse(struct inode *inode)
-{
-	if (inode->i_state & ~I_REFERENCED)
-		return 0;
-	if (inode_has_buffers(inode))
-		return 0;
-	if (atomic_read(&inode->i_count))
-		return 0;
-	if (inode->i_data.nrpages)
-		return 0;
-	return 1;
-}
-
 /*
- * Walk the superblock inode LRU for freeable inodes and attempt to free them.
- * This is called from the superblock shrinker function with a number of inodes
- * to trim from the LRU. Inodes to be freed are moved to a temporary list and
- * then are freed outside inode_lock by dispose_list().
+ * Isolate the inode from the LRU in preparation for freeing it.
  *
  * Any inodes which are pinned purely because of attached pagecache have their
  * pagecache removed. If the inode has metadata buffers attached to
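The function that follows replaces both can_unuse() and the open-coded scan loop with an isolate callback for list_lru_walk(). The callback runs under the LRU's internal lock and reports what it did through enum lru_status: LRU_REMOVED when it took the item off the list (adjusting its own counters), LRU_ROTATE to give the item another pass, LRU_SKIP to leave it in place, and LRU_RETRY when it had to drop the LRU lock so the walk must restart. A minimal sketch of that callback shape, again using a hypothetical cached_object type rather than inodes:

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/list_lru.h>
#include <linux/spinlock.h>

struct cached_object {				/* hypothetical, for illustration */
	struct list_head lru;
	atomic_t	 refcount;
};

static enum lru_status
cache_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
{
	struct list_head *dispose = arg;	/* private list handed to list_lru_walk() */
	struct cached_object *obj = container_of(item, struct cached_object, lru);

	/* still referenced: keep it on the LRU for another pass */
	if (atomic_read(&obj->refcount))
		return LRU_ROTATE;

	/* unreferenced: move it to the caller's dispose list */
	list_move(&obj->lru, dispose);
	return LRU_REMOVED;
}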
@@ -706,90 +682,79 @@ static int can_unuse(struct inode *inode)
  * LRU does not have strict ordering. Hence we don't want to reclaim inodes
  * with this flag set because they are the inodes that are out of order.
  */
-long prune_icache_sb(struct super_block *sb, unsigned long nr_to_scan)
+static enum lru_status
+inode_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
 {
-	LIST_HEAD(freeable);
-	long nr_scanned;
-	long freed = 0;
-	unsigned long reap = 0;
+	struct list_head *freeable = arg;
+	struct inode	*inode = container_of(item, struct inode, i_lru);
 
-	spin_lock(&sb->s_inode_lru_lock);
-	for (nr_scanned = nr_to_scan; nr_scanned >= 0; nr_scanned--) {
-		struct inode *inode;
+	/*
+	 * we are inverting the lru lock/inode->i_lock here, so use a trylock.
+	 * If we fail to get the lock, just skip it.
+	 */
+	if (!spin_trylock(&inode->i_lock))
+		return LRU_SKIP;
 
-		if (list_empty(&sb->s_inode_lru))
-			break;
+	/*
+	 * Referenced or dirty inodes are still in use. Give them another pass
+	 * through the LRU as we cannot reclaim them now.
+	 */
+	if (atomic_read(&inode->i_count) ||
+	    (inode->i_state & ~I_REFERENCED)) {
+		list_del_init(&inode->i_lru);
+		spin_unlock(&inode->i_lock);
+		this_cpu_dec(nr_unused);
+		return LRU_REMOVED;
+	}
 
-		inode = list_entry(sb->s_inode_lru.prev, struct inode, i_lru);
+	/* recently referenced inodes get one more pass */
+	if (inode->i_state & I_REFERENCED) {
+		inode->i_state &= ~I_REFERENCED;
+		spin_unlock(&inode->i_lock);
+		return LRU_ROTATE;
+	}
 
-		/*
-		 * we are inverting the sb->s_inode_lru_lock/inode->i_lock here,
-		 * so use a trylock. If we fail to get the lock, just move the
-		 * inode to the back of the list so we don't spin on it.
-		 */
-		if (!spin_trylock(&inode->i_lock)) {
-			list_move(&inode->i_lru, &sb->s_inode_lru);
-			continue;
+	if (inode_has_buffers(inode) || inode->i_data.nrpages) {
+		__iget(inode);
+		spin_unlock(&inode->i_lock);
+		spin_unlock(lru_lock);
+		if (remove_inode_buffers(inode)) {
+			unsigned long reap;
+			reap = invalidate_mapping_pages(&inode->i_data, 0, -1);
+			if (current_is_kswapd())
+				__count_vm_events(KSWAPD_INODESTEAL, reap);
+			else
+				__count_vm_events(PGINODESTEAL, reap);
+			if (current->reclaim_state)
+				current->reclaim_state->reclaimed_slab += reap;
 		}
+		iput(inode);
+		spin_lock(lru_lock);
+		return LRU_RETRY;
+	}
 
-		/*
-		 * Referenced or dirty inodes are still in use. Give them
-		 * another pass through the LRU as we cannot reclaim them now.
-		 */
-		if (atomic_read(&inode->i_count) ||
-		    (inode->i_state & ~I_REFERENCED)) {
-			list_del_init(&inode->i_lru);
-			spin_unlock(&inode->i_lock);
-			sb->s_nr_inodes_unused--;
-			this_cpu_dec(nr_unused);
-			continue;
-		}
+	WARN_ON(inode->i_state & I_NEW);
+	inode->i_state |= I_FREEING;
+	spin_unlock(&inode->i_lock);
 
-		/* recently referenced inodes get one more pass */
-		if (inode->i_state & I_REFERENCED) {
-			inode->i_state &= ~I_REFERENCED;
-			list_move(&inode->i_lru, &sb->s_inode_lru);
-			spin_unlock(&inode->i_lock);
-			continue;
-		}
-		if (inode_has_buffers(inode) || inode->i_data.nrpages) {
-			__iget(inode);
-			spin_unlock(&inode->i_lock);
-			spin_unlock(&sb->s_inode_lru_lock);
-			if (remove_inode_buffers(inode))
-				reap += invalidate_mapping_pages(&inode->i_data,
-								0, -1);
-			iput(inode);
-			spin_lock(&sb->s_inode_lru_lock);
-
-			if (inode != list_entry(sb->s_inode_lru.next,
-						struct inode, i_lru))
-				continue;	/* wrong inode or list_empty */
-			/* avoid lock inversions with trylock */
-			if (!spin_trylock(&inode->i_lock))
-				continue;
-			if (!can_unuse(inode)) {
-				spin_unlock(&inode->i_lock);
-				continue;
-			}
-		}
-		WARN_ON(inode->i_state & I_NEW);
-		inode->i_state |= I_FREEING;
-		spin_unlock(&inode->i_lock);
+	list_move(&inode->i_lru, freeable);
+	this_cpu_dec(nr_unused);
+	return LRU_REMOVED;
+}
 
-		list_move(&inode->i_lru, &freeable);
-		sb->s_nr_inodes_unused--;
-		this_cpu_dec(nr_unused);
-		freed++;
-	}
-	if (current_is_kswapd())
-		__count_vm_events(KSWAPD_INODESTEAL, reap);
-	else
-		__count_vm_events(PGINODESTEAL, reap);
-	spin_unlock(&sb->s_inode_lru_lock);
-	if (current->reclaim_state)
-		current->reclaim_state->reclaimed_slab += reap;
+/*
+ * Walk the superblock inode LRU for freeable inodes and attempt to free them.
+ * This is called from the superblock shrinker function with a number of inodes
+ * to trim from the LRU. Inodes to be freed are moved to a temporary list and
+ * then are freed outside inode_lock by dispose_list().
+ */
+long prune_icache_sb(struct super_block *sb, unsigned long nr_to_scan)
+{
+	LIST_HEAD(freeable);
+	long freed;
 
+	freed = list_lru_walk(&sb->s_inode_lru, inode_lru_isolate,
+			      &freeable, nr_to_scan);
 	dispose_list(&freeable);
 	return freed;
 }
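With the walk factored out, prune_icache_sb() reduces to building a private dispose list, letting list_lru_walk() visit up to nr_to_scan entries with inode_lru_isolate(), and freeing the isolated inodes outside the LRU lock. list_lru_walk() returns the number of callbacks that reported LRU_REMOVED, so freed can be returned directly after dispose_list(). Continuing the hypothetical cache example above, a caller-side sketch (assuming the list_lru_walk() signature used in this patch) would look like:

#include <linux/list.h>
#include <linux/list_lru.h>
#include <linux/slab.h>

static unsigned long cache_prune(struct list_lru *lru, unsigned long nr_to_scan)
{
	LIST_HEAD(dispose);		/* objects isolated by cache_lru_isolate() */
	unsigned long freed;

	/* visits at most nr_to_scan items, calling the isolate callback under the LRU lock */
	freed = list_lru_walk(lru, cache_lru_isolate, &dispose, nr_to_scan);

	/* free the isolated objects outside the LRU lock */
	while (!list_empty(&dispose)) {
		struct cached_object *obj;

		obj = list_first_entry(&dispose, struct cached_object, lru);
		list_del_init(&obj->lru);
		kfree(obj);
	}
	return freed;
}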