
Commit a38e408

Dave Chinner authored and Al Viro committed
list: add a new LRU list type
Several subsystems use the same construct for LRU lists - a list head, a
spin lock and an item count. They also use exactly the same code for
adding and removing items from the LRU. Create a generic type for these
LRU lists.

This is the beginning of generic, node aware LRUs for shrinkers to work
with.

[glommer@openvz.org: enum defined constants for lru. Suggested by gthelen, don't relock over retry]
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Glauber Costa <glommer@openvz.org>
Reviewed-by: Greg Thelen <gthelen@google.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
1 parent 0a234c6 commit a38e408
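
For context, the duplicated pattern this commit factors out looked roughly like the sketch below. The foo_cache names are hypothetical, invented for illustration, not taken from any subsystem in the commit: each user carried its own list head, spin lock and item count, along with near-identical add/remove code.

#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical sketch of the open-coded pattern being replaced. */
struct foo_cache {
	spinlock_t	 lru_lock;	/* protects lru and nr_items */
	struct list_head lru;		/* objects in LRU order */
	long		 nr_items;	/* current object count */
};

static void foo_cache_lru_add(struct foo_cache *cache, struct list_head *item)
{
	spin_lock(&cache->lru_lock);
	if (list_empty(item)) {
		list_add_tail(item, &cache->lru);
		cache->nr_items++;
	}
	spin_unlock(&cache->lru_lock);
}

struct list_lru in the new header bundles exactly these three fields, and list_lru_add()/list_lru_del() replace the boilerplate around them.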

File tree

3 files changed: +233 -1 lines changed

include/linux/list_lru.h

Lines changed: 115 additions & 0 deletions
@@ -0,0 +1,115 @@
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#ifndef _LRU_LIST_H
#define _LRU_LIST_H

#include <linux/list.h>

/* list_lru_walk_cb has to always return one of these values */
enum lru_status {
	LRU_REMOVED,		/* item removed from list */
	LRU_ROTATE,		/* item referenced, give another pass */
	LRU_SKIP,		/* item cannot be locked, skip */
	LRU_RETRY,		/* item not freeable. May drop the lock
				   internally, but has to return locked. */
};

struct list_lru {
	spinlock_t		lock;
	struct list_head	list;
	/* kept as signed so we can catch imbalance bugs */
	long			nr_items;
};

int list_lru_init(struct list_lru *lru);

/**
 * list_lru_add: add an element to the lru list's tail
 * @lru: the lru pointer
 * @item: the item to be added.
 *
 * If the element is already part of a list, this function returns without
 * doing anything. Therefore the caller does not need to keep state about
 * whether or not the element already belongs in the list and is allowed to
 * lazily update it. Note however that this is valid for *a* list, not *this*
 * list. If the caller organizes itself in a way that elements can be in more
 * than one type of list, it is up to the caller to fully remove the item from
 * the previous list (with list_lru_del() for instance) before moving it
 * to @lru.
 *
 * Return value: true if the list was updated, false otherwise
 */
bool list_lru_add(struct list_lru *lru, struct list_head *item);

/**
 * list_lru_del: delete an element from the lru list
 * @lru: the lru pointer
 * @item: the item to be deleted.
 *
 * This function works analogously to list_lru_add in terms of list
 * manipulation. The comments about an element already belonging to
 * a list are also valid for list_lru_del.
 *
 * Return value: true if the list was updated, false otherwise
 */
bool list_lru_del(struct list_lru *lru, struct list_head *item);

/**
 * list_lru_count: return the number of objects currently held by @lru
 * @lru: the lru pointer.
 *
 * Always returns a non-negative number, 0 for empty lists. There is no
 * guarantee that the list is not updated while the count is being computed.
 * Callers that want such a guarantee need to provide an outer lock.
 */
static inline unsigned long list_lru_count(struct list_lru *lru)
{
	return lru->nr_items;
}

typedef enum lru_status
(*list_lru_walk_cb)(struct list_head *item, spinlock_t *lock, void *cb_arg);

/**
 * list_lru_walk: walk a list_lru, isolating and disposing freeable items.
 * @lru: the lru pointer.
 * @isolate: callback function that is responsible for deciding what to do with
 *  the item currently being scanned
 * @cb_arg: opaque type that will be passed to @isolate
 * @nr_to_walk: how many items to scan.
 *
 * This function will scan all elements in a particular list_lru, calling the
 * @isolate callback for each of those items, along with the current list
 * spinlock and a caller-provided opaque. The @isolate callback can choose to
 * drop the lock internally, but *must* return with the lock held. The callback
 * will return an enum lru_status telling the list_lru infrastructure what to
 * do with the object being scanned.
 *
 * Please note that nr_to_walk does not mean how many objects will be freed,
 * just how many objects will be scanned.
 *
 * Return value: the number of objects effectively removed from the LRU.
 */
unsigned long list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
			    void *cb_arg, unsigned long nr_to_walk);

typedef void (*list_lru_dispose_cb)(struct list_head *dispose_list);
/**
 * list_lru_dispose_all: forcibly flush all elements in an @lru
 * @lru: the lru pointer
 * @dispose: callback function to be called for each lru list.
 *
 * This function will forcibly isolate all elements into the dispose list, and
 * call the @dispose callback to flush the list. Please note that the callback
 * should expect items in any state, clean or dirty, and be able to flush all of
 * them.
 *
 * Return value: how many objects were freed. It should be equal to all objects
 * in the list_lru.
 */
unsigned long
list_lru_dispose_all(struct list_lru *lru, list_lru_dispose_cb dispose);
#endif /* _LRU_LIST_H */
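
A usage sketch, not part of the commit: the foo_* names, the per-object lock and the refcount field below are all hypothetical, invented to show how a subsystem might wire an isolate callback into list_lru_walk(). Note that the walk infrastructure decrements nr_items itself when the callback returns LRU_REMOVED; the callback only has to unlink the item.

#include <linux/list_lru.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/atomic.h>

/* Hypothetical object type with an embedded LRU hook. */
struct foo_object {
	spinlock_t	 obj_lock;	/* hypothetical per-object lock */
	atomic_t	 refcount;	/* hypothetical use count */
	struct list_head lru;		/* linked into foo_lru below */
};

static struct list_lru foo_lru;		/* list_lru_init(&foo_lru) at setup */

static enum lru_status foo_isolate(struct list_head *item, spinlock_t *lock,
				   void *cb_arg)
{
	struct foo_object *obj = container_of(item, struct foo_object, lru);
	struct list_head *freeable = cb_arg;

	/* The lru lock is held here; never block on the object lock. */
	if (!spin_trylock(&obj->obj_lock))
		return LRU_SKIP;

	/* Objects still in use get another trip around the list. */
	if (atomic_read(&obj->refcount)) {
		spin_unlock(&obj->obj_lock);
		return LRU_ROTATE;
	}

	/* Unlink and park on the caller's private list; nr_items is
	 * decremented by list_lru_walk() when we return LRU_REMOVED. */
	list_move(item, freeable);
	spin_unlock(&obj->obj_lock);
	return LRU_REMOVED;
}

static unsigned long foo_shrink(unsigned long nr_to_scan)
{
	LIST_HEAD(freeable);
	struct foo_object *obj, *next;
	unsigned long freed;

	freed = list_lru_walk(&foo_lru, foo_isolate, &freeable, nr_to_scan);

	/* Actually free the isolated objects outside the lru lock. */
	list_for_each_entry_safe(obj, next, &freeable, lru)
		kfree(obj);
	return freed;
}

Isolating onto a private list and freeing after the walk keeps expensive teardown work out of the lru lock's critical section, which is the pattern the isolate/dispose split is designed for.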

mm/Makefile

Lines changed: 1 addition & 1 deletion
@@ -17,7 +17,7 @@ obj-y := filemap.o mempool.o oom_kill.o fadvise.o \
 			   util.o mmzone.o vmstat.o backing-dev.o \
 			   mm_init.o mmu_context.o percpu.o slab_common.o \
 			   compaction.o balloon_compaction.o \
-			   interval_tree.o $(mmu-y)
+			   interval_tree.o list_lru.o $(mmu-y)
 
 obj-y += init-mm.o

mm/list_lru.c

Lines changed: 117 additions & 0 deletions
@@ -0,0 +1,117 @@
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list_lru.h>

bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	spin_lock(&lru->lock);
	if (list_empty(item)) {
		list_add_tail(item, &lru->list);
		lru->nr_items++;
		spin_unlock(&lru->lock);
		return true;
	}
	spin_unlock(&lru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	spin_lock(&lru->lock);
	if (!list_empty(item)) {
		list_del_init(item);
		lru->nr_items--;
		spin_unlock(&lru->lock);
		return true;
	}
	spin_unlock(&lru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);

unsigned long list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
			    void *cb_arg, unsigned long nr_to_walk)
{
	struct list_head *item, *n;
	unsigned long removed = 0;
	/*
	 * If we don't keep track of which pass we are on, we can loop at
	 * LRU_RETRY, since we have no guarantees that the caller will be able
	 * to do something other than retry on the next pass. We handle this by
	 * allowing at most one retry per object. This should not be altered
	 * by any condition other than LRU_RETRY.
	 */
	bool first_pass = true;

	spin_lock(&lru->lock);
restart:
	list_for_each_safe(item, n, &lru->list) {
		enum lru_status ret;
		ret = isolate(item, &lru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED:
			lru->nr_items--;
			removed++;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &lru->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			if (!first_pass) {
				first_pass = true;
				break;
			}
			first_pass = false;
			goto restart;
		default:
			BUG();
		}

		if (nr_to_walk-- == 0)
			break;
	}
	spin_unlock(&lru->lock);
	return removed;
}
EXPORT_SYMBOL_GPL(list_lru_walk);

unsigned long list_lru_dispose_all(struct list_lru *lru,
				   list_lru_dispose_cb dispose)
{
	unsigned long disposed = 0;
	LIST_HEAD(dispose_list);

	spin_lock(&lru->lock);
	while (!list_empty(&lru->list)) {
		list_splice_init(&lru->list, &dispose_list);
		disposed += lru->nr_items;
		lru->nr_items = 0;
		spin_unlock(&lru->lock);

		dispose(&dispose_list);

		spin_lock(&lru->lock);
	}
	spin_unlock(&lru->lock);
	return disposed;
}

int list_lru_init(struct list_lru *lru)
{
	spin_lock_init(&lru->lock);
	INIT_LIST_HEAD(&lru->list);
	lru->nr_items = 0;

	return 0;
}
EXPORT_SYMBOL_GPL(list_lru_init);
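
Continuing the hypothetical foo_* sketch from above, a dispose callback for list_lru_dispose_all() might look like the following. By the time the callback runs, list_lru_dispose_all() has already spliced the items onto a private list and reset nr_items under the lru lock, so the callback owns the items outright and needs no locking against the lru.

/* Hypothetical dispose callback: frees every object handed over. */
static void foo_dispose(struct list_head *dispose_list)
{
	struct foo_object *obj, *next;

	list_for_each_entry_safe(obj, next, dispose_list, lru) {
		list_del_init(&obj->lru);
		kfree(obj);
	}
}

/* Hypothetical teardown path, e.g. at unmount time. */
static void foo_teardown(void)
{
	unsigned long freed;

	freed = list_lru_dispose_all(&foo_lru, foo_dispose);
	pr_debug("foo: disposed %lu objects\n", freed);
}

The splice-then-drop-the-lock loop in list_lru_dispose_all() also means the callback may sleep, and that items added concurrently while the lock is dropped are picked up on the next iteration of the while loop.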
