/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#ifndef _LRU_LIST_H
#define _LRU_LIST_H

#include <linux/list.h>
#include <linux/spinlock.h>	/* for the spinlock_t used below */
/* list_lru_walk_cb must always return one of these values */
enum lru_status {
	LRU_REMOVED,		/* item removed from list */
	LRU_ROTATE,		/* item referenced, give another pass */
	LRU_SKIP,		/* item cannot be locked, skip */
	LRU_RETRY,		/* item not freeable. May drop the lock
				   internally, but has to return locked. */
};

struct list_lru {
	spinlock_t		lock;
	struct list_head	list;
	/* kept as signed so we can catch imbalance bugs */
	long			nr_items;
};

int list_lru_init(struct list_lru *lru);

/**
 * list_lru_add: add an element to the lru list's tail
 * @lru: the lru pointer
 * @item: the item to be added.
 *
 * If the element is already part of a list, this function does nothing
 * and returns false. The caller therefore does not need to keep track of
 * whether the element already belongs to the list and may update it
 * lazily. Note however that this holds for *a* list, not *this* list: if
 * the caller organizes itself in a way that elements can be on more than
 * one type of list, it is up to the caller to fully remove the item from
 * the previous list (with list_lru_del() for instance) before adding it
 * to @lru.
 *
 * Return value: true if the list was updated, false otherwise.
 */
bool list_lru_add(struct list_lru *lru, struct list_head *item);

/**
 * list_lru_del: delete an element from the lru list
 * @lru: the lru pointer
 * @item: the item to be deleted.
 *
 * This function works analogously to list_lru_add() in terms of list
 * manipulation. The comments about an element already belonging to
 * a list also apply to list_lru_del().
 *
 * Return value: true if the list was updated, false otherwise.
 */
bool list_lru_del(struct list_lru *lru, struct list_head *item);
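
/*
 * Usage sketch (illustrative only, not part of the original header): a
 * cache object embeds a list_head and is lazily put on the LRU when its
 * last user is done with it. "struct my_object", my_lru and the helpers
 * below are hypothetical names; kfree() would also need <linux/slab.h>.
 * The sketch assumes obj->lru is set up with INIT_LIST_HEAD(), so that
 * "not on a list" is detectable by list_lru_add().
 */
#if 0	/* usage sketch, not compiled */
struct my_object {
	struct list_head	lru;	/* linkage into the list_lru */
	/* ... object payload ... */
};

static struct list_lru my_lru;

static int my_cache_init(void)
{
	return list_lru_init(&my_lru);	/* assumed: 0 on success */
}

static void my_object_put(struct my_object *obj)
{
	/*
	 * Safe to call unconditionally: if obj->lru is already on a
	 * list, list_lru_add() does nothing and returns false.
	 */
	list_lru_add(&my_lru, &obj->lru);
}

static void my_object_free(struct my_object *obj)
{
	/* Fully unlink from the LRU (if present) before freeing. */
	list_lru_del(&my_lru, &obj->lru);
	kfree(obj);
}
#endif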

/**
 * list_lru_count: return the number of objects currently held by @lru
 * @lru: the lru pointer.
 *
 * Always returns a non-negative number, 0 for empty lists. There is no
 * guarantee that the list is not updated while the count is being computed.
 * Callers that want such a guarantee need to provide an outer lock.
 */
static inline unsigned long list_lru_count(struct list_lru *lru)
{
	return lru->nr_items;
}

typedef enum lru_status
(*list_lru_walk_cb)(struct list_head *item, spinlock_t *lock, void *cb_arg);
/**
 * list_lru_walk: walk a list_lru, isolating and disposing freeable items.
 * @lru: the lru pointer.
 * @isolate: callback function that is responsible for deciding what to do
 * with the item currently being scanned
 * @cb_arg: opaque argument that will be passed to @isolate
 * @nr_to_walk: how many items to scan.
 *
 * This function will scan all elements in a particular list_lru, calling the
 * @isolate callback for each of those items, along with the current list
 * spinlock and the caller-provided opaque argument. The @isolate callback can
 * choose to drop the lock internally, but *must* return with the lock held.
 * The callback will return an enum lru_status telling the list_lru
 * infrastructure what to do with the object being scanned.
 *
 * Please note that @nr_to_walk does not mean how many objects will be freed,
 * just how many objects will be scanned.
 *
 * Return value: the number of objects effectively removed from the LRU.
 */
unsigned long list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
			    void *cb_arg, unsigned long nr_to_walk);
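
/*
 * Sketch of an @isolate callback (illustrative only, not part of the
 * original header). It shows the documented locking protocol: the
 * callback is entered with @lock held, may drop it while disposing of
 * an item, but must return with it reacquired. "struct my_object" and
 * the my_object_*() helpers are hypothetical, and it is an assumption
 * here that the callback unlinks the item itself before reporting
 * LRU_REMOVED.
 */
#if 0	/* usage sketch, not compiled */
static enum lru_status my_isolate(struct list_head *item, spinlock_t *lock,
				  void *cb_arg)
{
	struct my_object *obj = container_of(item, struct my_object, lru);

	if (!my_object_trylock(obj))
		return LRU_SKIP;	/* contended: move on to next item */

	if (my_object_is_referenced(obj)) {
		my_object_unlock(obj);
		return LRU_ROTATE;	/* recently used: give another pass */
	}

	list_del_init(item);		/* isolate the item ... */
	spin_unlock(lock);		/* ... the lock may be dropped ... */
	my_object_dispose(obj);
	spin_lock(lock);		/* ... but must be held on return */
	return LRU_REMOVED;
}

/* e.g.: removed = list_lru_walk(&my_lru, my_isolate, NULL, 32); */
#endif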

typedef void (*list_lru_dispose_cb)(struct list_head *dispose_list);
/**
 * list_lru_dispose_all: forcibly flush all elements in an @lru
 * @lru: the lru pointer
 * @dispose: callback function to be called for each lru list.
 *
 * This function will forcibly isolate all elements into the dispose list,
 * and call the @dispose callback to flush the list. Please note that the
 * callback should expect items in any state, clean or dirty, and be able
 * to flush all of them.
 *
 * Return value: how many objects were freed. It should be equal to the
 * number of objects in the list_lru.
 */
unsigned long
list_lru_dispose_all(struct list_lru *lru, list_lru_dispose_cb dispose);
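
/*
 * Sketch of a @dispose callback (illustrative only, not part of the
 * original header): it is handed the list onto which the lru's items
 * were isolated and must free every entry, clean or dirty.
 * "struct my_object" and my_object_dispose() are hypothetical.
 */
#if 0	/* usage sketch, not compiled */
static void my_dispose(struct list_head *dispose_list)
{
	while (!list_empty(dispose_list)) {
		struct my_object *obj;

		obj = list_first_entry(dispose_list,
				       struct my_object, lru);
		list_del_init(&obj->lru);
		my_object_dispose(obj);	/* must handle any object state */
	}
}

/* e.g.: freed = list_lru_dispose_all(&my_lru, my_dispose); */
#endif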
#endif /* _LRU_LIST_H */