Skip to content

Commit 7dc19d5

Browse files
Dave Chinner authored and Al Viro committed
drivers: convert shrinkers to new count/scan API
Convert the driver shrinkers to the new API. Most changes are compile tested only because I either don't have the hardware or it's staging stuff. FWIW, the md and android code is pretty good, but the rest of it makes me want to claw my eyes out. The amount of broken code I just encountered is mind boggling. I've added comments explaining what is broken, but I fear that some of the code would be best dealt with by being dragged behind the bike shed, burying in mud up to its neck and then run over repeatedly with a blunt lawn mower. Special mention goes to the zcache/zcache2 drivers. They can't co-exist in the build at the same time, they are under different menu options in menuconfig, they only show up when you've got the right set of mm subsystem options configured and so even compile testing is an exercise in pulling teeth. And that doesn't even take into account the horrible, broken code... [glommer@openvz.org: fixes for i915, android lowmem, zcache, bcache] Signed-off-by: Dave Chinner <dchinner@redhat.com> Signed-off-by: Glauber Costa <glommer@openvz.org> Acked-by: Mel Gorman <mgorman@suse.de> Cc: Daniel Vetter <daniel.vetter@ffwll.ch> Cc: Kent Overstreet <koverstreet@google.com> Cc: John Stultz <john.stultz@linaro.org> Cc: David Rientjes <rientjes@google.com> Cc: Jerome Glisse <jglisse@redhat.com> Cc: Thomas Hellstrom <thellstrom@vmware.com> Cc: "Theodore Ts'o" <tytso@mit.edu> Cc: Adrian Hunter <adrian.hunter@intel.com> Cc: Al Viro <viro@zeniv.linux.org.uk> Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com> Cc: Arve Hjønnevåg <arve@android.com> Cc: Carlos Maiolino <cmaiolino@redhat.com> Cc: Christoph Hellwig <hch@lst.de> Cc: Chuck Lever <chuck.lever@oracle.com> Cc: Daniel Vetter <daniel.vetter@ffwll.ch> Cc: David Rientjes <rientjes@google.com> Cc: Gleb Natapov <gleb@redhat.com> Cc: Greg Thelen <gthelen@google.com> Cc: J. 
Bruce Fields <bfields@redhat.com> Cc: Jan Kara <jack@suse.cz> Cc: Jerome Glisse <jglisse@redhat.com> Cc: John Stultz <john.stultz@linaro.org> Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: Kent Overstreet <koverstreet@google.com> Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Cc: Marcelo Tosatti <mtosatti@redhat.com> Cc: Mel Gorman <mgorman@suse.de> Cc: Steven Whitehouse <swhiteho@redhat.com> Cc: Thomas Hellstrom <thellstrom@vmware.com> Cc: Trond Myklebust <Trond.Myklebust@netapp.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
1 parent 1ab6c49 commit 7dc19d5

File tree

9 files changed

+236
-136
lines changed

9 files changed

+236
-136
lines changed

drivers/gpu/drm/i915/i915_dma.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1667,7 +1667,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
16671667
return 0;
16681668

16691669
out_gem_unload:
1670-
if (dev_priv->mm.inactive_shrinker.shrink)
1670+
if (dev_priv->mm.inactive_shrinker.scan_objects)
16711671
unregister_shrinker(&dev_priv->mm.inactive_shrinker);
16721672

16731673
if (dev->pdev->msi_enabled)
@@ -1706,7 +1706,7 @@ int i915_driver_unload(struct drm_device *dev)
17061706

17071707
i915_teardown_sysfs(dev);
17081708

1709-
if (dev_priv->mm.inactive_shrinker.shrink)
1709+
if (dev_priv->mm.inactive_shrinker.scan_objects)
17101710
unregister_shrinker(&dev_priv->mm.inactive_shrinker);
17111711

17121712
mutex_lock(&dev->struct_mutex);

drivers/gpu/drm/i915/i915_gem.c

Lines changed: 55 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -57,10 +57,12 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
5757
struct drm_i915_fence_reg *fence,
5858
bool enable);
5959

60-
static int i915_gem_inactive_shrink(struct shrinker *shrinker,
61-
struct shrink_control *sc);
60+
static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
61+
struct shrink_control *sc);
62+
static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
63+
struct shrink_control *sc);
6264
static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
63-
static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
65+
static long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
6466
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
6567

6668
static bool cpu_cache_is_coherent(struct drm_device *dev,
@@ -1736,16 +1738,21 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
17361738
return __i915_gem_shrink(dev_priv, target, true);
17371739
}
17381740

1739-
static void
1741+
static long
17401742
i915_gem_shrink_all(struct drm_i915_private *dev_priv)
17411743
{
17421744
struct drm_i915_gem_object *obj, *next;
1745+
long freed = 0;
17431746

17441747
i915_gem_evict_everything(dev_priv->dev);
17451748

17461749
list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
1747-
global_list)
1750+
global_list) {
1751+
if (obj->pages_pin_count == 0)
1752+
freed += obj->base.size >> PAGE_SHIFT;
17481753
i915_gem_object_put_pages(obj);
1754+
}
1755+
return freed;
17491756
}
17501757

17511758
static int
@@ -4526,7 +4533,8 @@ i915_gem_load(struct drm_device *dev)
45264533

45274534
dev_priv->mm.interruptible = true;
45284535

4529-
dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
4536+
dev_priv->mm.inactive_shrinker.scan_objects = i915_gem_inactive_scan;
4537+
dev_priv->mm.inactive_shrinker.count_objects = i915_gem_inactive_count;
45304538
dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
45314539
register_shrinker(&dev_priv->mm.inactive_shrinker);
45324540
}
@@ -4749,18 +4757,17 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
47494757
#endif
47504758
}
47514759

4752-
static int
4753-
i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
4760+
static unsigned long
4761+
i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
47544762
{
47554763
struct drm_i915_private *dev_priv =
47564764
container_of(shrinker,
47574765
struct drm_i915_private,
47584766
mm.inactive_shrinker);
47594767
struct drm_device *dev = dev_priv->dev;
47604768
struct drm_i915_gem_object *obj;
4761-
int nr_to_scan = sc->nr_to_scan;
47624769
bool unlock = true;
4763-
int cnt;
4770+
unsigned long count;
47644771

47654772
if (!mutex_trylock(&dev->struct_mutex)) {
47664773
if (!mutex_is_locked_by(&dev->struct_mutex, current))
@@ -4772,31 +4779,22 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
47724779
unlock = false;
47734780
}
47744781

4775-
if (nr_to_scan) {
4776-
nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
4777-
if (nr_to_scan > 0)
4778-
nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan,
4779-
false);
4780-
if (nr_to_scan > 0)
4781-
i915_gem_shrink_all(dev_priv);
4782-
}
4783-
4784-
cnt = 0;
4782+
count = 0;
47854783
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
47864784
if (obj->pages_pin_count == 0)
4787-
cnt += obj->base.size >> PAGE_SHIFT;
4785+
count += obj->base.size >> PAGE_SHIFT;
47884786

47894787
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
47904788
if (obj->active)
47914789
continue;
47924790

47934791
if (obj->pin_count == 0 && obj->pages_pin_count == 0)
4794-
cnt += obj->base.size >> PAGE_SHIFT;
4792+
count += obj->base.size >> PAGE_SHIFT;
47954793
}
47964794

47974795
if (unlock)
47984796
mutex_unlock(&dev->struct_mutex);
4799-
return cnt;
4797+
return count;
48004798
}
48014799

48024800
/* All the new VM stuff */
@@ -4860,6 +4858,40 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
48604858
return 0;
48614859
}
48624860

4861+
static unsigned long
4862+
i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
4863+
{
4864+
struct drm_i915_private *dev_priv =
4865+
container_of(shrinker,
4866+
struct drm_i915_private,
4867+
mm.inactive_shrinker);
4868+
struct drm_device *dev = dev_priv->dev;
4869+
int nr_to_scan = sc->nr_to_scan;
4870+
unsigned long freed;
4871+
bool unlock = true;
4872+
4873+
if (!mutex_trylock(&dev->struct_mutex)) {
4874+
if (!mutex_is_locked_by(&dev->struct_mutex, current))
4875+
return 0;
4876+
4877+
if (dev_priv->mm.shrinker_no_lock_stealing)
4878+
return 0;
4879+
4880+
unlock = false;
4881+
}
4882+
4883+
freed = i915_gem_purge(dev_priv, nr_to_scan);
4884+
if (freed < nr_to_scan)
4885+
freed += __i915_gem_shrink(dev_priv, nr_to_scan,
4886+
false);
4887+
if (freed < nr_to_scan)
4888+
freed += i915_gem_shrink_all(dev_priv);
4889+
4890+
if (unlock)
4891+
mutex_unlock(&dev->struct_mutex);
4892+
return freed;
4893+
}
4894+
48634895
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
48644896
struct i915_address_space *vm)
48654897
{

drivers/gpu/drm/ttm/ttm_page_alloc.c

Lines changed: 28 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -377,28 +377,26 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
377377
return nr_free;
378378
}
379379

380-
/* Get good estimation how many pages are free in pools */
381-
static int ttm_pool_get_num_unused_pages(void)
382-
{
383-
unsigned i;
384-
int total = 0;
385-
for (i = 0; i < NUM_POOLS; ++i)
386-
total += _manager->pools[i].npages;
387-
388-
return total;
389-
}
390-
391380
/**
392381
* Callback for mm to request pool to reduce number of page held.
382+
*
383+
* XXX: (dchinner) Deadlock warning!
384+
*
385+
* ttm_page_pool_free() does memory allocation using GFP_KERNEL. That means
386+
* this can deadlock when called with a sc->gfp_mask that is not equal to
387+
* GFP_KERNEL.
388+
*
389+
* This code is crying out for a shrinker per pool....
393390
*/
394-
static int ttm_pool_mm_shrink(struct shrinker *shrink,
395-
struct shrink_control *sc)
391+
static unsigned long
392+
ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
396393
{
397394
static atomic_t start_pool = ATOMIC_INIT(0);
398395
unsigned i;
399396
unsigned pool_offset = atomic_add_return(1, &start_pool);
400397
struct ttm_page_pool *pool;
401398
int shrink_pages = sc->nr_to_scan;
399+
unsigned long freed = 0;
402400

403401
pool_offset = pool_offset % NUM_POOLS;
404402
/* select start pool in round robin fashion */
@@ -408,14 +406,28 @@ static int ttm_pool_mm_shrink(struct shrinker *shrink,
408406
break;
409407
pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
410408
shrink_pages = ttm_page_pool_free(pool, nr_free);
409+
freed += nr_free - shrink_pages;
411410
}
412-
/* return estimated number of unused pages in pool */
413-
return ttm_pool_get_num_unused_pages();
411+
return freed;
412+
}
413+
414+
415+
static unsigned long
416+
ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
417+
{
418+
unsigned i;
419+
unsigned long count = 0;
420+
421+
for (i = 0; i < NUM_POOLS; ++i)
422+
count += _manager->pools[i].npages;
423+
424+
return count;
414425
}
415426

416427
static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
417428
{
418-
manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
429+
manager->mm_shrink.count_objects = ttm_pool_shrink_count;
430+
manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
419431
manager->mm_shrink.seeks = 1;
420432
register_shrinker(&manager->mm_shrink);
421433
}

drivers/gpu/drm/ttm/ttm_page_alloc_dma.c

Lines changed: 32 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -918,19 +918,6 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
918918
}
919919
EXPORT_SYMBOL_GPL(ttm_dma_populate);
920920

921-
/* Get good estimation how many pages are free in pools */
922-
static int ttm_dma_pool_get_num_unused_pages(void)
923-
{
924-
struct device_pools *p;
925-
unsigned total = 0;
926-
927-
mutex_lock(&_manager->lock);
928-
list_for_each_entry(p, &_manager->pools, pools)
929-
total += p->pool->npages_free;
930-
mutex_unlock(&_manager->lock);
931-
return total;
932-
}
933-
934921
/* Put all pages in pages list to correct pool to wait for reuse */
935922
void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
936923
{
@@ -1002,18 +989,29 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
1002989

1003990
/**
1004991
* Callback for mm to request pool to reduce number of page held.
992+
*
993+
* XXX: (dchinner) Deadlock warning!
994+
*
995+
* ttm_dma_page_pool_free() does GFP_KERNEL memory allocation, and so attention
996+
* needs to be paid to sc->gfp_mask to determine if this can be done or not.
997+
* GFP_KERNEL memory allocation in a GFP_ATOMIC reclaim context would be really
998+
* bad.
999+
*
1000+
* I'm getting sadder as I hear more pathetic whimpers about needing per-pool
1001+
* shrinkers
10051002
*/
1006-
static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
1007-
struct shrink_control *sc)
1003+
static unsigned long
1004+
ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
10081005
{
10091006
static atomic_t start_pool = ATOMIC_INIT(0);
10101007
unsigned idx = 0;
10111008
unsigned pool_offset = atomic_add_return(1, &start_pool);
10121009
unsigned shrink_pages = sc->nr_to_scan;
10131010
struct device_pools *p;
1011+
unsigned long freed = 0;
10141012

10151013
if (list_empty(&_manager->pools))
1016-
return 0;
1014+
return SHRINK_STOP;
10171015

10181016
mutex_lock(&_manager->lock);
10191017
pool_offset = pool_offset % _manager->npools;
@@ -1029,18 +1027,33 @@ static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
10291027
continue;
10301028
nr_free = shrink_pages;
10311029
shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
1030+
freed += nr_free - shrink_pages;
1031+
10321032
pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
10331033
p->pool->dev_name, p->pool->name, current->pid,
10341034
nr_free, shrink_pages);
10351035
}
10361036
mutex_unlock(&_manager->lock);
1037-
/* return estimated number of unused pages in pool */
1038-
return ttm_dma_pool_get_num_unused_pages();
1037+
return freed;
1038+
}
1039+
1040+
static unsigned long
1041+
ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
1042+
{
1043+
struct device_pools *p;
1044+
unsigned long count = 0;
1045+
1046+
mutex_lock(&_manager->lock);
1047+
list_for_each_entry(p, &_manager->pools, pools)
1048+
count += p->pool->npages_free;
1049+
mutex_unlock(&_manager->lock);
1050+
return count;
10391051
}
10401052

10411053
static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
10421054
{
1043-
manager->mm_shrink.shrink = &ttm_dma_pool_mm_shrink;
1055+
manager->mm_shrink.count_objects = ttm_dma_pool_shrink_count;
1056+
manager->mm_shrink.scan_objects = &ttm_dma_pool_shrink_scan;
10441057
manager->mm_shrink.seeks = 1;
10451058
register_shrinker(&manager->mm_shrink);
10461059
}

0 commit comments

Comments
 (0)