
Commit 4a8c7bb

Nathan Zimmer authored and torvalds committed
mm/mempolicy.c: convert the shared_policy lock to a rwlock
When running the SPECint_rate gcc benchmark on some very large boxes it was noticed that the system was spending lots of time in mpol_shared_policy_lookup(). The gamess benchmark can also show it and is what I mostly used to chase down the issue, since I found its setup to be easier.

To be clear, the binaries were on tmpfs because of disk I/O requirements. We then used text replication to avoid icache misses and to keep all the copies from banging on the memory where the instruction code resides. This results in us hitting a bottleneck in mpol_shared_policy_lookup(), since lookup is serialised by the shared_policy lock.

I have only reproduced this on very large (3k+ cores) boxes. The problem starts showing up at just a few hundred ranks and gets worse until it threatens to livelock once it gets large enough. For example, on the gamess benchmark at 128 ranks this area consumes only ~1% of time, at 512 ranks it consumes nearly 13%, and at 2k ranks it is over 90%.

To alleviate the contention in this area I converted the spinlock to an rwlock. This allows a large number of lookups to happen simultaneously. The results were quite good, reducing this consumption at max ranks to around 2%.

[akpm@linux-foundation.org: tidy up code comments]
Signed-off-by: Nathan Zimmer <nzimmer@sgi.com>
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Nadia Yvette Chambers <nyc@holomorphy.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
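The core idea of the fix, shown here as a minimal user-space sketch rather than the kernel code itself: a read-mostly structure guarded by a single exclusive lock serialises every lookup, whereas a reader-writer lock lets any number of lookups proceed concurrently and only serialises updates. The names below (policy_range, policy_tree, tree_lock, lookup_policy, insert_policy) are illustrative inventions, the sketch uses POSIX pthread_rwlock_t in place of the kernel's rwlock_t, and a plain linked list stands in for the rb-tree.

#include <pthread.h>
#include <stddef.h>

/* Illustrative stand-ins for the kernel's rb-tree of shared policies. */
struct policy { int preferred_node; };

struct policy_range {
	unsigned long start, end;
	struct policy *pol;
	struct policy_range *next;
};

static struct policy_range *policy_tree;	/* here: a simple list */
static pthread_rwlock_t tree_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Lookup path: many ranks may hold the read lock at the same time. */
struct policy *lookup_policy(unsigned long idx)
{
	struct policy *pol = NULL;
	struct policy_range *n;

	pthread_rwlock_rdlock(&tree_lock);
	for (n = policy_tree; n; n = n->next) {
		if (idx >= n->start && idx < n->end) {
			pol = n->pol;
			break;
		}
	}
	pthread_rwlock_unlock(&tree_lock);
	return pol;
}

/* Update path: still fully exclusive, just as under the old spinlock. */
void insert_policy(struct policy_range *new_range)
{
	pthread_rwlock_wrlock(&tree_lock);
	new_range->next = policy_tree;
	policy_tree = new_range;
	pthread_rwlock_unlock(&tree_lock);
}

In the commit itself, mpol_shared_policy_lookup() takes the lock with read_lock(), while the paths that modify the tree (shared_policy_replace(), mpol_free_shared_policy()) and the initialiser use write_lock()/rwlock_init(), as the diffs below show.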
1 parent 8f235d1 commit 4a8c7bb

3 files changed, +19 −15 lines changed

fs/hugetlbfs/inode.c

Lines changed: 1 addition & 1 deletion
@@ -738,7 +738,7 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
 		/*
 		 * The policy is initialized here even if we are creating a
 		 * private inode because initialization simply creates an
-		 * an empty rb tree and calls spin_lock_init(), later when we
+		 * an empty rb tree and calls rwlock_init(), later when we
 		 * call mpol_free_shared_policy() it will just return because
 		 * the rb tree will still be empty.
 		 */

include/linux/mempolicy.h

Lines changed: 1 addition & 1 deletion
@@ -122,7 +122,7 @@ struct sp_node {
 
 struct shared_policy {
 	struct rb_root root;
-	spinlock_t lock;
+	rwlock_t lock;
 };
 
 int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);

mm/mempolicy.c

Lines changed: 17 additions & 13 deletions
@@ -2142,12 +2142,14 @@ bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
  *
  * Remember policies even when nobody has shared memory mapped.
  * The policies are kept in Red-Black tree linked from the inode.
- * They are protected by the sp->lock spinlock, which should be held
+ * They are protected by the sp->lock rwlock, which should be held
  * for any accesses to the tree.
  */
 
-/* lookup first element intersecting start-end */
-/* Caller holds sp->lock */
+/*
+ * lookup first element intersecting start-end. Caller holds sp->lock for
+ * reading or for writing
+ */
 static struct sp_node *
 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
 {
@@ -2178,8 +2180,10 @@ sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
 	return rb_entry(n, struct sp_node, nd);
 }
 
-/* Insert a new shared policy into the list. */
-/* Caller holds sp->lock */
+/*
+ * Insert a new shared policy into the list.  Caller holds sp->lock for
+ * writing.
+ */
 static void sp_insert(struct shared_policy *sp, struct sp_node *new)
 {
 	struct rb_node **p = &sp->root.rb_node;
@@ -2211,13 +2215,13 @@ mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
 
 	if (!sp->root.rb_node)
 		return NULL;
-	spin_lock(&sp->lock);
+	read_lock(&sp->lock);
 	sn = sp_lookup(sp, idx, idx+1);
 	if (sn) {
 		mpol_get(sn->policy);
 		pol = sn->policy;
 	}
-	spin_unlock(&sp->lock);
+	read_unlock(&sp->lock);
 	return pol;
 }
 
@@ -2360,7 +2364,7 @@ static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
 	int ret = 0;
 
 restart:
-	spin_lock(&sp->lock);
+	write_lock(&sp->lock);
 	n = sp_lookup(sp, start, end);
 	/* Take care of old policies in the same range. */
 	while (n && n->start < end) {
@@ -2393,7 +2397,7 @@ static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
 	}
 	if (new)
 		sp_insert(sp, new);
-	spin_unlock(&sp->lock);
+	write_unlock(&sp->lock);
 	ret = 0;
 
 err_out:
@@ -2405,7 +2409,7 @@ static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
 	return ret;
 
 alloc_new:
-	spin_unlock(&sp->lock);
+	write_unlock(&sp->lock);
 	ret = -ENOMEM;
 	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
 	if (!n_new)
@@ -2431,7 +2435,7 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
 	int ret;
 
 	sp->root = RB_ROOT; /* empty tree == default mempolicy */
-	spin_lock_init(&sp->lock);
+	rwlock_init(&sp->lock);
 
 	if (mpol) {
 		struct vm_area_struct pvma;
@@ -2497,14 +2501,14 @@ void mpol_free_shared_policy(struct shared_policy *p)
 
 	if (!p->root.rb_node)
 		return;
-	spin_lock(&p->lock);
+	write_lock(&p->lock);
 	next = rb_first(&p->root);
 	while (next) {
 		n = rb_entry(next, struct sp_node, nd);
 		next = rb_next(&n->nd);
 		sp_delete(p, n);
 	}
-	spin_unlock(&p->lock);
+	write_unlock(&p->lock);
 }
 
 #ifdef CONFIG_NUMA_BALANCING
