Commit 866be88

Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "9 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm/vmstat: fix overflow in mod_zone_page_state()
  ocfs2/dlm: clear migration_pending when migration target goes down
  mm/memory_hotplug.c: check for missing sections in test_pages_in_a_zone()
  ocfs2: fix flock panic issue
  m32r: add io*_rep helpers
  m32r: fix build failure
  arch/x86/xen/suspend.c: include xen/xen.h
  mm: memcontrol: fix possible memcg leak due to interrupted reclaim
  ocfs2: fix BUG when calculate new backup super
2 parents: e25bd6c + 6cdb18a


10 files changed: +102 -39 lines changed


arch/m32r/include/asm/Kbuild

Lines changed: 1 addition & 0 deletions
@@ -3,6 +3,7 @@ generic-y += clkdev.h
 generic-y += cputime.h
 generic-y += exec.h
 generic-y += irq_work.h
+generic-y += kvm_para.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
 generic-y += module.h

arch/m32r/include/asm/io.h

Lines changed: 9 additions & 1 deletion
@@ -168,13 +168,21 @@ static inline void _writel(unsigned long l, unsigned long addr)
 #define writew_relaxed writew
 #define writel_relaxed writel
 
-#define ioread8 read
+#define ioread8 readb
 #define ioread16 readw
 #define ioread32 readl
 #define iowrite8 writeb
 #define iowrite16 writew
 #define iowrite32 writel
 
+#define ioread8_rep(p, dst, count) insb((unsigned long)(p), (dst), (count))
+#define ioread16_rep(p, dst, count) insw((unsigned long)(p), (dst), (count))
+#define ioread32_rep(p, dst, count) insl((unsigned long)(p), (dst), (count))
+
+#define iowrite8_rep(p, src, count) outsb((unsigned long)(p), (src), (count))
+#define iowrite16_rep(p, src, count) outsw((unsigned long)(p), (src), (count))
+#define iowrite32_rep(p, src, count) outsl((unsigned long)(p), (src), (count))
+
 #define ioread16be(addr) be16_to_cpu(readw(addr))
 #define ioread32be(addr) be32_to_cpu(readl(addr))
 #define iowrite16be(v, addr) writew(cpu_to_be16(v), (addr))
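
Note: the *_rep helpers let a driver move a whole buffer through a single device register with one call, and drivers built in an allmodconfig reference them on every architecture. A minimal usage sketch, assuming a hypothetical memory-mapped device with a 16-bit FIFO data register (the device, register offset, and function name below are illustrative, not from this patch):

#include <linux/io.h>
#include <linux/types.h>

#define DEV_FIFO_REG	0x10	/* hypothetical FIFO data register offset */

/* Drain 'count' 16-bit words from the device FIFO into 'buf'.
 * ioread16_rep() performs 'count' back-to-back 16-bit reads from the
 * same address, which is what a FIFO register expects. */
static void dev_read_fifo(void __iomem *base, u16 *buf, unsigned int count)
{
	ioread16_rep(base + DEV_FIFO_REG, buf, count);
}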

arch/x86/xen/suspend.c

Lines changed: 1 addition & 0 deletions
@@ -1,6 +1,7 @@
 #include <linux/types.h>
 #include <linux/tick.h>
 
+#include <xen/xen.h>
 #include <xen/interface/xen.h>
 #include <xen/grant_table.h>
 #include <xen/events.h>

fs/ocfs2/dlm/dlmmaster.c

Lines changed: 2 additions & 0 deletions
@@ -2843,6 +2843,8 @@ static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
 	res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
 	if (!ret)
 		BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
+	else
+		res->migration_pending = 0;
 	spin_unlock(&res->spinlock);
 
 	/*

fs/ocfs2/locks.c

Lines changed: 4 additions & 1 deletion
@@ -67,7 +67,10 @@ static int ocfs2_do_flock(struct file *file, struct inode *inode,
 		 */
 
 		locks_lock_file_wait(file,
-				&(struct file_lock){.fl_type = F_UNLCK});
+				&(struct file_lock) {
+					.fl_type = F_UNLCK,
+					.fl_flags = FL_FLOCK
+				});
 
 		ocfs2_file_unlock(file);
 	}
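
Note: what makes the one-field compound literal dangerous is that every member you do not name is zero-initialized, so the original call handed the VFS a file_lock with fl_flags == 0 and the helper could not tell whether it was a POSIX or an flock-style request. A standalone sketch of the zeroing rule, using a hypothetical struct in plain userspace C:

#include <stdio.h>

struct file_lock_like {
	int fl_type;
	unsigned int fl_flags;
	void *fl_file;
};

int main(void)
{
	/* Only fl_type is named; fl_flags and fl_file are implicitly zeroed. */
	struct file_lock_like fl = (struct file_lock_like){ .fl_type = 2 };

	printf("type=%d flags=%u file=%p\n", fl.fl_type, fl.fl_flags, fl.fl_file);
	return 0;
}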

fs/ocfs2/resize.c

Lines changed: 12 additions & 3 deletions
@@ -54,11 +54,12 @@
 static u16 ocfs2_calc_new_backup_super(struct inode *inode,
 				       struct ocfs2_group_desc *gd,
 				       u16 cl_cpg,
+				       u16 old_bg_clusters,
 				       int set)
 {
 	int i;
 	u16 backups = 0;
-	u32 cluster;
+	u32 cluster, lgd_cluster;
 	u64 blkno, gd_blkno, lgd_blkno = le64_to_cpu(gd->bg_blkno);
 
 	for (i = 0; i < OCFS2_MAX_BACKUP_SUPERBLOCKS; i++) {
@@ -71,6 +72,12 @@ static u16 ocfs2_calc_new_backup_super(struct inode *inode,
 		else if (gd_blkno > lgd_blkno)
 			break;
 
+		/* check if already done backup super */
+		lgd_cluster = ocfs2_blocks_to_clusters(inode->i_sb, lgd_blkno);
+		lgd_cluster += old_bg_clusters;
+		if (lgd_cluster >= cluster)
+			continue;
+
 		if (set)
 			ocfs2_set_bit(cluster % cl_cpg,
 				      (unsigned long *)gd->bg_bitmap);
@@ -99,6 +106,7 @@ static int ocfs2_update_last_group_and_inode(handle_t *handle,
 	u16 chain, num_bits, backups = 0;
 	u16 cl_bpc = le16_to_cpu(cl->cl_bpc);
 	u16 cl_cpg = le16_to_cpu(cl->cl_cpg);
+	u16 old_bg_clusters;
 
 	trace_ocfs2_update_last_group_and_inode(new_clusters,
 						first_new_cluster);
@@ -112,6 +120,7 @@ static int ocfs2_update_last_group_and_inode(handle_t *handle,
 
 	group = (struct ocfs2_group_desc *)group_bh->b_data;
 
+	old_bg_clusters = le16_to_cpu(group->bg_bits) / cl_bpc;
 	/* update the group first. */
 	num_bits = new_clusters * cl_bpc;
 	le16_add_cpu(&group->bg_bits, num_bits);
@@ -125,7 +134,7 @@ static int ocfs2_update_last_group_and_inode(handle_t *handle,
 			       OCFS2_FEATURE_COMPAT_BACKUP_SB)) {
 		backups = ocfs2_calc_new_backup_super(bm_inode,
 						     group,
-						     cl_cpg, 1);
+						     cl_cpg, old_bg_clusters, 1);
 		le16_add_cpu(&group->bg_free_bits_count, -1 * backups);
 	}
 
@@ -163,7 +172,7 @@ static int ocfs2_update_last_group_and_inode(handle_t *handle,
 	if (ret < 0) {
 		ocfs2_calc_new_backup_super(bm_inode,
 					    group,
-					    cl_cpg, 0);
+					    cl_cpg, old_bg_clusters, 0);
 		le16_add_cpu(&group->bg_free_bits_count, backups);
 		le16_add_cpu(&group->bg_bits, -1 * num_bits);
 		le16_add_cpu(&group->bg_free_bits_count, -1 * num_bits);
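
A rough worked example of the new check, with invented numbers: if the last group starts at lgd_cluster = 65536 and already spanned old_bg_clusters = 2048 clusters before this resize, then lgd_cluster + old_bg_clusters = 67584. A backup-super candidate at cluster 67000 falls inside that old extent, the "lgd_cluster >= cluster" test fires, and its bit is skipped as already accounted for by the earlier size; a candidate at cluster 70000 lies past the old end and still gets its bit set (or cleared again on the error path).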

include/linux/vmstat.h

Lines changed: 3 additions & 3 deletions
@@ -176,11 +176,11 @@ extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);
 #define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
 
 #ifdef CONFIG_SMP
-void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
+void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
 void __inc_zone_page_state(struct page *, enum zone_stat_item);
 void __dec_zone_page_state(struct page *, enum zone_stat_item);
 
-void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
+void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
 void inc_zone_page_state(struct page *, enum zone_stat_item);
 void dec_zone_page_state(struct page *, enum zone_stat_item);
 
@@ -205,7 +205,7 @@ void set_pgdat_percpu_threshold(pg_data_t *pgdat,
  * The functions directly modify the zone and global counters.
  */
 static inline void __mod_zone_page_state(struct zone *zone,
-			enum zone_stat_item item, int delta)
+			enum zone_stat_item item, long delta)
 {
 	zone_page_state_add(delta, zone, item);
 }
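
The reason the prototypes move from int to long: callers compute deltas from unsigned long page counts, and on a 64-bit machine a delta past INT_MAX is silently truncated when squeezed through an int parameter. A small userspace sketch of the truncation, with illustrative numbers only (nothing here is kernel code):

#include <stdio.h>

/* Stand-ins for the old and new parameter types. */
static long apply_delta_int(long counter, int delta)   { return counter + delta; }
static long apply_delta_long(long counter, long delta) { return counter + delta; }

int main(void)
{
	/* Roughly 16 TiB worth of 4 KiB pages: more pages than fit in an int. */
	long nr_pages = 1L << 32;

	/* Passing a long to an int parameter typically wraps to 0 here. */
	printf("via int:  %ld\n", apply_delta_int(0, nr_pages));
	printf("via long: %ld\n", apply_delta_long(0, nr_pages));
	return 0;
}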

mm/memcontrol.c

Lines changed: 46 additions & 14 deletions
@@ -903,14 +903,20 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
 		if (prev && reclaim->generation != iter->generation)
 			goto out_unlock;
 
-		do {
+		while (1) {
 			pos = READ_ONCE(iter->position);
+			if (!pos || css_tryget(&pos->css))
+				break;
 			/*
-			 * A racing update may change the position and
-			 * put the last reference, hence css_tryget(),
-			 * or retry to see the updated position.
+			 * css reference reached zero, so iter->position will
+			 * be cleared by ->css_released. However, we should not
+			 * rely on this happening soon, because ->css_released
+			 * is called from a work queue, and by busy-waiting we
+			 * might block it. So we clear iter->position right
+			 * away.
 			 */
-		} while (pos && !css_tryget(&pos->css));
+			(void)cmpxchg(&iter->position, pos, NULL);
+		}
 	}
 
 	if (pos)
@@ -956,17 +962,13 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
 	}
 
 	if (reclaim) {
-		if (cmpxchg(&iter->position, pos, memcg) == pos) {
-			if (memcg)
-				css_get(&memcg->css);
-			if (pos)
-				css_put(&pos->css);
-		}
-
 		/*
-		 * pairs with css_tryget when dereferencing iter->position
-		 * above.
+		 * The position could have already been updated by a competing
+		 * thread, so check that the value hasn't changed since we read
+		 * it to avoid reclaiming from the same cgroup twice.
 		 */
+		(void)cmpxchg(&iter->position, pos, memcg);
+
 		if (pos)
 			css_put(&pos->css);
 
@@ -999,6 +1001,28 @@ void mem_cgroup_iter_break(struct mem_cgroup *root,
 		css_put(&prev->css);
 }
 
+static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
+{
+	struct mem_cgroup *memcg = dead_memcg;
+	struct mem_cgroup_reclaim_iter *iter;
+	struct mem_cgroup_per_zone *mz;
+	int nid, zid;
+	int i;
+
+	while ((memcg = parent_mem_cgroup(memcg))) {
+		for_each_node(nid) {
+			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
+				mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
+				for (i = 0; i <= DEF_PRIORITY; i++) {
+					iter = &mz->iter[i];
+					cmpxchg(&iter->position,
+						dead_memcg, NULL);
+				}
+			}
+		}
+	}
+}
+
 /*
  * Iteration constructs for visiting all cgroups (under a tree). If
  * loops are exited prematurely (break), mem_cgroup_iter_break() must
@@ -4324,6 +4348,13 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
 	wb_memcg_offline(memcg);
 }
 
+static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
+{
+	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+
+	invalidate_reclaim_iterators(memcg);
+}
+
 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
@@ -5185,6 +5216,7 @@ struct cgroup_subsys memory_cgrp_subsys = {
 	.css_alloc = mem_cgroup_css_alloc,
 	.css_online = mem_cgroup_css_online,
 	.css_offline = mem_cgroup_css_offline,
+	.css_released = mem_cgroup_css_released,
 	.css_free = mem_cgroup_css_free,
 	.css_reset = mem_cgroup_css_reset,
 	.can_attach = mem_cgroup_can_attach,
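
Both halves of this fix are the same lock-free idiom: the shared iter->position cache is only ever updated with cmpxchg() against the value the updater last read, and whoever notices the cached cgroup dying clears the cache instead of busy-waiting for ->css_released to do it. A rough userspace analogue using C11 atomics; the struct, refcount, and function names are invented for illustration, and the RCU protection that keeps the object from being freed between the load and the tryget is deliberately left out:

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct node {
	atomic_int refcnt;			/* stand-in for the css refcount */
};

static _Atomic(struct node *) cached_pos;	/* stand-in for iter->position */

/* css_tryget() analogue: take a reference only while the object is live. */
static bool node_tryget(struct node *n)
{
	int ref = atomic_load(&n->refcnt);

	while (ref > 0)
		if (atomic_compare_exchange_weak(&n->refcnt, &ref, ref + 1))
			return true;
	return false;
}

/* Reader side: use the cached position, or clear it if the object is dead. */
static struct node *get_position(void)
{
	struct node *pos;

	for (;;) {
		pos = atomic_load(&cached_pos);
		if (!pos || node_tryget(pos))
			return pos;
		/* Dead object: clear the cache ourselves instead of spinning. */
		atomic_compare_exchange_strong(&cached_pos, &pos, NULL);
	}
}

/* Teardown side (css_released analogue): drop any stale cached pointer. */
static void invalidate_position(struct node *dead)
{
	struct node *expected = dead;

	atomic_compare_exchange_strong(&cached_pos, &expected, NULL);
}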

mm/memory_hotplug.c

Lines changed: 19 additions & 12 deletions
@@ -1375,23 +1375,30 @@ int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
  */
 int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
 {
-	unsigned long pfn;
+	unsigned long pfn, sec_end_pfn;
 	struct zone *zone = NULL;
 	struct page *page;
 	int i;
-	for (pfn = start_pfn;
+	for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn);
 	     pfn < end_pfn;
-	     pfn += MAX_ORDER_NR_PAGES) {
-		i = 0;
-		/* This is just a CONFIG_HOLES_IN_ZONE check.*/
-		while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i))
-			i++;
-		if (i == MAX_ORDER_NR_PAGES)
+	     pfn = sec_end_pfn + 1, sec_end_pfn += PAGES_PER_SECTION) {
+		/* Make sure the memory section is present first */
+		if (!present_section_nr(pfn_to_section_nr(pfn)))
 			continue;
-		page = pfn_to_page(pfn + i);
-		if (zone && page_zone(page) != zone)
-			return 0;
-		zone = page_zone(page);
+		for (; pfn < sec_end_pfn && pfn < end_pfn;
+		     pfn += MAX_ORDER_NR_PAGES) {
+			i = 0;
+			/* This is just a CONFIG_HOLES_IN_ZONE check.*/
+			while ((i < MAX_ORDER_NR_PAGES) &&
+			       !pfn_valid_within(pfn + i))
+				i++;
+			if (i == MAX_ORDER_NR_PAGES)
+				continue;
+			page = pfn_to_page(pfn + i);
+			if (zone && page_zone(page) != zone)
+				return 0;
+			zone = page_zone(page);
+		}
 	}
 	return 1;
 }
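
The rewritten loop walks the range one memory section at a time and only does the MAX_ORDER stepping inside sections that are actually present, so pfn_to_page() is no longer reached for a pfn whose section has no backing mem_section. A small sketch of the section arithmetic the outer loop relies on; the constants mirror common x86_64 values and are assumptions for illustration, not taken from the patch:

#include <stdio.h>

/* Assumed: one section covers 2^15 pages, i.e. 128 MiB of 4 KiB pages. */
#define PFN_SECTION_SHIFT	15UL
#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION - 1))

static unsigned long pfn_to_section_nr(unsigned long pfn)
{
	return pfn >> PFN_SECTION_SHIFT;
}

/* SECTION_ALIGN_UP() analogue: round a pfn up to the next section boundary. */
static unsigned long section_align_up(unsigned long pfn)
{
	return (pfn + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK;
}

int main(void)
{
	unsigned long start_pfn = 0x48800;	/* illustrative hot-remove range */
	unsigned long end_pfn   = 0x58000;
	unsigned long pfn, sec_end_pfn;

	/* Outer-loop shape of the patched test_pages_in_a_zone(): visit each
	 * section in the range; the kernel skips absent sections at this point. */
	for (pfn = start_pfn, sec_end_pfn = section_align_up(start_pfn);
	     pfn < end_pfn;
	     pfn = sec_end_pfn + 1, sec_end_pfn += PAGES_PER_SECTION)
		printf("section %lu: pfns [%#lx, %#lx)\n",
		       pfn_to_section_nr(pfn), pfn, sec_end_pfn);

	return 0;
}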

mm/vmstat.c

Lines changed: 5 additions & 5 deletions
@@ -219,7 +219,7 @@ void set_pgdat_percpu_threshold(pg_data_t *pgdat,
  * particular counter cannot be updated from interrupt context.
  */
 void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
-			int delta)
+			long delta)
 {
 	struct per_cpu_pageset __percpu *pcp = zone->pageset;
 	s8 __percpu *p = pcp->vm_stat_diff + item;
@@ -318,8 +318,8 @@ EXPORT_SYMBOL(__dec_zone_page_state);
  * 1       Overstepping half of threshold
  * -1      Overstepping minus half of threshold
  */
-static inline void mod_state(struct zone *zone,
-	 enum zone_stat_item item, int delta, int overstep_mode)
+static inline void mod_state(struct zone *zone, enum zone_stat_item item,
+			     long delta, int overstep_mode)
 {
 	struct per_cpu_pageset __percpu *pcp = zone->pageset;
 	s8 __percpu *p = pcp->vm_stat_diff + item;
@@ -357,7 +357,7 @@ static inline void mod_state(struct zone *zone,
 }
 
 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
-			int delta)
+			long delta)
 {
 	mod_state(zone, item, delta, 0);
 }
@@ -384,7 +384,7 @@ EXPORT_SYMBOL(dec_zone_page_state);
  * Use interrupt disable to serialize counter updates
  */
 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
-			int delta)
+			long delta)
 {
 	unsigned long flags;
 
