
Commit 050eef3

Martin Schwidefsky authored and committed
[S390] fix tlb flushing vs. concurrent /proc accesses
The tlb flushing code uses the mm_users field of the mm_struct to decide if each page table entry needs to be flushed individually with IPTE or if a global flush for the mm_struct is sufficient after all page table updates have been done. The comment for mm_users says "How many users with user space?", but the /proc code increases mm_users after it has found the process structure by pid, without creating a new user process. That makes mm_users useless for the decision between the two tlb flushing methods: the current code can be confused into not flushing tlb entries by a concurrent access to /proc files, e.g. while a fork is in progress.

The solution is to make the tlb flushing logic independent of the mm_users field.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
1 parent 7af048d commit 050eef3
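
In condensed form, the patch replaces the mm_users heuristic with two new fields in mm->context: an attach_count of contexts that are actually attached to the address space, and a flush_mm flag recording that a global flush is still outstanding. The following user-space sketch is only a model of that decision; struct mm_model, clear_pte() and attach() are made-up names for illustration, not kernel code:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct mm_model {
        atomic_int attach_count;  /* contexts actually attached to the address space */
        bool flush_mm;            /* a global TLB flush is still outstanding */
};

/* Page-table update path: models the new ptep_get_and_clear() logic. */
static void clear_pte(struct mm_model *mm, bool mm_is_current)
{
        mm->flush_mm = true;
        if (atomic_load(&mm->attach_count) > 1 || !mm_is_current)
                printf("flush the entry immediately (IPTE-style)\n");
        else
                printf("skip the per-entry flush, a global flush is pending\n");
}

/* Attach path: models the flush-on-attach check in switch_mm() plus the
 * clearing of the flag done by __tlb_flush_mm_cond(). */
static void attach(struct mm_model *mm)
{
        atomic_fetch_add(&mm->attach_count, 1);
        if (mm->flush_mm) {
                printf("perform the deferred global flush\n");
                mm->flush_mm = false;
        }
}

int main(void)
{
        struct mm_model mm = { .flush_mm = false };

        atomic_store(&mm.attach_count, 1); /* one attached user, e.g. the forking task */
        clear_pte(&mm, true);              /* deferred: only one attached user */
        clear_pte(&mm, true);              /* a /proc reader raising mm_users changes nothing here */
        attach(&mm);                       /* second attach: the pending flush happens now */
        return 0;
}

A /proc reader that merely takes a reference never attaches to the address space, so in this scheme it can no longer influence the per-entry flush decision.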

8 files changed, 28 insertions(+), 6 deletions(-)

arch/s390/include/asm/hugetlb.h

Lines changed: 3 additions & 1 deletion
@@ -97,6 +97,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
 {
        pte_t pte = huge_ptep_get(ptep);

+       mm->context.flush_mm = 1;
        pmd_clear((pmd_t *) ptep);
        return pte;
 }
@@ -167,7 +168,8 @@ static inline void huge_ptep_invalidate(struct mm_struct *mm,
 ({ \
        pte_t __pte = huge_ptep_get(__ptep); \
        if (pte_write(__pte)) { \
-               if (atomic_read(&(__mm)->mm_users) > 1 || \
+               (__mm)->context.flush_mm = 1; \
+               if (atomic_read(&(__mm)->context.attach_count) > 1 || \
                    (__mm) != current->active_mm) \
                        huge_ptep_invalidate(__mm, __addr, __ptep); \
                set_huge_pte_at(__mm, __addr, __ptep, \

arch/s390/include/asm/mmu.h

Lines changed: 2 additions & 0 deletions
@@ -2,6 +2,8 @@
 #define __MMU_H

 typedef struct {
+       atomic_t attach_count;
+       unsigned int flush_mm;
        spinlock_t list_lock;
        struct list_head crst_list;
        struct list_head pgtable_list;

arch/s390/include/asm/mmu_context.h

Lines changed: 9 additions & 0 deletions
@@ -11,11 +11,14 @@

 #include <asm/pgalloc.h>
 #include <asm/uaccess.h>
+#include <asm/tlbflush.h>
 #include <asm-generic/mm_hooks.h>

 static inline int init_new_context(struct task_struct *tsk,
                                    struct mm_struct *mm)
 {
+       atomic_set(&mm->context.attach_count, 0);
+       mm->context.flush_mm = 0;
        mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
 #ifdef CONFIG_64BIT
        mm->context.asce_bits |= _ASCE_TYPE_REGION3;
@@ -76,6 +79,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 {
        cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
        update_mm(next, tsk);
+       atomic_dec(&prev->context.attach_count);
+       WARN_ON(atomic_read(&prev->context.attach_count) < 0);
+       atomic_inc(&next->context.attach_count);
+       /* Check for TLBs not flushed yet */
+       if (next->context.flush_mm)
+               __tlb_flush_mm(next);
 }

 #define enter_lazy_tlb(mm,tsk) do { } while (0)

arch/s390/include/asm/pgtable.h

Lines changed: 4 additions & 2 deletions
@@ -880,7 +880,8 @@ static inline void ptep_invalidate(struct mm_struct *mm,
 #define ptep_get_and_clear(__mm, __address, __ptep) \
 ({ \
        pte_t __pte = *(__ptep); \
-       if (atomic_read(&(__mm)->mm_users) > 1 || \
+       (__mm)->context.flush_mm = 1; \
+       if (atomic_read(&(__mm)->context.attach_count) > 1 || \
            (__mm) != current->active_mm) \
                ptep_invalidate(__mm, __address, __ptep); \
        else \
@@ -923,7 +924,8 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
 ({ \
        pte_t __pte = *(__ptep); \
        if (pte_write(__pte)) { \
-               if (atomic_read(&(__mm)->mm_users) > 1 || \
+               (__mm)->context.flush_mm = 1; \
+               if (atomic_read(&(__mm)->context.attach_count) > 1 || \
                    (__mm) != current->active_mm) \
                        ptep_invalidate(__mm, __addr, __ptep); \
                set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte)); \

arch/s390/include/asm/tlb.h

Lines changed: 1 addition & 2 deletions
@@ -50,8 +50,7 @@ static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm,
        struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);

        tlb->mm = mm;
-       tlb->fullmm = full_mm_flush || (num_online_cpus() == 1) ||
-               (atomic_read(&mm->mm_users) <= 1 && mm == current->active_mm);
+       tlb->fullmm = full_mm_flush;
        tlb->nr_ptes = 0;
        tlb->nr_pxds = TLB_NR_PTRS;
        if (tlb->fullmm)

arch/s390/include/asm/tlbflush.h

Lines changed: 5 additions & 1 deletion
@@ -94,8 +94,12 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)

 static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
 {
-       if (atomic_read(&mm->mm_users) <= 1 && mm == current->active_mm)
+       spin_lock(&mm->page_table_lock);
+       if (mm->context.flush_mm) {
                __tlb_flush_mm(mm);
+               mm->context.flush_mm = 0;
+       }
+       spin_unlock(&mm->page_table_lock);
 }

 /*
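
For orientation, this is how __tlb_flush_mm_cond() reads after the hunk above; the comments are added here and are not part of the patch, and since the commit does not spell out the choice of page_table_lock, the locking note is descriptive only:

static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
{
        /* the test and clear of the pending-flush marker is done under page_table_lock */
        spin_lock(&mm->page_table_lock);
        if (mm->context.flush_mm) {
                /* a page table update deferred its flush: flush the whole mm now */
                __tlb_flush_mm(mm);
                mm->context.flush_mm = 0;
        }
        spin_unlock(&mm->page_table_lock);
}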

arch/s390/kernel/smp.c

Lines changed: 2 additions & 0 deletions
@@ -583,6 +583,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
        sf->gprs[9] = (unsigned long) sf;
        cpu_lowcore->save_area[15] = (unsigned long) sf;
        __ctl_store(cpu_lowcore->cregs_save_area, 0, 15);
+       atomic_inc(&init_mm.context.attach_count);
        asm volatile(
                " stam 0,15,0(%0)"
                : : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
@@ -659,6 +660,7 @@ void __cpu_die(unsigned int cpu)
        while (sigp_p(0, cpu, sigp_set_prefix) == sigp_busy)
                udelay(10);
        smp_free_lowcore(cpu);
+       atomic_dec(&init_mm.context.attach_count);
        pr_info("Processor %d stopped\n", cpu);
 }

arch/s390/mm/init.c

Lines changed: 2 additions & 0 deletions
@@ -74,6 +74,8 @@ void __init paging_init(void)
        __ctl_load(S390_lowcore.kernel_asce, 13, 13);
        __raw_local_irq_ssm(ssm_mask);

+       atomic_set(&init_mm.context.attach_count, 1);
+
        sparse_memory_present_with_active_regions(MAX_NUMNODES);
        sparse_init();
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));