
Commit ff075d6

Peter Zijlstra authored and torvalds committed
um: mmu_gather rework
Fix up the um mmu_gather code to conform to the new API.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Miller <davem@davemloft.net>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Tony Luck <tony.luck@intel.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Namhyung Kim <namhyung@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 7a95a2c · commit ff075d6
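In the reworked API, the caller supplies the mmu_gather (typically on its own stack) rather than receiving a pointer to a per-CPU gather. A minimal caller-side sketch of the before/after pattern implied by this diff; the surrounding unmap code is illustrative, not the exact kernel caller:

	/* Before: the API handed back a per-CPU gather and pinned the CPU. */
	struct mmu_gather *tlb = tlb_gather_mmu(mm, 0);
	/* ... unmap the range, calling tlb_remove_page() per page ... */
	tlb_finish_mmu(tlb, start, end);	/* released the per-CPU var */

	/* After: the caller owns the gather, typically as a stack variable. */
	struct mmu_gather tlb;
	tlb_gather_mmu(&tlb, mm, 0);
	/* ... unmap the range, calling tlb_remove_page() per page ... */
	tlb_finish_mmu(&tlb, start, end);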


arch/um/include/asm/tlb.h

Lines changed: 11 additions & 18 deletions
@@ -22,9 +22,6 @@ struct mmu_gather {
 	unsigned int	fullmm; /* non-zero means full mm flush */
 };
 
-/* Users of the generic TLB shootdown code must declare this storage space. */
-DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
-
 static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
 					  unsigned long address)
 {
@@ -47,27 +44,20 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
 	}
 }
 
-/* tlb_gather_mmu
- *	Return a pointer to an initialized struct mmu_gather.
- */
-static inline struct mmu_gather *
-tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
+static inline void
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
 {
-	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
-
 	tlb->mm = mm;
 	tlb->fullmm = full_mm_flush;
 
 	init_tlb_gather(tlb);
-
-	return tlb;
 }
 
 extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 			       unsigned long end);
 
 static inline void
-tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+tlb_flush_mmu(struct mmu_gather *tlb)
 {
 	if (!tlb->need_flush)
 		return;
@@ -83,24 +73,27 @@ tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 static inline void
 tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
-	tlb_flush_mmu(tlb, start, end);
+	tlb_flush_mmu(tlb);
 
 	/* keep the page table cache within bounds */
 	check_pgt_cache();
-
-	put_cpu_var(mmu_gathers);
 }
 
 /* tlb_remove_page
  *	Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
  *	while handling the additional races in SMP caused by other CPUs
  *	caching valid mappings in their TLBs.
  */
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
 	tlb->need_flush = 1;
 	free_page_and_swap_cache(page);
-	return;
+	return 1; /* avoid calling tlb_flush_mmu */
+}
+
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+	__tlb_remove_page(tlb, page);
+}
 
 /**
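The split into __tlb_remove_page()/tlb_remove_page() mirrors the generic mmu_gather contract, where a zero return from __tlb_remove_page() signals a full batch that must be flushed before more pages are queued. A sketch of how a caller would consume the return value under that assumption (illustrative, not the um code; since um frees the page immediately, its __tlb_remove_page() always returns 1 and the flush branch is never taken here):

	static inline void remove_page_example(struct mmu_gather *tlb, struct page *page)
	{
		/* If the gather reports a full batch (returns 0), flush now. */
		if (!__tlb_remove_page(tlb, page))
			tlb_flush_mmu(tlb);
	}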
