
Commit 196d9d8

Peter Zijlstra authored and wildea01 committed

mm/memory: Move mmu_gather and TLB invalidation code into its own file

In preparation for maintaining the mmu_gather code as its own entity, move
the implementation out of memory.c and into its own file.

Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
1 parent a6d6024 commit 196d9d8

4 files changed: +265 / -252 lines changed


include/asm-generic/tlb.h

Lines changed: 1 addition & 0 deletions
@@ -138,6 +138,7 @@ void arch_tlb_gather_mmu(struct mmu_gather *tlb,
 void tlb_flush_mmu(struct mmu_gather *tlb);
 void arch_tlb_finish_mmu(struct mmu_gather *tlb,
                          unsigned long start, unsigned long end, bool force);
+void tlb_flush_mmu_free(struct mmu_gather *tlb);
 extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
                                    int page_size);
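The excerpt does not say why this prototype is added, but the mm/memory.c hunk below suggests a likely reading: tlb_flush_mmu_free() is currently a static helper in memory.c, so once the mmu_gather implementation moves into its own translation unit, an extern declaration is needed for any user that no longer lives in the same file. A minimal sketch of that visibility change, under that assumption:

/* Before the move: file-local helper inside mm/memory.c. */
static void tlb_flush_mmu_free(struct mmu_gather *tlb);

/* After the move: declared in include/asm-generic/tlb.h so code outside the
 * new mm/mmu_gather.c can still call it (exactly which callers need it is
 * not visible in this excerpt). */
void tlb_flush_mmu_free(struct mmu_gather *tlb);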

mm/Makefile

Lines changed: 3 additions & 3 deletions
@@ -23,9 +23,9 @@ KCOV_INSTRUMENT_vmstat.o := n
 
 mmu-y			:= nommu.o
 mmu-$(CONFIG_MMU)	:= gup.o highmem.o memory.o mincore.o \
-			   mlock.o mmap.o mprotect.o mremap.o msync.o \
-			   page_vma_mapped.o pagewalk.o pgtable-generic.o \
-			   rmap.o vmalloc.o
+			   mlock.o mmap.o mmu_gather.o mprotect.o mremap.o \
+			   msync.o page_vma_mapped.o pagewalk.o \
+			   pgtable-generic.o rmap.o vmalloc.o
 
 
 ifdef CONFIG_CROSS_MEMORY_ATTACH
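With this change, CONFIG_MMU kernels build a new object, mmu_gather.o. The fourth changed file is not shown in this excerpt; given the commit message and the +265/-252 totals, it is presumably the new mm/mmu_gather.c that receives the roughly 250 lines removed from mm/memory.c below. A rough skeleton of that file, stated as an assumption (the include list is a guess; the function names match the removed hunk):

/* mm/mmu_gather.c -- assumed layout, not shown in this excerpt. */
#include <linux/mm.h>
#include <asm/tlb.h>

#ifdef HAVE_GENERIC_MMU_GATHER
/* tlb_next_batch(), arch_tlb_gather_mmu(), tlb_flush_mmu_free(),
 * tlb_flush_mmu(), arch_tlb_finish_mmu(), __tlb_remove_page_size()
 */
#endif /* HAVE_GENERIC_MMU_GATHER */

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
/* tlb_table_invalidate(), tlb_remove_table_smp_sync(),
 * tlb_remove_table_one(), tlb_remove_table_rcu(),
 * tlb_table_flush(), tlb_remove_table()
 */
#endif /* CONFIG_HAVE_RCU_TABLE_FREE */

/* tlb_gather_mmu(), tlb_finish_mmu() */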

mm/memory.c

Lines changed: 0 additions & 249 deletions
@@ -186,255 +186,6 @@ static void check_sync_rss_stat(struct task_struct *task)
 
 #endif /* SPLIT_RSS_COUNTING */
 
-#ifdef HAVE_GENERIC_MMU_GATHER
-
-static bool tlb_next_batch(struct mmu_gather *tlb)
-{
-        struct mmu_gather_batch *batch;
-
-        batch = tlb->active;
-        if (batch->next) {
-                tlb->active = batch->next;
-                return true;
-        }
-
-        if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
-                return false;
-
-        batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
-        if (!batch)
-                return false;
-
-        tlb->batch_count++;
-        batch->next = NULL;
-        batch->nr = 0;
-        batch->max = MAX_GATHER_BATCH;
-
-        tlb->active->next = batch;
-        tlb->active = batch;
-
-        return true;
-}
-
-void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
-                        unsigned long start, unsigned long end)
-{
-        tlb->mm = mm;
-
-        /* Is it from 0 to ~0? */
-        tlb->fullmm = !(start | (end+1));
-        tlb->need_flush_all = 0;
-        tlb->local.next = NULL;
-        tlb->local.nr = 0;
-        tlb->local.max = ARRAY_SIZE(tlb->__pages);
-        tlb->active = &tlb->local;
-        tlb->batch_count = 0;
-
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-        tlb->batch = NULL;
-#endif
-        tlb->page_size = 0;
-
-        __tlb_reset_range(tlb);
-}
-
-static void tlb_flush_mmu_free(struct mmu_gather *tlb)
-{
-        struct mmu_gather_batch *batch;
-
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-        tlb_table_flush(tlb);
-#endif
-        for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
-                free_pages_and_swap_cache(batch->pages, batch->nr);
-                batch->nr = 0;
-        }
-        tlb->active = &tlb->local;
-}
-
-void tlb_flush_mmu(struct mmu_gather *tlb)
-{
-        tlb_flush_mmu_tlbonly(tlb);
-        tlb_flush_mmu_free(tlb);
-}
-
-/* tlb_finish_mmu
- *      Called at the end of the shootdown operation to free up any resources
- *      that were required.
- */
-void arch_tlb_finish_mmu(struct mmu_gather *tlb,
-                unsigned long start, unsigned long end, bool force)
-{
-        struct mmu_gather_batch *batch, *next;
-
-        if (force) {
-                __tlb_reset_range(tlb);
-                __tlb_adjust_range(tlb, start, end - start);
-        }
-
-        tlb_flush_mmu(tlb);
-
-        /* keep the page table cache within bounds */
-        check_pgt_cache();
-
-        for (batch = tlb->local.next; batch; batch = next) {
-                next = batch->next;
-                free_pages((unsigned long)batch, 0);
-        }
-        tlb->local.next = NULL;
-}
-
-/* __tlb_remove_page
- *      Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
- *      handling the additional races in SMP caused by other CPUs caching valid
- *      mappings in their TLBs. Returns the number of free page slots left.
- *      When out of page slots we must call tlb_flush_mmu().
- *returns true if the caller should flush.
- */
-bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
-{
-        struct mmu_gather_batch *batch;
-
-        VM_BUG_ON(!tlb->end);
-        VM_WARN_ON(tlb->page_size != page_size);
-
-        batch = tlb->active;
-        /*
-         * Add the page and check if we are full. If so
-         * force a flush.
-         */
-        batch->pages[batch->nr++] = page;
-        if (batch->nr == batch->max) {
-                if (!tlb_next_batch(tlb))
-                        return true;
-                batch = tlb->active;
-        }
-        VM_BUG_ON_PAGE(batch->nr > batch->max, page);
-
-        return false;
-}
-
-#endif /* HAVE_GENERIC_MMU_GATHER */
-
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-
-/*
- * See the comment near struct mmu_table_batch.
- */
-
-/*
- * If we want tlb_remove_table() to imply TLB invalidates.
- */
-static inline void tlb_table_invalidate(struct mmu_gather *tlb)
-{
-#ifdef CONFIG_HAVE_RCU_TABLE_INVALIDATE
-        /*
-         * Invalidate page-table caches used by hardware walkers. Then we still
-         * need to RCU-sched wait while freeing the pages because software
-         * walkers can still be in-flight.
-         */
-        tlb_flush_mmu_tlbonly(tlb);
-#endif
-}
-
-static void tlb_remove_table_smp_sync(void *arg)
-{
-        /* Simply deliver the interrupt */
-}
-
-static void tlb_remove_table_one(void *table)
-{
-        /*
-         * This isn't an RCU grace period and hence the page-tables cannot be
-         * assumed to be actually RCU-freed.
-         *
-         * It is however sufficient for software page-table walkers that rely on
-         * IRQ disabling. See the comment near struct mmu_table_batch.
-         */
-        smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
-        __tlb_remove_table(table);
-}
-
-static void tlb_remove_table_rcu(struct rcu_head *head)
-{
-        struct mmu_table_batch *batch;
-        int i;
-
-        batch = container_of(head, struct mmu_table_batch, rcu);
-
-        for (i = 0; i < batch->nr; i++)
-                __tlb_remove_table(batch->tables[i]);
-
-        free_page((unsigned long)batch);
-}
-
-void tlb_table_flush(struct mmu_gather *tlb)
-{
-        struct mmu_table_batch **batch = &tlb->batch;
-
-        if (*batch) {
-                tlb_table_invalidate(tlb);
-                call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
-                *batch = NULL;
-        }
-}
-
-void tlb_remove_table(struct mmu_gather *tlb, void *table)
-{
-        struct mmu_table_batch **batch = &tlb->batch;
-
-        if (*batch == NULL) {
-                *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
-                if (*batch == NULL) {
-                        tlb_table_invalidate(tlb);
-                        tlb_remove_table_one(table);
-                        return;
-                }
-                (*batch)->nr = 0;
-        }
-
-        (*batch)->tables[(*batch)->nr++] = table;
-        if ((*batch)->nr == MAX_TABLE_BATCH)
-                tlb_table_flush(tlb);
-}
-
-#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
-
-/**
- * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
- * @tlb: the mmu_gather structure to initialize
- * @mm: the mm_struct of the target address space
- * @start: start of the region that will be removed from the page-table
- * @end: end of the region that will be removed from the page-table
- *
- * Called to initialize an (on-stack) mmu_gather structure for page-table
- * tear-down from @mm. The @start and @end are set to 0 and -1
- * respectively when @mm is without users and we're going to destroy
- * the full address space (exit/execve).
- */
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
-                        unsigned long start, unsigned long end)
-{
-        arch_tlb_gather_mmu(tlb, mm, start, end);
-        inc_tlb_flush_pending(tlb->mm);
-}
-
-void tlb_finish_mmu(struct mmu_gather *tlb,
-                unsigned long start, unsigned long end)
-{
-        /*
-         * If there are parallel threads are doing PTE changes on same range
-         * under non-exclusive lock(e.g., mmap_sem read-side) but defer TLB
-         * flush by batching, a thread has stable TLB entry can fail to flush
-         * the TLB by observing pte_none|!pte_dirty, for example so flush TLB
-         * forcefully if we detect parallel PTE batching threads.
-         */
-        bool force = mm_tlb_flush_nested(tlb->mm);
-
-        arch_tlb_finish_mmu(tlb, start, end, force);
-        dec_tlb_flush_pending(tlb->mm);
-}
-
 /*
  * Note: this doesn't free the actual pages themselves. That
  * has been handled earlier when unmapping all the memory regions.
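The kernel-doc removed above describes tlb_gather_mmu() and tlb_finish_mmu() as the wrappers that bracket a page-table tear-down with an on-stack mmu_gather. A hedged sketch of the usual caller pattern follows; the callees in the middle (unmap_vmas(), free_pgtables()) and their exact signatures are assumptions based on the surrounding mm code, not part of this diff:

/* Sketch: how core mm code typically drives the mmu_gather API above,
 * e.g. when tearing down a VMA range. Details are assumed. */
struct mmu_gather tlb;

tlb_gather_mmu(&tlb, mm, start, end);     /* init on-stack gather, mark a TLB flush pending */
unmap_vmas(&tlb, vma, start, end);        /* clear PTEs; pages are batched via
                                           * __tlb_remove_page_size(), which returns true
                                           * when the batch is full and the caller must
                                           * tlb_flush_mmu() before queueing more */
free_pgtables(&tlb, vma, floor, ceiling); /* queue freed page tables; with
                                           * CONFIG_HAVE_RCU_TABLE_FREE this goes through
                                           * tlb_remove_table() */
tlb_finish_mmu(&tlb, start, end);         /* final TLB flush, free batched pages/tables */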
