asm-generic/tlb: rename HAVE_MMU_GATHER_NO_GATHER
Towards a more consistent naming scheme.

Link: http://lkml.kernel.org/r/20200116064531.483522-9-aneesh.kumar@linux.ibm.com
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 3af4bd0337
commit 580a586c40

4 changed files with 19 additions and 9 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
@@ -402,7 +402,7 @@ config MMU_GATHER_PAGE_SIZE
 config MMU_GATHER_NO_RANGE
 	bool
 
-config HAVE_MMU_GATHER_NO_GATHER
+config MMU_GATHER_NO_GATHER
 	bool
 
 config ARCH_HAVE_NMI_SAFE_CMPXCHG
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
@@ -163,7 +163,7 @@ config S390
 	select HAVE_PERF_USER_STACK_DUMP
 	select HAVE_MEMBLOCK_NODE_MAP
 	select HAVE_MEMBLOCK_PHYS_MAP
-	select HAVE_MMU_GATHER_NO_GATHER
+	select MMU_GATHER_NO_GATHER
 	select HAVE_MOD_ARCH_SPECIFIC
 	select HAVE_NOP_MCOUNT
 	select HAVE_OPROFILE
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
@@ -143,6 +143,16 @@
  * MMU_GATHER_NO_RANGE
  *
  * Use this if your architecture lacks an efficient flush_tlb_range().
+ *
+ * MMU_GATHER_NO_GATHER
+ *
+ * If the option is set the mmu_gather will not track individual pages for
+ * delayed page free anymore. A platform that enables the option needs to
+ * provide its own implementation of the __tlb_remove_page_size() function to
+ * free pages.
+ *
+ * This is useful if your architecture already flushes TLB entries in the
+ * various ptep_get_and_clear() functions.
  */
 
 #ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
@@ -202,7 +212,7 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
 #endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */
 
 
-#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+#ifndef CONFIG_MMU_GATHER_NO_GATHER
 /*
  * If we can't allocate a page to make a big batch of page pointers
  * to work on, then just handle a few from the on-stack structure.
@@ -277,7 +287,7 @@ struct mmu_gather {
 
 	unsigned int batch_count;
 
-#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+#ifndef CONFIG_MMU_GATHER_NO_GATHER
 	struct mmu_gather_batch *active;
 	struct mmu_gather_batch local;
 	struct page *__pages[MMU_GATHER_BUNDLE];
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
@@ -11,7 +11,7 @@
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
 
-#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+#ifndef CONFIG_MMU_GATHER_NO_GATHER
 
 static bool tlb_next_batch(struct mmu_gather *tlb)
 {
@@ -89,7 +89,7 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
 	return false;
 }
 
-#endif /* HAVE_MMU_GATHER_NO_GATHER */
+#endif /* MMU_GATHER_NO_GATHER */
 
 #ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
 
@@ -180,7 +180,7 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb)
 #ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
 	tlb_table_flush(tlb);
 #endif
-#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+#ifndef CONFIG_MMU_GATHER_NO_GATHER
 	tlb_batch_pages_flush(tlb);
 #endif
 }
@@ -211,7 +211,7 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
 	/* Is it from 0 to ~0? */
 	tlb->fullmm = !(start | (end+1));
 
-#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+#ifndef CONFIG_MMU_GATHER_NO_GATHER
 	tlb->need_flush_all = 0;
 	tlb->local.next = NULL;
 	tlb->local.nr = 0;
@@ -271,7 +271,7 @@ void tlb_finish_mmu(struct mmu_gather *tlb,
 
 	tlb_flush_mmu(tlb);
 
-#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+#ifndef CONFIG_MMU_GATHER_NO_GATHER
 	tlb_batch_list_free(tlb);
 #endif
 	dec_tlb_flush_pending(tlb->mm);
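Note on the documentation added to include/asm-generic/tlb.h above: an architecture that selects MMU_GATHER_NO_GATHER must supply its own __tlb_remove_page_size(). A minimal sketch of what such a hook can look like, modeled on the s390 approach (using free_page_and_swap_cache() as the freeing helper is that architecture's choice, not something the generic code mandates):

/*
 * Sketch: arch/<arch>/include/asm/tlb.h with MMU_GATHER_NO_GATHER selected.
 * The generic mmu_gather no longer batches pages, so each page handed to
 * the gather is freed here directly. This is safe only because the TLB
 * entries are assumed to have been flushed already by the architecture's
 * ptep_get_and_clear() family, as the comment block above describes.
 */
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size)
{
	free_page_and_swap_cache(page);
	return false;	/* the gather never needs a flush on our account */
}

The false return value tells the generic code that this page did not fill a batch, so no mmu_gather flush is required before continuing.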