mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-07-07 15:01:44 +00:00
x86/mm/tlb: Ignore f->new_tlb_gen when zero
Commit aa44284960
("x86/mm/tlb: Avoid reading mm_tlb_gen when possible") introduced an optimization to skip superfluous TLB flushes based on the generation provided in flush_tlb_info. However, arch_tlbbatch_flush() does not provide any generation in flush_tlb_info and populates the flush_tlb_info generation with 0. This 0 causes the flush_tlb_info to be interpreted as a superfluous, old flush. As a result, try_to_unmap_one() would not perform any TLB flushes. Fix it by checking whether f->new_tlb_gen is nonzero. Zero value is anyhow an invalid generation value. To avoid future confusion, introduce TLB_GENERATION_INVALID constant and use it properly. Add warnings to ensure no partial flushes are done with TLB_GENERATION_INVALID or when f->mm is NULL, since this does not make any sense. In addition, add the missing unlikely(). [ dhansen: change VM_BUG_ON() -> VM_WARN_ON(), clarify changelog ] Fixes: aa44284960
("x86/mm/tlb: Avoid reading mm_tlb_gen when possible") Reported-by: Hugh Dickins <hughd@google.com> Signed-off-by: Nadav Amit <namit@vmware.com> Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com> Tested-by: Hugh Dickins <hughd@google.com> Link: https://lkml.kernel.org/r/20220710232837.3618-1-namit@vmware.com
This commit is contained in:
parent
54ee184404
commit
8f1d56f64f
2 changed files with 13 additions and 3 deletions
|
@ -16,6 +16,7 @@
|
||||||
void __flush_tlb_all(void);
|
void __flush_tlb_all(void);
|
||||||
|
|
||||||
#define TLB_FLUSH_ALL -1UL
|
#define TLB_FLUSH_ALL -1UL
|
||||||
|
#define TLB_GENERATION_INVALID 0
|
||||||
|
|
||||||
void cr4_update_irqsoff(unsigned long set, unsigned long clear);
|
void cr4_update_irqsoff(unsigned long set, unsigned long clear);
|
||||||
unsigned long cr4_read_shadow(void);
|
unsigned long cr4_read_shadow(void);
|
||||||
|
|
|
@ -771,7 +771,8 @@ static void flush_tlb_func(void *info)
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (f->new_tlb_gen <= local_tlb_gen) {
|
if (unlikely(f->new_tlb_gen != TLB_GENERATION_INVALID &&
|
||||||
|
f->new_tlb_gen <= local_tlb_gen)) {
|
||||||
/*
|
/*
|
||||||
* The TLB is already up to date in respect to f->new_tlb_gen.
|
* The TLB is already up to date in respect to f->new_tlb_gen.
|
||||||
* While the core might be still behind mm_tlb_gen, checking
|
* While the core might be still behind mm_tlb_gen, checking
|
||||||
|
@ -843,6 +844,12 @@ static void flush_tlb_func(void *info)
|
||||||
/* Partial flush */
|
/* Partial flush */
|
||||||
unsigned long addr = f->start;
|
unsigned long addr = f->start;
|
||||||
|
|
||||||
|
/* Partial flush cannot have invalid generations */
|
||||||
|
VM_WARN_ON(f->new_tlb_gen == TLB_GENERATION_INVALID);
|
||||||
|
|
||||||
|
/* Partial flush must have valid mm */
|
||||||
|
VM_WARN_ON(f->mm == NULL);
|
||||||
|
|
||||||
nr_invalidate = (f->end - f->start) >> f->stride_shift;
|
nr_invalidate = (f->end - f->start) >> f->stride_shift;
|
||||||
|
|
||||||
while (addr < f->end) {
|
while (addr < f->end) {
|
||||||
|
@ -1045,7 +1052,8 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
|
||||||
struct flush_tlb_info *info;
|
struct flush_tlb_info *info;
|
||||||
|
|
||||||
preempt_disable();
|
preempt_disable();
|
||||||
info = get_flush_tlb_info(NULL, start, end, 0, false, 0);
|
info = get_flush_tlb_info(NULL, start, end, 0, false,
|
||||||
|
TLB_GENERATION_INVALID);
|
||||||
|
|
||||||
on_each_cpu(do_kernel_range_flush, info, 1);
|
on_each_cpu(do_kernel_range_flush, info, 1);
|
||||||
|
|
||||||
|
@ -1214,7 +1222,8 @@ void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
|
||||||
|
|
||||||
int cpu = get_cpu();
|
int cpu = get_cpu();
|
||||||
|
|
||||||
info = get_flush_tlb_info(NULL, 0, TLB_FLUSH_ALL, 0, false, 0);
|
info = get_flush_tlb_info(NULL, 0, TLB_FLUSH_ALL, 0, false,
|
||||||
|
TLB_GENERATION_INVALID);
|
||||||
/*
|
/*
|
||||||
* flush_tlb_multi() is not optimized for the common case in which only
|
* flush_tlb_multi() is not optimized for the common case in which only
|
||||||
* a local TLB flush is needed. Optimize this use-case by calling
|
* a local TLB flush is needed. Optimize this use-case by calling
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue