Mirror of https://github.com/Fishwaldo/linux-bl808.git (synced 2025-04-13 17:55:14 +00:00)
This updates the generic and arch-specific implementations of __tlb_remove_page() to return true when a TLB flush is needed. If __tlb_remove_page() indicates that a flush is needed, the page we tried to remove must be tracked and queued again after the flush: the pte has already been updated to none, so we cannot simply loop back.

This change enables a tlb_flush when we flush a range that consists of different page sizes. Architectures like ppc64 can do a range-based TLB flush and need to track the page size for that; when we remove a huge page, we force a TLB flush and start a new mmu gather.

[aneesh.kumar@linux.vnet.ibm.com: mm-change-the-interface-for-__tlb_remove_page-v3]
Link: http://lkml.kernel.org/r/1465049193-22197-2-git-send-email-aneesh.kumar@linux.vnet.ibm.com
Link: http://lkml.kernel.org/r/1464860389-29019-2-git-send-email-aneesh.kumar@linux.vnet.ibm.com
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Hugh Dickins <hughd@google.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: David Rientjes <rientjes@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
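For illustration, a minimal caller-side sketch of the pattern the commit message describes, loosely modelled on a pte-walk loop such as zap_pte_range(); force_flush and pending_page are placeholder names, not identifiers from this header. Note that in the UML implementation below __tlb_remove_page() always returns false, so this retry path never triggers on this architecture.

	/* inside the pte-walk loop, after the pte has been cleared: */
	if (__tlb_remove_page(tlb, page)) {
		/*
		 * The gather needs a flush (e.g. the batch is full or the
		 * page size changed).  The pte is already gone, so remember
		 * the page and stop the walk here.
		 */
		force_flush = 1;
		pending_page = page;
		break;
	}

	/* ... after leaving the loop and dropping the pte lock ... */
	if (force_flush) {
		tlb_flush_mmu_tlbonly(tlb);
		tlb_flush_mmu_free(tlb);
		if (pending_page) {
			/* re-queue the page that did not fit before the flush */
			__tlb_remove_page(tlb, pending_page);
			pending_page = NULL;
		}
		/* then resume the walk from where it stopped */
	}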
140 lines
3.4 KiB
C
#ifndef __UM_TLB_H
#define __UM_TLB_H

#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/percpu.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)

/* struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		need_flush; /* Really unmapped some ptes? */
	unsigned long		start;
	unsigned long		end;
	unsigned int		fullmm; /* non-zero means full mm flush */
};

static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
					  unsigned long address)
{
	if (tlb->start > address)
		tlb->start = address;
	if (tlb->end < address + PAGE_SIZE)
		tlb->end = address + PAGE_SIZE;
}

static inline void init_tlb_gather(struct mmu_gather *tlb)
{
	tlb->need_flush = 0;

	tlb->start = TASK_SIZE;
	tlb->end = 0;

	if (tlb->fullmm) {
		tlb->start = 0;
		tlb->end = TASK_SIZE;
	}
}

static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->start = start;
	tlb->end = end;
	tlb->fullmm = !(start | (end+1));

	init_tlb_gather(tlb);
}

extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			       unsigned long end);

static inline void
tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
}

static inline void
tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	init_tlb_gather(tlb);
}

static inline void
tlb_flush_mmu(struct mmu_gather *tlb)
{
	if (!tlb->need_flush)
		return;

	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}

/* tlb_finish_mmu
 *	Called at the end of the shootdown operation to free up any resources
 *	that were required.
 */
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	tlb_flush_mmu(tlb);

	/* keep the page table cache within bounds */
	check_pgt_cache();
}

/* tlb_remove_page
 *	Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
 *	while handling the additional races in SMP caused by other CPUs
 *	caching valid mappings in their TLBs.
 */
static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;
	free_page_and_swap_cache(page);
	return false; /* avoid calling tlb_flush_mmu */
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	__tlb_remove_page(tlb, page);
}

static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
					 struct page *page)
{
	return __tlb_remove_page(tlb, page);
}

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that pte's were really unmapped in ->need_flush, so we can
 * later optimise away the tlb invalidate.  This helps when userspace is
 * unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb->need_flush = 1;				\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)

#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)

#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)

#define tlb_migrate_finish(mm) do {} while (0)

#endif
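As a usage note, a rough sketch (assumed, not code from this file) of the sequence the mm core follows with this header's helpers; mm, ptep, addr and page are placeholders:

	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, start, end);
	/* for each pte being torn down in [start, end): */
	tlb_remove_tlb_entry(&tlb, ptep, addr);	/* sets need_flush, widens tlb.start/tlb.end */
	tlb_remove_page(&tlb, page);		/* frees the page and its swap cache entry */
	tlb_finish_mmu(&tlb, start, end);	/* flush_tlb_mm_range() on the gathered range, then cleanup */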