xen: lazy-mmu operations
This patch uses the lazy-mmu hooks to batch mmu operations where possible. This is primarily useful for batching operations applied to active pagetables, which happens during mprotect, munmap, mremap and the like (mmap does not do bulk pagetable operations, so it isn't helped).

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Acked-by: Chris Wright <chrisw@sous-sol.org>
parent f120f13ea0, commit d66bf8fcf3
3 changed files with 72 additions and 32 deletions
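
Every function converted in the hunks below follows the same shape: reserve an entry plus argument space in the multicall batch with xen_mc_entry(), fill the hypercall arguments in that buffer, queue the call with a MULTI_* wrapper instead of trapping into the hypervisor immediately, and finish with xen_mc_issue(PARAVIRT_LAZY_MMU), which lets the batch keep accumulating while a lazy-MMU section is open. A minimal sketch of that shape, lifted from the new xen_set_pmd() body below (the comments are interpretation, not part of the patch):

    struct multicall_space mcs;
    struct mmu_update *u;

    mcs = xen_mc_entry(sizeof(*u));         /* reserve a batch slot plus argument space */
    u = mcs.args;                           /* arguments live in the batch buffer ...   */
    u->ptr = virt_to_machine(ptr).maddr;    /* ... because the hypercall may run later  */
    u->val = pmd_val_ma(val);
    MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);   /* queue it, do not trap yet */

    xen_mc_issue(PARAVIRT_LAZY_MMU);        /* flush now, or defer if in lazy-MMU mode */

In the pagetable setters the sequence is additionally bracketed by preempt_disable()/preempt_enable(), presumably because the batch buffer is per-CPU and the task must not migrate between xen_mc_entry() and xen_mc_issue().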
@@ -472,28 +472,38 @@ static void xen_apic_write(unsigned long reg, unsigned long val)
 
 static void xen_flush_tlb(void)
 {
-        struct mmuext_op op;
+        struct mmuext_op *op;
+        struct multicall_space mcs = xen_mc_entry(sizeof(*op));
+
+        op = mcs.args;
+        op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
+        MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
 
-        op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
-        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
-                BUG();
+        xen_mc_issue(PARAVIRT_LAZY_MMU);
 }
 
 static void xen_flush_tlb_single(unsigned long addr)
 {
-        struct mmuext_op op;
+        struct mmuext_op *op;
+        struct multicall_space mcs = xen_mc_entry(sizeof(*op));
+
+        op = mcs.args;
+        op->cmd = MMUEXT_INVLPG_LOCAL;
+        op->arg1.linear_addr = addr & PAGE_MASK;
+        MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
 
-        op.cmd = MMUEXT_INVLPG_LOCAL;
-        op.arg1.linear_addr = addr & PAGE_MASK;
-        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
-                BUG();
+        xen_mc_issue(PARAVIRT_LAZY_MMU);
 }
 
 static void xen_flush_tlb_others(const cpumask_t *cpus, struct mm_struct *mm,
                                  unsigned long va)
 {
-        struct mmuext_op op;
+        struct {
+                struct mmuext_op op;
+                cpumask_t mask;
+        } *args;
         cpumask_t cpumask = *cpus;
+        struct multicall_space mcs;
 
         /*
          * A couple of (to be removed) sanity checks:
@@ -510,17 +520,21 @@ static void xen_flush_tlb_others(const cpumask_t *cpus, struct mm_struct *mm,
         if (cpus_empty(cpumask))
                 return;
 
+        mcs = xen_mc_entry(sizeof(*args));
+        args = mcs.args;
+        args->mask = cpumask;
+        args->op.arg2.vcpumask = &args->mask;
+
         if (va == TLB_FLUSH_ALL) {
-                op.cmd = MMUEXT_TLB_FLUSH_MULTI;
-                op.arg2.vcpumask = (void *)cpus;
+                args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
         } else {
-                op.cmd = MMUEXT_INVLPG_MULTI;
-                op.arg1.linear_addr = va;
-                op.arg2.vcpumask = (void *)cpus;
+                args->op.cmd = MMUEXT_INVLPG_MULTI;
+                args->op.arg1.linear_addr = va;
         }
 
-        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
-                BUG();
+        MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
+
+        xen_mc_issue(PARAVIRT_LAZY_MMU);
 }
 
 static unsigned long xen_read_cr2(void)
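
These flush paths only benefit when xen_mc_issue() actually defers the submission, i.e. while the kernel is inside a lazy-MMU section (as during the mprotect/munmap/mremap bulk updates named in the commit message). xen_mc_issue() itself is not part of this diff; a plausible sketch of its behaviour, where xen_mc_flush() is an assumed helper that submits the queued batch to the hypervisor:

    /* Hypothetical sketch, not taken from this patch. */
    static inline void xen_mc_issue(unsigned mode)
    {
            if (xen_get_lazy_mode() != mode)
                    xen_mc_flush();   /* not in lazy mode: submit the batch right away */
            /* otherwise keep batching until lazy mode ends or the buffer fills up */
    }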
@@ -98,12 +98,20 @@ void make_lowmem_page_readwrite(void *vaddr)
 
 void xen_set_pmd(pmd_t *ptr, pmd_t val)
 {
-        struct mmu_update u;
+        struct multicall_space mcs;
+        struct mmu_update *u;
+
+        preempt_disable();
 
-        u.ptr = virt_to_machine(ptr).maddr;
-        u.val = pmd_val_ma(val);
-        if (HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0)
-                BUG();
+        mcs = xen_mc_entry(sizeof(*u));
+        u = mcs.args;
+        u->ptr = virt_to_machine(ptr).maddr;
+        u->val = pmd_val_ma(val);
+        MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);
+
+        xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+        preempt_enable();
 }
 
 /*
@@ -146,20 +154,38 @@ void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
 void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t pteval)
 {
-        if ((mm != current->mm && mm != &init_mm) ||
-            HYPERVISOR_update_va_mapping(addr, pteval, 0) != 0)
-                xen_set_pte(ptep, pteval);
+        if (mm == current->mm || mm == &init_mm) {
+                if (xen_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
+                        struct multicall_space mcs;
+                        mcs = xen_mc_entry(0);
+
+                        MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
+                        xen_mc_issue(PARAVIRT_LAZY_MMU);
+                        return;
+                } else
+                        if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
+                                return;
+        }
+        xen_set_pte(ptep, pteval);
 }
 
 #ifdef CONFIG_X86_PAE
 void xen_set_pud(pud_t *ptr, pud_t val)
 {
-        struct mmu_update u;
+        struct multicall_space mcs;
+        struct mmu_update *u;
+
+        preempt_disable();
 
-        u.ptr = virt_to_machine(ptr).maddr;
-        u.val = pud_val_ma(val);
-        if (HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0)
-                BUG();
+        mcs = xen_mc_entry(sizeof(*u));
+        u = mcs.args;
+        u->ptr = virt_to_machine(ptr).maddr;
+        u->val = pud_val_ma(val);
+        MULTI_mmu_update(mcs.mc, u, 1, NULL, DOMID_SELF);
+
+        xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+        preempt_enable();
 }
 
 void xen_set_pte(pte_t *ptep, pte_t pte)
@@ -26,8 +26,8 @@
 
 #include "multicalls.h"
 
-#define MC_BATCH        8
-#define MC_ARGS         (MC_BATCH * 32 / sizeof(u64))
+#define MC_BATCH        32
+#define MC_ARGS         (MC_BATCH * 16 / sizeof(u64))
 
 struct mc_buffer {
         struct multicall_entry entries[MC_BATCH];
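
For reference, the multicalls.c resizing works out as follows (with sizeof(u64) == 8): the batch grows from 8 to 32 queueable calls while the shared argument area doubles, so the average argument budget per queued call drops from 32 to 16 bytes:

    old: MC_ARGS = 8  * 32 / 8 = 32 u64s  (256 bytes of argument space for  8 calls)
    new: MC_ARGS = 32 * 16 / 8 = 64 u64s  (512 bytes of argument space for 32 calls)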