Mirror of https://github.com/Fishwaldo/Star64_linux.git, synced 2025-03-16 12:14:06 +00:00
Merge branch kvm-arm64/52bit-fixes into kvmarm-master/next
* kvm-arm64/52bit-fixes:
  : .
  : 52bit PA fixes, courtesy of Ryan Roberts. From the cover letter:
  :
  : "I've been adding support for FEAT_LPA2 to KVM and as part of that work have been
  : testing various (84) configurations of HW, host and guest kernels on FVP. This
  : has thrown up a couple of pre-existing bugs, for which the fixes are provided."
  : .
  KVM: arm64: Fix benign bug with incorrect use of VA_BITS
  KVM: arm64: Fix PAR_TO_HPFAR() to work independently of PA_BITS.
  KVM: arm64: Fix kvm init failure when mode!=vhe and VA_BITS=52.

Signed-off-by: Marc Zyngier <maz@kernel.org>
commit 3bbcc8cce2
3 changed files with 36 additions and 20 deletions
arch/arm64/include/asm/kvm_arm.h
@@ -340,9 +340,13 @@
  * We have
  *
  *	PAR	[PA_Shift - 1 : 12] = PA	[PA_Shift - 1 : 12]
  *	HPFAR	[PA_Shift - 9 : 4]  = FIPA	[PA_Shift - 1 : 12]
+ *
+ * Always assume 52 bit PA since at this point, we don't know how many PA bits
+ * the page table has been set up for. This should be safe since unused address
+ * bits in PAR are res0.
  */
 #define PAR_TO_HPFAR(par)		\
-	(((par) & GENMASK_ULL(PHYS_MASK_SHIFT - 1, 12)) >> 8)
+	(((par) & GENMASK_ULL(52 - 1, 12)) >> 8)
 
 #define ECN(x) { ESR_ELx_EC_##x, #x }
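
Illustration (not part of the commit): a minimal standalone C sketch of the field move PAR_TO_HPFAR() performs, assuming a 52-bit PA. GENMASK_ULL is redefined locally so the example builds outside the kernel tree, and the PAR value is made up.

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the kernel's GENMASK_ULL(): bits h..l set. */
#define GENMASK_ULL(h, l) \
	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

/* Mask PA[51:12] out of PAR, then shift right by 8 so the faulting
 * page number lands in HPFAR bits [43:4]. */
#define PAR_TO_HPFAR(par) \
	(((par) & GENMASK_ULL(52 - 1, 12)) >> 8)

int main(void)
{
	uint64_t par = 0x000ffffffffff000ULL;	/* hypothetical PAR with PA[51:12] set */

	printf("PAR   = 0x%016llx\n", (unsigned long long)par);
	printf("HPFAR = 0x%016llx\n", (unsigned long long)PAR_TO_HPFAR(par));
	/* Prints HPFAR = 0x00000ffffffffff0: PA[51:12] moved to bits [43:4]. */
	return 0;
}

Masking up to bit 51 instead of PHYS_MASK_SHIFT - 1 removes the dependency on how many PA bits the page tables were set up for; any unused high bits of PAR are RES0, so they contribute nothing either way.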
arch/arm64/kvm/arm.c
@@ -1518,7 +1518,7 @@ static int kvm_init_vector_slots(void)
 	return 0;
 }
 
-static void cpu_prepare_hyp_mode(int cpu)
+static void cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
 {
 	struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
 	unsigned long tcr;
@@ -1534,23 +1534,9 @@ static void cpu_prepare_hyp_mode(int cpu)
 
 	params->mair_el2 = read_sysreg(mair_el1);
 
-	/*
-	 * The ID map may be configured to use an extended virtual address
-	 * range. This is only the case if system RAM is out of range for the
-	 * currently configured page size and VA_BITS, in which case we will
-	 * also need the extended virtual range for the HYP ID map, or we won't
-	 * be able to enable the EL2 MMU.
-	 *
-	 * However, at EL2, there is only one TTBR register, and we can't switch
-	 * between translation tables *and* update TCR_EL2.T0SZ at the same
-	 * time. Bottom line: we need to use the extended range with *both* our
-	 * translation tables.
-	 *
-	 * So use the same T0SZ value we use for the ID map.
-	 */
 	tcr = (read_sysreg(tcr_el1) & TCR_EL2_MASK) | TCR_EL2_RES1;
 	tcr &= ~TCR_T0SZ_MASK;
-	tcr |= (idmap_t0sz & GENMASK(TCR_TxSZ_WIDTH - 1, 0)) << TCR_T0SZ_OFFSET;
+	tcr |= TCR_T0SZ(hyp_va_bits);
 	params->tcr_el2 = tcr;
 
 	params->pgd_pa = kvm_mmu_get_httbr();
@@ -2054,7 +2040,7 @@ static int init_hyp_mode(void)
 		}
 
 		/* Prepare the CPU initialization parameters */
-		cpu_prepare_hyp_mode(cpu);
+		cpu_prepare_hyp_mode(cpu, hyp_va_bits);
 	}
 
 	if (is_protected_kvm_enabled()) {
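
Illustration (not part of the commit): a minimal standalone C sketch of the T0SZ/VA-bits relation behind the cpu_prepare_hyp_mode() change. The macro below is a local stand-in for the architectural rule T0SZ = 64 - VA bits, and the 48/52-bit values are assumptions chosen to show the failure case.

#include <stdio.h>

#define TCR_T0SZ_OFFSET	0
#define TCR_T0SZ(va_bits)	((64UL - (va_bits)) << TCR_T0SZ_OFFSET)

int main(void)
{
	unsigned int idmap_va_bits  = 48;	/* what the ID map was built for */
	unsigned int kernel_va_bits = 52;	/* vabits_actual on a 52-bit VA kernel */
	unsigned int hyp_va_bits =
		idmap_va_bits > kernel_va_bits ? idmap_va_bits : kernel_va_bits;

	printf("old: T0SZ = %lu (EL2 limited to %u VA bits)\n",
	       TCR_T0SZ(idmap_va_bits), idmap_va_bits);
	printf("new: T0SZ = %lu (EL2 covers %u VA bits)\n",
	       TCR_T0SZ(hyp_va_bits), hyp_va_bits);
	return 0;
}

Previously TCR_EL2.T0SZ was copied from idmap_t0sz; in the scenario above that programs EL2 for only 48 VA bits even though the hyp mappings of kernel memory need 52, which is what broke nVHE init with VA_BITS=52.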
arch/arm64/kvm/mmu.c
@@ -641,7 +641,7 @@ static int get_user_mapping_size(struct kvm *kvm, u64 addr)
 {
 	struct kvm_pgtable pgt = {
 		.pgd		= (kvm_pte_t *)kvm->mm->pgd,
-		.ia_bits	= VA_BITS,
+		.ia_bits	= vabits_actual,
 		.start_level	= (KVM_PGTABLE_MAX_LEVELS -
 				   CONFIG_PGTABLE_LEVELS),
 		.mm_ops		= &kvm_user_mm_ops,
@@ -1618,6 +1618,8 @@ static struct kvm_pgtable_mm_ops kvm_hyp_mm_ops = {
 int kvm_mmu_init(u32 *hyp_va_bits)
 {
 	int err;
+	u32 idmap_bits;
+	u32 kernel_bits;
 
 	hyp_idmap_start = __pa_symbol(__hyp_idmap_text_start);
 	hyp_idmap_start = ALIGN_DOWN(hyp_idmap_start, PAGE_SIZE);
@@ -1631,7 +1633,31 @@ int kvm_mmu_init(u32 *hyp_va_bits)
 	 */
 	BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
 
-	*hyp_va_bits = 64 - ((idmap_t0sz & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET);
+	/*
+	 * The ID map may be configured to use an extended virtual address
+	 * range. This is only the case if system RAM is out of range for the
+	 * currently configured page size and VA_BITS_MIN, in which case we will
+	 * also need the extended virtual range for the HYP ID map, or we won't
+	 * be able to enable the EL2 MMU.
+	 *
+	 * However, in some cases the ID map may be configured for fewer than
+	 * the number of VA bits used by the regular kernel stage 1. This
+	 * happens when VA_BITS=52 and the kernel image is placed in PA space
+	 * below 48 bits.
+	 *
+	 * At EL2, there is only one TTBR register, and we can't switch between
+	 * translation tables *and* update TCR_EL2.T0SZ at the same time. Bottom
+	 * line: we need to use the extended range with *both* our translation
+	 * tables.
+	 *
+	 * So use the maximum of the idmap VA bits and the regular kernel stage
+	 * 1 VA bits to assure that the hypervisor can both ID map its code page
+	 * and map any kernel memory.
+	 */
+	idmap_bits = 64 - ((idmap_t0sz & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET);
+	kernel_bits = vabits_actual;
+	*hyp_va_bits = max(idmap_bits, kernel_bits);
+
 	kvm_debug("Using %u-bit virtual addresses at EL2\n", *hyp_va_bits);
 	kvm_debug("IDMAP page: %lx\n", hyp_idmap_start);
 	kvm_debug("HYP VA range: %lx:%lx\n",
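
Illustration (not part of the commit): a worked example of the new hyp_va_bits calculation in kvm_mmu_init() for the case described in the comment above. The values (idmap_t0sz = 16, vabits_actual = 52) and the TCR_T0SZ_MASK/TCR_T0SZ_OFFSET stand-ins are assumptions for the sake of the example.

#include <stdio.h>

#define TCR_T0SZ_OFFSET	0
#define TCR_T0SZ_MASK	(0x3fUL << TCR_T0SZ_OFFSET)

int main(void)
{
	unsigned long idmap_t0sz = 16;		/* ID map covers 64 - 16 = 48 VA bits */
	unsigned int vabits_actual = 52;	/* runtime kernel stage-1 VA bits */

	unsigned int idmap_bits  = 64 - ((idmap_t0sz & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET);
	unsigned int kernel_bits = vabits_actual;
	unsigned int hyp_va_bits = idmap_bits > kernel_bits ? idmap_bits : kernel_bits;

	/* Old code used idmap_bits (48) alone; the fix picks max(48, 52) = 52 so
	 * the hypervisor can both ID map its code and map any kernel memory. */
	printf("idmap_bits=%u kernel_bits=%u -> hyp_va_bits=%u\n",
	       idmap_bits, kernel_bits, hyp_va_bits);
	return 0;
}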