Mirror of https://github.com/Fishwaldo/Star64_linux.git, synced 2025-06-07 15:18:15 +00:00
Merge branch 'fixes' into next
Merge our fixes branch from the 4.18 cycle to resolve some minor conflicts.
commit b3124ec2f9
8 changed files with 73 additions and 26 deletions
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
@@ -237,6 +237,7 @@ endif
 cpu-as-$(CONFIG_4xx)		+= -Wa,-m405
 cpu-as-$(CONFIG_ALTIVEC)	+= $(call as-option,-Wa$(comma)-maltivec)
 cpu-as-$(CONFIG_E200)		+= -Wa,-me200
+cpu-as-$(CONFIG_E500)		+= -Wa,-me500
 cpu-as-$(CONFIG_PPC_BOOK3S_64)	+= -Wa,-mpower4
 cpu-as-$(CONFIG_PPC_E500MC)	+= $(call as-option,-Wa$(comma)-me500mc)
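The kbuild cpu-as-y variables collect per-CPU assembler flags, and -Wa,<opt> asks GCC to forward <opt> to GAS; -me500 lets the assembler accept e500-family mnemonics. The fixes branch added the CONFIG_E500 line, which after the merge sits between the existing E200 and BOOK3S_64 entries.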
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
@@ -35,9 +35,9 @@ extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
 extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
 		unsigned long ua, unsigned long entries);
 extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
-		unsigned long ua, unsigned long *hpa);
+		unsigned long ua, unsigned int pageshift, unsigned long *hpa);
 extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
-		unsigned long ua, unsigned long *hpa);
+		unsigned long ua, unsigned int pageshift, unsigned long *hpa);
 extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
 extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
 #endif
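The new pageshift argument carries the IOMMU page size the caller wants to map; the implementations (further down, in mmu_context_iommu.c) refuse the translation when the pre-registered memory cannot back a page that large.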
@@ -143,24 +143,33 @@ static inline void mm_context_remove_copro(struct mm_struct *mm)
 {
 	int c;
 
-	c = atomic_dec_if_positive(&mm->context.copros);
-
-	/* Detect imbalance between add and remove */
-	WARN_ON(c < 0);
-
 	/*
-	 * Need to broadcast a global flush of the full mm before
-	 * decrementing active_cpus count, as the next TLBI may be
-	 * local and the nMMU and/or PSL need to be cleaned up.
-	 * Should be rare enough so that it's acceptable.
+	 * When removing the last copro, we need to broadcast a global
+	 * flush of the full mm, as the next TLBI may be local and the
+	 * nMMU and/or PSL need to be cleaned up.
+	 *
+	 * Both the 'copros' and 'active_cpus' counts are looked at in
+	 * flush_all_mm() to determine the scope (local/global) of the
+	 * TLBIs, so we need to flush first before decrementing
+	 * 'copros'. If this API is used by several callers for the
+	 * same context, it can lead to over-flushing. It's hopefully
+	 * not common enough to be a problem.
 	 *
 	 * Skip on hash, as we don't know how to do the proper flush
 	 * for the time being. Invalidations will remain global if
-	 * used on hash.
+	 * used on hash. Note that we can't drop 'copros' either, as
+	 * it could make some invalidations local with no flush
+	 * in-between.
 	 */
-	if (c == 0 && radix_enabled()) {
+	if (radix_enabled()) {
 		flush_all_mm(mm);
-		dec_mm_active_cpus(mm);
+
+		c = atomic_dec_if_positive(&mm->context.copros);
+		/* Detect imbalance between add and remove */
+		WARN_ON(c < 0);
+
+		if (c == 0)
+			dec_mm_active_cpus(mm);
 	}
 }
 #else
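To see why the flush has to happen before the decrement, consider this minimal user-space model of the scope decision (hypothetical names; flush_scope() stands in for the check that flush_all_mm() performs on the 'copros' and 'active_cpus' counts):

	#include <stdio.h>

	/* Hypothetical stand-ins for mm->context.copros / active_cpus */
	static int copros = 1;
	static int active_cpus = 1;

	/* Models flush_all_mm(): scope depends on the counters at flush time */
	static const char *flush_scope(void)
	{
		return (copros > 0 || active_cpus > 1) ? "global" : "local";
	}

	int main(void)
	{
		/* Buggy order: decrement first, then flush */
		copros--;
		printf("flush after decrement: %s\n", flush_scope());  /* "local" */

		/* Fixed order: flush while 'copros' still counts the nMMU user */
		copros = 1;
		printf("flush before decrement: %s\n", flush_scope()); /* "global" */
		copros--;
		return 0;
	}

With the old order the last copro has already been dropped when the flush runs, so the flush can be chosen local and leave stale nMMU/PSL entries behind; flushing first keeps the scope global for that final flush, at the cost of possible over-flushing noted in the comment.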
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
@@ -146,7 +146,9 @@ power9_restore_additional_sprs:
 	mtspr	SPRN_MMCR1, r4
 
 	ld	r3, STOP_MMCR2(r13)
+	ld	r4, PACA_SPRG_VDSO(r13)
 	mtspr	SPRN_MMCR2, r3
+	mtspr	SPRN_SPRG3, r4
 	blr
 
 /*
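SPRG3 is readable from user space and the VDSO relies on the CPU/node hint stored there (e.g. for getcpu()), but the register is not preserved across deep stop states; reloading it from the paca's PACA_SPRG_VDSO slot alongside the other special-purpose registers keeps the value stable after an idle wakeup.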
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
@@ -449,7 +449,7 @@ long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
 		/* This only handles v2 IOMMU type, v1 is handled via ioctl() */
 		return H_TOO_HARD;
 
-	if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, &hpa)))
+	if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
 		return H_HARDWARE;
 
 	if (mm_iommu_mapped_inc(mem))
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -275,7 +275,8 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
 	if (!mem)
 		return H_TOO_HARD;
 
-	if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, &hpa)))
+	if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
+			&hpa)))
 		return H_HARDWARE;
 
 	if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
@@ -461,7 +462,8 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 
 		mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
 		if (mem)
-			prereg = mm_iommu_ua_to_hpa_rm(mem, ua, &tces) == 0;
+			prereg = mm_iommu_ua_to_hpa_rm(mem, ua,
+					IOMMU_PAGE_SHIFT_4K, &tces) == 0;
 	}
 
 	if (!prereg) {
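The _rm ("real mode") variants run with the MMU off while KVM handles H_PUT_TCE hypercalls without exiting to virtual mode, so they cannot fault anything in; H_TOO_HARD punts the request to the slower virtual-mode path. Both call sites now pass an explicit page shift: the table's own it_page_shift for mappings, and IOMMU_PAGE_SHIFT_4K to match the IOMMU_PAGE_SIZE_4K lookup just above it.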
diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c
@@ -19,6 +19,7 @@
 #include <linux/hugetlb.h>
 #include <linux/swap.h>
 #include <asm/mmu_context.h>
+#include <asm/pte-walk.h>
 
 static DEFINE_MUTEX(mem_list_mutex);
 
@@ -27,6 +28,7 @@ struct mm_iommu_table_group_mem_t {
 	struct rcu_head rcu;
 	unsigned long used;
 	atomic64_t mapped;
+	unsigned int pageshift;
 	u64 ua;			/* userspace address */
 	u64 entries;		/* number of entries in hpas[] */
 	u64 *hpas;		/* vmalloc'ed */
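The new pageshift field records, for the whole pre-registered region, the largest IOMMU page size that every pinned page can back; it starts from the region's natural alignment and is clamped page by page as they are pinned below.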
@@ -125,6 +127,8 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
 {
 	struct mm_iommu_table_group_mem_t *mem;
 	long i, j, ret = 0, locked_entries = 0;
+	unsigned int pageshift;
+	unsigned long flags;
 	struct page *page = NULL;
 
 	mutex_lock(&mem_list_mutex);
@@ -159,6 +163,12 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
 		goto unlock_exit;
 	}
 
+	/*
+	 * For a starting point for a maximum page size calculation
+	 * we use @ua and @entries natural alignment to allow IOMMU pages
+	 * smaller than huge pages but still bigger than PAGE_SIZE.
+	 */
+	mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT));
 	mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0])));
 	if (!mem->hpas) {
 		kfree(mem);
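A quick worked example of that starting point, as a user-space sketch (a PAGE_SHIFT of 16, i.e. 64K pages, is an assumption, as is the sample address; __builtin_ctzl() plays the role of the kernel's __ffs()):

	#include <stdio.h>

	#define PAGE_SHIFT 16	/* assumption: 64K pages, typical for these configs */

	int main(void)
	{
		unsigned long ua = 0x20000000000UL;	/* hypothetical, 2^41-aligned */
		unsigned long entries = 16;		/* 16 * 64K = 1M region */

		/* __ffs() finds the lowest set bit; ctzl is its user-space twin */
		unsigned int pageshift = __builtin_ctzl(ua | (entries << PAGE_SHIFT));

		/* lowest set bit of (2^41 | 2^20) is bit 20: up to 1M IOMMU pages */
		printf("starting pageshift = %u\n", pageshift);
		return 0;
	}

In effect the OR takes the smaller of the address alignment and the region size, so the starting pageshift can never describe a page that would overhang the registered region.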
@@ -199,6 +209,23 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
 		}
 	}
 populate:
+		pageshift = PAGE_SHIFT;
+		if (PageCompound(page)) {
+			pte_t *pte;
+			struct page *head = compound_head(page);
+			unsigned int compshift = compound_order(head);
+
+			local_irq_save(flags); /* disables as well */
+			pte = find_linux_pte(mm->pgd, ua, NULL, &pageshift);
+			local_irq_restore(flags);
+
+			/* Double check it is still the same pinned page */
+			if (pte && pte_page(*pte) == head &&
+					pageshift == compshift)
+				pageshift = max_t(unsigned int, pageshift,
+						PAGE_SHIFT);
+		}
+		mem->pageshift = min(mem->pageshift, pageshift);
 		mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
 	}
 
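find_linux_pte() must be called with interrupts disabled so the page-table entries it walks cannot be split or collapsed under it (e.g. by a concurrent THP operation), which is what the local_irq_save()/restore() pair is for; the "double check" against the previously pinned head page guards against the mapping having changed between the pin and the walk.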
@@ -349,7 +376,7 @@ struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
 EXPORT_SYMBOL_GPL(mm_iommu_find);
 
 long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
-		unsigned long ua, unsigned long *hpa)
+		unsigned long ua, unsigned int pageshift, unsigned long *hpa)
 {
 	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
 	u64 *va = &mem->hpas[entry];
@@ -357,6 +384,9 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
 	if (entry >= mem->entries)
 		return -EFAULT;
 
+	if (pageshift > mem->pageshift)
+		return -EFAULT;
+
 	*hpa = *va | (ua & ~PAGE_MASK);
 
 	return 0;
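The effect of the two-line check is easiest to see with concrete numbers; a minimal sketch under assumed values (a 64K-backed region asked to supply a 16M IOMMU page):

	#include <stdio.h>

	/* Hypothetical value: a region whose smallest backing page is 64K */
	#define MEM_PAGESHIFT	16U	/* mem->pageshift: 2^16 = 64K */

	/* Models the new check in mm_iommu_ua_to_hpa() */
	static long check_pageshift(unsigned int pageshift)
	{
		if (pageshift > MEM_PAGESHIFT)
			return -14;	/* -EFAULT */
		return 0;
	}

	int main(void)
	{
		/* A 64K TCE fits: 16 <= 16 */
		printf("64K mapping: %ld\n", check_pageshift(16));
		/* A 16M TCE (shift 24) would span memory that was never pinned */
		printf("16M mapping: %ld\n", check_pageshift(24));
		return 0;
	}

Without the check, a guest could ask for an IOMMU page larger than the host pages actually pinned, handing the device DMA access to physical memory outside the registered region.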
@@ -364,7 +394,7 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
 EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa);
 
 long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
-		unsigned long ua, unsigned long *hpa)
+		unsigned long ua, unsigned int pageshift, unsigned long *hpa)
 {
 	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
 	void *va = &mem->hpas[entry];
@@ -373,6 +403,9 @@ long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
 	if (entry >= mem->entries)
 		return -EFAULT;
 
+	if (pageshift > mem->pageshift)
+		return -EFAULT;
+
 	pa = (void *) vmalloc_to_phys(va);
 	if (!pa)
 		return -EFAULT;
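The real-mode variant gains the same check; the difference is the vmalloc_to_phys() call that follows, since with the MMU off the vmalloc'ed hpas[] array can only be reached through its physical address.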
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
@@ -2735,7 +2735,7 @@ generic_inst_dump(unsigned long adr, long count, int praddr,
 {
 	int nr, dotted;
 	unsigned long first_adr;
-	unsigned long inst, last_inst = 0;
+	unsigned int inst, last_inst = 0;
 	unsigned char val[4];
 
 	dotted = 0;
@@ -2759,7 +2759,7 @@ generic_inst_dump(unsigned long adr, long count, int praddr,
 		dotted = 0;
 		last_inst = inst;
 		if (praddr)
-			printf(REG" %.8lx", adr, inst);
+			printf(REG" %.8x", adr, inst);
 		printf("\t");
 		dump_func(inst, adr);
 		printf("\n");
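PowerPC instructions are 32 bits wide, so the xmon change narrows inst to unsigned int and matches the format specifier to the new type, keeping the printf() arguments and conversions width-consistent. A small host-side illustration of the matched print (the opcode value is a made-up sample):

	#include <stdio.h>

	int main(void)
	{
		unsigned int inst = 0x7c0802a6;	/* sample opcode: mflr r0 */

		/* %lx would expect an unsigned long; with an unsigned int
		 * argument the conversion and the value no longer agree. */
		printf("%.8x\n", inst);	/* width-matched 32-bit print */
		return 0;
	}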
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -419,17 +419,17 @@ static void tce_iommu_unuse_page(struct tce_container *container,
 }
 
 static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
-		unsigned long tce, unsigned long size,
+		unsigned long tce, unsigned long shift,
 		unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
 {
 	long ret = 0;
 	struct mm_iommu_table_group_mem_t *mem;
 
-	mem = mm_iommu_lookup(container->mm, tce, size);
+	mem = mm_iommu_lookup(container->mm, tce, 1ULL << shift);
 	if (!mem)
 		return -EINVAL;
 
-	ret = mm_iommu_ua_to_hpa(mem, tce, phpa);
+	ret = mm_iommu_ua_to_hpa(mem, tce, shift, phpa);
 	if (ret)
 		return -EINVAL;
 
@@ -450,7 +450,7 @@ static void tce_iommu_unuse_page_v2(struct tce_container *container,
 		return;
 
 	ret = tce_iommu_prereg_ua_to_hpa(container, be64_to_cpu(*pua),
-			IOMMU_PAGE_SIZE(tbl), &hpa, &mem);
+			tbl->it_page_shift, &hpa, &mem);
 	if (ret)
 		pr_debug("%s: tce %llx at #%lx was not cached, ret=%d\n",
 				__func__, be64_to_cpu(*pua), entry, ret);
@@ -566,7 +566,7 @@ static long tce_iommu_build_v2(struct tce_container *container,
 		__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry + i);
 
 		ret = tce_iommu_prereg_ua_to_hpa(container,
-				tce, IOMMU_PAGE_SIZE(tbl), &hpa, &mem);
+				tce, tbl->it_page_shift, &hpa, &mem);
 		if (ret)
 			break;
 
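Passing tbl->it_page_shift instead of IOMMU_PAGE_SIZE(tbl) lets tce_iommu_prereg_ua_to_hpa() forward the shift straight into mm_iommu_ua_to_hpa()'s new parameter, while 1ULL << shift recovers the byte size that mm_iommu_lookup() still expects, so the lookup behaviour is unchanged.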