iommu/iova: Make dma_32bit_pfn implicit
Now that the cached node optimisation can apply to all allocations, the
couple of users which were playing tricks with dma_32bit_pfn in order to
benefit from it can stop doing so. Conversely, there is also no need for
all the other users to explicitly calculate a 'real' 32-bit PFN, when
init_iova_domain() can happily do that itself from the page granularity.

CC: Thierry Reding <thierry.reding@gmail.com>
CC: Jonathan Hunter <jonathanh@nvidia.com>
CC: David Airlie <airlied@linux.ie>
CC: Sudeep Dutt <sudeep.dutt@intel.com>
CC: Ashutosh Dixit <ashutosh.dixit@intel.com>
Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Tested-by: Zhen Lei <thunder.leizhen@huawei.com>
Tested-by: Nate Watterson <nwatters@codeaurora.org>
[rm: use iova_shift(), rewrote commit message]
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
commit aa3ac9469c
parent e60aa7b538

8 changed files with 13 additions and 41 deletions
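Every caller changes the same way: the trailing 32-bit-PFN argument disappears, and the domain derives its cache limit from the granule instead. A minimal before/after sketch, not taken from the diff itself; the `iovad` variable, `start_pfn`, and the 4 KiB granule are illustrative assumptions, and `IOVA_PFN()` stands in for the per-driver helper macros seen below:

	/* before: each caller computed a 'real' 32-bit PFN cap itself */
	init_iova_domain(&iovad, SZ_4K, start_pfn, IOVA_PFN(DMA_BIT_MASK(32)));

	/* after: the cap is implicit in the granule (here 4 KiB) */
	init_iova_domain(&iovad, SZ_4K, start_pfn);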
drivers/gpu/drm/tegra/drm.c

@@ -155,8 +155,7 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
 
 		order = __ffs(tegra->domain->pgsize_bitmap);
 		init_iova_domain(&tegra->carveout.domain, 1UL << order,
-				 carveout_start >> order,
-				 carveout_end >> order);
+				 carveout_start >> order);
 
 		tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
 		tegra->carveout.limit = carveout_end >> tegra->carveout.shift;
drivers/gpu/host1x/dev.c

@@ -198,8 +198,7 @@ static int host1x_probe(struct platform_device *pdev)
 
 		order = __ffs(host->domain->pgsize_bitmap);
 		init_iova_domain(&host->iova, 1UL << order,
-				 geometry->aperture_start >> order,
-				 geometry->aperture_end >> order);
+				 geometry->aperture_start >> order);
 		host->iova_end = geometry->aperture_end;
 	}
 
drivers/iommu/amd_iommu.c

@@ -63,7 +63,6 @@
 /* IO virtual address start page frame number */
 #define IOVA_START_PFN		(1)
 #define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
-#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
 
 /* Reserved IOVA ranges */
 #define MSI_RANGE_START		(0xfee00000)
@@ -1788,8 +1787,7 @@ static struct dma_ops_domain *dma_ops_domain_alloc(void)
 	if (!dma_dom->domain.pt_root)
 		goto free_dma_dom;
 
-	init_iova_domain(&dma_dom->iovad, PAGE_SIZE,
-			 IOVA_START_PFN, DMA_32BIT_PFN);
+	init_iova_domain(&dma_dom->iovad, PAGE_SIZE, IOVA_START_PFN);
 
 	if (init_iova_flush_queue(&dma_dom->iovad, iova_domain_flush_tlb, NULL))
 		goto free_dma_dom;
@@ -2696,8 +2694,7 @@ static int init_reserved_iova_ranges(void)
 	struct pci_dev *pdev = NULL;
 	struct iova *val;
 
-	init_iova_domain(&reserved_iova_ranges, PAGE_SIZE,
-			 IOVA_START_PFN, DMA_32BIT_PFN);
+	init_iova_domain(&reserved_iova_ranges, PAGE_SIZE, IOVA_START_PFN);
 
 	lockdep_set_class(&reserved_iova_ranges.iova_rbtree_lock,
 			  &reserved_rbtree_key);
drivers/iommu/dma-iommu.c

@@ -292,18 +292,7 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 		/* ...then finally give it a kicking to make sure it fits */
 		base_pfn = max_t(unsigned long, base_pfn,
 				domain->geometry.aperture_start >> order);
-		end_pfn = min_t(unsigned long, end_pfn,
-				domain->geometry.aperture_end >> order);
 	}
-	/*
-	 * PCI devices may have larger DMA masks, but still prefer allocating
-	 * within a 32-bit mask to avoid DAC addressing. Such limitations don't
-	 * apply to the typical platform device, so for those we may as well
-	 * leave the cache limit at the top of their range to save an rb_last()
-	 * traversal on every allocation.
-	 */
-	if (dev && dev_is_pci(dev))
-		end_pfn &= DMA_BIT_MASK(32) >> order;
 
 	/* start_pfn is always nonzero for an already-initialised domain */
 	if (iovad->start_pfn) {
@@ -312,16 +301,11 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 			pr_warn("Incompatible range for DMA domain\n");
 			return -EFAULT;
 		}
-		/*
-		 * If we have devices with different DMA masks, move the free
-		 * area cache limit down for the benefit of the smaller one.
-		 */
-		iovad->dma_32bit_pfn = min(end_pfn + 1, iovad->dma_32bit_pfn);
 
 		return 0;
 	}
 
-	init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
+	init_iova_domain(iovad, 1UL << order, base_pfn);
 	if (!dev)
 		return 0;
 
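With the PCI special-casing gone from iommu_dma_init_domain(), a device's 32-bit preference is no longer baked into the domain at init time: it is expressed per allocation through limit_pfn, and dma_32bit_pfn survives only as an allocator-internal cache threshold. A sketch of the resulting pattern with the long-standing alloc_iova() API; the caller, the 4 KiB granule, and nr_pages are illustrative assumptions:

	struct iova_domain iovad;
	struct iova *iova;

	/* the domain now computes dma_32bit_pfn from the granule itself */
	init_iova_domain(&iovad, SZ_4K, 1);

	/* a 32-bit-limited master simply passes a 32-bit limit_pfn */
	iova = alloc_iova(&iovad, nr_pages,
			  DMA_BIT_MASK(32) >> PAGE_SHIFT, true);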
drivers/iommu/intel-iommu.c

@@ -82,8 +82,6 @@
 #define IOVA_START_PFN		(1)
 
 #define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
-#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
-#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))
 
 /* page table handling */
 #define LEVEL_STRIDE		(9)
@@ -1878,8 +1876,7 @@ static int dmar_init_reserved_ranges(void)
 	struct iova *iova;
 	int i;
 
-	init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
-			DMA_32BIT_PFN);
+	init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN);
 
 	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
 		&reserved_rbtree_key);
@@ -1938,8 +1935,7 @@ static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
 	unsigned long sagaw;
 	int err;
 
-	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
-			DMA_32BIT_PFN);
+	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
 
 	err = init_iova_flush_queue(&domain->iovad,
 				    iommu_flush_iova, iova_entry_free);
@@ -4897,8 +4893,7 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
 {
 	int adjust_width;
 
-	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
-			DMA_32BIT_PFN);
+	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
 	domain_reserve_special_ranges(domain);
 
 	/* calculate AGAW */
drivers/iommu/iova.c

@@ -37,7 +37,7 @@ static void fq_flush_timeout(unsigned long data);
 
 void
 init_iova_domain(struct iova_domain *iovad, unsigned long granule,
-	unsigned long start_pfn, unsigned long pfn_32bit)
+	unsigned long start_pfn)
 {
 	/*
 	 * IOVA granularity will normally be equal to the smallest
@@ -52,7 +52,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
 	iovad->cached32_node = NULL;
 	iovad->granule = granule;
 	iovad->start_pfn = start_pfn;
-	iovad->dma_32bit_pfn = pfn_32bit + 1;
+	iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
 	iovad->flush_cb = NULL;
 	iovad->fq = NULL;
 	init_iova_rcaches(iovad);
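To see that the new expression matches the old `pfn_32bit + 1`, here is the arithmetic for the common 4 KiB granule (a worked example, not part of the diff):

	/* granule = 4 KiB  =>  iova_shift(iovad) == 12
	 *
	 * old: pfn_32bit + 1 = IOVA_PFN(DMA_BIT_MASK(32)) + 1
	 *                    = (0xffffffff >> 12) + 1 = 0xfffff + 1 = 0x100000
	 * new: 1UL << (32 - 12)                                     = 0x100000
	 *
	 * Either way, dma_32bit_pfn is the first PFN above the 32-bit
	 * boundary; the domain now derives it from its own granule.
	 */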
drivers/misc/mic/scif/scif_rma.c

@@ -39,8 +39,7 @@ void scif_rma_ep_init(struct scif_endpt *ep)
 	struct scif_endpt_rma_info *rma = &ep->rma_info;
 
 	mutex_init(&rma->rma_lock);
-	init_iova_domain(&rma->iovad, PAGE_SIZE, SCIF_IOVA_START_PFN,
-			 SCIF_DMA_64BIT_PFN);
+	init_iova_domain(&rma->iovad, PAGE_SIZE, SCIF_IOVA_START_PFN);
 	spin_lock_init(&rma->tc_lock);
 	mutex_init(&rma->mmn_lock);
 	INIT_LIST_HEAD(&rma->reg_list);
include/linux/iova.h

@@ -154,7 +154,7 @@ struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
 	unsigned long pfn_hi);
 void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
 void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
-	unsigned long start_pfn, unsigned long pfn_32bit);
+	unsigned long start_pfn);
 int init_iova_flush_queue(struct iova_domain *iovad,
 			  iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);
 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
@@ -230,8 +230,7 @@ static inline void copy_reserved_iova(struct iova_domain *from,
 
 static inline void init_iova_domain(struct iova_domain *iovad,
 				    unsigned long granule,
-				    unsigned long start_pfn,
-				    unsigned long pfn_32bit)
+				    unsigned long start_pfn)
 {
 }
 