iommu-api: Remove iommu_{un}map_range functions
These functions are no longer used and can be removed safely. Their functionality is now provided by the iommu_{un}map functions, which are also capable of multiple page sizes.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
parent 468e2366cd
commit 12c7389abe
3 changed files with 2 additions and 92 deletions
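The removed range helpers walked a region one page at a time; that job now falls to iommu_map()/iommu_unmap(), which take a page order (each call covers PAGE_SIZE << order bytes) and can therefore also express larger page sizes. As a rough, hypothetical sketch of the migration (not part of this commit; the helper names my_map_region()/my_unmap_region() are invented), a former iommu_{un}map_range() caller can get the old page-at-a-time behaviour with order 0:

/*
 * Hypothetical migration sketch, not part of this commit: do what the
 * removed iommu_map_range()/iommu_unmap_range() did, but through the
 * remaining iommu_map()/iommu_unmap() entry points with gfp_order 0
 * (one PAGE_SIZE page per call).
 */
#include <linux/iommu.h>
#include <linux/iommu-helper.h>  /* iommu_num_pages() */
#include <linux/mm.h>            /* PAGE_SIZE, PAGE_MASK */

static int my_map_region(struct iommu_domain *domain, unsigned long iova,
                         phys_addr_t paddr, size_t size, int prot)
{
        unsigned long i, npages = iommu_num_pages(paddr, size, PAGE_SIZE);
        int ret;

        iova &= PAGE_MASK;
        paddr &= PAGE_MASK;

        for (i = 0; i < npages; ++i) {
                /* like the removed helper, no unwinding on failure */
                ret = iommu_map(domain, iova, paddr, 0, prot);
                if (ret)
                        return ret;

                iova += PAGE_SIZE;
                paddr += PAGE_SIZE;
        }

        return 0;
}

static void my_unmap_region(struct iommu_domain *domain, unsigned long iova,
                            size_t size)
{
        unsigned long i, npages = iommu_num_pages(iova, size, PAGE_SIZE);

        iova &= PAGE_MASK;

        for (i = 0; i < npages; ++i) {
                iommu_unmap(domain, iova, 0);
                iova += PAGE_SIZE;
        }
}

A caller whose region is a naturally aligned power-of-two multiple of PAGE_SIZE could instead pass a single larger order, which is the multiple-page-size capability the commit message refers to.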
@@ -2506,52 +2506,6 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
 	return ret;
 }
 
-static int amd_iommu_map_range(struct iommu_domain *dom,
-			       unsigned long iova, phys_addr_t paddr,
-			       size_t size, int iommu_prot)
-{
-	struct protection_domain *domain = dom->priv;
-	unsigned long i, npages = iommu_num_pages(paddr, size, PAGE_SIZE);
-	int prot = 0;
-	int ret;
-
-	if (iommu_prot & IOMMU_READ)
-		prot |= IOMMU_PROT_IR;
-	if (iommu_prot & IOMMU_WRITE)
-		prot |= IOMMU_PROT_IW;
-
-	iova &= PAGE_MASK;
-	paddr &= PAGE_MASK;
-
-	for (i = 0; i < npages; ++i) {
-		ret = iommu_map_page(domain, iova, paddr, prot, PAGE_SIZE);
-		if (ret)
-			return ret;
-
-		iova += PAGE_SIZE;
-		paddr += PAGE_SIZE;
-	}
-
-	return 0;
-}
-
-static void amd_iommu_unmap_range(struct iommu_domain *dom,
-				  unsigned long iova, size_t size)
-{
-
-	struct protection_domain *domain = dom->priv;
-	unsigned long i, npages = iommu_num_pages(iova, size, PAGE_SIZE);
-
-	iova &= PAGE_MASK;
-
-	for (i = 0; i < npages; ++i) {
-		iommu_unmap_page(domain, iova, PAGE_SIZE);
-		iova += PAGE_SIZE;
-	}
-
-	iommu_flush_tlb_pde(domain);
-}
-
 static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
 			 phys_addr_t paddr, int gfp_order, int iommu_prot)
 {
@@ -2616,8 +2570,6 @@ static struct iommu_ops amd_iommu_ops = {
 	.detach_dev = amd_iommu_detach_device,
 	.map = amd_iommu_map,
 	.unmap = amd_iommu_unmap,
-	.map_range = amd_iommu_map_range,
-	.unmap_range = amd_iommu_unmap_range,
 	.iova_to_phys = amd_iommu_iova_to_phys,
 	.domain_has_cap = amd_iommu_domain_has_cap,
 };

@@ -80,20 +80,6 @@ void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
 }
 EXPORT_SYMBOL_GPL(iommu_detach_device);
 
-int iommu_map_range(struct iommu_domain *domain, unsigned long iova,
-		    phys_addr_t paddr, size_t size, int prot)
-{
-	return iommu_ops->map_range(domain, iova, paddr, size, prot);
-}
-EXPORT_SYMBOL_GPL(iommu_map_range);
-
-void iommu_unmap_range(struct iommu_domain *domain, unsigned long iova,
-		       size_t size)
-{
-	iommu_ops->unmap_range(domain, iova, size);
-}
-EXPORT_SYMBOL_GPL(iommu_unmap_range);
-
 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
 			       unsigned long iova)
 {
@@ -119,10 +105,7 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
 
 	BUG_ON((iova | paddr) & invalid_mask);
 
-	if (iommu_ops->map)
-		return iommu_ops->map(domain, iova, paddr, gfp_order, prot);
-
-	return iommu_ops->map_range(domain, iova, paddr, size, prot);
+	return iommu_ops->map(domain, iova, paddr, gfp_order, prot);
 }
 EXPORT_SYMBOL_GPL(iommu_map);
 
@@ -136,11 +119,6 @@ int iommu_unmap(struct iommu_domain *domain, unsigned long iova, int gfp_order)
 
 	BUG_ON(iova & invalid_mask);
 
-	if (iommu_ops->unmap)
-		return iommu_ops->unmap(domain, iova, gfp_order);
-
-	iommu_ops->unmap_range(domain, iova, size);
-
-	return gfp_order;
+	return iommu_ops->unmap(domain, iova, gfp_order);
 }
 EXPORT_SYMBOL_GPL(iommu_unmap);

@@ -40,10 +40,6 @@ struct iommu_ops {
 		   phys_addr_t paddr, int gfp_order, int prot);
 	int (*unmap)(struct iommu_domain *domain, unsigned long iova,
 		     int gfp_order);
-	int (*map_range)(struct iommu_domain *domain, unsigned long iova,
-			 phys_addr_t paddr, size_t size, int prot);
-	void (*unmap_range)(struct iommu_domain *domain, unsigned long iova,
-			    size_t size);
 	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
 				    unsigned long iova);
 	int (*domain_has_cap)(struct iommu_domain *domain,
@@ -60,10 +56,6 @@ extern int iommu_attach_device(struct iommu_domain *domain,
 				struct device *dev);
 extern void iommu_detach_device(struct iommu_domain *domain,
 				struct device *dev);
-extern int iommu_map_range(struct iommu_domain *domain, unsigned long iova,
-			   phys_addr_t paddr, size_t size, int prot);
-extern void iommu_unmap_range(struct iommu_domain *domain, unsigned long iova,
-			      size_t size);
 extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
 		     phys_addr_t paddr, int gfp_order, int prot);
 extern int iommu_unmap(struct iommu_domain *domain, unsigned long iova,
@@ -104,18 +96,6 @@ static inline void iommu_detach_device(struct iommu_domain *domain,
 {
 }
 
-static inline int iommu_map_range(struct iommu_domain *domain,
-				  unsigned long iova, phys_addr_t paddr,
-				  size_t size, int prot)
-{
-	return -ENODEV;
-}
-
-static inline void iommu_unmap_range(struct iommu_domain *domain,
-				     unsigned long iova, size_t size)
-{
-}
-
 static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
 			    phys_addr_t paddr, int gfp_order, int prot)
 {