FROMLIST: iommu: Add support for the map_pages() callback

Since iommu_pgsize can calculate how many pages of the
same size can be mapped/unmapped before the next largest
page size boundary, add support for invoking an IOMMU
driver's map_pages() callback, if it provides one.

Bug: 178537788
Change-Id: Id2dfb0f882c3c2ae385781de1d8cec178532c3ce
Link: https://lore.kernel.org/linux-iommu/20210408171402.12607-1-isaacm@codeaurora.org/T/#t
Signed-off-by: Isaac J. Manjarres <isaacm@codeaurora.org>
Suggested-by: Will Deacon <will@kernel.org>
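For reference, the per-driver hook this patch starts invoking has the shape below. This is context rather than part of this diff: the op itself is introduced by an earlier patch in the series linked above, and the signature here is inferred from the ops->map_pages() call site in the hunk that follows.

    /*
     * Context: the driver callback exercised by this patch. It maps
     * "pgcount" pages of size "pgsize" starting at iova/paddr and
     * reports the number of bytes actually mapped via *mapped, even on
     * failure, so the caller can unwind a partial mapping.
     */
    int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
                     phys_addr_t paddr, size_t pgsize, size_t pgcount,
                     int prot, gfp_t gfp, size_t *mapped);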
@@ -2385,6 +2385,30 @@ out_set_count:
 	return pgsize;
 }
 
+static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova,
+			     phys_addr_t paddr, size_t size, int prot,
+			     gfp_t gfp, size_t *mapped)
+{
+	const struct iommu_ops *ops = domain->ops;
+	size_t pgsize, count;
+	int ret;
+
+	pgsize = iommu_pgsize(domain, iova, paddr, size, &count);
+
+	pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx count %zu\n",
+		 iova, &paddr, pgsize, count);
+
+	if (ops->map_pages) {
+		ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot,
+				     gfp, mapped);
+	} else {
+		ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
+		*mapped = ret ? 0 : pgsize;
+	}
+
+	return ret;
+}
+
 static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
 		       phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
 {
@@ -2395,7 +2419,7 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
 	phys_addr_t orig_paddr = paddr;
 	int ret = 0;
 
-	if (unlikely(ops->map == NULL ||
+	if (unlikely(!(ops->map || ops->map_pages) ||
 		     domain->pgsize_bitmap == 0UL))
 		return -ENODEV;
 
@@ -2419,18 +2443,21 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
 	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
 
 	while (size) {
-		size_t pgsize = iommu_pgsize(domain, iova, paddr, size, NULL);
+		size_t mapped = 0;
 
-		pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
-			 iova, &paddr, pgsize);
-
-		ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
+		ret = __iommu_map_pages(domain, iova, paddr, size, prot, gfp,
+					&mapped);
+		/*
+		 * Some pages may have been mapped, even if an error occurred,
+		 * so we should account for those so they can be unmapped.
+		 */
+		size -= mapped;
+
 		if (ret)
 			break;
 
-		iova += pgsize;
-		paddr += pgsize;
-		size -= pgsize;
+		iova += mapped;
+		paddr += mapped;
 	}
 
 	/* unroll mapping in case something went wrong */
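To make the commit message's premise concrete, here is a minimal, standalone model of how a pgsize/count pair can be derived: pick the largest supported page size that the iova/paddr alignment and the remaining length allow, then count how many such pages fit before the next larger supported size becomes usable. This is an illustrative sketch only, with hypothetical names and an assumed 64-bit long; the kernel's iommu_pgsize() additionally requires iova and paddr to be equally offset within the larger page, and BUGs if no suitable size exists.

    #include <stddef.h>
    #include <stdio.h>

    /* Illustrative model of the pgsize/count selection; not kernel code. */
    static size_t pick_pgsize(unsigned long iova, unsigned long paddr,
                              size_t size, unsigned long pgsize_bitmap,
                              size_t *count)
    {
            unsigned long addr_merge = iova | paddr;
            unsigned long pgsizes, pgsize, pgsize_next;
            size_t offset;

            /* Supported sizes no larger than the remaining length... */
            pgsizes = pgsize_bitmap & ((2UL << (63 - __builtin_clzl(size))) - 1);

            /* ...and no larger than the iova/paddr alignment permits. */
            if (addr_merge)
                    pgsizes &= (2UL << __builtin_ctzl(addr_merge)) - 1;

            /* Largest remaining size (assume one exists; the kernel BUGs otherwise). */
            pgsize = 1UL << (63 - __builtin_clzl(pgsizes));

            /* Smallest supported size strictly larger than pgsize, if any. */
            pgsizes = pgsize_bitmap & ~(2UL * pgsize - 1);
            if (pgsizes) {
                    pgsize_next = 1UL << __builtin_ctzl(pgsizes);
                    /* Distance from addr_merge to the next larger-size boundary. */
                    offset = pgsize_next - (addr_merge & (pgsize_next - 1));
                    /*
                     * If a full larger page fits beyond that boundary, stop
                     * counting there so the next iteration can use the
                     * larger size.
                     */
                    if (offset + pgsize_next <= size)
                            size = offset;
            }

            *count = size / pgsize;
            return pgsize;
    }

    int main(void)
    {
            size_t count;
            /* 4K/2M/1G hardware, 4MB mapping starting 4K past a 2M boundary. */
            size_t pgsize = pick_pgsize(0x201000, 0x201000, 0x400000,
                                        (1UL << 12) | (1UL << 21) | (1UL << 30),
                                        &count);
            /* Prints pgsize=0x1000 count=511: 4K pages up to the 2M boundary. */
            printf("pgsize=0x%zx count=%zu\n", pgsize, count);
            return 0;
    }

In that example, the 511 leaf entries become a single ops->map_pages() call where they previously took 511 trips through ops->map(), which is the saving this patch is after.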