ANDROID: iommu/io-pgtable-arm: Free underlying page tables for large mappings

Consider the case where a 2N MB buffer (where N > 1) is composed
entirely of 4 KB pages. This means that at the second-to-last level,
the buffer will have N non-leaf entries, each pointing to a page table
of 4 KB mappings.
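
For example, with a 4 KB granule each last-level page table holds 512
entries and so covers 512 * 4 KB = 2 MB of IOVA space; an 8 MB buffer
(N = 4) mapped entirely with 4 KB pages therefore occupies four such
non-leaf entries at the second-to-last level.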

When the buffer is unmapped, all N entries at the second-to-last level
will be cleared. However, the existing logic only checks whether it
needs to free the underlying page table for the first non-leaf entry.
Therefore, the page table memory for the remaining N-1 entries will be
leaked.

Fix this memory leak by ensuring that we apply the same check to
all N entries that are being unmapped.
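
As a rough illustration of the difference, here is a minimal user-space
sketch; struct pte, free_subtable() and the unmap_*() helpers are
hypothetical stand-ins for the io-pgtable structures, not the actual
kernel interfaces:

/*
 * Hypothetical sketch of the leak, not the io-pgtable code itself:
 * entries[] stands in for the second-to-last-level table and
 * free_subtable() for __arm_lpae_free_pgtable().
 */
#include <stdlib.h>

struct pte {
	void *subtable;		/* non-NULL for a non-leaf entry */
};

void free_subtable(void *table)
{
	free(table);
}

/* Old behaviour: only the first entry's page table is ever freed. */
void unmap_old(struct pte *entries, size_t n)
{
	size_t i;

	if (entries[0].subtable)
		free_subtable(entries[0].subtable);
	for (i = 0; i < n; i++)
		entries[i].subtable = NULL;	/* tables of entries 1..n-1 leak */
}

/* Fixed behaviour: every entry being unmapped gets the same check. */
void unmap_fixed(struct pte *entries, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (entries[i].subtable)
			free_subtable(entries[i].subtable);
		entries[i].subtable = NULL;
	}
}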

Bug: 178537788
Fixes: 4ad6f2a43c ("FROMLIST: iommu/io-pgtable-arm: Implement arm_lpae_unmap_pages()")
Change-Id: I44cb26296209d1e351b94c763a6b322809c1159a
Signed-off-by: Isaac J. Manjarres <isaacm@codeaurora.org>
Author:    Isaac J. Manjarres
Date:      2021-05-12 14:15:21 -07:00
Committer: Will Deacon
Parent:    345f1259a0
Commit:    3c75179a31

@@ -621,7 +621,7 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 {
 	arm_lpae_iopte pte;
 	struct io_pgtable *iop = &data->iop;
-	int i, num_entries, max_entries, unmap_idx_start;
+	int i = 0, num_entries, max_entries, unmap_idx_start;
 
 	/* Something went horribly wrong and we ran out of page table */
 	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
@@ -637,27 +637,35 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
 		max_entries = ARM_LPAE_PTES_PER_TABLE(data) - unmap_idx_start;
 		num_entries = min_t(int, pgcount, max_entries);
-		__arm_lpae_set_pte(ptep, 0, num_entries, &iop->cfg);
 
-		if (!iopte_leaf(pte, lvl, iop->fmt)) {
-			/* Also flush any partial walks */
-			io_pgtable_tlb_flush_walk(iop, iova, num_entries * size,
-						  ARM_LPAE_GRANULE(data));
-			ptep = iopte_deref(pte, data);
-			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
-		} else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
-			/*
-			 * Order the PTE update against queueing the IOVA, to
-			 * guarantee that a flush callback from a different CPU
-			 * has observed it before the TLBIALL can be issued.
-			 */
-			smp_wmb();
-		} else {
-			for (i = 0; i < num_entries; i++)
-				io_pgtable_tlb_add_page(iop, gather, iova + i * size, size);
-		}
+		while (i < num_entries) {
+			pte = READ_ONCE(*ptep);
+			if (WARN_ON(!pte))
+				break;
+
+			__arm_lpae_set_pte(ptep, 0, 1, &iop->cfg);
+
+			if (!iopte_leaf(pte, lvl, iop->fmt)) {
+				/* Also flush any partial walks */
+				io_pgtable_tlb_flush_walk(iop, iova + i * size, size,
+							  ARM_LPAE_GRANULE(data));
+				__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
+			} else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
+				/*
+				 * Order the PTE update against queueing the IOVA, to
+				 * guarantee that a flush callback from a different CPU
+				 * has observed it before the TLBIALL can be issued.
+				 */
+				smp_wmb();
+			} else {
+				io_pgtable_tlb_add_page(iop, gather, iova + i * size, size);
+			}
+
+			ptep++;
+			i++;
+		}
 
-		return num_entries * size;
+		return i * size;
 	} else if (iopte_leaf(pte, lvl, iop->fmt)) {
 		/*
 		 * Insert a table at the next level to map the old region,