path: root/drivers/iommu/iova.c
author    Robin Murphy <robin.murphy@arm.com>    2021-12-17 15:30:57 +0000
committer Joerg Roedel <jroedel@suse.de>         2021-12-20 09:03:05 +0100
commit    649ad9835a3783bcb6c69368fa939e0010abb2c6 (patch)
tree      cd15efbd451a968cfabde7aad79c78d2e40df006 /drivers/iommu/iova.c
parent    d5c383f2c98ac58c210b266cdaf7b86bc32d1ad1 (diff)
iommu/iova: Squash flush_cb abstraction
Once again, with iommu-dma now being the only flush queue user, we no
longer need the extra level of indirection through flush_cb. Squash that
and let the flush queue code call the domain method directly. This does
mean temporarily having to carry an additional copy of the IOMMU domain
pointer around instead, but only until a later patch untangles it again.

Reviewed-by: John Garry <john.garry@huawei.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Link: https://lore.kernel.org/r/e3f9b4acdd6640012ef4fbc819ac868d727b64a9.1639753638.git.robin.murphy@arm.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
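To make the squash concrete, here is a minimal, self-contained C sketch of the indirection being removed. The struct layouts, the dummy_flush() op and the main() harness are simplified stand-ins invented for illustration rather than the kernel's real definitions; only the fq_domain / ops->flush_iotlb_all shape mirrors the patch below.

#include <stdio.h>

struct iommu_domain;

/* Simplified stand-in for the kernel's per-driver ops table. */
struct iommu_ops {
	void (*flush_iotlb_all)(struct iommu_domain *domain);
};

struct iommu_domain {
	const struct iommu_ops *ops;
};

struct iova_domain {
	/*
	 * Before this patch, an opaque callback hid the domain behind
	 * one extra hop:
	 *
	 *	void (*flush_cb)(struct iova_domain *iovad);
	 *
	 * After it, the flush queue carries the domain pointer itself
	 * and invokes the op directly.
	 */
	struct iommu_domain *fq_domain;
};

/* Stand-in op; a real IOMMU driver would invalidate its IOTLB here. */
static void dummy_flush(struct iommu_domain *domain)
{
	(void)domain;
	puts("IOTLB flushed");
}

/* Mirrors iova_domain_flush() after the patch: no wrapper in between. */
static void iova_domain_flush(struct iova_domain *iovad)
{
	iovad->fq_domain->ops->flush_iotlb_all(iovad->fq_domain);
}

int main(void)
{
	const struct iommu_ops ops = { .flush_iotlb_all = dummy_flush };
	struct iommu_domain domain = { .ops = &ops };
	struct iova_domain iovad = { .fq_domain = &domain };

	iova_domain_flush(&iovad);
	return 0;
}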
Diffstat (limited to 'drivers/iommu/iova.c')
-rw-r--r--   drivers/iommu/iova.c   11
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 541857ca4fd5..bbf642940988 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -63,7 +63,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
 	iovad->start_pfn = start_pfn;
 	iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
 	iovad->max32_alloc_size = iovad->dma_32bit_pfn;
-	iovad->flush_cb = NULL;
+	iovad->fq_domain = NULL;
 	iovad->fq = NULL;
 	iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
 	rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
@@ -90,10 +90,10 @@ static void free_iova_flush_queue(struct iova_domain *iovad)
 
 	free_percpu(iovad->fq);
 	iovad->fq = NULL;
-	iovad->flush_cb = NULL;
+	iovad->fq_domain = NULL;
 }
 
-int init_iova_flush_queue(struct iova_domain *iovad, iova_flush_cb flush_cb)
+int init_iova_flush_queue(struct iova_domain *iovad, struct iommu_domain *fq_domain)
 {
 	struct iova_fq __percpu *queue;
 	int cpu;
@@ -105,8 +105,6 @@ int init_iova_flush_queue(struct iova_domain *iovad, iova_flush_cb flush_cb)
 	if (!queue)
 		return -ENOMEM;
 
-	iovad->flush_cb = flush_cb;
-
 	for_each_possible_cpu(cpu) {
 		struct iova_fq *fq;
 
@@ -117,6 +115,7 @@ int init_iova_flush_queue(struct iova_domain *iovad, iova_flush_cb flush_cb)
 		spin_lock_init(&fq->lock);
 	}
 
+	iovad->fq_domain = fq_domain;
 	iovad->fq = queue;
 
 	timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
@@ -598,7 +597,7 @@ static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)
 static void iova_domain_flush(struct iova_domain *iovad)
 {
 	atomic64_inc(&iovad->fq_flush_start_cnt);
-	iovad->flush_cb(iovad);
+	iovad->fq_domain->ops->flush_iotlb_all(iovad->fq_domain);
 	atomic64_inc(&iovad->fq_flush_finish_cnt);
 }
 
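On the caller side (not visible in this path-limited view), a flush queue user such as iommu-dma now hands its struct iommu_domain straight to init_iova_flush_queue() instead of registering a forwarding callback. The trade-off, as the commit message notes, is that struct iova_domain temporarily carries its own copy of the domain pointer; the series accepts that duplication here because a later patch untangles it again.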