author    Simon Xue <xxm@rock-chips.com>    2018-04-22 00:29:15 +0800
committer Tao Huang <huangtao@rock-chips.com>    2018-04-27 10:23:33 +0800
commit    b8bca70342256b149528654e3328597baa334364 (patch)
tree      ca27ca690cc684a0bb288b21dfd902f87643ee97
parent    c9b30e4ff462fafcfeb60b3e2127a7bc01f49875 (diff)
iommu/rockchip: support rk_iommu_map_sg for iommu ops
Implement rk_iommu_map_sg for rk_iommu_ops. It flushes the TLB only once,
after all sg entries have been mapped, which speeds up the map operation.

Change-Id: Ief123ad363018d2b3227066c07338ccbd75c9d84
Signed-off-by: Simon Xue <xxm@rock-chips.com>
-rw-r--r--  drivers/iommu/rockchip-iommu.c  73
1 file changed, 71 insertions(+), 2 deletions(-)
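For context, a minimal sketch of the flush pattern being changed. map_one(),
zap_first_last() and zap_entire() are hypothetical stand-ins for iommu_map(),
rk_iommu_zap_iova_first_last() and the new RK_MMU_CMD_ZAP_CACHE path; the
driver's actual code follows in the diff below.

/* Before: default_iommu_map_sg() calls iommu_map() once per entry, and
 * each rk_iommu_map() finishes with a partial TLB zap (inside
 * rk_iommu_map_iova()), so an N-entry scatterlist pays N per-entry
 * invalidates. */
for_each_sg(sg, s, nents, i) {
        map_one(domain, iova + mapped, s, prot);
        zap_first_last(domain, iova + mapped, s->length); /* per entry */
        mapped += s->length;
}

/* After: rk_iommu_map_sg() folds IOMMU_INV_TLB_ENTIRE into prot so the
 * per-entry zap is skipped, then invalidates the whole TLB once after
 * every entry has been mapped. */
for_each_sg(sg, s, nents, i) {
        map_one(domain, iova + mapped, s, prot | IOMMU_INV_TLB_ENTIRE);
        mapped += s->length;
}
zap_entire(domain); /* single RK_MMU_CMD_ZAP_CACHE per MMU */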
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 3ef7d54576df..bf4018ff644f 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -76,6 +76,8 @@
#define IOMMU_REG_POLL_COUNT_FAST 1000
+#define IOMMU_INV_TLB_ENTIRE BIT(4) /* invalidate tlb entire */
+
static LIST_HEAD(iommu_dev_list);
struct rk_iommu_domain {
@@ -756,7 +758,8 @@ static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
* We only zap the first and last iova, since only they could have
* dte or pte shared with an existing mapping.
*/
- rk_iommu_zap_iova_first_last(rk_domain, iova, size);
+ if (!(prot & IOMMU_INV_TLB_ENTIRE))
+ rk_iommu_zap_iova_first_last(rk_domain, iova, size);
return 0;
unwind:
@@ -847,6 +850,72 @@ static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
return unmap_size;
}
+static void rk_iommu_zap_tlb(struct iommu_domain *domain)
+{
+ struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
+ struct list_head *pos;
+ int i;
+
+ mutex_lock(&rk_domain->iommus_lock);
+ list_for_each(pos, &rk_domain->iommus) {
+ struct rk_iommu *iommu;
+
+ iommu = list_entry(pos, struct rk_iommu, node);
+ rk_iommu_power_on(iommu);
+ for (i = 0; i < iommu->num_mmu; i++) {
+ rk_iommu_write(iommu->bases[i],
+ RK_MMU_COMMAND,
+ RK_MMU_CMD_ZAP_CACHE);
+ }
+ rk_iommu_power_off(iommu);
+ }
+ mutex_unlock(&rk_domain->iommus_lock);
+}
+
+static size_t rk_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents, int prot)
+{
+ struct scatterlist *s;
+ size_t mapped = 0;
+ unsigned int i, min_pagesz;
+ int ret;
+
+ if (unlikely(domain->ops->pgsize_bitmap == 0UL))
+ return 0;
+
+ min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+
+ for_each_sg(sg, s, nents, i) {
+ phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
+
+ /*
+ * We are mapping on IOMMU page boundaries, so offset within
+ * the page must be 0. However, the IOMMU may support pages
+ * smaller than PAGE_SIZE, so s->offset may still represent
+ * an offset of that boundary within the CPU page.
+ */
+ if (!IS_ALIGNED(s->offset, min_pagesz))
+ goto out_err;
+
+ ret = iommu_map(domain, iova + mapped, phys, s->length,
+ prot | IOMMU_INV_TLB_ENTIRE);
+ if (ret)
+ goto out_err;
+
+ mapped += s->length;
+ }
+
+ rk_iommu_zap_tlb(domain);
+
+ return mapped;
+
+out_err:
+ /* undo mappings already done */
+ iommu_unmap(domain, iova, mapped);
+
+ return 0;
+}
+
static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
{
struct iommu_group *group;
@@ -1165,7 +1234,7 @@ static const struct iommu_ops rk_iommu_ops = {
.detach_dev = rk_iommu_detach_device,
.map = rk_iommu_map,
.unmap = rk_iommu_unmap,
- .map_sg = default_iommu_map_sg,
+ .map_sg = rk_iommu_map_sg,
.add_device = rk_iommu_add_device,
.remove_device = rk_iommu_remove_device,
.iova_to_phys = rk_iommu_iova_to_phys,
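With .map_sg wired up, clients reach the batched path through the generic
iommu_map_sg() wrapper, which dispatches to the domain's map_sg op in kernels
of this vintage. A hedged usage sketch; the domain, iova and sg_table
variables are illustrative, and rk_iommu_map_sg() returns 0 on failure after
unwinding its own partial mappings:

        size_t mapped;

        mapped = iommu_map_sg(domain, iova, sgt->sgl, sgt->nents,
                              IOMMU_READ | IOMMU_WRITE);
        if (!mapped)
                return -ENOMEM; /* nothing left mapped; the op unwound */

The trade-off: one RK_MMU_CMD_ZAP_CACHE invalidates the entire TLB rather
than just the touched range, but for multi-entry scatterlists a single full
invalidate per MMU is cheaper than up to two targeted zaps (first and last
iova) per entry, which is the speed-up the commit message describes.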