Commit 5009065d38c95455bd2d27c2838313e3dd0c5bc7

Authored by Ohad Ben-Cohen
Committed by Joerg Roedel
1 parent 1ea6b8f489

iommu/core: stop converting bytes to page order back and forth

Express sizes in bytes rather than in page order, to eliminate the
redundant size->order->size conversions that occur whenever the IOMMU
API calls into the low-level drivers' map/unmap methods.

Adapt all existing drivers accordingly.

Signed-off-by: Ohad Ben-Cohen <ohad@wizery.com>
Cc: David Brown <davidb@codeaurora.org>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Joerg Roedel <Joerg.Roedel@amd.com>
Cc: Stepan Moskovchenko <stepanm@codeaurora.org>
Cc: KyongHo Cho <pullip.cho@samsung.com>
Cc: Hiroshi DOYU <hdoyu@nvidia.com>
Cc: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>

Showing 6 changed files with 29 additions and 42 deletions
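
To see concretely what "back and forth" means, here is a minimal userspace sketch of the conversion chain this commit removes. Everything below is illustrative only: old_driver_map()/new_driver_map() are hypothetical stand-ins for a driver's map method, and PAGE_SIZE/get_order() are simplified userspace versions of the kernel helpers.

#include <stddef.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* simplified stand-in for the kernel's get_order(): smallest order
 * whose page count covers 'size' */
static int get_order(size_t size)
{
        int order = 0;

        while ((PAGE_SIZE << order) < size)
                order++;
        return order;
}

/* old contract: the core passed a page order, so every driver
 * immediately converted it back into a byte count */
static int old_driver_map(unsigned long iova, int gfp_order)
{
        size_t page_size = PAGE_SIZE << gfp_order; /* order -> size, again */

        (void)iova; (void)page_size;               /* ...program PTEs... */
        return 0;
}

/* new contract: the byte count flows through untouched */
static int new_driver_map(unsigned long iova, size_t size)
{
        (void)iova; (void)size;                    /* ...program PTEs... */
        return 0;
}

int main(void)
{
        size_t size = 4 * PAGE_SIZE;

        /* before: size -> order in the core, order -> size in the driver */
        old_driver_map(0x1000, get_order(size));

        /* after: bytes end to end */
        new_driver_map(0x1000, size);
        return 0;
}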

drivers/iommu/amd_iommu.c

@@ -2702,9 +2702,8 @@
 }
 
 static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
-                         phys_addr_t paddr, int gfp_order, int iommu_prot)
+                         phys_addr_t paddr, size_t page_size, int iommu_prot)
 {
-        unsigned long page_size = 0x1000UL << gfp_order;
         struct protection_domain *domain = dom->priv;
         int prot = 0;
         int ret;
@@ -2721,21 +2720,19 @@
         return ret;
 }
 
-static int amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
-                           int gfp_order)
+static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
+                              size_t page_size)
 {
         struct protection_domain *domain = dom->priv;
-        unsigned long page_size, unmap_size;
+        size_t unmap_size;
 
-        page_size = 0x1000UL << gfp_order;
-
         mutex_lock(&domain->api_lock);
         unmap_size = iommu_unmap_page(domain, iova, page_size);
         mutex_unlock(&domain->api_lock);
 
         domain_flush_tlb_pde(domain);
 
-        return get_order(unmap_size);
+        return unmap_size;
 }
 
 static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,

drivers/iommu/intel-iommu.c

@@ -3979,12 +3979,11 @@
 
 static int intel_iommu_map(struct iommu_domain *domain,
                            unsigned long iova, phys_addr_t hpa,
-                           int gfp_order, int iommu_prot)
+                           size_t size, int iommu_prot)
 {
         struct dmar_domain *dmar_domain = domain->priv;
         u64 max_addr;
         int prot = 0;
-        size_t size;
         int ret;
 
         if (iommu_prot & IOMMU_READ)
@@ -3994,7 +3993,6 @@
         if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
                 prot |= DMA_PTE_SNP;
 
-        size = PAGE_SIZE << gfp_order;
         max_addr = iova + size;
         if (dmar_domain->max_addr < max_addr) {
                 u64 end;
@@ -4017,11 +4015,10 @@
         return ret;
 }
 
-static int intel_iommu_unmap(struct iommu_domain *domain,
-                             unsigned long iova, int gfp_order)
+static size_t intel_iommu_unmap(struct iommu_domain *domain,
+                                unsigned long iova, size_t size)
 {
         struct dmar_domain *dmar_domain = domain->priv;
-        size_t size = PAGE_SIZE << gfp_order;
         int order;
 
         order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
@@ -4030,7 +4027,7 @@
         if (dmar_domain->max_addr == iova + size)
                 dmar_domain->max_addr = iova;
 
-        return order;
+        return PAGE_SIZE << order;
 }
 
 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,

drivers/iommu/iommu.c

@@ -168,13 +168,13 @@
 
         BUG_ON(!IS_ALIGNED(iova | paddr, size));
 
-        return domain->ops->map(domain, iova, paddr, gfp_order, prot);
+        return domain->ops->map(domain, iova, paddr, size, prot);
 }
 EXPORT_SYMBOL_GPL(iommu_map);
 
 int iommu_unmap(struct iommu_domain *domain, unsigned long iova, int gfp_order)
 {
-        size_t size;
+        size_t size, unmapped;
 
         if (unlikely(domain->ops->unmap == NULL))
                 return -ENODEV;
@@ -183,7 +183,9 @@
 
         BUG_ON(!IS_ALIGNED(iova, size));
 
-        return domain->ops->unmap(domain, iova, gfp_order);
+        unmapped = domain->ops->unmap(domain, iova, size);
+
+        return get_order(unmapped);
 }
 EXPORT_SYMBOL_GPL(iommu_unmap);
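
Note the asymmetry the core still carries at this point: the exported iommu_unmap() continues to take a page order from its callers and to return one, converting to bytes only at the driver boundary (and back via get_order() on the way out), so callers are unaffected by this commit; the caller-facing API moved to bytes in later follow-up work. A hedged, non-compilable fragment of caller-side usage (assuming 4 KiB pages; domain/iova setup omitted):

        /* iommu_unmap() still speaks page order to its callers here */
        int order = get_order(4 * PAGE_SIZE);       /* 16 KiB -> order 2 */
        int unmapped_order = iommu_unmap(domain, iova, order);
        size_t unmapped_bytes = PAGE_SIZE << unmapped_order;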

drivers/iommu/msm_iommu.c

@@ -352,7 +352,7 @@
 }
 
 static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
-                         phys_addr_t pa, int order, int prot)
+                         phys_addr_t pa, size_t len, int prot)
 {
         struct msm_priv *priv;
         unsigned long flags;
@@ -363,7 +363,6 @@
         unsigned long *sl_pte;
         unsigned long sl_offset;
         unsigned int pgprot;
-        size_t len = 0x1000UL << order;
         int ret = 0, tex, sh;
 
         spin_lock_irqsave(&msm_iommu_lock, flags);
@@ -463,8 +462,8 @@
         return ret;
 }
 
-static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
-                           int order)
+static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
+                              size_t len)
 {
         struct msm_priv *priv;
         unsigned long flags;
@@ -474,7 +473,6 @@
         unsigned long *sl_table;
         unsigned long *sl_pte;
         unsigned long sl_offset;
-        size_t len = 0x1000UL << order;
         int i, ret = 0;
 
         spin_lock_irqsave(&msm_iommu_lock, flags);
@@ -544,15 +542,12 @@
 
         ret = __flush_iotlb(domain);
 
-        /*
-         * the IOMMU API requires us to return the order of the unmapped
-         * page (on success).
-         */
-        if (!ret)
-                ret = order;
 fail:
         spin_unlock_irqrestore(&msm_iommu_lock, flags);
-        return ret;
+
+        /* the IOMMU API requires us to return how many bytes were unmapped */
+        len = ret ? 0 : len;
+        return len;
 }
 
 static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,

drivers/iommu/omap-iommu.c

@@ -1019,12 +1019,11 @@
 }
 
 static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
-                          phys_addr_t pa, int order, int prot)
+                          phys_addr_t pa, size_t bytes, int prot)
 {
         struct omap_iommu_domain *omap_domain = domain->priv;
         struct omap_iommu *oiommu = omap_domain->iommu_dev;
         struct device *dev = oiommu->dev;
-        size_t bytes = PAGE_SIZE << order;
         struct iotlb_entry e;
         int omap_pgsz;
         u32 ret, flags;
@@ -1049,19 +1048,16 @@
         return ret;
 }
 
-static int omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
-                            int order)
+static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
+                               size_t size)
 {
         struct omap_iommu_domain *omap_domain = domain->priv;
         struct omap_iommu *oiommu = omap_domain->iommu_dev;
         struct device *dev = oiommu->dev;
-        size_t unmap_size;
 
-        dev_dbg(dev, "unmapping da 0x%lx order %d\n", da, order);
+        dev_dbg(dev, "unmapping da 0x%lx size %u\n", da, size);
 
-        unmap_size = iopgtable_clear_entry(oiommu, da);
-
-        return unmap_size ? get_order(unmap_size) : -EINVAL;
+        return iopgtable_clear_entry(oiommu, da);
 }
 
 static int

include/linux/iommu.h

@@ -54,9 +54,9 @@
         int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
         void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
         int (*map)(struct iommu_domain *domain, unsigned long iova,
-                   phys_addr_t paddr, int gfp_order, int prot);
-        int (*unmap)(struct iommu_domain *domain, unsigned long iova,
-                     int gfp_order);
+                   phys_addr_t paddr, size_t size, int prot);
+        size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
+                        size_t size);
         phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
                                     unsigned long iova);
         int (*domain_has_cap)(struct iommu_domain *domain,
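
For driver authors, the new iommu_ops contract is: map() receives the region size in bytes, and unmap() returns the number of bytes actually unmapped (0 on failure) rather than a page order. A minimal skeleton against the new signatures might look like the following; the foo_* names and the foo_program_ptes()/foo_clear_ptes() helpers are hypothetical, and the remaining ops and all error handling are elided:

/* hypothetical driver skeleton against the new byte-based ops */
static int foo_iommu_map(struct iommu_domain *domain, unsigned long iova,
                         phys_addr_t paddr, size_t size, int prot)
{
        /* 'size' already arrives in bytes; no PAGE_SIZE << order dance */
        return foo_program_ptes(domain->priv, iova, paddr, size, prot);
}

static size_t foo_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
                              size_t size)
{
        /* report how many bytes were actually torn down, 0 on failure */
        return foo_clear_ptes(domain->priv, iova, size);
}

static struct iommu_ops foo_iommu_ops = {
        .map    = foo_iommu_map,
        .unmap  = foo_iommu_unmap,
};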