Commit 6a0596583fadd15dca293736114abdea306d3d7c

Authored by Linus Torvalds

Merge git://git.infradead.org/iommu-2.6

* git://git.infradead.org/iommu-2.6:
  intel-iommu: fix superpage support in pfn_to_dma_pte()
  intel-iommu: set iommu_superpage on VM domains to lowest common denominator
  intel-iommu: fix return value of iommu_unmap() API
  MAINTAINERS: Update VT-d entry for drivers/pci -> drivers/iommu move
  intel-iommu: Export a flag indicating that the IOMMU is used for iGFX.
  intel-iommu: Workaround IOTLB hang on Ironlake GPU
  intel-iommu: Fix AB-BA lockdep report

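The "lowest common denominator" change in the list above ANDs together each hardware unit's superpage capability bits, so a domain only ever uses page sizes that every active IOMMU can handle. A minimal standalone sketch of the idea, using hypothetical type and helper names (the real driver iterates with for_each_active_iommu() and reads cap_super_page_val(), as the domain_update_iommu_superpage() hunk below shows):

    #include <linux/bitops.h>                         /* fls() */

    /* Sketch: intersect per-unit superpage capability masks; the highest bit
     * still set afterwards is the largest page size usable on every unit. */
    struct unit { unsigned int superpage_caps; };     /* hypothetical type */

    static int common_superpage_level(const struct unit *units, int n)
    {
            unsigned int mask = 0xf;                  /* start from all levels */
            int i;

            for (i = 0; i < n; i++) {
                    mask &= units[i].superpage_caps;
                    if (!mask)
                            break;                    /* no common superpage size */
            }
            return fls(mask);                         /* 0 if none remain */
    }
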
Showing 2 changed files

MAINTAINERS
... ... @@ -3313,7 +3313,7 @@
3313 3313 L: iommu@lists.linux-foundation.org
3314 3314 T: git git://git.infradead.org/iommu-2.6.git
3315 3315 S: Supported
3316   -F: drivers/pci/intel-iommu.c
  3316 +F: drivers/iommu/intel-iommu.c
3317 3317 F: include/linux/intel-iommu.h
3318 3318  
3319 3319 INTEL IOP-ADMA DMA DRIVER
drivers/iommu/intel-iommu.c
... ... @@ -306,6 +306,11 @@
306 306 return (pte->val & 3) != 0;
307 307 }
308 308  
  309 +static inline bool dma_pte_superpage(struct dma_pte *pte)
  310 +{
  311 + return (pte->val & (1 << 7));
  312 +}
  313 +
309 314 static inline int first_pte_in_page(struct dma_pte *pte)
310 315 {
311 316 return !((unsigned long)pte & ~VTD_PAGE_MASK);
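
The dma_pte_superpage() helper added here tests bit 7 of a second-level PTE, the large-page flag that the existing code open-codes via DMA_PTE_LARGE_PAGE (visible in the pfn_to_dma_pte() hunk further down). An equivalent formulation using that macro, shown only for clarity and not part of the commit:

    static inline bool dma_pte_superpage(struct dma_pte *pte)
    {
            /* DMA_PTE_LARGE_PAGE is the driver's name for bit 7 */
            return (pte->val & DMA_PTE_LARGE_PAGE) != 0;
    }
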
... ... @@ -404,6 +409,9 @@
404 409 static int intel_iommu_strict;
405 410 static int intel_iommu_superpage = 1;
406 411  
  412 +int intel_iommu_gfx_mapped;
  413 +EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
  414 +
407 415 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
408 416 static DEFINE_SPINLOCK(device_domain_lock);
409 417 static LIST_HEAD(device_domain_list);
410 418  
... ... @@ -577,17 +585,18 @@
577 585  
578 586 static void domain_update_iommu_superpage(struct dmar_domain *domain)
579 587 {
580   - int i, mask = 0xf;
  588 + struct dmar_drhd_unit *drhd;
  589 + struct intel_iommu *iommu = NULL;
  590 + int mask = 0xf;
581 591  
582 592 if (!intel_iommu_superpage) {
583 593 domain->iommu_superpage = 0;
584 594 return;
585 595 }
586 596  
587   - domain->iommu_superpage = 4; /* 1TiB */
588   -
589   - for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
590   - mask |= cap_super_page_val(g_iommus[i]->cap);
  597 + /* set iommu_superpage to the smallest common denominator */
  598 + for_each_active_iommu(iommu, drhd) {
  599 + mask &= cap_super_page_val(iommu->cap);
591 600 if (!mask) {
592 601 break;
593 602 }
594 603 }
595 604 domain->iommu_superpage = fls(mask);
596 605 }
... ... @@ -730,29 +739,23 @@
730 739 }
731 740  
732 741 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
733   - unsigned long pfn, int large_level)
  742 + unsigned long pfn, int target_level)
734 743 {
735 744 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
736 745 struct dma_pte *parent, *pte = NULL;
737 746 int level = agaw_to_level(domain->agaw);
738   - int offset, target_level;
  747 + int offset;
739 748  
740 749 BUG_ON(!domain->pgd);
741 750 BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
742 751 parent = domain->pgd;
743 752  
744   - /* Search pte */
745   - if (!large_level)
746   - target_level = 1;
747   - else
748   - target_level = large_level;
749   -
750 753 while (level > 0) {
751 754 void *tmp_page;
752 755  
753 756 offset = pfn_level_offset(pfn, level);
754 757 pte = &parent[offset];
755   - if (!large_level && (pte->val & DMA_PTE_LARGE_PAGE))
  758 + if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
756 759 break;
757 760 if (level == target_level)
758 761 break;
759 762  
... ... @@ -816,13 +819,14 @@
816 819 }
817 820  
818 821 /* clear last level pte, a tlb flush should be followed */
819   -static void dma_pte_clear_range(struct dmar_domain *domain,
  822 +static int dma_pte_clear_range(struct dmar_domain *domain,
820 823 unsigned long start_pfn,
821 824 unsigned long last_pfn)
822 825 {
823 826 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
824 827 unsigned int large_page = 1;
825 828 struct dma_pte *first_pte, *pte;
  829 + int order;
826 830  
827 831 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
828 832 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
... ... @@ -846,6 +850,9 @@
846 850 (void *)pte - (void *)first_pte);
847 851  
848 852 } while (start_pfn && start_pfn <= last_pfn);
  853 +
  854 + order = (large_page - 1) * 9;
  855 + return order;
849 856 }
850 857  
851 858 /* free page table pages. last level pte should already be cleared */
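
Each VT-d page-table level covers 9 more address bits (512 entries per table), so the order computed above follows directly from the level of the last PTE cleared, assuming 4KiB base pages:

    order = (large_page - 1) * 9
    large_page == 1 (4KiB leaf)       ->  order 0,   PAGE_SIZE << 0  == 4KiB
    large_page == 2 (2MiB superpage)  ->  order 9,   PAGE_SIZE << 9  == 2MiB
    large_page == 3 (1GiB superpage)  ->  order 18,  PAGE_SIZE << 18 == 1GiB

This is the value intel_iommu_unmap() now passes back to the generic IOMMU API (see the later hunk) instead of echoing the caller's gfp_order.
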
... ... @@ -3226,9 +3233,6 @@
3226 3233 }
3227 3234 }
3228 3235  
3229   - if (dmar_map_gfx)
3230   - return;
3231   -
3232 3236 for_each_drhd_unit(drhd) {
3233 3237 int i;
3234 3238 if (drhd->ignored || drhd->include_all)
3235 3239 continue;
... ... @@ -3236,18 +3240,23 @@
3236 3240  
3237 3241 for (i = 0; i < drhd->devices_cnt; i++)
3238 3242 if (drhd->devices[i] &&
3239   - !IS_GFX_DEVICE(drhd->devices[i]))
  3243 + !IS_GFX_DEVICE(drhd->devices[i]))
3240 3244 break;
3241 3245  
3242 3246 if (i < drhd->devices_cnt)
3243 3247 continue;
3244 3248  
3245   - /* bypass IOMMU if it is just for gfx devices */
3246   - drhd->ignored = 1;
3247   - for (i = 0; i < drhd->devices_cnt; i++) {
3248   - if (!drhd->devices[i])
3249   - continue;
3250   - drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
  3249 + /* This IOMMU has *only* gfx devices. Either bypass it or
  3250 + set the gfx_mapped flag, as appropriate */
  3251 + if (dmar_map_gfx) {
  3252 + intel_iommu_gfx_mapped = 1;
  3253 + } else {
  3254 + drhd->ignored = 1;
  3255 + for (i = 0; i < drhd->devices_cnt; i++) {
  3256 + if (!drhd->devices[i])
  3257 + continue;
  3258 + drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
  3259 + }
3251 3260 }
3252 3261 }
3253 3262 }
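
With the restructured quirk, an IOMMU that covers only graphics devices is either bypassed (when dmar_map_gfx is clear) or left active with intel_iommu_gfx_mapped set, and the flag is exported earlier in this file so a graphics driver can tell which case applies. An illustrative consumer, not taken from this commit (the extern declaration is the consumer's own; this commit only exports the symbol):

    #ifdef CONFIG_INTEL_IOMMU
    extern int intel_iommu_gfx_mapped;
    #endif

    /* Hypothetical helper: does graphics DMA go through the IOMMU? */
    static bool gfx_behind_iommu(void)
    {
    #ifdef CONFIG_INTEL_IOMMU
            return intel_iommu_gfx_mapped != 0;
    #else
            return false;
    #endif
    }
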
... ... @@ -3568,6 +3577,8 @@
3568 3577 found = 1;
3569 3578 }
3570 3579  
  3580 + spin_unlock_irqrestore(&device_domain_lock, flags);
  3581 +
3571 3582 if (found == 0) {
3572 3583 unsigned long tmp_flags;
3573 3584 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
... ... @@ -3584,8 +3595,6 @@
3584 3595 spin_unlock_irqrestore(&iommu->lock, tmp_flags);
3585 3596 }
3586 3597 }
3587   -
3588   - spin_unlock_irqrestore(&device_domain_lock, flags);
3589 3598 }
3590 3599  
3591 3600 static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
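
The lockdep fix above moves the device_domain_lock release ahead of the found == 0 block, so domain->iommu_lock and iommu->lock are no longer acquired while device_domain_lock is held on this path. An AB-BA report means another path nests the same locks the other way round; the pattern, as a conceptual sketch rather than verbatim driver code:

    /* path 1 (old placement of the unlock):   path 2 (elsewhere in the driver):
     *   spin_lock(&device_domain_lock);         spin_lock(&iommu->lock);
     *   spin_lock(&iommu->lock);                spin_lock(&device_domain_lock);
     *
     * Dropping device_domain_lock first means path 1 never holds both locks
     * at once, so the inverted ordering can no longer deadlock. */
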
... ... @@ -3739,6 +3748,7 @@
3739 3748 vm_domain_exit(dmar_domain);
3740 3749 return -ENOMEM;
3741 3750 }
  3751 + domain_update_iommu_cap(dmar_domain);
3742 3752 domain->priv = dmar_domain;
3743 3753  
3744 3754 return 0;
3745 3755 }
3746 3756  
... ... @@ -3864,14 +3874,15 @@
3864 3874 {
3865 3875 struct dmar_domain *dmar_domain = domain->priv;
3866 3876 size_t size = PAGE_SIZE << gfp_order;
  3877 + int order;
3867 3878  
3868   - dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
  3879 + order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
3869 3880 (iova + size - 1) >> VTD_PAGE_SHIFT);
3870 3881  
3871 3882 if (dmar_domain->max_addr == iova + size)
3872 3883 dmar_domain->max_addr = iova;
3873 3884  
3874   - return gfp_order;
  3885 + return order;
3875 3886 }
3876 3887  
3877 3888 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
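
Returning the order from dma_pte_clear_range() matters because the generic iommu_unmap() hands the driver's return value back to its caller as the order actually unmapped; always echoing gfp_order could misreport the result when a superpage PTE covered more than the caller asked for. A hedged caller-side sketch against the iommu API of this era (treat the exact signature as approximate):

    int order = iommu_unmap(domain, iova, gfp_order);
    size_t unmapped = (size_t)PAGE_SIZE << order;   /* what was really torn down */
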
... ... @@ -3950,7 +3961,11 @@
3950 3961 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
3951 3962 printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
3952 3963 dmar_map_gfx = 0;
3953   - }
  3964 + } else if (dmar_map_gfx) {
  3965 + /* we have to ensure the gfx device is idle before we flush */
  3966 + printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
  3967 + intel_iommu_strict = 1;
  3968 + }
3954 3969 }
3955 3970 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
3956 3971 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
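
The Ironlake workaround forces intel_iommu_strict when the graphics device stays behind the IOMMU: IOTLB invalidations are then issued synchronously at unmap time rather than batched into a deferred flush that may run after the GPU is no longer idle. A conceptual sketch with hypothetical helper names, not driver code:

    if (intel_iommu_strict)
            flush_iotlb_now(domain, iova, npages);        /* hypothetical: flush on every unmap */
    else
            queue_deferred_flush(domain, iova, npages);   /* hypothetical: batch, flush later */
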