Commit f661197e0a95ec7305e1e790d95b72a74a1c4a0f

Authored by David Miller
Committed by Linus Torvalds
1 parent b1ed88b47f

Genericizing iova.[ch]

I would like to potentially move the sparc64 IOMMU code over to using
the nice new drivers/pci/iova.[ch] code for free area management.

In order to do that, we have to detach the IOMMU page size assumptions,
which only really need to exist in the intel-iommu.[ch] code.

This patch attempts to implement that.

[akpm@linux-foundation.org: build fix]
Signed-off-by: David S. Miller <davem@davemloft.net>
Acked-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

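For orientation, the visible API change is that init_iova_domain() now takes the caller's 32-bit boundary as a page-frame number, and the 4K page macros move from iova.h into intel-iommu.h. Below is a minimal sketch of a caller after this patch, mirroring the intel-iommu.c hunks in the diff; the domain name, setup function, and address range are illustrative only, not part of the patch.

    #include <linux/kernel.h>
    #include "iova.h"
    #include "intel-iommu.h"	/* IOVA_PFN() and DMA_32BIT_PFN now live here */

    static struct iova_domain example_iovad;

    static void example_domain_setup(void)
    {
    	struct iova *iova;

    	/* The caller, not iova.c, now says where its 32-bit PFN boundary is. */
    	init_iova_domain(&example_iovad, DMA_32BIT_PFN);

    	/* Reserving a range is unchanged; PFNs are in the caller's IOMMU page units. */
    	iova = reserve_iova(&example_iovad,
    			    IOVA_PFN(0xfee00000UL),	/* illustrative range only */
    			    IOVA_PFN(0xfeefffffUL));
    	if (!iova)
    		printk(KERN_ERR "example: reserve_iova failed\n");
    }
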
Showing 5 changed files with 23 additions and 20 deletions

drivers/pci/dmar.c
... ... @@ -26,6 +26,7 @@
26 26 #include <linux/pci.h>
27 27 #include <linux/dmar.h>
28 28 #include "iova.h"
  29 +#include "intel-iommu.h"
29 30  
30 31 #undef PREFIX
31 32 #define PREFIX "DMAR:"
drivers/pci/intel-iommu.c
... ... @@ -1088,7 +1088,7 @@
1088 1088 int i;
1089 1089 u64 addr, size;
1090 1090  
1091   - init_iova_domain(&reserved_iova_list);
  1091 + init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
1092 1092  
1093 1093 /* IOAPIC ranges shouldn't be accessed by DMA */
1094 1094 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
... ... @@ -1142,7 +1142,7 @@
1142 1142 int adjust_width, agaw;
1143 1143 unsigned long sagaw;
1144 1144  
1145   - init_iova_domain(&domain->iovad);
  1145 + init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
1146 1146 spin_lock_init(&domain->mapping_lock);
1147 1147  
1148 1148 domain_reserve_special_ranges(domain);
drivers/pci/intel-iommu.h
... ... @@ -23,8 +23,22 @@
23 23  
24 24 #include <linux/types.h>
25 25 #include <linux/msi.h>
  26 +#include <linux/sysdev.h>
26 27 #include "iova.h"
27 28 #include <linux/io.h>
  29 +
  30 +/*
  31 + * We need a fixed PAGE_SIZE of 4K irrespective of
  32 + * arch PAGE_SIZE for IOMMU page tables.
  33 + */
  34 +#define PAGE_SHIFT_4K (12)
  35 +#define PAGE_SIZE_4K (1UL << PAGE_SHIFT_4K)
  36 +#define PAGE_MASK_4K (((u64)-1) << PAGE_SHIFT_4K)
  37 +#define PAGE_ALIGN_4K(addr) (((addr) + PAGE_SIZE_4K - 1) & PAGE_MASK_4K)
  38 +
  39 +#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT_4K)
  40 +#define DMA_32BIT_PFN IOVA_PFN(DMA_32BIT_MASK)
  41 +#define DMA_64BIT_PFN IOVA_PFN(DMA_64BIT_MASK)
28 42  
29 43 /*
30 44 * Intel IOMMU register specification per version 1.0 public spec.
drivers/pci/iova.c
... ... @@ -9,19 +9,19 @@
9 9 #include "iova.h"
10 10  
11 11 void
12   -init_iova_domain(struct iova_domain *iovad)
  12 +init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
13 13 {
14 14 spin_lock_init(&iovad->iova_alloc_lock);
15 15 spin_lock_init(&iovad->iova_rbtree_lock);
16 16 iovad->rbroot = RB_ROOT;
17 17 iovad->cached32_node = NULL;
18   -
  18 + iovad->dma_32bit_pfn = pfn_32bit;
19 19 }
20 20  
21 21 static struct rb_node *
22 22 __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
23 23 {
24   - if ((*limit_pfn != DMA_32BIT_PFN) ||
  24 + if ((*limit_pfn != iovad->dma_32bit_pfn) ||
25 25 (iovad->cached32_node == NULL))
26 26 return rb_last(&iovad->rbroot);
27 27 else {
... ... @@ -37,7 +37,7 @@
37 37 __cached_rbnode_insert_update(struct iova_domain *iovad,
38 38 unsigned long limit_pfn, struct iova *new)
39 39 {
40   - if (limit_pfn != DMA_32BIT_PFN)
  40 + if (limit_pfn != iovad->dma_32bit_pfn)
41 41 return;
42 42 iovad->cached32_node = &new->node;
43 43 }
drivers/pci/iova.h
... ... @@ -15,22 +15,9 @@
15 15 #include <linux/rbtree.h>
16 16 #include <linux/dma-mapping.h>
17 17  
18   -/*
19   - * We need a fixed PAGE_SIZE of 4K irrespective of
20   - * arch PAGE_SIZE for IOMMU page tables.
21   - */
22   -#define PAGE_SHIFT_4K (12)
23   -#define PAGE_SIZE_4K (1UL << PAGE_SHIFT_4K)
24   -#define PAGE_MASK_4K (((u64)-1) << PAGE_SHIFT_4K)
25   -#define PAGE_ALIGN_4K(addr) (((addr) + PAGE_SIZE_4K - 1) & PAGE_MASK_4K)
26   -
27 18 /* IO virtual address start page frame number */
28 19 #define IOVA_START_PFN (1)
29 20  
30   -#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT_4K)
31   -#define DMA_32BIT_PFN IOVA_PFN(DMA_32BIT_MASK)
32   -#define DMA_64BIT_PFN IOVA_PFN(DMA_64BIT_MASK)
33   -
34 21 /* iova structure */
35 22 struct iova {
36 23 struct rb_node node;
... ... @@ -44,6 +31,7 @@
44 31 spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */
45 32 struct rb_root rbroot; /* iova domain rbtree root */
46 33 struct rb_node *cached32_node; /* Save last alloced node */
  34 + unsigned long dma_32bit_pfn;
47 35 };
48 36  
49 37 struct iova *alloc_iova_mem(void);
... ... @@ -56,7 +44,7 @@
56 44 struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
57 45 unsigned long pfn_hi);
58 46 void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
59   -void init_iova_domain(struct iova_domain *iovad);
  47 +void init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit);
60 48 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
61 49 void put_iova_domain(struct iova_domain *iovad);
62 50
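The point of pushing these constants out to the caller is that a non-Intel IOMMU is no longer forced into 4K bookkeeping by iova.c. A rough sketch of what the motivating sparc64-style reuse could look like; the 13-bit IO page shift and every name here are assumptions for illustration, not part of this patch.

    #include <linux/dma-mapping.h>
    #include "iova.h"

    /* Assumed: an IOMMU whose hardware page size is 8K rather than 4K. */
    #define EXAMPLE_IO_PAGE_SHIFT	13
    #define EXAMPLE_IOVA_PFN(addr)	((addr) >> EXAMPLE_IO_PAGE_SHIFT)

    static struct iova_domain example_arch_iovad;

    static void example_arch_iommu_init(void)
    {
    	/*
    	 * iova.c no longer hardcodes a 4K-based DMA_32BIT_PFN, so this
    	 * caller can express the 32-bit boundary in its own 8K frames.
    	 */
    	init_iova_domain(&example_arch_iovad,
    			 EXAMPLE_IOVA_PFN(DMA_32BIT_MASK));
    }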