drivers/pci/intel-iommu.c
  /*
   * Copyright (c) 2006, Intel Corporation.
   *
   * This program is free software; you can redistribute it and/or modify it
   * under the terms and conditions of the GNU General Public License,
   * version 2, as published by the Free Software Foundation.
   *
   * This program is distributed in the hope it will be useful, but WITHOUT
   * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   * more details.
   *
   * You should have received a copy of the GNU General Public License along with
   * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
   * Place - Suite 330, Boston, MA 02111-1307 USA.
   *
   * Copyright (C) 2006-2008 Intel Corporation
   * Author: Ashok Raj <ashok.raj@intel.com>
   * Author: Shaohua Li <shaohua.li@intel.com>
   * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
   * Author: Fenghua Yu <fenghua.yu@intel.com>
   */
  
  #include <linux/init.h>
  #include <linux/bitmap.h>
  #include <linux/debugfs.h>
  #include <linux/slab.h>
  #include <linux/irq.h>
  #include <linux/interrupt.h>
  #include <linux/spinlock.h>
  #include <linux/pci.h>
  #include <linux/dmar.h>
  #include <linux/dma-mapping.h>
  #include <linux/mempool.h>
  #include <linux/timer.h>
  #include <linux/iova.h>
  #include <linux/iommu.h>
  #include <linux/intel-iommu.h>
  #include <asm/cacheflush.h>
  #include <asm/iommu.h>
  #include "pci.h"
  #define ROOT_SIZE		VTD_PAGE_SIZE
  #define CONTEXT_SIZE		VTD_PAGE_SIZE
  #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
  #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
  
  #define IOAPIC_RANGE_START	(0xfee00000)
  #define IOAPIC_RANGE_END	(0xfeefffff)
  #define IOVA_START_ADDR		(0x1000)
  
  #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
  #define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
  #define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
  #define DMA_32BIT_PFN		IOVA_PFN(DMA_32BIT_MASK)
  #define DMA_64BIT_PFN		IOVA_PFN(DMA_64BIT_MASK)

  /* global iommu list, set NULL for ignored DMAR units */
  static struct intel_iommu **g_iommus;
  static int rwbf_quirk;
  /*
   * 0: Present
   * 1-11: Reserved
   * 12-63: Context Ptr (12 - (haw-1))
   * 64-127: Reserved
   */
  struct root_entry {
  	u64	val;
  	u64	rsvd1;
  };
  #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
  static inline bool root_present(struct root_entry *root)
  {
  	return (root->val & 1);
  }
  static inline void set_root_present(struct root_entry *root)
  {
  	root->val |= 1;
  }
  static inline void set_root_value(struct root_entry *root, unsigned long value)
  {
  	root->val |= value & VTD_PAGE_MASK;
  }
  
  static inline struct context_entry *
  get_context_addr_from_root(struct root_entry *root)
  {
  	return (struct context_entry *)
  		(root_present(root)?phys_to_virt(
  		root->val & VTD_PAGE_MASK) :
  		NULL);
  }
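
/*
 * Illustration: the root table holds ROOT_ENTRY_NR (4096/16 = 256)
 * entries, one per PCI bus.  Each present root entry points to a
 * one-page context table indexed by devfn, so a (bus, devfn) pair
 * selects exactly one context entry.
 */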
  /*
   * low 64 bits:
   * 0: present
   * 1: fault processing disable
   * 2-3: translation type
   * 12-63: address space root
   * high 64 bits:
   * 0-2: address width
   * 3-6: aval
   * 8-23: domain id
   */
  struct context_entry {
  	u64 lo;
  	u64 hi;
  };
  
  static inline bool context_present(struct context_entry *context)
  {
  	return (context->lo & 1);
  }
  static inline void context_set_present(struct context_entry *context)
  {
  	context->lo |= 1;
  }
  
  static inline void context_set_fault_enable(struct context_entry *context)
  {
  	context->lo &= (((u64)-1) << 2) | 1;
  }
  #define CONTEXT_TT_MULTI_LEVEL 0
  
  static inline void context_set_translation_type(struct context_entry *context,
  						unsigned long value)
  {
  	context->lo &= (((u64)-1) << 4) | 3;
  	context->lo |= (value & 3) << 2;
  }
  
  static inline void context_set_address_root(struct context_entry *context,
  					    unsigned long value)
  {
  	context->lo |= value & VTD_PAGE_MASK;
  }
  
  static inline void context_set_address_width(struct context_entry *context,
  					     unsigned long value)
  {
  	context->hi |= value & 7;
  }
  
  static inline void context_set_domain_id(struct context_entry *context,
  					 unsigned long value)
  {
  	context->hi |= (value & ((1 << 16) - 1)) << 8;
  }
  
  static inline void context_clear_entry(struct context_entry *context)
  {
  	context->lo = 0;
  	context->hi = 0;
  }
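
/*
 * Example (for illustration): a context entry for domain 5 whose
 * 4-level page table lives at physical address P would be built with
 * context_set_domain_id(c, 5), context_set_address_width(c, 2),
 * context_set_address_root(c, P) and finally context_set_present(c),
 * mirroring the sequence used in domain_context_mapping_one() below.
 */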

  /*
   * 0: readable
   * 1: writable
   * 2-6: reserved
   * 7: super page
   * 8-11: available
 * 12-63: Host physical address
   */
  struct dma_pte {
  	u64 val;
  };

  static inline void dma_clear_pte(struct dma_pte *pte)
  {
  	pte->val = 0;
  }
  
  static inline void dma_set_pte_readable(struct dma_pte *pte)
  {
  	pte->val |= DMA_PTE_READ;
  }
  
  static inline void dma_set_pte_writable(struct dma_pte *pte)
  {
  	pte->val |= DMA_PTE_WRITE;
  }
  
  static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
  {
  	pte->val = (pte->val & ~3) | (prot & 3);
  }
  
  static inline u64 dma_pte_addr(struct dma_pte *pte)
  {
  	return (pte->val & VTD_PAGE_MASK);
  }
  
  static inline void dma_set_pte_addr(struct dma_pte *pte, u64 addr)
  {
  	pte->val |= (addr & VTD_PAGE_MASK);
  }
  
  static inline bool dma_pte_present(struct dma_pte *pte)
  {
  	return (pte->val & 3) != 0;
  }
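
/*
 * Illustration: a leaf PTE mapping host page P read/write ends up as
 * pte->val == (P & VTD_PAGE_MASK) | DMA_PTE_READ | DMA_PTE_WRITE,
 * i.e. the combined effect of dma_set_pte_addr() and
 * dma_set_pte_prot().
 */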

  /* devices under the same p2p bridge are owned in one domain */
  #define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)

/* domain represents a virtual machine; more than one device
 * across iommus may be owned by one domain, e.g. a kvm guest.
 */
  #define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)
  struct dmar_domain {
  	int	id;			/* domain id */
  	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses*/
  
  	struct list_head devices; 	/* all devices' list */
  	struct iova_domain iovad;	/* iova's that belong to this domain */
  
  	struct dma_pte	*pgd;		/* virtual address */
  	spinlock_t	mapping_lock;	/* page table lock */
  	int		gaw;		/* max guest address width */
  
  	/* adjusted guest address width, 0 is level 2 30-bit */
  	int		agaw;
  	int		flags;		/* flags to find out type of domain */
  
  	int		iommu_coherency;/* indicate coherency of iommu access */
  	int		iommu_count;	/* reference count of iommu */
  	spinlock_t	iommu_lock;	/* protect iommu set in domain */
  	u64		max_addr;	/* maximum mapped address */
  };
  /* PCI domain-device relationship */
  struct device_domain_info {
  	struct list_head link;	/* link to domain siblings */
  	struct list_head global; /* link to global list */
	u8 bus;			/* PCI bus number */
  	u8 devfn;		/* PCI devfn number */
  	struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
  	struct dmar_domain *domain; /* pointer to domain */
  };
  static void flush_unmaps_timeout(unsigned long data);
  
  DEFINE_TIMER(unmap_timer,  flush_unmaps_timeout, 0, 0);
  #define HIGH_WATER_MARK 250
  struct deferred_flush_tables {
  	int next;
  	struct iova *iova[HIGH_WATER_MARK];
  	struct dmar_domain *domain[HIGH_WATER_MARK];
  };
  
  static struct deferred_flush_tables *deferred_flush;
/* number of iommus; sizes the bitmaps used for indexing intel_iommus */
  static int g_num_of_iommus;
  
  static DEFINE_SPINLOCK(async_umap_flush_lock);
  static LIST_HEAD(unmaps_to_do);
  
  static int timer_on;
  static long list_size;

  static void domain_remove_dev_info(struct dmar_domain *domain);
  #ifdef CONFIG_DMAR_DEFAULT_ON
  int dmar_disabled = 0;
  #else
  int dmar_disabled = 1;
  #endif /*CONFIG_DMAR_DEFAULT_ON*/
  static int __initdata dmar_map_gfx = 1;
  static int dmar_forcedac;
  static int intel_iommu_strict;
  
  #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
  static DEFINE_SPINLOCK(device_domain_lock);
  static LIST_HEAD(device_domain_list);
  static struct iommu_ops intel_iommu_ops;
  static int __init intel_iommu_setup(char *str)
  {
  	if (!str)
  		return -EINVAL;
  	while (*str) {
		if (!strncmp(str, "on", 2)) {
			dmar_disabled = 0;
			printk(KERN_INFO "Intel-IOMMU: enabled\n");
		} else if (!strncmp(str, "off", 3)) {
			dmar_disabled = 1;
			printk(KERN_INFO "Intel-IOMMU: disabled\n");
		} else if (!strncmp(str, "igfx_off", 8)) {
			dmar_map_gfx = 0;
			printk(KERN_INFO
				"Intel-IOMMU: disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
			printk(KERN_INFO
				"Intel-IOMMU: Forcing DAC for PCI devices\n");
			dmar_forcedac = 1;
		} else if (!strncmp(str, "strict", 6)) {
			printk(KERN_INFO
				"Intel-IOMMU: disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
  		}
  
  		str += strcspn(str, ",");
  		while (*str == ',')
  			str++;
  	}
  	return 0;
  }
  __setup("intel_iommu=", intel_iommu_setup);
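
/*
 * Example (for illustration): options may be combined on the kernel
 * command line, separated by commas, e.g.
 *
 *	intel_iommu=on,strict
 *
 * enables the IOMMU and disables the batched IOTLB flush.
 */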
  
  static struct kmem_cache *iommu_domain_cache;
  static struct kmem_cache *iommu_devinfo_cache;
  static struct kmem_cache *iommu_iova_cache;
  static inline void *iommu_kmem_cache_alloc(struct kmem_cache *cachep)
  {
  	unsigned int flags;
  	void *vaddr;
  
	/*
	 * Trying to avoid low memory issues: set PF_MEMALLOC around the
	 * allocation, then restore the caller's original state.  The mask
	 * below keeps all bits when @flags had PF_MEMALLOC set, and clears
	 * only PF_MEMALLOC when it didn't.
	 */
	flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	vaddr = kmem_cache_alloc(cachep, GFP_ATOMIC);
	current->flags &= (~PF_MEMALLOC | flags);
  	return vaddr;
  }
  static inline void *alloc_pgtable_page(void)
  {
  	unsigned int flags;
  	void *vaddr;
  
  	/* trying to avoid low memory issues */
  	flags = current->flags & PF_MEMALLOC;
  	current->flags |= PF_MEMALLOC;
  	vaddr = (void *)get_zeroed_page(GFP_ATOMIC);
  	current->flags &= (~PF_MEMALLOC | flags);
  	return vaddr;
  }
  
  static inline void free_pgtable_page(void *vaddr)
  {
  	free_page((unsigned long)vaddr);
  }
  
  static inline void *alloc_domain_mem(void)
  {
  	return iommu_kmem_cache_alloc(iommu_domain_cache);
  }
  static void free_domain_mem(void *vaddr)
  {
  	kmem_cache_free(iommu_domain_cache, vaddr);
  }
  
  static inline void * alloc_devinfo_mem(void)
  {
  	return iommu_kmem_cache_alloc(iommu_devinfo_cache);
  }
  
  static inline void free_devinfo_mem(void *vaddr)
  {
  	kmem_cache_free(iommu_devinfo_cache, vaddr);
  }
  
  struct iova *alloc_iova_mem(void)
  {
  	return iommu_kmem_cache_alloc(iommu_iova_cache);
  }
  
  void free_iova_mem(struct iova *iova)
  {
  	kmem_cache_free(iommu_iova_cache, iova);
  }
  
  static inline int width_to_agaw(int width);
  
  /* calculate agaw for each iommu.
   * "SAGAW" may be different across iommus, use a default agaw, and
   * get a supported less agaw for iommus that don't support the default agaw.
   */
  int iommu_calculate_agaw(struct intel_iommu *iommu)
  {
  	unsigned long sagaw;
  	int agaw = -1;
  
  	sagaw = cap_sagaw(iommu->cap);
  	for (agaw = width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH);
  	     agaw >= 0; agaw--) {
  		if (test_bit(agaw, &sagaw))
  			break;
  	}
  
  	return agaw;
  }
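
/*
 * Worked example (for illustration): DEFAULT_DOMAIN_ADDRESS_WIDTH (48)
 * maps to agaw 2.  If bit 2 of SAGAW is clear, the loop falls back to
 * agaw 1 (39-bit, 3-level), and returns -1 only if no smaller agaw is
 * supported either.
 */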
  /* in native case, each domain is related to only one iommu */
  static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
  {
  	int iommu_id;
  	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
  	iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
  	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
  		return NULL;
  
  	return g_iommus[iommu_id];
  }
  /* "Coherency" capability may be different across iommus */
  static void domain_update_iommu_coherency(struct dmar_domain *domain)
  {
  	int i;
  
  	domain->iommu_coherency = 1;
  
  	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
  	for (; i < g_num_of_iommus; ) {
  		if (!ecap_coherent(g_iommus[i]->ecap)) {
  			domain->iommu_coherency = 0;
  			break;
  		}
  		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
  	}
  }
  static struct intel_iommu *device_to_iommu(u8 bus, u8 devfn)
  {
  	struct dmar_drhd_unit *drhd = NULL;
  	int i;
  
  	for_each_drhd_unit(drhd) {
  		if (drhd->ignored)
  			continue;
  
  		for (i = 0; i < drhd->devices_cnt; i++)
  			if (drhd->devices[i] &&
  			    drhd->devices[i]->bus->number == bus &&
  			    drhd->devices[i]->devfn == devfn)
  				return drhd->iommu;
  
  		if (drhd->include_all)
  			return drhd->iommu;
  	}
  
  	return NULL;
  }
  static void domain_flush_cache(struct dmar_domain *domain,
  			       void *addr, int size)
  {
  	if (!domain->iommu_coherency)
  		clflush_cache_range(addr, size);
  }
  /* Gets context entry for a given bus and devfn */
  static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
  		u8 bus, u8 devfn)
  {
  	struct root_entry *root;
  	struct context_entry *context;
  	unsigned long phy_addr;
  	unsigned long flags;
  
  	spin_lock_irqsave(&iommu->lock, flags);
  	root = &iommu->root_entry[bus];
  	context = get_context_addr_from_root(root);
  	if (!context) {
  		context = (struct context_entry *)alloc_pgtable_page();
  		if (!context) {
  			spin_unlock_irqrestore(&iommu->lock, flags);
  			return NULL;
  		}
  		__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
  		phy_addr = virt_to_phys((void *)context);
  		set_root_value(root, phy_addr);
  		set_root_present(root);
  		__iommu_flush_cache(iommu, root, sizeof(*root));
  	}
  	spin_unlock_irqrestore(&iommu->lock, flags);
  	return &context[devfn];
  }
  
  static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
  {
  	struct root_entry *root;
  	struct context_entry *context;
  	int ret;
  	unsigned long flags;
  
  	spin_lock_irqsave(&iommu->lock, flags);
  	root = &iommu->root_entry[bus];
  	context = get_context_addr_from_root(root);
  	if (!context) {
  		ret = 0;
  		goto out;
  	}
  	ret = context_present(&context[devfn]);
  out:
  	spin_unlock_irqrestore(&iommu->lock, flags);
  	return ret;
  }
  
  static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
  {
  	struct root_entry *root;
  	struct context_entry *context;
  	unsigned long flags;
  
  	spin_lock_irqsave(&iommu->lock, flags);
  	root = &iommu->root_entry[bus];
  	context = get_context_addr_from_root(root);
  	if (context) {
  		context_clear_entry(&context[devfn]);
		__iommu_flush_cache(iommu, &context[devfn],
			sizeof(*context));
  	}
  	spin_unlock_irqrestore(&iommu->lock, flags);
  }
  
  static void free_context_table(struct intel_iommu *iommu)
  {
  	struct root_entry *root;
  	int i;
  	unsigned long flags;
  	struct context_entry *context;
  
  	spin_lock_irqsave(&iommu->lock, flags);
	if (!iommu->root_entry)
		goto out;
  	for (i = 0; i < ROOT_ENTRY_NR; i++) {
  		root = &iommu->root_entry[i];
  		context = get_context_addr_from_root(root);
  		if (context)
  			free_pgtable_page(context);
  	}
  	free_pgtable_page(iommu->root_entry);
  	iommu->root_entry = NULL;
  out:
  	spin_unlock_irqrestore(&iommu->lock, flags);
  }
  
  /* page table handling */
  #define LEVEL_STRIDE		(9)
  #define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)
  
  static inline int agaw_to_level(int agaw)
  {
  	return agaw + 2;
  }
  
  static inline int agaw_to_width(int agaw)
  {
	return 30 + agaw * LEVEL_STRIDE;
}
  
  static inline int width_to_agaw(int width)
  {
  	return (width - 30) / LEVEL_STRIDE;
  }
  
  static inline unsigned int level_to_offset_bits(int level)
  {
  	return (12 + (level - 1) * LEVEL_STRIDE);
  }
  
  static inline int address_level_offset(u64 addr, int level)
  {
  	return ((addr >> level_to_offset_bits(level)) & LEVEL_MASK);
  }
  
  static inline u64 level_mask(int level)
  {
  	return ((u64)-1 << level_to_offset_bits(level));
  }
  
  static inline u64 level_size(int level)
  {
  	return ((u64)1 << level_to_offset_bits(level));
  }
  
  static inline u64 align_to_level(u64 addr, int level)
  {
  	return ((addr + level_size(level) - 1) & level_mask(level));
  }
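
/*
 * Worked example (for illustration): agaw 2 means a 4-level table
 * (agaw_to_level) spanning 48 bits (agaw_to_width).  Level 1 indexes
 * address bits 12-20, level 2 bits 21-29, level 3 bits 30-38 and
 * level 4 bits 39-47, nine (LEVEL_STRIDE) bits per level.
 */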
  
  static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
  {
  	int addr_width = agaw_to_width(domain->agaw);
  	struct dma_pte *parent, *pte = NULL;
  	int level = agaw_to_level(domain->agaw);
  	int offset;
  	unsigned long flags;
  
  	BUG_ON(!domain->pgd);
  
  	addr &= (((u64)1) << addr_width) - 1;
  	parent = domain->pgd;
  
  	spin_lock_irqsave(&domain->mapping_lock, flags);
  	while (level > 0) {
  		void *tmp_page;
  
  		offset = address_level_offset(addr, level);
  		pte = &parent[offset];
  		if (level == 1)
  			break;
  		if (!dma_pte_present(pte)) {
  			tmp_page = alloc_pgtable_page();
  
  			if (!tmp_page) {
  				spin_unlock_irqrestore(&domain->mapping_lock,
  					flags);
  				return NULL;
  			}
  			domain_flush_cache(domain, tmp_page, PAGE_SIZE);
  			dma_set_pte_addr(pte, virt_to_phys(tmp_page));
  			/*
  			 * high level table always sets r/w, last level page
  			 * table control read/write
  			 */
  			dma_set_pte_readable(pte);
  			dma_set_pte_writable(pte);
  			domain_flush_cache(domain, pte, sizeof(*pte));
  		}
  		parent = phys_to_virt(dma_pte_addr(pte));
  		level--;
  	}
  
  	spin_unlock_irqrestore(&domain->mapping_lock, flags);
  	return pte;
  }
  
  /* return address's pte at specific level */
  static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr,
  		int level)
  {
  	struct dma_pte *parent, *pte = NULL;
  	int total = agaw_to_level(domain->agaw);
  	int offset;
  
  	parent = domain->pgd;
  	while (level <= total) {
  		offset = address_level_offset(addr, total);
  		pte = &parent[offset];
  		if (level == total)
  			return pte;
  		if (!dma_pte_present(pte))
  			break;
  		parent = phys_to_virt(dma_pte_addr(pte));
  		total--;
  	}
  	return NULL;
  }
  
  /* clear one page's page table */
  static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr)
  {
  	struct dma_pte *pte = NULL;
  
  	/* get last level pte */
  	pte = dma_addr_level_pte(domain, addr, 1);
  
  	if (pte) {
  		dma_clear_pte(pte);
  		domain_flush_cache(domain, pte, sizeof(*pte));
  	}
  }
  
  /* clear last level pte, a tlb flush should be followed */
  static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end)
  {
  	int addr_width = agaw_to_width(domain->agaw);
  
  	start &= (((u64)1) << addr_width) - 1;
  	end &= (((u64)1) << addr_width) - 1;
	/* in case it's a partial page */
  	start = PAGE_ALIGN(start);
  	end &= PAGE_MASK;
  
  	/* we don't need lock here, nobody else touches the iova range */
  	while (start < end) {
  		dma_pte_clear_one(domain, start);
  		start += VTD_PAGE_SIZE;
  	}
  }
  
  /* free page table pages. last level pte should already be cleared */
  static void dma_pte_free_pagetable(struct dmar_domain *domain,
  	u64 start, u64 end)
  {
  	int addr_width = agaw_to_width(domain->agaw);
  	struct dma_pte *pte;
  	int total = agaw_to_level(domain->agaw);
  	int level;
  	u64 tmp;
  
  	start &= (((u64)1) << addr_width) - 1;
  	end &= (((u64)1) << addr_width) - 1;
  
  	/* we don't need lock here, nobody else touches the iova range */
  	level = 2;
  	while (level <= total) {
  		tmp = align_to_level(start, level);
  		if (tmp >= end || (tmp + level_size(level) > end))
  			return;
  
  		while (tmp < end) {
  			pte = dma_addr_level_pte(domain, tmp, level);
  			if (pte) {
  				free_pgtable_page(
  					phys_to_virt(dma_pte_addr(pte)));
  				dma_clear_pte(pte);
  				domain_flush_cache(domain, pte, sizeof(*pte));
  			}
  			tmp += level_size(level);
  		}
  		level++;
  	}
  	/* free pgd */
  	if (start == 0 && end >= ((((u64)1) << addr_width) - 1)) {
  		free_pgtable_page(domain->pgd);
  		domain->pgd = NULL;
  	}
  }
  
  /* iommu handling */
  static int iommu_alloc_root_entry(struct intel_iommu *iommu)
  {
  	struct root_entry *root;
  	unsigned long flags;
  
  	root = (struct root_entry *)alloc_pgtable_page();
  	if (!root)
  		return -ENOMEM;
  	__iommu_flush_cache(iommu, root, ROOT_SIZE);
  
  	spin_lock_irqsave(&iommu->lock, flags);
  	iommu->root_entry = root;
  	spin_unlock_irqrestore(&iommu->lock, flags);
  
  	return 0;
  }
  static void iommu_set_root_entry(struct intel_iommu *iommu)
  {
  	void *addr;
  	u32 cmd, sts;
  	unsigned long flag;
  
  	addr = iommu->root_entry;
  
  	spin_lock_irqsave(&iommu->register_lock, flag);
  	dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
  
  	cmd = iommu->gcmd | DMA_GCMD_SRTP;
  	writel(cmd, iommu->reg + DMAR_GCMD_REG);
  
  	/* Make sure hardware complete it */
  	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
  		readl, (sts & DMA_GSTS_RTPS), sts);
  
  	spin_unlock_irqrestore(&iommu->register_lock, flag);
  }
  
  static void iommu_flush_write_buffer(struct intel_iommu *iommu)
  {
  	u32 val;
  	unsigned long flag;
  	if (!rwbf_quirk && !cap_rwbf(iommu->cap))
  		return;
  	val = iommu->gcmd | DMA_GCMD_WBF;
  
  	spin_lock_irqsave(&iommu->register_lock, flag);
  	writel(val, iommu->reg + DMAR_GCMD_REG);
  
  	/* Make sure hardware complete it */
  	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
  			readl, (!(val & DMA_GSTS_WBFS)), val);
  
  	spin_unlock_irqrestore(&iommu->register_lock, flag);
  }
  
/* return value determines if we need a write buffer flush */
  static int __iommu_flush_context(struct intel_iommu *iommu,
  	u16 did, u16 source_id, u8 function_mask, u64 type,
  	int non_present_entry_flush)
  {
  	u64 val = 0;
  	unsigned long flag;
  
	/*
	 * In the non-present entry flush case: if the hardware doesn't
	 * cache non-present entries we do nothing; if it does, we flush
	 * the entries of domain 0 (the domain id used to tag cached
	 * non-present entries).
	 */
  	if (non_present_entry_flush) {
  		if (!cap_caching_mode(iommu->cap))
  			return 1;
  		else
  			did = 0;
  	}
  
  	switch (type) {
  	case DMA_CCMD_GLOBAL_INVL:
  		val = DMA_CCMD_GLOBAL_INVL;
  		break;
  	case DMA_CCMD_DOMAIN_INVL:
  		val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
  		break;
  	case DMA_CCMD_DEVICE_INVL:
  		val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
  			| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
  		break;
  	default:
  		BUG();
  	}
  	val |= DMA_CCMD_ICC;
  
  	spin_lock_irqsave(&iommu->register_lock, flag);
  	dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
  
  	/* Make sure hardware complete it */
  	IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
  		dmar_readq, (!(val & DMA_CCMD_ICC)), val);
  
  	spin_unlock_irqrestore(&iommu->register_lock, flag);
  	/* flush context entry will implicitly flush write buffer */
  	return 0;
  }
/* return value determines if we need a write buffer flush */
  static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
  	u64 addr, unsigned int size_order, u64 type,
  	int non_present_entry_flush)
  {
  	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
  	u64 val = 0, val_iva = 0;
  	unsigned long flag;
  
	/*
	 * In the non-present entry flush case: if the hardware doesn't
	 * cache non-present entries we do nothing; if it does, we flush
	 * the entries of domain 0 (the domain id used to tag cached
	 * non-present entries).
	 */
  	if (non_present_entry_flush) {
  		if (!cap_caching_mode(iommu->cap))
  			return 1;
  		else
  			did = 0;
  	}
  
  	switch (type) {
  	case DMA_TLB_GLOBAL_FLUSH:
  		/* global flush doesn't need set IVA_REG */
  		val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
  		break;
  	case DMA_TLB_DSI_FLUSH:
  		val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
  		break;
  	case DMA_TLB_PSI_FLUSH:
  		val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
  		/* Note: always flush non-leaf currently */
  		val_iva = size_order | addr;
  		break;
  	default:
  		BUG();
  	}
  	/* Note: set drain read/write */
  #if 0
  	/*
  	 * This is probably to be super secure.. Looks like we can
  	 * ignore it without any impact.
  	 */
  	if (cap_read_drain(iommu->cap))
  		val |= DMA_TLB_READ_DRAIN;
  #endif
  	if (cap_write_drain(iommu->cap))
  		val |= DMA_TLB_WRITE_DRAIN;
  
  	spin_lock_irqsave(&iommu->register_lock, flag);
  	/* Note: Only uses first TLB reg currently */
  	if (val_iva)
  		dmar_writeq(iommu->reg + tlb_offset, val_iva);
  	dmar_writeq(iommu->reg + tlb_offset + 8, val);
  
  	/* Make sure hardware complete it */
  	IOMMU_WAIT_OP(iommu, tlb_offset + 8,
  		dmar_readq, (!(val & DMA_TLB_IVT)), val);
  
  	spin_unlock_irqrestore(&iommu->register_lock, flag);
  
  	/* check IOTLB invalidation granularity */
  	if (DMA_TLB_IAIG(val) == 0)
  		printk(KERN_ERR"IOMMU: flush IOTLB failed
  ");
  	if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
  		pr_debug("IOMMU: tlb flush request %Lx, actual %Lx
  ",
5b6985ce8   Fenghua Yu   intel-iommu: IA64...
891
892
  			(unsigned long long)DMA_TLB_IIRG(type),
  			(unsigned long long)DMA_TLB_IAIG(val));
  	/* flush iotlb entry will implicitly flush write buffer */
  	return 0;
  }
  static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
  	u64 addr, unsigned int pages, int non_present_entry_flush)
  {
  	unsigned int mask;

  	BUG_ON(addr & (~VTD_PAGE_MASK));
  	BUG_ON(pages == 0);
  
  	/* Fallback to domain selective flush if no PSI support */
  	if (!cap_pgsel_inv(iommu->cap))
  		return iommu->flush.flush_iotlb(iommu, did, 0, 0,
  						DMA_TLB_DSI_FLUSH,
  						non_present_entry_flush);
  
  	/*
  	 * PSI requires page size to be 2 ^ x, and the base address is naturally
  	 * aligned to the size
  	 */
  	mask = ilog2(__roundup_pow_of_two(pages));
  	/* Fallback to domain selective flush if size is too big */
  	if (mask > cap_max_amask_val(iommu->cap))
  		return iommu->flush.flush_iotlb(iommu, did, 0, 0,
  			DMA_TLB_DSI_FLUSH, non_present_entry_flush);

  	return iommu->flush.flush_iotlb(iommu, did, addr, mask,
  					DMA_TLB_PSI_FLUSH,
  					non_present_entry_flush);
  }
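
/*
 * Worked example (for illustration): flushing 5 pages rounds up to 8,
 * so mask = ilog2(8) = 3 and the PSI covers the naturally aligned
 * 8-page region containing addr.
 */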
  static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
  {
  	u32 pmen;
  	unsigned long flags;
  
  	spin_lock_irqsave(&iommu->register_lock, flags);
  	pmen = readl(iommu->reg + DMAR_PMEN_REG);
  	pmen &= ~DMA_PMEN_EPM;
  	writel(pmen, iommu->reg + DMAR_PMEN_REG);
  
  	/* wait for the protected region status bit to clear */
  	IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
  		readl, !(pmen & DMA_PMEN_PRS), pmen);
  
  	spin_unlock_irqrestore(&iommu->register_lock, flags);
  }
  static int iommu_enable_translation(struct intel_iommu *iommu)
  {
  	u32 sts;
  	unsigned long flags;
  
  	spin_lock_irqsave(&iommu->register_lock, flags);
  	writel(iommu->gcmd|DMA_GCMD_TE, iommu->reg + DMAR_GCMD_REG);
  
  	/* Make sure hardware complete it */
  	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
  		readl, (sts & DMA_GSTS_TES), sts);
  
  	iommu->gcmd |= DMA_GCMD_TE;
  	spin_unlock_irqrestore(&iommu->register_lock, flags);
  	return 0;
  }
  
  static int iommu_disable_translation(struct intel_iommu *iommu)
  {
  	u32 sts;
  	unsigned long flag;
  
  	spin_lock_irqsave(&iommu->register_lock, flag);
  	iommu->gcmd &= ~DMA_GCMD_TE;
  	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
  
  	/* Make sure hardware complete it */
  	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
  		readl, (!(sts & DMA_GSTS_TES)), sts);
  
  	spin_unlock_irqrestore(&iommu->register_lock, flag);
  	return 0;
  }

  static int iommu_init_domains(struct intel_iommu *iommu)
  {
  	unsigned long ndomains;
  	unsigned long nlongs;
  
  	ndomains = cap_ndoms(iommu->cap);
  	pr_debug("Number of Domains supportd <%ld>
  ", ndomains);
  	nlongs = BITS_TO_LONGS(ndomains);
  
  	/* TBD: there might be 64K domains,
  	 * consider other allocation for future chip
  	 */
  	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
  	if (!iommu->domain_ids) {
  		printk(KERN_ERR "Allocating domain id array failed
  ");
  		return -ENOMEM;
  	}
  	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
  			GFP_KERNEL);
  	if (!iommu->domains) {
  		printk(KERN_ERR "Allocating domain array failed
  ");
  		kfree(iommu->domain_ids);
  		return -ENOMEM;
  	}
  	spin_lock_init(&iommu->lock);
  	/*
  	 * if Caching mode is set, then invalid translations are tagged
  	 * with domainid 0. Hence we need to pre-allocate it.
  	 */
  	if (cap_caching_mode(iommu->cap))
  		set_bit(0, iommu->domain_ids);
  	return 0;
  }

  
  static void domain_exit(struct dmar_domain *domain);
  static void vm_domain_exit(struct dmar_domain *domain);
  
  void free_dmar_iommu(struct intel_iommu *iommu)
  {
  	struct dmar_domain *domain;
  	int i;
  	unsigned long flags;

  	i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
  	for (; i < cap_ndoms(iommu->cap); ) {
  		domain = iommu->domains[i];
  		clear_bit(i, iommu->domain_ids);
  
  		spin_lock_irqsave(&domain->iommu_lock, flags);
  		if (--domain->iommu_count == 0) {
  			if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
  				vm_domain_exit(domain);
  			else
  				domain_exit(domain);
  		}
  		spin_unlock_irqrestore(&domain->iommu_lock, flags);
  		i = find_next_bit(iommu->domain_ids,
  			cap_ndoms(iommu->cap), i+1);
  	}
  
  	if (iommu->gcmd & DMA_GCMD_TE)
  		iommu_disable_translation(iommu);
  
  	if (iommu->irq) {
  		set_irq_data(iommu->irq, NULL);
  		/* This will mask the irq */
  		free_irq(iommu->irq, iommu);
  		destroy_irq(iommu->irq);
  	}
  
  	kfree(iommu->domains);
  	kfree(iommu->domain_ids);
  	g_iommus[iommu->seq_id] = NULL;
  
  	/* if all iommus are freed, free g_iommus */
  	for (i = 0; i < g_num_of_iommus; i++) {
  		if (g_iommus[i])
  			break;
  	}
  
  	if (i == g_num_of_iommus)
  		kfree(g_iommus);
  	/* free context mapping */
  	free_context_table(iommu);
  }
  
  static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu)
  {
  	unsigned long num;
  	unsigned long ndomains;
  	struct dmar_domain *domain;
  	unsigned long flags;
  
  	domain = alloc_domain_mem();
  	if (!domain)
  		return NULL;
  
  	ndomains = cap_ndoms(iommu->cap);
  
  	spin_lock_irqsave(&iommu->lock, flags);
  	num = find_first_zero_bit(iommu->domain_ids, ndomains);
  	if (num >= ndomains) {
  		spin_unlock_irqrestore(&iommu->lock, flags);
  		free_domain_mem(domain);
  		printk(KERN_ERR "IOMMU: no free domain ids
  ");
  		return NULL;
  	}
  
  	set_bit(num, iommu->domain_ids);
  	domain->id = num;
  	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
  	set_bit(iommu->seq_id, &domain->iommu_bmp);
  	domain->flags = 0;
  	iommu->domains[num] = domain;
  	spin_unlock_irqrestore(&iommu->lock, flags);
  
  	return domain;
  }
  
  static void iommu_free_domain(struct dmar_domain *domain)
  {
  	unsigned long flags;
  	struct intel_iommu *iommu;
  
  	iommu = domain_get_iommu(domain);

  	spin_lock_irqsave(&iommu->lock, flags);
  	clear_bit(domain->id, iommu->domain_ids);
  	spin_unlock_irqrestore(&iommu->lock, flags);
  }
  
  static struct iova_domain reserved_iova_list;
  static struct lock_class_key reserved_alloc_key;
  static struct lock_class_key reserved_rbtree_key;
  
  static void dmar_init_reserved_ranges(void)
  {
  	struct pci_dev *pdev = NULL;
  	struct iova *iova;
  	int i;
  	u64 addr, size;
  	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);

  	lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
  		&reserved_alloc_key);
  	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
  		&reserved_rbtree_key);
  	/* IOAPIC ranges shouldn't be accessed by DMA */
  	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
  		IOVA_PFN(IOAPIC_RANGE_END));
  	if (!iova)
  		printk(KERN_ERR "Reserve IOAPIC range failed
  ");
  
  	/* Reserve all PCI MMIO to avoid peer-to-peer access */
  	for_each_pci_dev(pdev) {
  		struct resource *r;
  
  		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
  			r = &pdev->resource[i];
  			if (!r->flags || !(r->flags & IORESOURCE_MEM))
  				continue;
  			addr = r->start;
  			addr &= PAGE_MASK;
  			size = r->end - addr;
  			size = PAGE_ALIGN(size);
  			iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
  				IOVA_PFN(size + addr) - 1);
  			if (!iova)
  				printk(KERN_ERR "Reserve iova failed
  ");
  		}
	}
}
  
  static void domain_reserve_special_ranges(struct dmar_domain *domain)
  {
  	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
  }
  
  static inline int guestwidth_to_adjustwidth(int gaw)
  {
  	int agaw;
  	int r = (gaw - 12) % 9;
  
  	if (r == 0)
  		agaw = gaw;
  	else
  		agaw = gaw + 9 - r;
  	if (agaw > 64)
  		agaw = 64;
  	return agaw;
  }
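
/*
 * Worked example (for illustration): gaw 39 gives r = (39 - 12) % 9
 * = 0, so agaw stays 39; gaw 40 gives r = 1, so agaw = 40 + 9 - 1
 * = 48, rounding the width up to the next page-table level boundary.
 */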
  
  static int domain_init(struct dmar_domain *domain, int guest_width)
  {
  	struct intel_iommu *iommu;
  	int adjust_width, agaw;
  	unsigned long sagaw;
  	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
  	spin_lock_init(&domain->mapping_lock);
  	spin_lock_init(&domain->iommu_lock);
  
  	domain_reserve_special_ranges(domain);
  
  	/* calculate AGAW */
  	iommu = domain_get_iommu(domain);
  	if (guest_width > cap_mgaw(iommu->cap))
  		guest_width = cap_mgaw(iommu->cap);
  	domain->gaw = guest_width;
  	adjust_width = guestwidth_to_adjustwidth(guest_width);
  	agaw = width_to_agaw(adjust_width);
  	sagaw = cap_sagaw(iommu->cap);
  	if (!test_bit(agaw, &sagaw)) {
  		/* hardware doesn't support it, choose a bigger one */
  		pr_debug("IOMMU: hardware doesn't support agaw %d
  ", agaw);
  		agaw = find_next_bit(&sagaw, 5, agaw);
  		if (agaw >= 5)
  			return -ENODEV;
  	}
  	domain->agaw = agaw;
  	INIT_LIST_HEAD(&domain->devices);
  	if (ecap_coherent(iommu->ecap))
  		domain->iommu_coherency = 1;
  	else
  		domain->iommu_coherency = 0;
  	domain->iommu_count = 1;
  	/* always allocate the top pgd */
  	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
  	if (!domain->pgd)
  		return -ENOMEM;
  	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
  	return 0;
  }
  
  static void domain_exit(struct dmar_domain *domain)
  {
  	u64 end;
  
	/* Domain 0 is reserved, so don't process it */
  	if (!domain)
  		return;
  
  	domain_remove_dev_info(domain);
  	/* destroy iovas */
  	put_iova_domain(&domain->iovad);
  	end = DOMAIN_MAX_ADDR(domain->gaw);
  	end = end & (~PAGE_MASK);
  
  	/* clear ptes */
  	dma_pte_clear_range(domain, 0, end);
  
  	/* free page tables */
  	dma_pte_free_pagetable(domain, 0, end);
  
  	iommu_free_domain(domain);
  	free_domain_mem(domain);
  }
  
  static int domain_context_mapping_one(struct dmar_domain *domain,
  		u8 bus, u8 devfn)
  {
  	struct context_entry *context;
  	unsigned long flags;
  	struct intel_iommu *iommu;
  	struct dma_pte *pgd;
  	unsigned long num;
  	unsigned long ndomains;
  	int id;
  	int agaw;
  
  	pr_debug("Set context mapping for %02x:%02x.%d
  ",
  		bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
  	BUG_ON(!domain->pgd);
  
  	iommu = device_to_iommu(bus, devfn);
  	if (!iommu)
  		return -ENODEV;
  	context = device_to_context_entry(iommu, bus, devfn);
  	if (!context)
  		return -ENOMEM;
  	spin_lock_irqsave(&iommu->lock, flags);
  	if (context_present(context)) {
  		spin_unlock_irqrestore(&iommu->lock, flags);
  		return 0;
  	}
  	id = domain->id;
  	pgd = domain->pgd;
  
  	if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) {
  		int found = 0;
  
  		/* find an available domain id for this device in iommu */
  		ndomains = cap_ndoms(iommu->cap);
  		num = find_first_bit(iommu->domain_ids, ndomains);
  		for (; num < ndomains; ) {
  			if (iommu->domains[num] == domain) {
  				id = num;
  				found = 1;
  				break;
  			}
  			num = find_next_bit(iommu->domain_ids,
  					    cap_ndoms(iommu->cap), num+1);
  		}
  
  		if (found == 0) {
  			num = find_first_zero_bit(iommu->domain_ids, ndomains);
  			if (num >= ndomains) {
  				spin_unlock_irqrestore(&iommu->lock, flags);
  				printk(KERN_ERR "IOMMU: no free domain ids
  ");
  				return -EFAULT;
  			}
  
  			set_bit(num, iommu->domain_ids);
  			iommu->domains[num] = domain;
  			id = num;
  		}
  
  		/* Skip top levels of page tables for
  		 * iommu which has less agaw than default.
  		 */
  		for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
  			pgd = phys_to_virt(dma_pte_addr(pgd));
  			if (!dma_pte_present(pgd)) {
  				spin_unlock_irqrestore(&iommu->lock, flags);
  				return -ENOMEM;
  			}
  		}
  	}
  
  	context_set_domain_id(context, id);
  	context_set_address_width(context, iommu->agaw);
  	context_set_address_root(context, virt_to_phys(pgd));
  	context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
  	context_set_fault_enable(context);
  	context_set_present(context);
  	domain_flush_cache(domain, context, sizeof(*context));
  
  	/* it's a non-present to present mapping */
  	if (iommu->flush.flush_context(iommu, domain->id,
  		(((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT,
  		DMA_CCMD_DEVICE_INVL, 1))
  		iommu_flush_write_buffer(iommu);
  	else
  		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0);
  	spin_unlock_irqrestore(&iommu->lock, flags);
  
  	spin_lock_irqsave(&domain->iommu_lock, flags);
  	if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
  		domain->iommu_count++;
  		domain_update_iommu_coherency(domain);
  	}
  	spin_unlock_irqrestore(&domain->iommu_lock, flags);
  	return 0;
  }
  
  static int
  domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
  {
  	int ret;
  	struct pci_dev *tmp, *parent;
  
  	ret = domain_context_mapping_one(domain, pdev->bus->number,
  		pdev->devfn);
  	if (ret)
  		return ret;
  
  	/* dependent device mapping */
  	tmp = pci_find_upstream_pcie_bridge(pdev);
  	if (!tmp)
  		return 0;
  	/* Secondary interface's bus number and devfn 0 */
  	parent = pdev->bus->self;
  	while (parent != tmp) {
  		ret = domain_context_mapping_one(domain, parent->bus->number,
  			parent->devfn);
  		if (ret)
  			return ret;
  		parent = parent->bus->self;
  	}
  	if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
  		return domain_context_mapping_one(domain,
  			tmp->subordinate->number, 0);
  	else /* this is a legacy PCI bridge */
  		return domain_context_mapping_one(domain,
  			tmp->bus->number, tmp->devfn);
  }
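
/*
 * Illustration: for a device behind a PCIe-to-PCI bridge, the walk
 * above sets up a context entry for the device, for every bridge up
 * to the topmost PCIe bridge, and finally for (secondary bus, devfn 0)
 * of that bridge, the IDs with which its transactions appear upstream.
 */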
  static int domain_context_mapped(struct pci_dev *pdev)
  {
  	int ret;
  	struct pci_dev *tmp, *parent;
  	struct intel_iommu *iommu;
  
  	iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
  	if (!iommu)
  		return -ENODEV;

  	ret = device_context_mapped(iommu,
  		pdev->bus->number, pdev->devfn);
  	if (!ret)
  		return ret;
  	/* dependent device mapping */
  	tmp = pci_find_upstream_pcie_bridge(pdev);
  	if (!tmp)
  		return ret;
  	/* Secondary interface's bus number and devfn 0 */
  	parent = pdev->bus->self;
  	while (parent != tmp) {
  		ret = device_context_mapped(iommu, parent->bus->number,
  			parent->devfn);
  		if (!ret)
  			return ret;
  		parent = parent->bus->self;
  	}
  	if (tmp->is_pcie)
  		return device_context_mapped(iommu,
  			tmp->subordinate->number, 0);
  	else
  		return device_context_mapped(iommu,
  			tmp->bus->number, tmp->devfn);
  }
  
  static int
  domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
  			u64 hpa, size_t size, int prot)
  {
  	u64 start_pfn, end_pfn;
  	struct dma_pte *pte;
  	int index;
  	int addr_width = agaw_to_width(domain->agaw);
  
  	hpa &= (((u64)1) << addr_width) - 1;
  
  	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
  		return -EINVAL;
  	iova &= PAGE_MASK;
  	start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT;
  	end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT;
  	index = 0;
  	while (start_pfn < end_pfn) {
5b6985ce8   Fenghua Yu   intel-iommu: IA64...
1423
  		pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index);
ba3959276   Keshavamurthy, Anil S   Intel IOMMU: Inte...
1424
1425
1426
1427
1428
  		if (!pte)
  			return -ENOMEM;
  		/* We don't need lock here, nobody else
  		 * touches the iova range
  		 */
19c239ce3   Mark McLoughlin   intel-iommu: triv...
1429
1430
1431
  		BUG_ON(dma_pte_addr(pte));
  		dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT);
  		dma_set_pte_prot(pte, prot);
5331fe6f5   Weidong Han   Add domain_flush_...
1432
  		domain_flush_cache(domain, pte, sizeof(*pte));
ba3959276   Keshavamurthy, Anil S   Intel IOMMU: Inte...
1433
1434
1435
1436
1437
  		start_pfn++;
  		index++;
  	}
  	return 0;
  }
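
/*
 * Clear the context entry for (bus, devfn) on @iommu and invalidate
 * the context cache and IOTLB globally so stale translations go away.
 */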
static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
	if (!iommu)
		return;

	clear_context_table(iommu, bus, devfn);
	iommu->flush.flush_context(iommu, 0, 0, 0,
					   DMA_CCMD_GLOBAL_INVL, 0);
	iommu->flush.flush_iotlb(iommu, 0, 0, 0,
					 DMA_TLB_GLOBAL_FLUSH, 0);
}
  
static void domain_remove_dev_info(struct dmar_domain *domain)
{
	struct device_domain_info *info;
	unsigned long flags;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&device_domain_lock, flags);
	while (!list_empty(&domain->devices)) {
		info = list_entry(domain->devices.next,
			struct device_domain_info, link);
		list_del(&info->link);
		list_del(&info->global);
		if (info->dev)
			info->dev->dev.archdata.iommu = NULL;
		spin_unlock_irqrestore(&device_domain_lock, flags);

		iommu = device_to_iommu(info->bus, info->devfn);
		iommu_detach_dev(iommu, info->bus, info->devfn);
		free_devinfo_mem(info);

		spin_lock_irqsave(&device_domain_lock, flags);
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
}

/*
 * find_domain
 * Note: we use struct pci_dev->dev.archdata.iommu to store the domain info.
 */
static struct dmar_domain *
find_domain(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	/* No lock here; we assume no domain exits in the normal case */
	info = pdev->dev.archdata.iommu;
	if (info)
		return info->domain;
	return NULL;
}

/* Return an initialized domain for the device; allocate one if necessary. */
  static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
  {
  	struct dmar_domain *domain, *found = NULL;
  	struct intel_iommu *iommu;
  	struct dmar_drhd_unit *drhd;
  	struct device_domain_info *info, *tmp;
  	struct pci_dev *dev_tmp;
  	unsigned long flags;
  	int bus = 0, devfn = 0;
  
  	domain = find_domain(pdev);
  	if (domain)
  		return domain;
  
  	dev_tmp = pci_find_upstream_pcie_bridge(pdev);
  	if (dev_tmp) {
  		if (dev_tmp->is_pcie) {
  			bus = dev_tmp->subordinate->number;
  			devfn = 0;
  		} else {
  			bus = dev_tmp->bus->number;
  			devfn = dev_tmp->devfn;
  		}
  		spin_lock_irqsave(&device_domain_lock, flags);
  		list_for_each_entry(info, &device_domain_list, global) {
  			if (info->bus == bus && info->devfn == devfn) {
  				found = info->domain;
  				break;
  			}
  		}
  		spin_unlock_irqrestore(&device_domain_lock, flags);
		/* pcie-pci bridge already has a domain, use it */
  		if (found) {
  			domain = found;
  			goto found_domain;
  		}
  	}
  
  	/* Allocate new domain for the device */
  	drhd = dmar_find_matched_drhd_unit(pdev);
  	if (!drhd) {
  		printk(KERN_ERR "IOMMU: can't find DMAR for device %s
  ",
  			pci_name(pdev));
  		return NULL;
  	}
  	iommu = drhd->iommu;
  
  	domain = iommu_alloc_domain(iommu);
  	if (!domain)
  		goto error;
  
  	if (domain_init(domain, gaw)) {
  		domain_exit(domain);
  		goto error;
  	}
  
  	/* register pcie-to-pci device */
  	if (dev_tmp) {
  		info = alloc_devinfo_mem();
  		if (!info) {
  			domain_exit(domain);
  			goto error;
  		}
  		info->bus = bus;
  		info->devfn = devfn;
  		info->dev = NULL;
  		info->domain = domain;
  		/* This domain is shared by devices under p2p bridge */
  		domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
  
		/* pcie-to-pci bridge already has a domain, use it */
  		found = NULL;
  		spin_lock_irqsave(&device_domain_lock, flags);
  		list_for_each_entry(tmp, &device_domain_list, global) {
  			if (tmp->bus == bus && tmp->devfn == devfn) {
  				found = tmp->domain;
  				break;
  			}
  		}
  		if (found) {
  			free_devinfo_mem(info);
  			domain_exit(domain);
  			domain = found;
  		} else {
  			list_add(&info->link, &domain->devices);
  			list_add(&info->global, &device_domain_list);
  		}
  		spin_unlock_irqrestore(&device_domain_lock, flags);
  	}
  
  found_domain:
  	info = alloc_devinfo_mem();
  	if (!info)
  		goto error;
  	info->bus = pdev->bus->number;
  	info->devfn = pdev->devfn;
  	info->dev = pdev;
  	info->domain = domain;
  	spin_lock_irqsave(&device_domain_lock, flags);
	/* somebody else may have beaten us to it */
  	found = find_domain(pdev);
  	if (found != NULL) {
  		spin_unlock_irqrestore(&device_domain_lock, flags);
  		if (found != domain) {
  			domain_exit(domain);
  			domain = found;
  		}
  		free_devinfo_mem(info);
  		return domain;
  	}
  	list_add(&info->link, &domain->devices);
  	list_add(&info->global, &device_domain_list);
  	pdev->dev.archdata.iommu = info;
  	spin_unlock_irqrestore(&device_domain_lock, flags);
  	return domain;
  error:
	/* recheck it here; someone else may have set it concurrently */
  	return find_domain(pdev);
  }
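
/*
 * Identity-map the physical range [start, end) for @pdev: reserve the
 * matching IOVA range, install 1:1 page-table entries, and set up the
 * context entry.  Used for RMRR regions and the workarounds below.
 */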
  static int iommu_prepare_identity_map(struct pci_dev *pdev,
  				      unsigned long long start,
  				      unsigned long long end)
{
	struct dmar_domain *domain;
	unsigned long size;
	unsigned long long base;
	int ret;

	printk(KERN_INFO
		"IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
		pci_name(pdev), start, end);
	/* page table init */
	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain)
		return -ENOMEM;

	/* The address might not be aligned */
	base = start & PAGE_MASK;
	size = end - base;
	size = PAGE_ALIGN(size);
	if (!reserve_iova(&domain->iovad, IOVA_PFN(base),
			IOVA_PFN(base + size) - 1)) {
		printk(KERN_ERR "IOMMU: reserve iova failed\n");
		ret = -ENOMEM;
		goto error;
	}

	pr_debug("Mapping reserved region %lx@%llx for %s\n",
		size, base, pci_name(pdev));
	/*
	 * The RMRR range might overlap physical memory;
	 * clear any existing mappings first.
	 */
	dma_pte_clear_range(domain, base, base + size);

	ret = domain_page_mapping(domain, base, base, size,
		DMA_PTE_READ|DMA_PTE_WRITE);
	if (ret)
		goto error;

	/* context entry init */
	ret = domain_context_mapping(domain, pdev);
	if (!ret)
		return 0;
error:
	domain_exit(domain);
	return ret;
}
  
  static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
  	struct pci_dev *pdev)
  {
	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
  		return 0;
  	return iommu_prepare_identity_map(pdev, rmrr->base_address,
  		rmrr->end_address + 1);
  }

#ifdef CONFIG_DMAR_GFX_WA
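/*
 * Graphics workaround: identity-map every usable RAM region (walked
 * per node via work_with_active_regions() below) for each graphics
 * device, so gfx DMA keeps working once translation is switched on.
 */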
  struct iommu_prepare_data {
  	struct pci_dev *pdev;
  	int ret;
  };
  
  static int __init iommu_prepare_work_fn(unsigned long start_pfn,
  					 unsigned long end_pfn, void *datax)
  {
  	struct iommu_prepare_data *data;
  
  	data = (struct iommu_prepare_data *)datax;
  
  	data->ret = iommu_prepare_identity_map(data->pdev,
  				start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
	return data->ret;
}
  
  static int __init iommu_prepare_with_active_regions(struct pci_dev *pdev)
  {
  	int nid;
  	struct iommu_prepare_data data;
  
  	data.pdev = pdev;
  	data.ret = 0;
  
  	for_each_online_node(nid) {
  		work_with_active_regions(nid, iommu_prepare_work_fn, &data);
  		if (data.ret)
  			return data.ret;
  	}
  	return data.ret;
  }

static void __init iommu_prepare_gfx_mapping(void)
{
	struct pci_dev *pdev = NULL;
  	int ret;
  
  	for_each_pci_dev(pdev) {
		if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO ||
				!IS_GFX_DEVICE(pdev))
			continue;
		printk(KERN_INFO "IOMMU: gfx device %s 1-1 mapping\n",
  			pci_name(pdev));
		ret = iommu_prepare_with_active_regions(pdev);
		if (ret)
			printk(KERN_ERR "IOMMU: mapping reserved region failed\n");
  	}
  }
  #else /* !CONFIG_DMAR_GFX_WA */
  static inline void iommu_prepare_gfx_mapping(void)
  {
  	return;
  }
  #endif
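
/*
 * Floppy workaround: identity-map the low 16MB for the ISA/LPC bridge
 * so that legacy floppy DMA still works with translation enabled.
 */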
  #ifdef CONFIG_DMAR_FLOPPY_WA
  static inline void iommu_prepare_isa(void)
  {
  	struct pci_dev *pdev;
  	int ret;
  
  	pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
  	if (!pdev)
  		return;
  
  	printk(KERN_INFO "IOMMU: Prepare 0-16M unity mapping for LPC
  ");
  	ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
  
  	if (ret)
		printk(KERN_ERR "IOMMU: Failed to create 0-16M identity map, "
			"floppy might not work\n");
  
  }
  #else
  static inline void iommu_prepare_isa(void)
  {
  	return;
  }
#endif /* !CONFIG_DMAR_FLOPPY_WA */
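
/*
 * One-time hardware bring-up.  For each DRHD unit: allocate domain
 * bookkeeping and a root entry, choose queued or register-based
 * invalidation, set up RMRR and workaround mappings, then enable
 * fault reporting and translation.
 */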
  static int __init init_dmars(void)
{
	struct dmar_drhd_unit *drhd;
	struct dmar_rmrr_unit *rmrr;
	struct pci_dev *pdev;
	struct intel_iommu *iommu;
	int i, ret;

	/*
	 * for each drhd
	 *    allocate root
	 *    initialize and program root entry to not present
	 * endfor
	 */
	for_each_drhd_unit(drhd) {
		g_num_of_iommus++;
		/*
		 * No lock needed: this is only incremented in the
		 * single-threaded kernel __init code path; all other
		 * accesses are read-only.
		 */
	}

	g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
			GFP_KERNEL);
	if (!g_iommus) {
		printk(KERN_ERR "Allocating global iommu array failed\n");
		ret = -ENOMEM;
		goto error;
	}

	deferred_flush = kzalloc(g_num_of_iommus *
		sizeof(struct deferred_flush_tables), GFP_KERNEL);
	if (!deferred_flush) {
		kfree(g_iommus);
		ret = -ENOMEM;
		goto error;
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;
		g_iommus[iommu->seq_id] = iommu;

		ret = iommu_init_domains(iommu);
		if (ret)
			goto error;

		/*
		 * TBD: we could share the same root & context tables
		 * among all IOMMUs; split this out later.
		 */
		ret = iommu_alloc_root_entry(iommu);
		if (ret) {
			printk(KERN_ERR "IOMMU: allocate root entry failed\n");
			goto error;
		}
	}

	/*
	 * Start from a sane IOMMU hardware state.
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;

		/*
		 * If queued invalidation was already initialized by us
		 * (for example, while enabling interrupt remapping), then
		 * things are already rolling from a sane state.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(-1, iommu);
		/*
		 * Disable queued invalidation if supported and already enabled
		 * before OS handover.
		 */
		dmar_disable_qi(iommu);
	}

	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;

		iommu = drhd->iommu;
		if (dmar_enable_qi(iommu)) {
			/*
			 * Queued invalidation is not enabled; fall back
			 * to register-based invalidation.
			 */
			iommu->flush.flush_context = __iommu_flush_context;
			iommu->flush.flush_iotlb = __iommu_flush_iotlb;
			printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
			       "invalidation\n",
			       (unsigned long long)drhd->reg_base_addr);
		} else {
			iommu->flush.flush_context = qi_flush_context;
			iommu->flush.flush_iotlb = qi_flush_iotlb;
			printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
			       "invalidation\n",
			       (unsigned long long)drhd->reg_base_addr);
		}
	}

	/*
	 * For each rmrr
	 *   for each dev attached to rmrr
	 *   do
	 *     locate drhd for dev, alloc domain for dev
	 *     allocate free domain
	 *     allocate page table entries for rmrr
	 *     if context not allocated for bus
	 *           allocate and init context
	 *           set present in root table for this bus
	 *     init context with domain, translation etc
	 *    endfor
	 * endfor
	 */
	for_each_rmrr_units(rmrr) {
		for (i = 0; i < rmrr->devices_cnt; i++) {
			pdev = rmrr->devices[i];
			/* some BIOSes list nonexistent devices in the DMAR table */
			if (!pdev)
				continue;
			ret = iommu_prepare_rmrr_dev(rmrr, pdev);
			if (ret)
				printk(KERN_ERR
				 "IOMMU: mapping reserved region failed\n");
		}
	}

	iommu_prepare_gfx_mapping();

	iommu_prepare_isa();

	/*
	 * for each drhd
	 *   enable fault log
	 *   global invalidate context cache
	 *   global invalidate iotlb
	 *   enable translation
	 */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;

		iommu_flush_write_buffer(iommu);

		ret = dmar_set_interrupt(iommu);
		if (ret)
			goto error;

		iommu_set_root_entry(iommu);

		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
					   0);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
					 0);
		iommu_disable_protect_mem_regions(iommu);

		ret = iommu_enable_translation(iommu);
		if (ret)
			goto error;
	}

	return 0;
error:
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		iommu = drhd->iommu;
		free_iommu(iommu);
	}
	kfree(g_iommus);
	return ret;
}
  
  static inline u64 aligned_size(u64 host_addr, size_t size)
  {
  	u64 addr;
	addr = (host_addr & (~PAGE_MASK)) + size;
	return PAGE_ALIGN(addr);
}

struct iova *
iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
{
	struct iova *piova;

	/* Make sure it's in range */
	end = min_t(u64, DOMAIN_MAX_ADDR(domain->gaw), end);
	if (!size || (IOVA_START_ADDR + size > end))
		return NULL;

	piova = alloc_iova(&domain->iovad,
			size >> PAGE_SHIFT, IOVA_PFN(end), 1);
	return piova;
}
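
/*
 * Allocate an IOVA range of @size bytes for @dev.  Unless the device
 * is 32-bit only (or dmar_forcedac is set), try below 4GB first so
 * that dual-address-cycle addressing can usually be avoided, and only
 * fall back to the full DMA mask if that fails.
 */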
static struct iova *
__intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
		   size_t size, u64 dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct iova *iova = NULL;

	if (dma_mask <= DMA_32BIT_MASK || dmar_forcedac)
		iova = iommu_alloc_iova(domain, size, dma_mask);
	else {
		/*
		 * First try to allocate an I/O virtual address within
		 * DMA_32BIT_MASK; if that fails, allocate from the
		 * higher range.
		 */
		iova = iommu_alloc_iova(domain, size, DMA_32BIT_MASK);
		if (!iova)
			iova = iommu_alloc_iova(domain, size, dma_mask);
	}

	if (!iova) {
		printk(KERN_ERR "Allocating iova for %s failed\n", pci_name(pdev));
		return NULL;
	}

	return iova;
}
  
static struct dmar_domain *
get_valid_domain_for_dev(struct pci_dev *pdev)
{
	struct dmar_domain *domain;
	int ret;

	domain = get_domain_for_dev(pdev,
			DEFAULT_DOMAIN_ADDRESS_WIDTH);
	if (!domain) {
		printk(KERN_ERR
			"Allocating domain for %s failed\n", pci_name(pdev));
		return NULL;
	}

	/* make sure context mapping is ok */
	if (unlikely(!domain_context_mapped(pdev))) {
		ret = domain_context_mapping(domain, pdev);
		if (ret) {
			printk(KERN_ERR
				"Domain context map for %s failed\n",
				pci_name(pdev));
			return NULL;
		}
	}

	return domain;
}
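
/*
 * Core mapping routine for one physically contiguous buffer: find (or
 * create) the device's domain, allocate an IOVA range, derive the PTE
 * protection bits from the DMA direction, install the page-table
 * entries, and do the flush required for a non-present-to-present
 * mapping change.
 */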
static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
				     size_t size, int dir, u64 dma_mask)
{
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	phys_addr_t start_paddr;
	struct iova *iova;
	int prot = 0;
	int ret;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);
	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return paddr;

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);
	size = aligned_size((u64)paddr, size);

	iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
	if (!iova)
		goto error;

	start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;

	/*
	 * Check if DMAR supports zero-length reads on write-only
	 * mappings.
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;
	/*
	 * paddr..(paddr + size) might span a partial page; we map the
	 * whole page.  Note: if two parts of one page are mapped
	 * separately, we might have two guest_addr mappings to the same
	 * host paddr, but this is not a big problem.
	 */
	ret = domain_page_mapping(domain, start_paddr,
		((u64)paddr) & PAGE_MASK, size, prot);
	if (ret)
		goto error;

	/* it's a non-present to present mapping */
	ret = iommu_flush_iotlb_psi(iommu, domain->id,
			start_paddr, size >> VTD_PAGE_SHIFT, 1);
	if (ret)
		iommu_flush_write_buffer(iommu);

	return start_paddr + ((u64)paddr & (~PAGE_MASK));

error:
	if (iova)
		__free_iova(&domain->iovad, iova);
	printk(KERN_ERR "Device %s request: %lx@%llx dir %d --- failed\n",
		pci_name(pdev), size, (unsigned long long)paddr, dir);
	return 0;
}

static dma_addr_t intel_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	return __intel_map_single(dev, page_to_phys(page) + offset, size,
				  dir, to_pci_dev(dev)->dma_mask);
}
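
/*
 * Deferred-unmap machinery: rather than flushing the IOTLB on every
 * unmap, freed IOVAs are queued per IOMMU in deferred_flush[] and
 * released in batches under a single global flush, driven by a 10ms
 * timer or by the HIGH_WATER_MARK check in add_unmap().
 */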
static void flush_unmaps(void)
{
	int i, j;

	timer_on = 0;

	/* just flush them all */
	for (i = 0; i < g_num_of_iommus; i++) {
		struct intel_iommu *iommu = g_iommus[i];
		if (!iommu)
			continue;

		if (deferred_flush[i].next) {
			iommu->flush.flush_iotlb(iommu, 0, 0, 0,
						 DMA_TLB_GLOBAL_FLUSH, 0);
			for (j = 0; j < deferred_flush[i].next; j++) {
				__free_iova(&deferred_flush[i].domain[j]->iovad,
						deferred_flush[i].iova[j]);
			}
			deferred_flush[i].next = 0;
		}
	}

	list_size = 0;
}

static void flush_unmaps_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	flush_unmaps();
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}

static void add_unmap(struct dmar_domain *dom, struct iova *iova)
{
	unsigned long flags;
	int next, iommu_id;
	struct intel_iommu *iommu;

	spin_lock_irqsave(&async_umap_flush_lock, flags);
	if (list_size == HIGH_WATER_MARK)
		flush_unmaps();

	iommu = domain_get_iommu(dom);
	iommu_id = iommu->seq_id;

	next = deferred_flush[iommu_id].next;
	deferred_flush[iommu_id].domain[next] = dom;
	deferred_flush[iommu_id].iova[next] = iova;
	deferred_flush[iommu_id].next++;

	if (!timer_on) {
		mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
		timer_on = 1;
	}
	list_size++;
	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}
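
/*
 * Unmap path: find the IOVA backing dev_addr, clear its PTEs and free
 * the page tables, then either flush the IOTLB synchronously
 * (intel_iommu_strict) or queue the IOVA for the batched flush above.
 */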
static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct dmar_domain *domain;
	unsigned long start_addr;
	struct iova *iova;
	struct intel_iommu *iommu;

	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return;
	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
	if (!iova)
		return;

	start_addr = iova->pfn_lo << PAGE_SHIFT;
	size = aligned_size((u64)dev_addr, size);

	pr_debug("Device %s unmapping: %lx@%llx\n",
		pci_name(pdev), size, (unsigned long long)start_addr);

	/*  clear the whole page */
	dma_pte_clear_range(domain, start_addr, start_addr + size);
	/* free page tables */
	dma_pte_free_pagetable(domain, start_addr, start_addr + size);
	if (intel_iommu_strict) {
		if (iommu_flush_iotlb_psi(iommu,
			domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0))
			iommu_flush_write_buffer(iommu);
		/* free iova */
		__free_iova(&domain->iovad, iova);
	} else {
		add_unmap(domain, iova);
		/*
		 * Queue up the release of the unmap; this saves the
		 * roughly 1/6th of CPU time that a synchronous iotlb
		 * flush operation would use.
		 */
	}
}

static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
			       int dir)
{
	intel_unmap_page(dev, dev_addr, size, dir, NULL);
}
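
/*
 * Coherent DMA memory: allocate zeroed pages and map them
 * bidirectionally against the device's coherent_dma_mask using
 * __intel_map_single().
 */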
static void *intel_alloc_coherent(struct device *hwdev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags)
{
	void *vaddr;
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);
	flags &= ~(GFP_DMA | GFP_DMA32);

	vaddr = (void *)__get_free_pages(flags, order);
	if (!vaddr)
		return NULL;
	memset(vaddr, 0, size);

	*dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
					 DMA_BIDIRECTIONAL,
					 hwdev->coherent_dma_mask);
	if (*dma_handle)
		return vaddr;
	free_pages((unsigned long)vaddr, order);
	return NULL;
}

static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
				dma_addr_t dma_handle)
{
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long)vaddr, order);
}

#define SG_ENT_VIRT_ADDRESS(sg)	(sg_virt((sg)))

static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
			   int nelems, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	unsigned long start_addr;
	struct iova *iova;
	size_t size = 0;
	void *addr;
	struct scatterlist *sg;
	struct intel_iommu *iommu;

	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return;

	domain = find_domain(pdev);
	BUG_ON(!domain);

	iommu = domain_get_iommu(domain);

	iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
	if (!iova)
		return;
	for_each_sg(sglist, sg, nelems, i) {
		addr = SG_ENT_VIRT_ADDRESS(sg);
		size += aligned_size((u64)addr, sg->length);
	}
	start_addr = iova->pfn_lo << PAGE_SHIFT;

	/*  clear the whole page */
	dma_pte_clear_range(domain, start_addr, start_addr + size);
	/* free page tables */
	dma_pte_free_pagetable(domain, start_addr, start_addr + size);
	if (iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
			size >> VTD_PAGE_SHIFT, 0))
		iommu_flush_write_buffer(iommu);

	/* free iova */
	__free_iova(&domain->iovad, iova);
}

static int intel_nontranslate_map_sg(struct device *hddev,
	struct scatterlist *sglist, int nelems, int dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i) {
		BUG_ON(!sg_page(sg));
		sg->dma_address = virt_to_bus(SG_ENT_VIRT_ADDRESS(sg));
		sg->dma_length = sg->length;
	}
	return nelems;
}
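
/*
 * Map a scatterlist: the aligned sizes of all entries are summed so a
 * single contiguous IOVA range can back the whole list; each segment
 * is then mapped at its running offset, and the work is unwound if
 * any segment fails.
 */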
static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	void *addr;
	int i;
	struct pci_dev *pdev = to_pci_dev(hwdev);
	struct dmar_domain *domain;
	size_t size = 0;
	int prot = 0;
	size_t offset = 0;
	struct iova *iova = NULL;
	int ret;
	struct scatterlist *sg;
	unsigned long start_addr;
	struct intel_iommu *iommu;

	BUG_ON(dir == DMA_NONE);
	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
		return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);

	domain = get_valid_domain_for_dev(pdev);
	if (!domain)
		return 0;

	iommu = domain_get_iommu(domain);

	for_each_sg(sglist, sg, nelems, i) {
		addr = SG_ENT_VIRT_ADDRESS(sg);
		addr = (void *)virt_to_phys(addr);
		size += aligned_size((u64)addr, sg->length);
	}

	iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
	if (!iova) {
		sglist->dma_length = 0;
		return 0;
	}

	/*
	 * Check if DMAR supports zero-length reads on write-only
	 * mappings.
	 */
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
			!cap_zlr(iommu->cap))
		prot |= DMA_PTE_READ;
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		prot |= DMA_PTE_WRITE;

	start_addr = iova->pfn_lo << PAGE_SHIFT;
	offset = 0;
	for_each_sg(sglist, sg, nelems, i) {
		addr = SG_ENT_VIRT_ADDRESS(sg);
		addr = (void *)virt_to_phys(addr);
		size = aligned_size((u64)addr, sg->length);
		ret = domain_page_mapping(domain, start_addr + offset,
			((u64)addr) & PAGE_MASK,
			size, prot);
		if (ret) {
			/*  clear the page */
			dma_pte_clear_range(domain, start_addr,
				  start_addr + offset);
			/* free page tables */
			dma_pte_free_pagetable(domain, start_addr,
				  start_addr + offset);
			/* free iova */
			__free_iova(&domain->iovad, iova);
			return 0;
		}
		sg->dma_address = start_addr + offset +
				((u64)addr & (~PAGE_MASK));
		sg->dma_length = sg->length;
		offset += size;
	}

	/* it's a non-present to present mapping */
	if (iommu_flush_iotlb_psi(iommu, domain->id,
			start_addr, offset >> VTD_PAGE_SHIFT, 1))
		iommu_flush_write_buffer(iommu);
	return nelems;
}

static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return !dma_addr;
}
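
/*
 * Installed as the global dma_ops by intel_iommu_init(), so drivers
 * reach these hooks transparently through the generic DMA API.  A
 * hypothetical driver-side call (illustrative only, not code from
 * this file) would look like:
 *
 *	dma_addr_t handle = dma_map_single(&pdev->dev, buf, len,
 *					   DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, handle))
 *		goto out_err;
 *
 * and would land in intel_map_page() via the .map_page hook.
 */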
  struct dma_map_ops intel_dma_ops = {
	.alloc_coherent = intel_alloc_coherent,
	.free_coherent = intel_free_coherent,
	.map_sg = intel_map_sg,
	.unmap_sg = intel_unmap_sg,
	.map_page = intel_map_page,
	.unmap_page = intel_unmap_page,
	.mapping_error = intel_mapping_error,
};

static inline int iommu_domain_cache_init(void)
{
	int ret = 0;

	iommu_domain_cache = kmem_cache_create("iommu_domain",
					 sizeof(struct dmar_domain),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!iommu_domain_cache) {
		printk(KERN_ERR "Couldn't create iommu_domain cache\n");
  		ret = -ENOMEM;
  	}
  
  	return ret;
  }
  
  static inline int iommu_devinfo_cache_init(void)
  {
  	int ret = 0;
  
  	iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
  					 sizeof(struct device_domain_info),
  					 0,
  					 SLAB_HWCACHE_ALIGN,
ba3959276   Keshavamurthy, Anil S   Intel IOMMU: Inte...
2391
2392
2393
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403
2404
2405
2406
2407
2408
  					 NULL);
  	if (!iommu_devinfo_cache) {
  		printk(KERN_ERR "Couldn't create devinfo cache
  ");
  		ret = -ENOMEM;
  	}
  
  	return ret;
  }
  
  static inline int iommu_iova_cache_init(void)
  {
  	int ret = 0;
  
  	iommu_iova_cache = kmem_cache_create("iommu_iova",
  					 sizeof(struct iova),
  					 0,
  					 SLAB_HWCACHE_ALIGN,
					 NULL);
	if (!iommu_iova_cache) {
		printk(KERN_ERR "Couldn't create iova cache\n");
  		ret = -ENOMEM;
  	}
  
  	return ret;
  }
  
  static int __init iommu_init_mempool(void)
  {
  	int ret;
  	ret = iommu_iova_cache_init();
  	if (ret)
  		return ret;
  
  	ret = iommu_domain_cache_init();
  	if (ret)
  		goto domain_error;
  
  	ret = iommu_devinfo_cache_init();
  	if (!ret)
  		return ret;
  
  	kmem_cache_destroy(iommu_domain_cache);
  domain_error:
  	kmem_cache_destroy(iommu_iova_cache);
  
  	return -ENOMEM;
  }
  
  static void __init iommu_exit_mempool(void)
  {
  	kmem_cache_destroy(iommu_devinfo_cache);
  	kmem_cache_destroy(iommu_domain_cache);
  	kmem_cache_destroy(iommu_iova_cache);
  
  }
  static void __init init_no_remapping_devices(void)
  {
  	struct dmar_drhd_unit *drhd;
  
  	for_each_drhd_unit(drhd) {
  		if (!drhd->include_all) {
  			int i;
  			for (i = 0; i < drhd->devices_cnt; i++)
  				if (drhd->devices[i] != NULL)
  					break;
			/* ignore the DMAR unit if no PCI devices exist under it */
  			if (i == drhd->devices_cnt)
  				drhd->ignored = 1;
  		}
  	}
  
  	if (dmar_map_gfx)
  		return;
  
  	for_each_drhd_unit(drhd) {
  		int i;
  		if (drhd->ignored || drhd->include_all)
  			continue;
  
  		for (i = 0; i < drhd->devices_cnt; i++)
  			if (drhd->devices[i] &&
  				!IS_GFX_DEVICE(drhd->devices[i]))
  				break;
  
  		if (i < drhd->devices_cnt)
  			continue;
  
  		/* bypass IOMMU if it is just for gfx devices */
  		drhd->ignored = 1;
  		for (i = 0; i < drhd->devices_cnt; i++) {
  			if (!drhd->devices[i])
  				continue;
			drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
  		}
  	}
  }
  
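/*
 * Driver entry point, run from the architecture's IOMMU setup code:
 * parse the DMAR table, bail out if remapping is disabled or not
 * needed, bring up the hardware via init_dmars(), and take over
 * dma_ops.
 */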
  int __init intel_iommu_init(void)
  {
  	int ret = 0;
	if (dmar_table_init())
		return -ENODEV;

	if (dmar_dev_scope_init())
		return -ENODEV;

	/*
	 * Check the need for DMA-remapping initialization now;
	 * the initialization above is also used by interrupt remapping.
	 */
	if (no_iommu || swiotlb || dmar_disabled)
		return -ENODEV;

	iommu_init_mempool();
	dmar_init_reserved_ranges();

	init_no_remapping_devices();

	ret = init_dmars();
	if (ret) {
		printk(KERN_ERR "IOMMU: dmar init failed\n");
		put_iova_domain(&reserved_iova_list);
		iommu_exit_mempool();
		return ret;
	}
	printk(KERN_INFO
	"PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

	init_timer(&unmap_timer);
	force_iommu = 1;
	dma_ops = &intel_dma_ops;

	register_iommu(&intel_iommu_ops);

	return 0;
}

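/*
 * The vm_domain_* helpers below back the generic IOMMU API (e.g. KVM
 * device assignment).  Unlike driver DMA domains, a virtual-machine
 * domain can span several IOMMUs, so iommu_bmp, iommu_count and the
 * coherency flag must be kept consistent as devices are attached and
 * detached.
 */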
  static int vm_domain_add_dev_info(struct dmar_domain *domain,
  				  struct pci_dev *pdev)
  {
  	struct device_domain_info *info;
  	unsigned long flags;
  
  	info = alloc_devinfo_mem();
  	if (!info)
  		return -ENOMEM;
  
  	info->bus = pdev->bus->number;
  	info->devfn = pdev->devfn;
  	info->dev = pdev;
  	info->domain = domain;
  
  	spin_lock_irqsave(&device_domain_lock, flags);
  	list_add(&info->link, &domain->devices);
  	list_add(&info->global, &device_domain_list);
  	pdev->dev.archdata.iommu = info;
  	spin_unlock_irqrestore(&device_domain_lock, flags);
  
  	return 0;
  }
  
  static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
  					  struct pci_dev *pdev)
  {
  	struct device_domain_info *info;
  	struct intel_iommu *iommu;
  	unsigned long flags;
  	int found = 0;
  	struct list_head *entry, *tmp;
  
  	iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
  	if (!iommu)
  		return;
  
  	spin_lock_irqsave(&device_domain_lock, flags);
  	list_for_each_safe(entry, tmp, &domain->devices) {
  		info = list_entry(entry, struct device_domain_info, link);
  		if (info->bus == pdev->bus->number &&
  		    info->devfn == pdev->devfn) {
  			list_del(&info->link);
  			list_del(&info->global);
  			if (info->dev)
  				info->dev->dev.archdata.iommu = NULL;
  			spin_unlock_irqrestore(&device_domain_lock, flags);
  
  			iommu_detach_dev(iommu, info->bus, info->devfn);
  			free_devinfo_mem(info);
  
  			spin_lock_irqsave(&device_domain_lock, flags);
  
  			if (found)
  				break;
  			else
  				continue;
  		}
  
		/* If there are no other devices under the same iommu
		 * owned by this domain, clear this iommu from iommu_bmp
		 * and update the iommu count and coherency.
		 */
  		if (device_to_iommu(info->bus, info->devfn) == iommu)
  			found = 1;
  	}
  
  	if (found == 0) {
  		unsigned long tmp_flags;
  		spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
  		clear_bit(iommu->seq_id, &domain->iommu_bmp);
  		domain->iommu_count--;
  		domain_update_iommu_coherency(domain);
  		spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
  	}
  
  	spin_unlock_irqrestore(&device_domain_lock, flags);
  }
  
  static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
  {
  	struct device_domain_info *info;
  	struct intel_iommu *iommu;
  	unsigned long flags1, flags2;
  
  	spin_lock_irqsave(&device_domain_lock, flags1);
  	while (!list_empty(&domain->devices)) {
  		info = list_entry(domain->devices.next,
  			struct device_domain_info, link);
  		list_del(&info->link);
  		list_del(&info->global);
  		if (info->dev)
  			info->dev->dev.archdata.iommu = NULL;
  
  		spin_unlock_irqrestore(&device_domain_lock, flags1);
  
  		iommu = device_to_iommu(info->bus, info->devfn);
  		iommu_detach_dev(iommu, info->bus, info->devfn);
  
  		/* clear this iommu in iommu_bmp, update iommu count
  		 * and coherency
  		 */
  		spin_lock_irqsave(&domain->iommu_lock, flags2);
  		if (test_and_clear_bit(iommu->seq_id,
  				       &domain->iommu_bmp)) {
  			domain->iommu_count--;
  			domain_update_iommu_coherency(domain);
  		}
  		spin_unlock_irqrestore(&domain->iommu_lock, flags2);
  
  		free_devinfo_mem(info);
  		spin_lock_irqsave(&device_domain_lock, flags1);
  	}
  	spin_unlock_irqrestore(&device_domain_lock, flags1);
  }
/* domain id for virtual machine domains; it is never set in a context entry */
  static unsigned long vm_domid;
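
/*
 * Return the smallest AGAW supported by every IOMMU this domain spans;
 * the shared page table must not use a wider address width than this.
 */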
  static int vm_domain_min_agaw(struct dmar_domain *domain)
  {
  	int i;
  	int min_agaw = domain->agaw;
  
  	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
  	for (; i < g_num_of_iommus; ) {
  		if (min_agaw > g_iommus[i]->agaw)
  			min_agaw = g_iommus[i]->agaw;
  
  		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
  	}
  
  	return min_agaw;
  }
  static struct dmar_domain *iommu_alloc_vm_domain(void)
  {
  	struct dmar_domain *domain;
  
  	domain = alloc_domain_mem();
  	if (!domain)
  		return NULL;
  
  	domain->id = vm_domid++;
  	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
  	domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
  
  	return domain;
  }
  
  static int vm_domain_init(struct dmar_domain *domain, int guest_width)
  {
  	int adjust_width;
  
  	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
  	spin_lock_init(&domain->mapping_lock);
  	spin_lock_init(&domain->iommu_lock);
  
  	domain_reserve_special_ranges(domain);
  
  	/* calculate AGAW */
  	domain->gaw = guest_width;
  	adjust_width = guestwidth_to_adjustwidth(guest_width);
  	domain->agaw = width_to_agaw(adjust_width);
  
  	INIT_LIST_HEAD(&domain->devices);
  
  	domain->iommu_count = 0;
  	domain->iommu_coherency = 0;
fe40f1e02   Weidong Han   Check agaw is suf...
  	domain->max_addr = 0;
5e98c4b1d   Weidong Han   Allocation and fr...
  
  	/* always allocate the top pgd */
  	domain->pgd = (struct dma_pte *)alloc_pgtable_page();
  	if (!domain->pgd)
  		return -ENOMEM;
  	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
  	return 0;
  }
  
  static void iommu_free_vm_domain(struct dmar_domain *domain)
  {
  	unsigned long flags;
  	struct dmar_drhd_unit *drhd;
  	struct intel_iommu *iommu;
  	unsigned long i;
  	unsigned long ndomains;
  
  	for_each_drhd_unit(drhd) {
  		if (drhd->ignored)
  			continue;
  		iommu = drhd->iommu;
  
  		ndomains = cap_ndoms(iommu->cap);
  		i = find_first_bit(iommu->domain_ids, ndomains);
  		for (; i < ndomains; ) {
  			if (iommu->domains[i] == domain) {
  				spin_lock_irqsave(&iommu->lock, flags);
  				clear_bit(i, iommu->domain_ids);
  				iommu->domains[i] = NULL;
  				spin_unlock_irqrestore(&iommu->lock, flags);
  				break;
  			}
  			i = find_next_bit(iommu->domain_ids, ndomains, i+1);
  		}
  	}
  }
  
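  /* tear-down order matters: detach all devices first, then release the
   * iova space, clear and free the page tables, and finally drop the
   * domain ids and the domain itself
   */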
  static void vm_domain_exit(struct dmar_domain *domain)
  {
  	u64 end;
  
  	/* Domain 0 is reserved, so don't process it */
  	if (!domain)
  		return;
  
  	vm_domain_remove_all_dev_info(domain);
  	/* destroy iovas */
  	put_iova_domain(&domain->iovad);
  	end = DOMAIN_MAX_ADDR(domain->gaw);
  	end = end & VTD_PAGE_MASK;
  
  	/* clear ptes */
  	dma_pte_clear_range(domain, 0, end);
  
  	/* free page tables */
  	dma_pte_free_pagetable(domain, 0, end);
  
  	iommu_free_vm_domain(domain);
  	free_domain_mem(domain);
  }
5d450806e   Joerg Roedel   VT-d: adapt domai...
  static int intel_iommu_domain_init(struct iommu_domain *domain)
387179464   Kay, Allen M   VT-d: Changes to ...
  {
5d450806e   Joerg Roedel   VT-d: adapt domai...
  	struct dmar_domain *dmar_domain;
387179464   Kay, Allen M   VT-d: Changes to ...

5d450806e   Joerg Roedel   VT-d: adapt domai...
  	dmar_domain = iommu_alloc_vm_domain();
  	if (!dmar_domain) {
387179464   Kay, Allen M   VT-d: Changes to ...
  		printk(KERN_ERR
5d450806e   Joerg Roedel   VT-d: adapt domai...
  			"intel_iommu_domain_init: dmar_domain == NULL\n");
  		return -ENOMEM;
387179464   Kay, Allen M   VT-d: Changes to ...
  	}
5d450806e   Joerg Roedel   VT-d: adapt domai...
  	if (vm_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
387179464   Kay, Allen M   VT-d: Changes to ...
  		printk(KERN_ERR
5d450806e   Joerg Roedel   VT-d: adapt domai...
  			"intel_iommu_domain_init() failed\n");
  		vm_domain_exit(dmar_domain);
  		return -ENOMEM;
387179464   Kay, Allen M   VT-d: Changes to ...
  	}
5d450806e   Joerg Roedel   VT-d: adapt domai...
  	domain->priv = dmar_domain;
faa3d6f5f   Weidong Han   Change intel iomm...

5d450806e   Joerg Roedel   VT-d: adapt domai...
  	return 0;
387179464   Kay, Allen M   VT-d: Changes to ...
  }
387179464   Kay, Allen M   VT-d: Changes to ...

5d450806e   Joerg Roedel   VT-d: adapt domai...
  static void intel_iommu_domain_destroy(struct iommu_domain *domain)
387179464   Kay, Allen M   VT-d: Changes to ...
  {
5d450806e   Joerg Roedel   VT-d: adapt domai...
  	struct dmar_domain *dmar_domain = domain->priv;
  
  	domain->priv = NULL;
  	vm_domain_exit(dmar_domain);
387179464   Kay, Allen M   VT-d: Changes to ...
  }
387179464   Kay, Allen M   VT-d: Changes to ...

4c5478c94   Joerg Roedel   VT-d: adapt devic...
  static int intel_iommu_attach_device(struct iommu_domain *domain,
  				     struct device *dev)
387179464   Kay, Allen M   VT-d: Changes to ...
  {
4c5478c94   Joerg Roedel   VT-d: adapt devic...
  	struct dmar_domain *dmar_domain = domain->priv;
  	struct pci_dev *pdev = to_pci_dev(dev);
fe40f1e02   Weidong Han   Check agaw is suf...
  	struct intel_iommu *iommu;
  	int addr_width;
  	u64 end;
faa3d6f5f   Weidong Han   Change intel iomm...
  	int ret;
  
  	/* normally pdev is not mapped */
  	if (unlikely(domain_context_mapped(pdev))) {
  		struct dmar_domain *old_domain;
  
  		old_domain = find_domain(pdev);
  		if (old_domain) {
4c5478c94   Joerg Roedel   VT-d: adapt devic...
  			if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
faa3d6f5f   Weidong Han   Change intel iomm...
  				vm_domain_remove_one_dev_info(old_domain, pdev);
  			else
  				domain_remove_dev_info(old_domain);
  		}
  	}
fe40f1e02   Weidong Han   Check agaw is suf...
  	iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
  	if (!iommu)
  		return -ENODEV;
  
  	/* check if this iommu agaw is sufficient for max mapped address */
  	addr_width = agaw_to_width(iommu->agaw);
  	end = DOMAIN_MAX_ADDR(addr_width);
  	end = end & VTD_PAGE_MASK;
4c5478c94   Joerg Roedel   VT-d: adapt devic...
  	if (end < dmar_domain->max_addr) {
fe40f1e02   Weidong Han   Check agaw is suf...
  		printk(KERN_ERR "%s: iommu agaw (%d) is not "
  		       "sufficient for the mapped address (%llx)\n",
4c5478c94   Joerg Roedel   VT-d: adapt devic...
  		       __func__, iommu->agaw, dmar_domain->max_addr);
fe40f1e02   Weidong Han   Check agaw is suf...
  		return -EFAULT;
  	}
4c5478c94   Joerg Roedel   VT-d: adapt devic...
  	ret = domain_context_mapping(dmar_domain, pdev);
faa3d6f5f   Weidong Han   Change intel iomm...
  	if (ret)
  		return ret;
4c5478c94   Joerg Roedel   VT-d: adapt devic...
  	ret = vm_domain_add_dev_info(dmar_domain, pdev);
faa3d6f5f   Weidong Han   Change intel iomm...
  	return ret;
387179464   Kay, Allen M   VT-d: Changes to ...
  }
387179464   Kay, Allen M   VT-d: Changes to ...

4c5478c94   Joerg Roedel   VT-d: adapt devic...
  static void intel_iommu_detach_device(struct iommu_domain *domain,
  				      struct device *dev)
387179464   Kay, Allen M   VT-d: Changes to ...
  {
4c5478c94   Joerg Roedel   VT-d: adapt devic...
  	struct dmar_domain *dmar_domain = domain->priv;
  	struct pci_dev *pdev = to_pci_dev(dev);
  
  	vm_domain_remove_one_dev_info(dmar_domain, pdev);
faa3d6f5f   Weidong Han   Change intel iomm...
  }
c7151a8df   Weidong Han   Add/remove domain...

dde57a210   Joerg Roedel   VT-d: adapt domai...
  static int intel_iommu_map_range(struct iommu_domain *domain,
  				 unsigned long iova, phys_addr_t hpa,
  				 size_t size, int iommu_prot)
faa3d6f5f   Weidong Han   Change intel iomm...
  {
dde57a210   Joerg Roedel   VT-d: adapt domai...
  	struct dmar_domain *dmar_domain = domain->priv;
fe40f1e02   Weidong Han   Check agaw is suf...
  	u64 max_addr;
  	int addr_width;
dde57a210   Joerg Roedel   VT-d: adapt domai...
  	int prot = 0;
faa3d6f5f   Weidong Han   Change intel iomm...
  	int ret;
fe40f1e02   Weidong Han   Check agaw is suf...

dde57a210   Joerg Roedel   VT-d: adapt domai...
  	if (iommu_prot & IOMMU_READ)
  		prot |= DMA_PTE_READ;
  	if (iommu_prot & IOMMU_WRITE)
  		prot |= DMA_PTE_WRITE;
fe40f1e02   Weidong Han   Check agaw is suf...
  	max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size);
dde57a210   Joerg Roedel   VT-d: adapt domai...
  	if (dmar_domain->max_addr < max_addr) {
fe40f1e02   Weidong Han   Check agaw is suf...
  		int min_agaw;
  		u64 end;
  
  		/* check if minimum agaw is sufficient for mapped address */
dde57a210   Joerg Roedel   VT-d: adapt domai...
  		min_agaw = vm_domain_min_agaw(dmar_domain);
fe40f1e02   Weidong Han   Check agaw is suf...
  		addr_width = agaw_to_width(min_agaw);
  		end = DOMAIN_MAX_ADDR(addr_width);
  		end = end & VTD_PAGE_MASK;
  		if (end < max_addr) {
  			printk(KERN_ERR "%s: iommu agaw (%d) is not "
  			       "sufficient for the mapped address (%llx)
  ",
  			       __func__, min_agaw, max_addr);
  			return -EFAULT;
  		}
dde57a210   Joerg Roedel   VT-d: adapt domai...
  		dmar_domain->max_addr = max_addr;
fe40f1e02   Weidong Han   Check agaw is suf...
  	}
dde57a210   Joerg Roedel   VT-d: adapt domai...
  	ret = domain_page_mapping(dmar_domain, iova, hpa, size, prot);
faa3d6f5f   Weidong Han   Change intel iomm...
  	return ret;
387179464   Kay, Allen M   VT-d: Changes to ...
  }
387179464   Kay, Allen M   VT-d: Changes to ...

dde57a210   Joerg Roedel   VT-d: adapt domai...
  static void intel_iommu_unmap_range(struct iommu_domain *domain,
  				    unsigned long iova, size_t size)
387179464   Kay, Allen M   VT-d: Changes to ...
  {
dde57a210   Joerg Roedel   VT-d: adapt domai...
  	struct dmar_domain *dmar_domain = domain->priv;
faa3d6f5f   Weidong Han   Change intel iomm...
  	dma_addr_t base;
  
  	/* The address might not be aligned */
  	base = iova & VTD_PAGE_MASK;
  	size = VTD_PAGE_ALIGN(size);
dde57a210   Joerg Roedel   VT-d: adapt domai...
  	dma_pte_clear_range(dmar_domain, base, base + size);
fe40f1e02   Weidong Han   Check agaw is suf...

dde57a210   Joerg Roedel   VT-d: adapt domai...
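  	/* max_addr only tracks the highest mapped address, so it can
  	 * shrink only when the topmost mapping is the one being removed
  	 */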
  	if (dmar_domain->max_addr == base + size)
  		dmar_domain->max_addr = base;
387179464   Kay, Allen M   VT-d: Changes to ...
  }
387179464   Kay, Allen M   VT-d: Changes to ...

d14d65777   Joerg Roedel   VT-d: adapt domai...
  static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
  					    unsigned long iova)
387179464   Kay, Allen M   VT-d: Changes to ...
  {
d14d65777   Joerg Roedel   VT-d: adapt domai...
  	struct dmar_domain *dmar_domain = domain->priv;
387179464   Kay, Allen M   VT-d: Changes to ...
  	struct dma_pte *pte;
faa3d6f5f   Weidong Han   Change intel iomm...
  	u64 phys = 0;
387179464   Kay, Allen M   VT-d: Changes to ...

d14d65777   Joerg Roedel   VT-d: adapt domai...
  	pte = addr_to_dma_pte(dmar_domain, iova);
387179464   Kay, Allen M   VT-d: Changes to ...
  	if (pte)
faa3d6f5f   Weidong Han   Change intel iomm...
  		phys = dma_pte_addr(pte);
387179464   Kay, Allen M   VT-d: Changes to ...

faa3d6f5f   Weidong Han   Change intel iomm...
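  	/* the pte holds a page-aligned address; the in-page offset from
  	 * iova is not added back here
  	 */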
  	return phys;
387179464   Kay, Allen M   VT-d: Changes to ...
  }
a8bcbb0de   Joerg Roedel   VT-d: register fu...
  
  static struct iommu_ops intel_iommu_ops = {
  	.domain_init	= intel_iommu_domain_init,
  	.domain_destroy = intel_iommu_domain_destroy,
  	.attach_dev	= intel_iommu_attach_device,
  	.detach_dev	= intel_iommu_detach_device,
  	.map		= intel_iommu_map_range,
  	.unmap		= intel_iommu_unmap_range,
  	.iova_to_phys	= intel_iommu_iova_to_phys,
  };
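  
  /*
   * Minimal usage sketch (illustrative only, compiled out): assuming the
   * generic IOMMU API from <linux/iommu.h> of this kernel generation,
   * which dispatches to the ops above once intel_iommu_init() has called
   * register_iommu(&intel_iommu_ops). The function name and the addresses
   * below are made up for illustration.
   */
  #if 0
  static int example_assign_device(struct device *dev)
  {
  	struct iommu_domain *domain;
  	int ret;
  
  	domain = iommu_domain_alloc();		/* -> intel_iommu_domain_init */
  	if (!domain)
  		return -ENOMEM;
  
  	ret = iommu_attach_device(domain, dev);	/* -> intel_iommu_attach_device */
  	if (ret)
  		goto out_free;
  
  	/* map one page at IOVA 1MB to the same physical address, r/w */
  	ret = iommu_map_range(domain, 0x100000, 0x100000, VTD_PAGE_SIZE,
  			      IOMMU_READ | IOMMU_WRITE);
  	if (ret)
  		goto out_detach;
  	return 0;
  
  out_detach:
  	iommu_detach_device(domain, dev);	/* -> intel_iommu_detach_device */
  out_free:
  	iommu_domain_free(domain);		/* -> intel_iommu_domain_destroy */
  	return ret;
  }
  #endif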
9af88143b   David Woodhouse   iommu: fix Intel ...
  
  static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
  {
  	/*
  	 * Mobile 4 Series Chipset neglects to set RWBF capability,
  	 * but needs it:
  	 */
  	printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
  	rwbf_quirk = 1;
  }
  
  DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
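  
  /*
   * The fixup above runs quirk_iommu_rwbf() for the Intel Mobile 4 Series
   * MCH (device id 0x2a40) as its config header is parsed during PCI
   * enumeration; iommu_flush_write_buffer() then honours rwbf_quirk even
   * though the unit's capability register does not advertise RWBF.
   */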