  drivers/iommu/intel_irq_remapping.c
  
  #define pr_fmt(fmt)     "DMAR-IR: " fmt
  #include <linux/interrupt.h>
  #include <linux/dmar.h>
  #include <linux/spinlock.h>
  #include <linux/slab.h>
  #include <linux/jiffies.h>
  #include <linux/hpet.h>
  #include <linux/pci.h>
  #include <linux/irq.h>
  #include <linux/intel-iommu.h>
  #include <linux/acpi.h>
  #include <linux/irqdomain.h>
  #include <linux/crash_dump.h>
  #include <asm/io_apic.h>
  #include <asm/smp.h>
  #include <asm/cpu.h>
  #include <asm/irq_remapping.h>
  #include <asm/pci-direct.h>
  #include <asm/msidef.h>

  #include "irq_remapping.h"

  enum irq_mode {
  	IRQ_REMAPPING,
  	IRQ_POSTING,
  };

  struct ioapic_scope {
  	struct intel_iommu *iommu;
  	unsigned int id;
  	unsigned int bus;	/* PCI bus number */
  	unsigned int devfn;	/* PCI devfn number */
  };
  
  struct hpet_scope {
  	struct intel_iommu *iommu;
  	u8 id;
  	unsigned int bus;
  	unsigned int devfn;
  };
  struct irq_2_iommu {
  	struct intel_iommu *iommu;
  	u16 irte_index;
  	u16 sub_handle;
  	u8  irte_mask;
  	enum irq_mode mode;
  };

  struct intel_ir_data {
  	struct irq_2_iommu			irq_2_iommu;
  	struct irte				irte_entry;
  	union {
  		struct msi_msg			msi_entry;
  	};
  };
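
  /*
   * In legacy xapic mode the 8-bit APIC ID lives in bits 15:8 of the IRTE
   * dest_id field, so IRTE_DEST() shifts it up; with EIM (x2apic) the full
   * 32-bit ID is used as-is.  IR_X2APIC_MODE() sets the extended interrupt
   * mode bit that is written to DMAR_IRTA_REG below.
   */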
  #define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0)
  #define IRTE_DEST(dest) ((eim_mode) ? dest : dest << 8)

  static int __read_mostly eim_mode;
  static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
  static struct hpet_scope ir_hpet[MAX_HPET_TBS];

  /*
   * Lock ordering:
   * ->dmar_global_lock
   *	->irq_2_ir_lock
   *		->qi->q_lock
   *	->iommu->register_lock
   * Note:
   * intel_irq_remap_ops.{supported,prepare,enable,disable,reenable} are called
   * in single-threaded environment with interrupts disabled, so no need to take
   * the dmar_global_lock.
   */
  static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
  static struct irq_domain_ops intel_ir_domain_ops;

  static void iommu_disable_irq_remapping(struct intel_iommu *iommu);
  static int __init parse_ioapics_under_ir(void);

  static bool ir_pre_enabled(struct intel_iommu *iommu)
  {
  	return (iommu->flags & VTD_FLAG_IRQ_REMAP_PRE_ENABLED);
  }
  
  static void clear_ir_pre_enabled(struct intel_iommu *iommu)
  {
  	iommu->flags &= ~VTD_FLAG_IRQ_REMAP_PRE_ENABLED;
  }
  
  static void init_ir_status(struct intel_iommu *iommu)
  {
  	u32 gsts;
  
  	gsts = readl(iommu->reg + DMAR_GSTS_REG);
  	if (gsts & DMA_GSTS_IRES)
  		iommu->flags |= VTD_FLAG_IRQ_REMAP_PRE_ENABLED;
  }
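
  /*
   * Allocate 'count' contiguous IRTEs for an interrupt.  The count is rounded
   * up to a power of two and the resulting order is kept in irte_mask so the
   * same block can be released again in clear_entries().
   */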
  static int alloc_irte(struct intel_iommu *iommu, int irq,
  		      struct irq_2_iommu *irq_iommu, u16 count)
  {
  	struct ir_table *table = iommu->ir_table;
  	unsigned int mask = 0;
  	unsigned long flags;
  	int index;

  	if (!count || !irq_iommu)
  		return -1;

  	if (count > 1) {
  		count = __roundup_pow_of_two(count);
  		mask = ilog2(count);
  	}
  
  	if (mask > ecap_max_handle_mask(iommu->ecap)) {
  		pr_err("Requested mask %x exceeds the max invalidation handle"
  		       " mask value %Lx\n", mask,
  		       ecap_max_handle_mask(iommu->ecap));
  		return -1;
  	}
  	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
  	index = bitmap_find_free_region(table->bitmap,
  					INTR_REMAP_TABLE_ENTRIES, mask);
  	if (index < 0) {
  		pr_warn("IR%d: can't allocate an IRTE
  ", iommu->seq_id);
  	} else {
  		irq_iommu->iommu = iommu;
  		irq_iommu->irte_index =  index;
  		irq_iommu->sub_handle = 0;
  		irq_iommu->irte_mask = mask;
  		irq_iommu->mode = IRQ_REMAPPING;
  	}
  	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
  
  	return index;
  }
  static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
  {
  	struct qi_desc desc;
  
  	desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
  		   | QI_IEC_SELECTIVE;
  	desc.high = 0;
  	return qi_submit_sync(&desc, iommu);
  }

  static int modify_irte(struct irq_2_iommu *irq_iommu,
  		       struct irte *irte_modified)
  {
  	struct intel_iommu *iommu;
  	unsigned long flags;
  	struct irte *irte;
  	int rc, index;

  	if (!irq_iommu)
  		return -1;

  	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

  	iommu = irq_iommu->iommu;

  	index = irq_iommu->irte_index + irq_iommu->sub_handle;
  	irte = &iommu->ir_table->base[index];
  #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE)
  	if ((irte->pst == 1) || (irte_modified->pst == 1)) {
  		bool ret;
  
  		ret = cmpxchg_double(&irte->low, &irte->high,
  				     irte->low, irte->high,
  				     irte_modified->low, irte_modified->high);
  		/*
  		 * We use cmpxchg16 to atomically update the 128-bit IRTE,
  		 * and it cannot be updated by the hardware or other processors
  		 * behind us, so the return value of cmpxchg16 should be the
  		 * same as the old value.
  		 */
  		WARN_ON(!ret);
  	} else
  #endif
  	{
  		set_64bit(&irte->low, irte_modified->low);
  		set_64bit(&irte->high, irte_modified->high);
  	}
  	__iommu_flush_cache(iommu, irte, sizeof(*irte));
  	rc = qi_flush_iec(iommu, index, 0);
  
  	/* Update iommu mode according to the IRTE mode */
  	irq_iommu->mode = irte->pst ? IRQ_POSTING : IRQ_REMAPPING;
  	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

  	return rc;
  }
  static struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
  {
  	int i;
  
  	for (i = 0; i < MAX_HPET_TBS; i++)
  		if (ir_hpet[i].id == hpet_id && ir_hpet[i].iommu)
  			return ir_hpet[i].iommu;
  	return NULL;
  }
  static struct intel_iommu *map_ioapic_to_ir(int apic)
  {
  	int i;
  
  	for (i = 0; i < MAX_IO_APICS; i++)
  		if (ir_ioapic[i].id == apic && ir_ioapic[i].iommu)
  			return ir_ioapic[i].iommu;
  	return NULL;
  }
  static struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
  {
  	struct dmar_drhd_unit *drhd;
  
  	drhd = dmar_find_matched_drhd_unit(dev);
  	if (!drhd)
  		return NULL;
  
  	return drhd->iommu;
  }
  static int clear_entries(struct irq_2_iommu *irq_iommu)
  {
  	struct irte *start, *entry, *end;
  	struct intel_iommu *iommu;
  	int index;
  
  	if (irq_iommu->sub_handle)
  		return 0;
  
  	iommu = irq_iommu->iommu;
  	index = irq_iommu->irte_index;
  
  	start = iommu->ir_table->base + index;
  	end = start + (1 << irq_iommu->irte_mask);
  
  	for (entry = start; entry < end; entry++) {
  		set_64bit(&entry->low, 0);
  		set_64bit(&entry->high, 0);
  	}
  	bitmap_release_region(iommu->ir_table->bitmap, index,
  			      irq_iommu->irte_mask);
  
  	return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
  }
  /*
   * source validation type
   */
  #define SVT_NO_VERIFY		0x0  /* no verification is required */
  #define SVT_VERIFY_SID_SQ	0x1  /* verify using SID and SQ fields */
  #define SVT_VERIFY_BUS		0x2  /* verify bus of request-id */
  
  /*
   * source-id qualifier
   */
  #define SQ_ALL_16	0x0  /* verify all 16 bits of request-id */
  #define SQ_13_IGNORE_1	0x1  /* verify most significant 13 bits, ignore
  			      * the third least significant bit
  			      */
  #define SQ_13_IGNORE_2	0x2  /* verify most significant 13 bits, ignore
  			      * the second and third least significant bits
  			      */
  #define SQ_13_IGNORE_3	0x3  /* verify most significant 13 bits, ignore
  			      * the three least significant bits
  			      */
  
  /*
   * set SVT, SQ and SID fields of irte to verify
   * source ids of interrupt requests
   */
  static void set_irte_sid(struct irte *irte, unsigned int svt,
  			 unsigned int sq, unsigned int sid)
  {
  	if (disable_sourceid_checking)
  		svt = SVT_NO_VERIFY;
  	irte->svt = svt;
  	irte->sq = sq;
  	irte->sid = sid;
  }
  static int set_ioapic_sid(struct irte *irte, int apic)
  {
  	int i;
  	u16 sid = 0;
  
  	if (!irte)
  		return -1;
  	down_read(&dmar_global_lock);
  	for (i = 0; i < MAX_IO_APICS; i++) {
  		if (ir_ioapic[i].iommu && ir_ioapic[i].id == apic) {
  			sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
  			break;
  		}
  	}
  	up_read(&dmar_global_lock);

  	if (sid == 0) {
  		pr_warn("Failed to set source-id of IOAPIC (%d)\n", apic);
  		return -1;
  	}
  	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);
  
  	return 0;
  }
  static int set_hpet_sid(struct irte *irte, u8 id)
  {
  	int i;
  	u16 sid = 0;
  
  	if (!irte)
  		return -1;
  	down_read(&dmar_global_lock);
  	for (i = 0; i < MAX_HPET_TBS; i++) {
  		if (ir_hpet[i].iommu && ir_hpet[i].id == id) {
  			sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
  			break;
  		}
  	}
  	up_read(&dmar_global_lock);

  	if (sid == 0) {
  		pr_warn("Failed to set source-id of HPET block (%d)\n", id);
  		return -1;
  	}
  
  	/*
  	 * Should really use SQ_ALL_16. Some platforms are broken.
  	 * While we figure out the right quirks for these broken platforms, use
  	 * SQ_13_IGNORE_3 for now.
  	 */
  	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);
  
  	return 0;
  }
  struct set_msi_sid_data {
  	struct pci_dev *pdev;
  	u16 alias;
  };
  
  static int set_msi_sid_cb(struct pci_dev *pdev, u16 alias, void *opaque)
  {
  	struct set_msi_sid_data *data = opaque;
  
  	data->pdev = pdev;
  	data->alias = alias;
  
  	return 0;
  }
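
  /*
   * set_msi_sid() below walks the device's DMA aliases with
   * pci_for_each_dma_alias(); since the callback always returns 0, 'data'
   * ends up holding the last (topmost) alias seen on the path to the root bus.
   */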
  static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
  {
  	struct set_msi_sid_data data;

  	if (!irte || !dev)
  		return -1;
  	pci_for_each_dma_alias(dev, set_msi_sid_cb, &data);

  	/*
  	 * DMA alias provides us with a PCI device and alias.  The only case
  	 * where it will return an alias on a different bus than the
  	 * device is the case of a PCIe-to-PCI bridge, where the alias is for
  	 * the subordinate bus.  In this case we can only verify the bus.
  	 *
  	 * If the alias device is on a different bus than our source device
  	 * then we have a topology based alias, use it.
  	 *
  	 * Otherwise, the alias is for a device DMA quirk and we cannot
  	 * assume that MSI uses the same requester ID.  Therefore use the
  	 * original device.
  	 */
  	if (PCI_BUS_NUM(data.alias) != data.pdev->bus->number)
  		set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
  			     PCI_DEVID(PCI_BUS_NUM(data.alias),
  				       dev->bus->number));
  	else if (data.pdev->bus->number != dev->bus->number)
  		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, data.alias);
  	else
  		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
  			     PCI_DEVID(dev->bus->number, dev->devfn));
  
  	return 0;
  }
  static int iommu_load_old_irte(struct intel_iommu *iommu)
  {
  	struct irte *old_ir_table;
  	phys_addr_t irt_phys;
  	unsigned int i;
  	size_t size;
  	u64 irta;
  
  	if (!is_kdump_kernel()) {
  		pr_warn("IRQ remapping was enabled on %s but we are not in kdump mode
  ",
  			iommu->name);
  		clear_ir_pre_enabled(iommu);
  		iommu_disable_irq_remapping(iommu);
  		return -EINVAL;
  	}
  
  	/* Check whether the old ir-table has the same size as ours */
  	irta = dmar_readq(iommu->reg + DMAR_IRTA_REG);
  	if ((irta & INTR_REMAP_TABLE_REG_SIZE_MASK)
  	     != INTR_REMAP_TABLE_REG_SIZE)
  		return -EINVAL;
  
  	irt_phys = irta & VTD_PAGE_MASK;
  	size     = INTR_REMAP_TABLE_ENTRIES*sizeof(struct irte);
  
  	/* Map the old IR table */
  	old_ir_table = memremap(irt_phys, size, MEMREMAP_WB);
  	if (!old_ir_table)
  		return -ENOMEM;
  
  	/* Copy data over */
  	memcpy(iommu->ir_table->base, old_ir_table, size);

  	__iommu_flush_cache(iommu, iommu->ir_table->base, size);
  	/*
  	 * Now check the table for used entries and mark those as
  	 * allocated in the bitmap
  	 */
  	for (i = 0; i < INTR_REMAP_TABLE_ENTRIES; i++) {
  		if (iommu->ir_table->base[i].present)
  			bitmap_set(iommu->ir_table->bitmap, i, 1);
  	}
  	memunmap(old_ir_table);

  	return 0;
  }
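
  /*
   * Program DMAR_IRTA_REG with the table address, size and x2apic mode, latch
   * it with the SIRTP command, then flush the interrupt entry cache so the
   * hardware picks up the new table.
   */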
  static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
  {
  	unsigned long flags;
  	u64 addr;
  	u32 sts;
  
  	addr = virt_to_phys((void *)iommu->ir_table->base);
  	raw_spin_lock_irqsave(&iommu->register_lock, flags);
  
  	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
  		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);
  
  	/* Set interrupt-remapping table pointer */
  	writel(iommu->gcmd | DMA_GCMD_SIRTP, iommu->reg + DMAR_GCMD_REG);
  
  	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
  		      readl, (sts & DMA_GSTS_IRTPS), sts);
  	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
  
  	/*
  	 * Global invalidation of interrupt entry cache to make sure the
  	 * hardware uses the new irq remapping table.
  	 */
  	qi_global_iec(iommu);
  }
  
  static void iommu_enable_irq_remapping(struct intel_iommu *iommu)
  {
  	unsigned long flags;
  	u32 sts;

  	raw_spin_lock_irqsave(&iommu->register_lock, flags);
  
  	/* Enable interrupt-remapping */
  	iommu->gcmd |= DMA_GCMD_IRE;
  	iommu->gcmd &= ~DMA_GCMD_CFI;  /* Block compatibility-format MSIs */
  	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

  	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
  		      readl, (sts & DMA_GSTS_IRES), sts);

  	/*
  	 * With CFI clear in the Global Command register, we should be
  	 * protected from dangerous (i.e. compatibility) interrupts
  	 * regardless of x2apic status.  Check just to be sure.
  	 */
  	if (sts & DMA_GSTS_CFIS)
  		WARN(1, KERN_WARNING
  			"Compatibility-format IRQs enabled despite intr remapping;
  "
  			"you are vulnerable to IRQ injection.
  ");
  	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
  }

  static int intel_setup_irq_remapping(struct intel_iommu *iommu)
2ae210106   Suresh Siddha   x64, x2apic/intr-...
481
482
483
  {
  	struct ir_table *ir_table;
  	struct page *pages;
  	unsigned long *bitmap;

  	if (iommu->ir_table)
  		return 0;

  	ir_table = kzalloc(sizeof(struct ir_table), GFP_KERNEL);
  	if (!ir_table)
  		return -ENOMEM;
  	pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO,
  				 INTR_REMAP_PAGE_ORDER);
  	if (!pages) {
  		pr_err("IR%d: failed to allocate pages of order %d\n",
  		       iommu->seq_id, INTR_REMAP_PAGE_ORDER);
  		goto out_free_table;
  	}
  	bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
  			 sizeof(long), GFP_ATOMIC);
  	if (bitmap == NULL) {
  		pr_err("IR%d: failed to allocate bitmap
  ", iommu->seq_id);
  		goto out_free_pages;
  	}
  	iommu->ir_domain = irq_domain_add_hierarchy(arch_get_ir_parent_domain(),
  						    0, INTR_REMAP_TABLE_ENTRIES,
  						    NULL, &intel_ir_domain_ops,
  						    iommu);
  	if (!iommu->ir_domain) {
  		pr_err("IR%d: failed to allocate irqdomain
  ", iommu->seq_id);
  		goto out_free_bitmap;
  	}
  	iommu->ir_msi_domain = arch_create_msi_irq_domain(iommu->ir_domain);
  	ir_table->base = page_address(pages);
  	ir_table->bitmap = bitmap;
  	iommu->ir_table = ir_table;
  
  	/*
  	 * If the queued invalidation is already initialized,
  	 * shouldn't disable it.
  	 */
  	if (!iommu->qi) {
  		/*
  		 * Clear previous faults.
  		 */
  		dmar_fault(-1, iommu);
  		dmar_disable_qi(iommu);
  
  		if (dmar_enable_qi(iommu)) {
  			pr_err("Failed to enable queued invalidation
  ");
  			goto out_free_bitmap;
  		}
  	}
  	init_ir_status(iommu);
  
  	if (ir_pre_enabled(iommu)) {
  		if (iommu_load_old_irte(iommu))
  			pr_err("Failed to copy IR table for %s from previous kernel
  ",
  			       iommu->name);
  		else
  			pr_info("Copied IR table for %s from previous kernel
  ",
  				iommu->name);
  	}
  	iommu_set_irq_remapping(iommu, eim_mode);
  	return 0;

  out_free_bitmap:
  	kfree(bitmap);
  out_free_pages:
  	__free_pages(pages, INTR_REMAP_PAGE_ORDER);
  out_free_table:
  	kfree(ir_table);

  	iommu->ir_table  = NULL;
  	return -ENOMEM;
  }
  
  static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
  {
  	if (iommu && iommu->ir_table) {
  		if (iommu->ir_msi_domain) {
  			irq_domain_remove(iommu->ir_msi_domain);
  			iommu->ir_msi_domain = NULL;
  		}
  		if (iommu->ir_domain) {
  			irq_domain_remove(iommu->ir_domain);
  			iommu->ir_domain = NULL;
  		}
  		free_pages((unsigned long)iommu->ir_table->base,
  			   INTR_REMAP_PAGE_ORDER);
  		kfree(iommu->ir_table->bitmap);
  		kfree(iommu->ir_table);
  		iommu->ir_table = NULL;
  	}
  }

  /*
   * Disable Interrupt Remapping.
   */
  static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
  {
  	unsigned long flags;
  	u32 sts;
  
  	if (!ecap_ir_support(iommu->ecap))
  		return;
  	/*
  	 * global invalidation of interrupt entry cache before disabling
  	 * interrupt-remapping.
  	 */
  	qi_global_iec(iommu);
  	raw_spin_lock_irqsave(&iommu->register_lock, flags);

  	sts = readl(iommu->reg + DMAR_GSTS_REG);
  	if (!(sts & DMA_GSTS_IRES))
  		goto end;
  
  	iommu->gcmd &= ~DMA_GCMD_IRE;
  	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
  
  	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
  		      readl, !(sts & DMA_GSTS_IRES), sts);
  
  end:
  	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
  }

  static int __init dmar_x2apic_optout(void)
  {
  	struct acpi_table_dmar *dmar;
  	dmar = (struct acpi_table_dmar *)dmar_tbl;
  	if (!dmar || no_x2apic_optout)
  		return 0;
  	return dmar->flags & DMAR_X2APIC_OPT_OUT;
  }
  static void __init intel_cleanup_irq_remapping(void)
  {
  	struct dmar_drhd_unit *drhd;
  	struct intel_iommu *iommu;
  
  	for_each_iommu(iommu, drhd) {
  		if (ecap_ir_support(iommu->ecap)) {
  			iommu_disable_irq_remapping(iommu);
  			intel_teardown_irq_remapping(iommu);
  		}
  	}
  
  	if (x2apic_supported())
  		pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");
  }
  
  static int __init intel_prepare_irq_remapping(void)
  {
  	struct dmar_drhd_unit *drhd;
  	struct intel_iommu *iommu;
  	int eim = 0;

  	if (irq_remap_broken) {
  		pr_warn("This system BIOS has enabled interrupt remapping\n"
  			"on a chipset that contains an erratum making that\n"
  			"feature unstable.  To maintain system stability\n"
  			"interrupt remapping is being disabled.  Please\n"
  			"contact your BIOS vendor for an update\n");
  		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
  		return -ENODEV;
  	}

  	if (dmar_table_init() < 0)
  		return -ENODEV;

  	if (!dmar_ir_support())
  		return -ENODEV;

  	if (parse_ioapics_under_ir()) {
  		pr_info("Not enabling interrupt remapping\n");
  		goto error;
  	}

  	/* First make sure all IOMMUs support IRQ remapping */
  	for_each_iommu(iommu, drhd)
  		if (!ecap_ir_support(iommu->ecap))
  			goto error;
  	/* Detect remapping mode: lapic or x2apic */
  	if (x2apic_supported()) {
  		eim = !dmar_x2apic_optout();
  		if (!eim) {
  			pr_info("x2apic is disabled because BIOS sets x2apic opt out bit.");
  			pr_info("Use 'intremap=no_x2apic_optout' to override the BIOS setting.
  ");
  		}
  	}
  
  	for_each_iommu(iommu, drhd) {
  		if (eim && !ecap_eim_support(iommu->ecap)) {
  			pr_info("%s does not support EIM
  ", iommu->name);
  			eim = 0;
  		}
  	}
  
  	eim_mode = eim;
  	if (eim)
  		pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.
  ");
  	/* Do the initializations early */
  	for_each_iommu(iommu, drhd) {
  		if (intel_setup_irq_remapping(iommu)) {
  			pr_err("Failed to setup irq remapping for %s
  ",
  			       iommu->name);
  			goto error;
  		}
  	}

  	return 0;

  error:
  	intel_cleanup_irq_remapping();
  	return -ENODEV;
  }
  /*
   * Set Posted-Interrupts capability.
   */
  static inline void set_irq_posting_cap(void)
  {
  	struct dmar_drhd_unit *drhd;
  	struct intel_iommu *iommu;
  
  	if (!disable_irq_post) {
  		/*
  		 * If IRTE is in posted format, the 'pda' field goes across the
  		 * 64-bit boundary, we need use cmpxchg16b to atomically update
  		 * it. We only expose posted-interrupt when X86_FEATURE_CX16
  		 * is supported. Actually, hardware platforms supporting PI
  		 * should have X86_FEATURE_CX16 support, this has been confirmed
  		 * with Intel hardware guys.
  		 */
  		if (boot_cpu_has(X86_FEATURE_CX16))
  			intel_irq_remap_ops.capability |= 1 << IRQ_POSTING_CAP;
  
  		for_each_iommu(iommu, drhd)
  			if (!cap_pi_support(iommu->cap)) {
  				intel_irq_remap_ops.capability &=
  						~(1 << IRQ_POSTING_CAP);
  				break;
  			}
  	}
  }
  static int __init intel_enable_irq_remapping(void)
  {
  	struct dmar_drhd_unit *drhd;
  	struct intel_iommu *iommu;
  	bool setup = false;
  
  	/*
  	 * Setup Interrupt-remapping for all the DRHD's now.
  	 */
  	for_each_iommu(iommu, drhd) {
  		if (!ir_pre_enabled(iommu))
  			iommu_enable_irq_remapping(iommu);
  		setup = true;
  	}
  
  	if (!setup)
  		goto error;
  	irq_remapping_enabled = 1;

  	set_irq_posting_cap();

  	pr_info("Enabled IRQ remapping in %s mode\n", eim_mode ? "x2apic" : "xapic");

  	return eim_mode ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;

  error:
  	intel_cleanup_irq_remapping();
  	return -1;
  }

  static int ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
  				   struct intel_iommu *iommu,
  				   struct acpi_dmar_hardware_unit *drhd)
  {
  	struct acpi_dmar_pci_path *path;
  	u8 bus;
  	int count, free = -1;
  
  	bus = scope->bus;
  	path = (struct acpi_dmar_pci_path *)(scope + 1);
  	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
  		/ sizeof(struct acpi_dmar_pci_path);
  
  	while (--count > 0) {
  		/*
  		 * Access PCI directly because the PCI
  		 * subsystem isn't initialized yet.
  		 */
  		bus = read_pci_config_byte(bus, path->device, path->function,
  					   PCI_SECONDARY_BUS);
  		path++;
  	}

  
  	for (count = 0; count < MAX_HPET_TBS; count++) {
  		if (ir_hpet[count].iommu == iommu &&
  		    ir_hpet[count].id == scope->enumeration_id)
  			return 0;
  		else if (ir_hpet[count].iommu == NULL && free == -1)
  			free = count;
  	}
  	if (free == -1) {
  		pr_warn("Exceeded Max HPET blocks
  ");
  		return -ENOSPC;
  	}
  
  	ir_hpet[free].iommu = iommu;
  	ir_hpet[free].id    = scope->enumeration_id;
  	ir_hpet[free].bus   = bus;
  	ir_hpet[free].devfn = PCI_DEVFN(path->device, path->function);
  	pr_info("HPET id %d under DRHD base 0x%Lx
  ",
  		scope->enumeration_id, drhd->address);
  
  	return 0;
  }

  static int ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
  				     struct intel_iommu *iommu,
  				     struct acpi_dmar_hardware_unit *drhd)
  {
  	struct acpi_dmar_pci_path *path;
  	u8 bus;
  	int count, free = -1;
  
  	bus = scope->bus;
  	path = (struct acpi_dmar_pci_path *)(scope + 1);
  	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
  		/ sizeof(struct acpi_dmar_pci_path);
  
  	while (--count > 0) {
  		/*
  		 * Access PCI directly because the PCI
  		 * subsystem isn't initialized yet.
  		 */
  		bus = read_pci_config_byte(bus, path->device, path->function,
  					   PCI_SECONDARY_BUS);
  		path++;
  	}

  	for (count = 0; count < MAX_IO_APICS; count++) {
  		if (ir_ioapic[count].iommu == iommu &&
  		    ir_ioapic[count].id == scope->enumeration_id)
  			return 0;
  		else if (ir_ioapic[count].iommu == NULL && free == -1)
  			free = count;
  	}
  	if (free == -1) {
  		pr_warn("Exceeded Max IO APICS
  ");
  		return -ENOSPC;
  	}
  
  	ir_ioapic[free].bus   = bus;
  	ir_ioapic[free].devfn = PCI_DEVFN(path->device, path->function);
  	ir_ioapic[free].iommu = iommu;
  	ir_ioapic[free].id    = scope->enumeration_id;
  	pr_info("IOAPIC id %d under DRHD base  0x%Lx IOMMU %d
  ",
  		scope->enumeration_id, drhd->address, iommu->seq_id);
  
  	return 0;
  }

  static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
  				      struct intel_iommu *iommu)
  {
  	int ret = 0;
  	struct acpi_dmar_hardware_unit *drhd;
  	struct acpi_dmar_device_scope *scope;
  	void *start, *end;
  
  	drhd = (struct acpi_dmar_hardware_unit *)header;
  	start = (void *)(drhd + 1);
  	end = ((void *)drhd) + header->length;
  	while (start < end && ret == 0) {
  		scope = start;
  		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC)
  			ret = ir_parse_one_ioapic_scope(scope, iommu, drhd);
  		else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET)
  			ret = ir_parse_one_hpet_scope(scope, iommu, drhd);
  		start += scope->length;
  	}

  	return ret;
  }

  static void ir_remove_ioapic_hpet_scope(struct intel_iommu *iommu)
  {
  	int i;
  	for (i = 0; i < MAX_HPET_TBS; i++)
  		if (ir_hpet[i].iommu == iommu)
  			ir_hpet[i].iommu = NULL;

  	for (i = 0; i < MAX_IO_APICS; i++)
  		if (ir_ioapic[i].iommu == iommu)
  			ir_ioapic[i].iommu = NULL;
  }
  
  /*
   * Finds the association between IOAPICs and their Interrupt-remapping
   * hardware unit.
   */
  static int __init parse_ioapics_under_ir(void)
  {
  	struct dmar_drhd_unit *drhd;
  	struct intel_iommu *iommu;
  	bool ir_supported = false;
  	int ioapic_idx;

  	for_each_iommu(iommu, drhd) {
  		int ret;
  		if (!ecap_ir_support(iommu->ecap))
  			continue;
  
  		ret = ir_parse_ioapic_hpet_scope(drhd->hdr, iommu);
  		if (ret)
  			return ret;
  
  		ir_supported = true;
  	}

  	if (!ir_supported)
  		return -ENODEV;
  	for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) {
  		int ioapic_id = mpc_ioapic_id(ioapic_idx);
  		if (!map_ioapic_to_ir(ioapic_id)) {
  			pr_err(FW_BUG "ioapic %d has no mapping iommu, "
  			       "interrupt remapping will be disabled
  ",
  			       ioapic_id);
  			return -1;
  		}
  	}

  	return 0;
  }

  static int __init ir_dev_scope_init(void)
  {
  	int ret;
  	if (!irq_remapping_enabled)
  		return 0;
  	down_write(&dmar_global_lock);
  	ret = dmar_dev_scope_init();
  	up_write(&dmar_global_lock);
  
  	return ret;
  }
  rootfs_initcall(ir_dev_scope_init);

  static void disable_irq_remapping(void)
  {
  	struct dmar_drhd_unit *drhd;
  	struct intel_iommu *iommu = NULL;
  
  	/*
  	 * Disable Interrupt-remapping for all the DRHD's now.
  	 */
  	for_each_iommu(iommu, drhd) {
  		if (!ecap_ir_support(iommu->ecap))
  			continue;
  		iommu_disable_irq_remapping(iommu);
  	}
  
  	/*
  	 * Clear Posted-Interrupts capability.
  	 */
  	if (!disable_irq_post)
  		intel_irq_remap_ops.capability &= ~(1 << IRQ_POSTING_CAP);
  }

  static int reenable_irq_remapping(int eim)
  {
  	struct dmar_drhd_unit *drhd;
  	bool setup = false;
  	struct intel_iommu *iommu = NULL;
  
  	for_each_iommu(iommu, drhd)
  		if (iommu->qi)
  			dmar_reenable_qi(iommu);
  
  	/*
  	 * Setup Interrupt-remapping for all the DRHD's now.
  	 */
  	for_each_iommu(iommu, drhd) {
  		if (!ecap_ir_support(iommu->ecap))
  			continue;
  
  		/* Set up interrupt remapping for iommu.*/
  		iommu_set_irq_remapping(iommu, eim);
  		iommu_enable_irq_remapping(iommu);
  		setup = true;
  	}
  
  	if (!setup)
  		goto error;
  	set_irq_posting_cap();

  	return 0;
  
  error:
  	/*
  	 * handle error condition gracefully here!
  	 */
  	return -1;
  }
  static void prepare_irte(struct irte *irte, int vector, unsigned int dest)
  {
  	memset(irte, 0, sizeof(*irte));
  
  	irte->present = 1;
  	irte->dst_mode = apic->irq_dest_mode;
  	/*
  	 * Trigger mode in the IRTE will always be edge, and for IO-APIC, the
  	 * actual level or edge trigger will be setup in the IO-APIC
  	 * RTE. This will help simplify level triggered irq migration.
  	 * For more details, see the comments (in io_apic.c) explaining IO-APIC
  	 * irq migration in the presence of interrupt-remapping.
  	*/
  	irte->trigger_mode = 0;
  	irte->dlvry_mode = apic->irq_delivery_mode;
  	irte->vector = vector;
  	irte->dest_id = IRTE_DEST(dest);
  	irte->redir_hint = 1;
  }
  static struct irq_domain *intel_get_ir_irq_domain(struct irq_alloc_info *info)
  {
  	struct intel_iommu *iommu = NULL;
  
  	if (!info)
  		return NULL;
  
  	switch (info->type) {
  	case X86_IRQ_ALLOC_TYPE_IOAPIC:
  		iommu = map_ioapic_to_ir(info->ioapic_id);
  		break;
  	case X86_IRQ_ALLOC_TYPE_HPET:
  		iommu = map_hpet_to_ir(info->hpet_id);
  		break;
  	case X86_IRQ_ALLOC_TYPE_MSI:
  	case X86_IRQ_ALLOC_TYPE_MSIX:
  		iommu = map_dev_to_ir(info->msi_dev);
  		break;
  	default:
  		BUG_ON(1);
  		break;
  	}
  
  	return iommu ? iommu->ir_domain : NULL;
  }
  
  static struct irq_domain *intel_get_irq_domain(struct irq_alloc_info *info)
  {
  	struct intel_iommu *iommu;
  
  	if (!info)
  		return NULL;
  
  	switch (info->type) {
  	case X86_IRQ_ALLOC_TYPE_MSI:
  	case X86_IRQ_ALLOC_TYPE_MSIX:
  		iommu = map_dev_to_ir(info->msi_dev);
  		if (iommu)
  			return iommu->ir_msi_domain;
  		break;
  	default:
  		break;
  	}
  
  	return NULL;
  }
  struct irq_remap_ops intel_irq_remap_ops = {
  	.prepare		= intel_prepare_irq_remapping,
  	.enable			= intel_enable_irq_remapping,
  	.disable		= disable_irq_remapping,
  	.reenable		= reenable_irq_remapping,
  	.enable_faulting	= enable_drhd_fault_handling,
  	.get_ir_irq_domain	= intel_get_ir_irq_domain,
  	.get_irq_domain		= intel_get_irq_domain,
  };
  
  /*
   * Migrate the IO-APIC irq in the presence of intr-remapping.
   *
   * For both level and edge triggered, irq migration is a simple atomic
   * update(of vector and cpu destination) of IRTE and flush the hardware cache.
   *
   * For level triggered, we eliminate the io-apic RTE modification (with the
   * updated vector information), by using a virtual vector (io-apic pin number).
   * Real vector that is used for interrupting cpu will be coming from
   * the interrupt-remapping table entry.
   *
   * As the migration is a simple atomic update of IRTE, the same mechanism
   * is used to migrate MSI irq's in the presence of interrupt-remapping.
   */
  static int
  intel_ir_set_affinity(struct irq_data *data, const struct cpumask *mask,
  		      bool force)
  {
  	struct intel_ir_data *ir_data = data->chip_data;
  	struct irte *irte = &ir_data->irte_entry;
  	struct irq_cfg *cfg = irqd_cfg(data);
  	struct irq_data *parent = data->parent_data;
  	int ret;
  
  	ret = parent->chip->irq_set_affinity(parent, mask, force);
  	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
  		return ret;
  
  	/*
  	 * Atomically updates the IRTE with the new destination, vector
  	 * and flushes the interrupt entry cache.
  	 */
  	irte->vector = cfg->vector;
  	irte->dest_id = IRTE_DEST(cfg->dest_apicid);

  	/* Update the hardware only if the interrupt is in remapped mode. */
  	if (ir_data->irq_2_iommu.mode == IRQ_REMAPPING)
  		modify_irte(&ir_data->irq_2_iommu, irte);

  
  	/*
  	 * After this point, all the interrupts will start arriving
  	 * at the new destination. So, time to cleanup the previous
  	 * vector allocation.
  	 */
  	send_cleanup_vector(cfg);
  
  	return IRQ_SET_MASK_OK_DONE;
  }
  
  static void intel_ir_compose_msi_msg(struct irq_data *irq_data,
  				     struct msi_msg *msg)
  {
  	struct intel_ir_data *ir_data = irq_data->chip_data;
  
  	*msg = ir_data->msi_entry;
  }
  static int intel_ir_set_vcpu_affinity(struct irq_data *data, void *info)
  {
  	struct intel_ir_data *ir_data = data->chip_data;
  	struct vcpu_data *vcpu_pi_info = info;
  
  	/* stop posting interrupts, back to remapping mode */
  	if (!vcpu_pi_info) {
  		modify_irte(&ir_data->irq_2_iommu, &ir_data->irte_entry);
  	} else {
  		struct irte irte_pi;
  
  		/*
  		 * We are not caching the posted interrupt entry. We
  		 * copy the data from the remapped entry and modify
  		 * the fields which are relevant for posted mode. The
  		 * cached remapped entry is used for switching back to
  		 * remapped mode.
  		 */
  		memset(&irte_pi, 0, sizeof(irte_pi));
  		dmar_copy_shared_irte(&irte_pi, &ir_data->irte_entry);
  
  		/* Update the posted mode fields */
  		irte_pi.p_pst = 1;
  		irte_pi.p_urgent = 0;
  		irte_pi.p_vector = vcpu_pi_info->vector;
  		irte_pi.pda_l = (vcpu_pi_info->pi_desc_addr >>
  				(32 - PDA_LOW_BIT)) & ~(-1UL << PDA_LOW_BIT);
  		irte_pi.pda_h = (vcpu_pi_info->pi_desc_addr >> 32) &
  				~(-1UL << PDA_HIGH_BIT);
  
  		modify_irte(&ir_data->irq_2_iommu, &irte_pi);
  	}
  
  	return 0;
  }

  static struct irq_chip intel_ir_chip = {
  	.irq_ack = ir_ack_apic_edge,
  	.irq_set_affinity = intel_ir_set_affinity,
  	.irq_compose_msi_msg = intel_ir_compose_msi_msg,
  	.irq_set_vcpu_affinity = intel_ir_set_vcpu_affinity,
  };
  
  static void intel_irq_remapping_prepare_irte(struct intel_ir_data *data,
  					     struct irq_cfg *irq_cfg,
  					     struct irq_alloc_info *info,
  					     int index, int sub_handle)
  {
  	struct IR_IO_APIC_route_entry *entry;
  	struct irte *irte = &data->irte_entry;
  	struct msi_msg *msg = &data->msi_entry;
  
  	prepare_irte(irte, irq_cfg->vector, irq_cfg->dest_apicid);
  	switch (info->type) {
  	case X86_IRQ_ALLOC_TYPE_IOAPIC:
  		/* Set source-id of interrupt request */
  		set_ioapic_sid(irte, info->ioapic_id);
  		apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: Set IRTE entry (P:%d FPD:%d Dst_Mode:%d Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X Avail:%X Vector:%02X Dest:%08X SID:%04X SQ:%X SVT:%X)\n",
  			info->ioapic_id, irte->present, irte->fpd,
  			irte->dst_mode, irte->redir_hint,
  			irte->trigger_mode, irte->dlvry_mode,
  			irte->avail, irte->vector, irte->dest_id,
  			irte->sid, irte->sq, irte->svt);
  
  		entry = (struct IR_IO_APIC_route_entry *)info->ioapic_entry;
  		info->ioapic_entry = NULL;
  		memset(entry, 0, sizeof(*entry));
  		entry->index2	= (index >> 15) & 0x1;
  		entry->zero	= 0;
  		entry->format	= 1;
  		entry->index	= (index & 0x7fff);
  		/*
  		 * IO-APIC RTE will be configured with virtual vector.
  		 * irq handler will do the explicit EOI to the io-apic.
  		 */
  		entry->vector	= info->ioapic_pin;
  		entry->mask	= 0;			/* enable IRQ */
  		entry->trigger	= info->ioapic_trigger;
  		entry->polarity	= info->ioapic_polarity;
  		if (info->ioapic_trigger)
  			entry->mask = 1; /* Mask level triggered irqs. */
  		break;
  
  	case X86_IRQ_ALLOC_TYPE_HPET:
  	case X86_IRQ_ALLOC_TYPE_MSI:
  	case X86_IRQ_ALLOC_TYPE_MSIX:
  		if (info->type == X86_IRQ_ALLOC_TYPE_HPET)
  			set_hpet_sid(irte, info->hpet_id);
  		else
  			set_msi_sid(irte, info->msi_dev);
  
  		msg->address_hi = MSI_ADDR_BASE_HI;
  		msg->data = sub_handle;
  		msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
  				  MSI_ADDR_IR_SHV |
  				  MSI_ADDR_IR_INDEX1(index) |
  				  MSI_ADDR_IR_INDEX2(index);
  		break;
  
  	default:
  		BUG_ON(1);
  		break;
  	}
  }
  
  static void intel_free_irq_resources(struct irq_domain *domain,
  				     unsigned int virq, unsigned int nr_irqs)
  {
  	struct irq_data *irq_data;
  	struct intel_ir_data *data;
  	struct irq_2_iommu *irq_iommu;
  	unsigned long flags;
  	int i;
  	for (i = 0; i < nr_irqs; i++) {
  		irq_data = irq_domain_get_irq_data(domain, virq  + i);
  		if (irq_data && irq_data->chip_data) {
  			data = irq_data->chip_data;
  			irq_iommu = &data->irq_2_iommu;
  			raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
  			clear_entries(irq_iommu);
  			raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
  			irq_domain_reset_irq_data(irq_data);
  			kfree(data);
  		}
  	}
  }
  
  static int intel_irq_remapping_alloc(struct irq_domain *domain,
  				     unsigned int virq, unsigned int nr_irqs,
  				     void *arg)
  {
  	struct intel_iommu *iommu = domain->host_data;
  	struct irq_alloc_info *info = arg;
  	struct intel_ir_data *data, *ird;
  	struct irq_data *irq_data;
  	struct irq_cfg *irq_cfg;
  	int i, ret, index;
  
  	if (!info || !iommu)
  		return -EINVAL;
  	if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_MSI &&
  	    info->type != X86_IRQ_ALLOC_TYPE_MSIX)
  		return -EINVAL;
  
  	/*
  	 * With IRQ remapping enabled, don't need contiguous CPU vectors
  	 * to support multiple MSI interrupts.
  	 */
  	if (info->type == X86_IRQ_ALLOC_TYPE_MSI)
  		info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
  
  	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
  	if (ret < 0)
  		return ret;
  
  	ret = -ENOMEM;
  	data = kzalloc(sizeof(*data), GFP_KERNEL);
  	if (!data)
  		goto out_free_parent;
  
  	down_read(&dmar_global_lock);
  	index = alloc_irte(iommu, virq, &data->irq_2_iommu, nr_irqs);
  	up_read(&dmar_global_lock);
  	if (index < 0) {
  		pr_warn("Failed to allocate IRTE
  ");
  		kfree(data);
  		goto out_free_parent;
  	}
  
  	for (i = 0; i < nr_irqs; i++) {
  		irq_data = irq_domain_get_irq_data(domain, virq + i);
  		irq_cfg = irqd_cfg(irq_data);
  		if (!irq_data || !irq_cfg) {
  			ret = -EINVAL;
  			goto out_free_data;
  		}
  
  		if (i > 0) {
  			ird = kzalloc(sizeof(*ird), GFP_KERNEL);
  			if (!ird)
  				goto out_free_data;
  			/* Initialize the common data */
  			ird->irq_2_iommu = data->irq_2_iommu;
  			ird->irq_2_iommu.sub_handle = i;
  		} else {
  			ird = data;
  			/* Initialize the common data */
  			ird->irq_2_iommu = data->irq_2_iommu;
  			ird->irq_2_iommu.sub_handle = i;
  		} else {
  			ird = data;
  		}
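
  		/*
  		 * hwirq encodes the IRTE index in the upper bits and the
  		 * sub-handle in the low 16 bits.
  		 */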
  		irq_data->hwirq = (index << 16) + i;
  		irq_data->chip_data = ird;
  		irq_data->chip = &intel_ir_chip;
  		intel_irq_remapping_prepare_irte(ird, irq_cfg, info, index, i);
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
  		irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
  	}
  	return 0;
  
  out_free_data:
  	intel_free_irq_resources(domain, virq, i);
  out_free_parent:
  	irq_domain_free_irqs_common(domain, virq, nr_irqs);
  	return ret;
  }
  
  static void intel_irq_remapping_free(struct irq_domain *domain,
  				     unsigned int virq, unsigned int nr_irqs)
  {
  	intel_free_irq_resources(domain, virq, nr_irqs);
  	irq_domain_free_irqs_common(domain, virq, nr_irqs);
  }
  
  static void intel_irq_remapping_activate(struct irq_domain *domain,
  					 struct irq_data *irq_data)
  {
  	struct intel_ir_data *data = irq_data->chip_data;
  
  	modify_irte(&data->irq_2_iommu, &data->irte_entry);
  }
  
  static void intel_irq_remapping_deactivate(struct irq_domain *domain,
  					   struct irq_data *irq_data)
  {
  	struct intel_ir_data *data = irq_data->chip_data;
  	struct irte entry;
  
  	memset(&entry, 0, sizeof(entry));
  	modify_irte(&data->irq_2_iommu, &entry);
  }
  
  static struct irq_domain_ops intel_ir_domain_ops = {
  	.alloc = intel_irq_remapping_alloc,
  	.free = intel_irq_remapping_free,
  	.activate = intel_irq_remapping_activate,
  	.deactivate = intel_irq_remapping_deactivate,
  };

  /*
   * Support of Interrupt Remapping Unit Hotplug
   */
  static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu)
  {
  	int ret;
  	int eim = x2apic_enabled();
  
  	if (eim && !ecap_eim_support(iommu->ecap)) {
  		pr_info("DRHD %Lx: EIM not supported by DRHD, ecap %Lx
  ",
  			iommu->reg_phys, iommu->ecap);
  		return -ENODEV;
  	}
  
  	if (ir_parse_ioapic_hpet_scope(dmaru->hdr, iommu)) {
  		pr_warn("DRHD %Lx: failed to parse managed IOAPIC/HPET
  ",
  			iommu->reg_phys);
  		return -ENODEV;
  	}
  
  	/* TODO: check all IOAPICs are covered by IOMMU */
  
  	/* Setup Interrupt-remapping now. */
  	ret = intel_setup_irq_remapping(iommu);
  	if (ret) {
  		pr_err("Failed to setup irq remapping for %s\n",
  		       iommu->name);
  		intel_teardown_irq_remapping(iommu);
  		ir_remove_ioapic_hpet_scope(iommu);
  	} else {
  		iommu_enable_irq_remapping(iommu);
  	}

  	return ret;
  }

  int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
  {
  	int ret = 0;
  	struct intel_iommu *iommu = dmaru->iommu;
  
  	if (!irq_remapping_enabled)
  		return 0;
  	if (iommu == NULL)
  		return -EINVAL;
  	if (!ecap_ir_support(iommu->ecap))
  		return 0;
  	if (irq_remapping_cap(IRQ_POSTING_CAP) &&
  	    !cap_pi_support(iommu->cap))
  		return -EBUSY;
  
  	if (insert) {
  		if (!iommu->ir_table)
  			ret = dmar_ir_add(dmaru, iommu);
  	} else {
  		if (iommu->ir_table) {
  			if (!bitmap_empty(iommu->ir_table->bitmap,
  					  INTR_REMAP_TABLE_ENTRIES)) {
  				ret = -EBUSY;
  			} else {
  				iommu_disable_irq_remapping(iommu);
  				intel_teardown_irq_remapping(iommu);
  				ir_remove_ioapic_hpet_scope(iommu);
  			}
  		}
  	}
  
  	return ret;
  }