Blame view

drivers/iommu/dmar.c 54.8 KB
3b20eb237   Thomas Gleixner   treewide: Replace...
1
  // SPDX-License-Identifier: GPL-2.0-only
10e5247f4   Keshavamurthy, Anil S   Intel IOMMU: DMAR...
2
3
4
  /*
   * Copyright (c) 2006, Intel Corporation.
   *
98bcef56c   mark gross   copyright owner a...
5
6
7
8
   * Copyright (C) 2006-2008 Intel Corporation
   * Author: Ashok Raj <ashok.raj@intel.com>
   * Author: Shaohua Li <shaohua.li@intel.com>
   * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
10e5247f4   Keshavamurthy, Anil S   Intel IOMMU: DMAR...
9
   *
e61d98d8d   Suresh Siddha   x64, x2apic/intr-...
10
   * This file implements early detection/parsing of Remapping Devices
10e5247f4   Keshavamurthy, Anil S   Intel IOMMU: DMAR...
11
12
   * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
   * tables.
e61d98d8d   Suresh Siddha   x64, x2apic/intr-...
13
14
   *
   * These routines are used by both DMA-remapping and Interrupt-remapping
10e5247f4   Keshavamurthy, Anil S   Intel IOMMU: DMAR...
15
   */
9f10e5bf6   Joerg Roedel   iommu/vt-d: Clean...
16
  #define pr_fmt(fmt)     "DMAR: " fmt
e9071b0be   Donald Dutile   iommu/dmar: Use p...
17

10e5247f4   Keshavamurthy, Anil S   Intel IOMMU: DMAR...
18
19
  #include <linux/pci.h>
  #include <linux/dmar.h>
387179464   Kay, Allen M   VT-d: Changes to ...
20
21
  #include <linux/iova.h>
  #include <linux/intel-iommu.h>
fe962e90c   Suresh Siddha   x64, x2apic/intr-...
22
  #include <linux/timer.h>
0ac2491f5   Suresh Siddha   x86, dmar: move p...
23
24
  #include <linux/irq.h>
  #include <linux/interrupt.h>
69575d388   Shane Wang   x86, intel_txt: c...
25
  #include <linux/tboot.h>
eb27cae8a   Len Brown   ACPI: linux/acpi....
26
  #include <linux/dmi.h>
5a0e3ad6a   Tejun Heo   include cleanup: ...
27
  #include <linux/slab.h>
a5459cfec   Alex Williamson   iommu/vt-d: Make ...
28
  #include <linux/iommu.h>
98fa15f34   Anshuman Khandual   mm: replace all o...
29
  #include <linux/numa.h>
03d524d70   Daniel Drake   iommu/vt-d: Ignor...
30
  #include <linux/limits.h>
8a8f422d3   Suresh Siddha   iommu: rename int...
31
  #include <asm/irq_remapping.h>
4db77ff32   Konrad Rzeszutek Wilk   x86, VT-d: Make I...
32
  #include <asm/iommu_table.h>
10e5247f4   Keshavamurthy, Anil S   Intel IOMMU: DMAR...
33

078e1ee26   Joerg Roedel   x86, irq: Move ir...
34
  #include "irq_remapping.h"
c2a0b538d   Jiang Liu   iommu/vt-d: Intro...
35
36
37
38
39
40
41
/* Handler invoked for one DMAR remapping structure while walking the table. */
typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
/* Per-entry-type dispatch table used by dmar_walk_remapping_entries(). */
struct dmar_res_callback {
	dmar_res_handler_t	cb[ACPI_DMAR_TYPE_RESERVED];	/* one handler per structure type */
	void			*arg[ACPI_DMAR_TYPE_RESERVED];	/* opaque argument per structure type */
	bool			ignore_unhandled;	/* true: skip types with no handler instead of failing */
	bool			print_entry;		/* true: log each entry before dispatch */
};
3a5670e8a   Jiang Liu   iommu/vt-d: Intro...
42
43
44
45
46
47
48
49
50
51
52
  /*
   * Assumptions:
 * 1) The hotplug framework guarantees that DMAR unit will be hot-added
   *    before IO devices managed by that unit.
   * 2) The hotplug framework guarantees that DMAR unit will be hot-removed
   *    after IO devices managed by that unit.
   * 3) Hotplug events are rare.
   *
   * Locking rules for DMA and interrupt remapping related global data structures:
   * 1) Use dmar_global_lock in process context
   * 2) Use RCU in interrupt context
10e5247f4   Keshavamurthy, Anil S   Intel IOMMU: DMAR...
53
   */
3a5670e8a   Jiang Liu   iommu/vt-d: Intro...
54
  DECLARE_RWSEM(dmar_global_lock);
10e5247f4   Keshavamurthy, Anil S   Intel IOMMU: DMAR...
55
  LIST_HEAD(dmar_drhd_units);
10e5247f4   Keshavamurthy, Anil S   Intel IOMMU: DMAR...
56

41750d31f   Suresh Siddha   x86, x2apic: Enab...
57
  struct acpi_table_header * __initdata dmar_tbl;
2e4552893   Jiang Liu   iommu/vt-d: Unify...
58
  static int dmar_dev_scope_status = 1;
78d8e7046   Jiang Liu   iommu/vt-d: Dynam...
59
  static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];
10e5247f4   Keshavamurthy, Anil S   Intel IOMMU: DMAR...
60

694835dc2   Jiang Liu   iommu/vt-d: mark ...
61
  static int alloc_iommu(struct dmar_drhd_unit *drhd);
a868e6b7b   Jiang Liu   iommu/vt-d: keep ...
62
  static void free_iommu(struct intel_iommu *iommu);
694835dc2   Jiang Liu   iommu/vt-d: mark ...
63

b0119e870   Joerg Roedel   iommu: Introduce ...
64
  extern const struct iommu_ops intel_iommu_ops;
6b1972493   Jiang Liu   iommu/vt-d: Imple...
65
  static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
10e5247f4   Keshavamurthy, Anil S   Intel IOMMU: DMAR...
66
67
68
69
70
71
  {
  	/*
  	 * add INCLUDE_ALL at the tail, so scan the list will find it at
  	 * the very end.
  	 */
  	if (drhd->include_all)
0e242612d   Jiang Liu   iommu/vt-d: Use R...
72
  		list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
10e5247f4   Keshavamurthy, Anil S   Intel IOMMU: DMAR...
73
  	else
0e242612d   Jiang Liu   iommu/vt-d: Use R...
74
  		list_add_rcu(&drhd->list, &dmar_drhd_units);
10e5247f4   Keshavamurthy, Anil S   Intel IOMMU: DMAR...
75
  }
bb3a6b784   Jiang Liu   iommu/vt-d: Facto...
76
  void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
10e5247f4   Keshavamurthy, Anil S   Intel IOMMU: DMAR...
77
78
  {
  	struct acpi_dmar_device_scope *scope;
10e5247f4   Keshavamurthy, Anil S   Intel IOMMU: DMAR...
79
80
81
82
  
  	*cnt = 0;
  	while (start < end) {
  		scope = start;
83118b0de   Bob Moore   ACPICA: Tables: U...
83
  		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_NAMESPACE ||
07cb52ff6   David Woodhouse   iommu/vt-d: Alloc...
84
  		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
10e5247f4   Keshavamurthy, Anil S   Intel IOMMU: DMAR...
85
86
  		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
  			(*cnt)++;
ae3e7f3ab   Linn Crosetto   iommu/vt-d: Remov...
87
88
  		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
  			scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
e9071b0be   Donald Dutile   iommu/dmar: Use p...
89
90
  			pr_warn("Unsupported device scope
  ");
5715f0f9d   Yinghai Lu   intel-iommu: Don'...
91
  		}
10e5247f4   Keshavamurthy, Anil S   Intel IOMMU: DMAR...
92
93
94
  		start += scope->length;
  	}
  	if (*cnt == 0)
bb3a6b784   Jiang Liu   iommu/vt-d: Facto...
95
  		return NULL;
832bd8586   David Woodhouse   iommu/vt-d: Chang...
96
  	return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
bb3a6b784   Jiang Liu   iommu/vt-d: Facto...
97
  }
832bd8586   David Woodhouse   iommu/vt-d: Chang...
98
  void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
ada4d4b2a   Jiang Liu   iommu/vt-d: fix P...
99
  {
b683b230a   Jiang Liu   iommu/vt-d: Intro...
100
  	int i;
832bd8586   David Woodhouse   iommu/vt-d: Chang...
101
  	struct device *tmp_dev;
b683b230a   Jiang Liu   iommu/vt-d: Intro...
102

ada4d4b2a   Jiang Liu   iommu/vt-d: fix P...
103
  	if (*devices && *cnt) {
b683b230a   Jiang Liu   iommu/vt-d: Intro...
104
  		for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
832bd8586   David Woodhouse   iommu/vt-d: Chang...
105
  			put_device(tmp_dev);
ada4d4b2a   Jiang Liu   iommu/vt-d: fix P...
106
  		kfree(*devices);
ada4d4b2a   Jiang Liu   iommu/vt-d: fix P...
107
  	}
0e242612d   Jiang Liu   iommu/vt-d: Use R...
108
109
110
  
  	*devices = NULL;
  	*cnt = 0;
ada4d4b2a   Jiang Liu   iommu/vt-d: fix P...
111
  }
59ce0515c   Jiang Liu   iommu/vt-d: Updat...
112
113
114
115
116
117
118
119
120
121
122
123
/* Optimize out kzalloc()/kfree() for normal cases */
static char dmar_pci_notify_info_buf[64];

/*
 * Build a notification descriptor for a PCI bus add/remove event.
 * For BUS_NOTIFY_ADD_DEVICE the descriptor also records the full
 * bus/device/function path from the root bus down to @dev.
 *
 * Returns NULL if the device must be ignored or allocation fails.
 * NOTE(review): small descriptors alias the static buffer above, which
 * assumes callers are serialized — presumably guaranteed by the bus
 * notifier holding dmar_global_lock; confirm before reusing elsewhere.
 */
static struct dmar_pci_notify_info *
dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
{
	int level = 0;
	size_t size;
	struct pci_dev *tmp;
	struct dmar_pci_notify_info *info;

	/* VFs are filtered out by the notifier before we get here. */
	BUG_ON(dev->is_virtfn);

	/*
	 * Ignore devices that have a domain number higher than what can
	 * be looked up in DMAR, e.g. VMD subdevices with domain 0x10000
	 */
	if (pci_domain_nr(dev->bus) > U16_MAX)
		return NULL;

	/* Only generate path[] for device addition event */
	if (event == BUS_NOTIFY_ADD_DEVICE)
		for (tmp = dev; tmp; tmp = tmp->bus->self)
			level++;

	/* Descriptor plus one path element per bus level. */
	size = struct_size(info, path, level);

	if (size <= sizeof(dmar_pci_notify_info_buf)) {
		info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
	} else {
		info = kzalloc(size, GFP_KERNEL);
		if (!info) {
			pr_warn("Out of memory when allocating notify_info "
				"for %s.\n", pci_name(dev));
			if (dmar_dev_scope_status == 0)
				dmar_dev_scope_status = -ENOMEM;
			return NULL;
		}
	}

	info->event = event;
	info->dev = dev;
	info->seg = pci_domain_nr(dev->bus);
	info->level = level;
	if (event == BUS_NOTIFY_ADD_DEVICE) {
		/* Walk up again, filling path[] from the leaf backwards. */
		for (tmp = dev; tmp; tmp = tmp->bus->self) {
			level--;
			info->path[level].bus = tmp->bus->number;
			info->path[level].device = PCI_SLOT(tmp->devfn);
			info->path[level].function = PCI_FUNC(tmp->devfn);
			if (pci_is_root_bus(tmp->bus))
				info->bus = tmp->bus->number;
		}
	}

	return info;
}
  
  static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
  {
  	if ((void *)info != dmar_pci_notify_info_buf)
  		kfree(info);
  }
  
  static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
  				struct acpi_dmar_pci_path *path, int count)
  {
  	int i;
  
  	if (info->bus != bus)
80f7b3d1b   Joerg Roedel   iommu/vt-d: Work ...
179
  		goto fallback;
59ce0515c   Jiang Liu   iommu/vt-d: Updat...
180
  	if (info->level != count)
80f7b3d1b   Joerg Roedel   iommu/vt-d: Work ...
181
  		goto fallback;
59ce0515c   Jiang Liu   iommu/vt-d: Updat...
182
183
184
185
  
  	for (i = 0; i < count; i++) {
  		if (path[i].device != info->path[i].device ||
  		    path[i].function != info->path[i].function)
80f7b3d1b   Joerg Roedel   iommu/vt-d: Work ...
186
  			goto fallback;
59ce0515c   Jiang Liu   iommu/vt-d: Updat...
187
188
189
  	}
  
  	return true;
80f7b3d1b   Joerg Roedel   iommu/vt-d: Work ...
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
  
  fallback:
  
  	if (count != 1)
  		return false;
  
  	i = info->level - 1;
  	if (bus              == info->path[i].bus &&
  	    path[0].device   == info->path[i].device &&
  	    path[0].function == info->path[i].function) {
  		pr_info(FW_BUG "RMRR entry for device %02x:%02x.%x is broken - applying workaround
  ",
  			bus, path[0].device, path[0].function);
  		return true;
  	}
  
  	return false;
59ce0515c   Jiang Liu   iommu/vt-d: Updat...
207
208
209
210
211
  }
  
/* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
/*
 * Try to place the device described by @info into the first free slot
 * of @devices when one of the device-scope entries in [@start, @end)
 * (belonging to @segment) matches its PCI path.  Takes a reference on
 * the device and publishes it via rcu_assign_pointer().
 */
int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
			  void *start, void*end, u16 segment,
			  struct dmar_dev_scope *devices,
			  int devices_cnt)
{
	int i, level;
	struct device *tmp, *dev = &info->dev->dev;
	struct acpi_dmar_device_scope *scope;
	struct acpi_dmar_pci_path *path;

	if (segment != info->seg)
		return 0;

	for (; start < end; start += scope->length) {
		scope = start;
		/* Only endpoint and bridge scopes can describe a PCI device. */
		if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
		    scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			continue;

		/* The PCI path array follows the scope header directly. */
		path = (struct acpi_dmar_pci_path *)(scope + 1);
		level = (scope->length - sizeof(*scope)) / sizeof(*path);
		if (!dmar_match_pci_path(info, scope->bus, path, level))
			continue;

		/*
		 * We expect devices with endpoint scope to have normal PCI
		 * headers, and devices with bridge scope to have bridge PCI
		 * headers.  However PCI NTB devices may be listed in the
		 * DMAR table with bridge scope, even though they have a
		 * normal PCI header.  NTB devices are identified by class
		 * "BRIDGE_OTHER" (0680h) - we don't declare a scope mismatch
		 * for this special case.
		 */
		if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
		     info->dev->hdr_type != PCI_HEADER_TYPE_NORMAL) ||
		    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
		     (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
		      info->dev->class >> 8 != PCI_CLASS_BRIDGE_OTHER))) {
			pr_warn("Device scope type does not match for %s\n",
				pci_name(info->dev));
			return -EINVAL;
		}

		/* Claim the first empty slot; readers see it via RCU. */
		for_each_dev_scope(devices, devices_cnt, i, tmp)
			if (tmp == NULL) {
				devices[i].bus = info->dev->bus->number;
				devices[i].devfn = info->dev->devfn;
				rcu_assign_pointer(devices[i].dev,
						   get_device(dev));
				return 1;
			}
		/* The array was sized from the same table, so a slot must exist. */
		BUG_ON(i >= devices_cnt);
	}

	return 0;
}
  
  int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
832bd8586   David Woodhouse   iommu/vt-d: Chang...
268
  			  struct dmar_dev_scope *devices, int count)
59ce0515c   Jiang Liu   iommu/vt-d: Updat...
269
270
  {
  	int index;
832bd8586   David Woodhouse   iommu/vt-d: Chang...
271
  	struct device *tmp;
59ce0515c   Jiang Liu   iommu/vt-d: Updat...
272
273
274
275
276
  
  	if (info->seg != segment)
  		return 0;
  
  	for_each_active_dev_scope(devices, count, index, tmp)
832bd8586   David Woodhouse   iommu/vt-d: Chang...
277
  		if (tmp == &info->dev->dev) {
eecbad7d0   Andreea-Cristina Bernat   iommu: Replace rc...
278
  			RCU_INIT_POINTER(devices[index].dev, NULL);
59ce0515c   Jiang Liu   iommu/vt-d: Updat...
279
  			synchronize_rcu();
832bd8586   David Woodhouse   iommu/vt-d: Chang...
280
  			put_device(tmp);
59ce0515c   Jiang Liu   iommu/vt-d: Updat...
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
  			return 1;
  		}
  
  	return 0;
  }
  
  static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
  {
  	int ret = 0;
  	struct dmar_drhd_unit *dmaru;
  	struct acpi_dmar_hardware_unit *drhd;
  
  	for_each_drhd_unit(dmaru) {
  		if (dmaru->include_all)
  			continue;
  
  		drhd = container_of(dmaru->hdr,
  				    struct acpi_dmar_hardware_unit, header);
  		ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
  				((void *)drhd) + drhd->header.length,
  				dmaru->segment,
  				dmaru->devices, dmaru->devices_cnt);
f9808079a   Andy Shevchenko   iommu/dmar: Remov...
303
  		if (ret)
59ce0515c   Jiang Liu   iommu/vt-d: Updat...
304
305
306
307
  			break;
  	}
  	if (ret >= 0)
  		ret = dmar_iommu_notify_scope_dev(info);
2e4552893   Jiang Liu   iommu/vt-d: Unify...
308
309
  	if (ret < 0 && dmar_dev_scope_status == 0)
  		dmar_dev_scope_status = ret;
59ce0515c   Jiang Liu   iommu/vt-d: Updat...
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
  
  	return ret;
  }
  
  static void  dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
  {
  	struct dmar_drhd_unit *dmaru;
  
  	for_each_drhd_unit(dmaru)
  		if (dmar_remove_dev_scope(info, dmaru->segment,
  			dmaru->devices, dmaru->devices_cnt))
  			break;
  	dmar_iommu_notify_scope_dev(info);
  }
  
  static int dmar_pci_bus_notifier(struct notifier_block *nb,
  				 unsigned long action, void *data)
  {
  	struct pci_dev *pdev = to_pci_dev(data);
  	struct dmar_pci_notify_info *info;
1c387188c   Ashok Raj   iommu/vt-d: Fix I...
330
331
332
  	/* Only care about add/remove events for physical functions.
  	 * For VFs we actually do the lookup based on the corresponding
  	 * PF in device_to_iommu() anyway. */
59ce0515c   Jiang Liu   iommu/vt-d: Updat...
333
334
  	if (pdev->is_virtfn)
  		return NOTIFY_DONE;
e6a8c9b33   Joerg Roedel   iommu/vt-d: Use B...
335
336
  	if (action != BUS_NOTIFY_ADD_DEVICE &&
  	    action != BUS_NOTIFY_REMOVED_DEVICE)
59ce0515c   Jiang Liu   iommu/vt-d: Updat...
337
338
339
340
341
342
343
344
345
  		return NOTIFY_DONE;
  
  	info = dmar_alloc_pci_notify_info(pdev, action);
  	if (!info)
  		return NOTIFY_DONE;
  
  	down_write(&dmar_global_lock);
  	if (action == BUS_NOTIFY_ADD_DEVICE)
  		dmar_pci_bus_add_dev(info);
e6a8c9b33   Joerg Roedel   iommu/vt-d: Use B...
346
  	else if (action == BUS_NOTIFY_REMOVED_DEVICE)
59ce0515c   Jiang Liu   iommu/vt-d: Updat...
347
348
349
350
351
352
353
354
355
356
357
358
  		dmar_pci_bus_del_dev(info);
  	up_write(&dmar_global_lock);
  
  	dmar_free_pci_notify_info(info);
  
  	return NOTIFY_OK;
  }
  
/* INT_MIN priority: run after all other notifiers on the PCI bus chain. */
static struct notifier_block dmar_pci_bus_nb = {
	.notifier_call = dmar_pci_bus_notifier,
	.priority = INT_MIN,
};
6b1972493   Jiang Liu   iommu/vt-d: Imple...
359
360
361
362
  static struct dmar_drhd_unit *
  dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
  {
  	struct dmar_drhd_unit *dmaru;
f8de95a23   Qian Cai   iommu/vt-d: Silen...
363
364
  	list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list,
  				dmar_rcu_check())
6b1972493   Jiang Liu   iommu/vt-d: Imple...
365
366
367
368
369
370
  		if (dmaru->segment == drhd->segment &&
  		    dmaru->reg_base_addr == drhd->address)
  			return dmaru;
  
  	return NULL;
  }
10e5247f4   Keshavamurthy, Anil S   Intel IOMMU: DMAR...
371
372
373
374
375
  /**
   * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
   * structure which uniquely represent one DMA remapping hardware unit
   * present in the platform
   */
6b1972493   Jiang Liu   iommu/vt-d: Imple...
376
  static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
10e5247f4   Keshavamurthy, Anil S   Intel IOMMU: DMAR...
377
378
379
  {
  	struct acpi_dmar_hardware_unit *drhd;
  	struct dmar_drhd_unit *dmaru;
3f6db6591   Andy Shevchenko   iommu/dmar: Remov...
380
  	int ret;
10e5247f4   Keshavamurthy, Anil S   Intel IOMMU: DMAR...
381

e523b38e2   David Woodhouse   intel-iommu: Avoi...
382
  	drhd = (struct acpi_dmar_hardware_unit *)header;
6b1972493   Jiang Liu   iommu/vt-d: Imple...
383
384
385
386
387
  	dmaru = dmar_find_dmaru(drhd);
  	if (dmaru)
  		goto out;
  
  	dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL);
10e5247f4   Keshavamurthy, Anil S   Intel IOMMU: DMAR...
388
389
  	if (!dmaru)
  		return -ENOMEM;
6b1972493   Jiang Liu   iommu/vt-d: Imple...
390
391
392
393
394
395
  	/*
  	 * If header is allocated from slab by ACPI _DSM method, we need to
  	 * copy the content because the memory buffer will be freed on return.
  	 */
  	dmaru->hdr = (void *)(dmaru + 1);
  	memcpy(dmaru->hdr, header, header->length);
10e5247f4   Keshavamurthy, Anil S   Intel IOMMU: DMAR...
396
  	dmaru->reg_base_addr = drhd->address;
276dbf997   David Woodhouse   intel-iommu: Hand...
397
  	dmaru->segment = drhd->segment;
10e5247f4   Keshavamurthy, Anil S   Intel IOMMU: DMAR...
398
  	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
07cb52ff6   David Woodhouse   iommu/vt-d: Alloc...
399
400
401
402
403
404
  	dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
  					      ((void *)drhd) + drhd->header.length,
  					      &dmaru->devices_cnt);
  	if (dmaru->devices_cnt && dmaru->devices == NULL) {
  		kfree(dmaru);
  		return -ENOMEM;
2e4552893   Jiang Liu   iommu/vt-d: Unify...
405
  	}
10e5247f4   Keshavamurthy, Anil S   Intel IOMMU: DMAR...
406

1886e8a90   Suresh Siddha   x64, x2apic/intr-...
407
408
  	ret = alloc_iommu(dmaru);
  	if (ret) {
07cb52ff6   David Woodhouse   iommu/vt-d: Alloc...
409
410
  		dmar_free_dev_scope(&dmaru->devices,
  				    &dmaru->devices_cnt);
1886e8a90   Suresh Siddha   x64, x2apic/intr-...
411
412
413
414
  		kfree(dmaru);
  		return ret;
  	}
  	dmar_register_drhd_unit(dmaru);
c2a0b538d   Jiang Liu   iommu/vt-d: Intro...
415

6b1972493   Jiang Liu   iommu/vt-d: Imple...
416
  out:
c2a0b538d   Jiang Liu   iommu/vt-d: Intro...
417
418
  	if (arg)
  		(*(int *)arg)++;
1886e8a90   Suresh Siddha   x64, x2apic/intr-...
419
420
  	return 0;
  }
a868e6b7b   Jiang Liu   iommu/vt-d: keep ...
421
422
423
424
425
426
427
428
  static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
  {
  	if (dmaru->devices && dmaru->devices_cnt)
  		dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
  	if (dmaru->iommu)
  		free_iommu(dmaru->iommu);
  	kfree(dmaru);
  }
c2a0b538d   Jiang Liu   iommu/vt-d: Intro...
429
430
  static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
  				      void *arg)
e625b4a95   David Woodhouse   iommu/vt-d: Parse...
431
432
433
434
  {
  	struct acpi_dmar_andd *andd = (void *)header;
  
  	/* Check for NUL termination within the designated length */
83118b0de   Bob Moore   ACPICA: Tables: U...
435
  	if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
798c1441b   Hans de Goede   iommu/vt-d: dmar:...
436
  		pr_warn(FW_BUG
e625b4a95   David Woodhouse   iommu/vt-d: Parse...
437
438
439
440
441
442
443
  			   "Your BIOS is broken; ANDD object name is not NUL-terminated
  "
  			   "BIOS vendor: %s; Ver: %s; Product Version: %s
  ",
  			   dmi_get_system_info(DMI_BIOS_VENDOR),
  			   dmi_get_system_info(DMI_BIOS_VERSION),
  			   dmi_get_system_info(DMI_PRODUCT_VERSION));
798c1441b   Hans de Goede   iommu/vt-d: dmar:...
444
  		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
e625b4a95   David Woodhouse   iommu/vt-d: Parse...
445
446
447
448
  		return -EINVAL;
  	}
  	pr_info("ANDD device: %x name: %s
  ", andd->device_number,
83118b0de   Bob Moore   ACPICA: Tables: U...
449
  		andd->device_name);
e625b4a95   David Woodhouse   iommu/vt-d: Parse...
450
451
452
  
  	return 0;
  }
aa697079e   David Woodhouse   dmar: Fix build f...
453
  #ifdef CONFIG_ACPI_NUMA
6b1972493   Jiang Liu   iommu/vt-d: Imple...
454
  static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
ee34b32d8   Suresh Siddha   dmar: support for...
455
456
457
458
459
  {
  	struct acpi_dmar_rhsa *rhsa;
  	struct dmar_drhd_unit *drhd;
  
  	rhsa = (struct acpi_dmar_rhsa *)header;
aa697079e   David Woodhouse   dmar: Fix build f...
460
  	for_each_drhd_unit(drhd) {
ee34b32d8   Suresh Siddha   dmar: support for...
461
462
463
464
  		if (drhd->reg_base_addr == rhsa->base_address) {
  			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);
  
  			if (!node_online(node))
98fa15f34   Anshuman Khandual   mm: replace all o...
465
  				node = NUMA_NO_NODE;
ee34b32d8   Suresh Siddha   dmar: support for...
466
  			drhd->iommu->node = node;
aa697079e   David Woodhouse   dmar: Fix build f...
467
468
  			return 0;
  		}
ee34b32d8   Suresh Siddha   dmar: support for...
469
  	}
798c1441b   Hans de Goede   iommu/vt-d: dmar:...
470
  	pr_warn(FW_BUG
fd0c88948   Ben Hutchings   intel-iommu: Set ...
471
472
473
474
  		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx
  "
  		"BIOS vendor: %s; Ver: %s; Product Version: %s
  ",
9493a6361   Zhenzhong Duan   iommu/vt-d: Fix t...
475
  		rhsa->base_address,
fd0c88948   Ben Hutchings   intel-iommu: Set ...
476
477
478
  		dmi_get_system_info(DMI_BIOS_VENDOR),
  		dmi_get_system_info(DMI_BIOS_VERSION),
  		dmi_get_system_info(DMI_PRODUCT_VERSION));
798c1441b   Hans de Goede   iommu/vt-d: dmar:...
479
  	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
ee34b32d8   Suresh Siddha   dmar: support for...
480

aa697079e   David Woodhouse   dmar: Fix build f...
481
  	return 0;
ee34b32d8   Suresh Siddha   dmar: support for...
482
  }
c2a0b538d   Jiang Liu   iommu/vt-d: Intro...
483
484
  #else
  #define	dmar_parse_one_rhsa		dmar_res_noop
aa697079e   David Woodhouse   dmar: Fix build f...
485
  #endif
ee34b32d8   Suresh Siddha   dmar: support for...
486

3bd71e18c   Arnd Bergmann   iommu/vt-d: Fix h...
487
  static void
10e5247f4   Keshavamurthy, Anil S   Intel IOMMU: DMAR...
488
489
490
491
  dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
  {
  	struct acpi_dmar_hardware_unit *drhd;
  	struct acpi_dmar_reserved_memory *rmrr;
aa5d2b515   Yu Zhao   VT-d: parse ATSR ...
492
  	struct acpi_dmar_atsr *atsr;
17b609775   Roland Dreier   intel-iommu: Deco...
493
  	struct acpi_dmar_rhsa *rhsa;
10e5247f4   Keshavamurthy, Anil S   Intel IOMMU: DMAR...
494
495
496
  
  	switch (header->type) {
  	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
aa5d2b515   Yu Zhao   VT-d: parse ATSR ...
497
498
  		drhd = container_of(header, struct acpi_dmar_hardware_unit,
  				    header);
e9071b0be   Donald Dutile   iommu/dmar: Use p...
499
500
  		pr_info("DRHD base: %#016Lx flags: %#x
  ",
aa5d2b515   Yu Zhao   VT-d: parse ATSR ...
501
  			(unsigned long long)drhd->address, drhd->flags);
10e5247f4   Keshavamurthy, Anil S   Intel IOMMU: DMAR...
502
503
  		break;
  	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
aa5d2b515   Yu Zhao   VT-d: parse ATSR ...
504
505
  		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
  				    header);
e9071b0be   Donald Dutile   iommu/dmar: Use p...
506
507
  		pr_info("RMRR base: %#016Lx end: %#016Lx
  ",
5b6985ce8   Fenghua Yu   intel-iommu: IA64...
508
509
  			(unsigned long long)rmrr->base_address,
  			(unsigned long long)rmrr->end_address);
10e5247f4   Keshavamurthy, Anil S   Intel IOMMU: DMAR...
510
  		break;
83118b0de   Bob Moore   ACPICA: Tables: U...
511
  	case ACPI_DMAR_TYPE_ROOT_ATS:
aa5d2b515   Yu Zhao   VT-d: parse ATSR ...
512
  		atsr = container_of(header, struct acpi_dmar_atsr, header);
e9071b0be   Donald Dutile   iommu/dmar: Use p...
513
514
  		pr_info("ATSR flags: %#x
  ", atsr->flags);
aa5d2b515   Yu Zhao   VT-d: parse ATSR ...
515
  		break;
83118b0de   Bob Moore   ACPICA: Tables: U...
516
  	case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
17b609775   Roland Dreier   intel-iommu: Deco...
517
  		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
e9071b0be   Donald Dutile   iommu/dmar: Use p...
518
519
  		pr_info("RHSA base: %#016Lx proximity domain: %#x
  ",
17b609775   Roland Dreier   intel-iommu: Deco...
520
521
522
  		       (unsigned long long)rhsa->base_address,
  		       rhsa->proximity_domain);
  		break;
83118b0de   Bob Moore   ACPICA: Tables: U...
523
  	case ACPI_DMAR_TYPE_NAMESPACE:
e625b4a95   David Woodhouse   iommu/vt-d: Parse...
524
525
526
  		/* We don't print this here because we need to sanity-check
  		   it first. So print it in dmar_parse_one_andd() instead. */
  		break;
10e5247f4   Keshavamurthy, Anil S   Intel IOMMU: DMAR...
527
528
  	}
  }
f6dd5c310   Yinghai Lu   dmar: fix using e...
529
530
531
532
533
534
535
536
  /**
   * dmar_table_detect - checks to see if the platform supports DMAR devices
   */
  static int __init dmar_table_detect(void)
  {
  	acpi_status status = AE_OK;
  
  	/* if we could find DMAR table, then there are DMAR devices */
6b11d1d67   Lv Zheng   ACPI / osl: Remov...
537
  	status = acpi_get_table(ACPI_SIG_DMAR, 0, &dmar_tbl);
f6dd5c310   Yinghai Lu   dmar: fix using e...
538
539
  
  	if (ACPI_SUCCESS(status) && !dmar_tbl) {
e9071b0be   Donald Dutile   iommu/dmar: Use p...
540
541
  		pr_warn("Unable to map DMAR
  ");
f6dd5c310   Yinghai Lu   dmar: fix using e...
542
543
  		status = AE_NOT_FOUND;
  	}
8326c5d20   Andy Shevchenko   iommu/dmar: Recti...
544
  	return ACPI_SUCCESS(status) ? 0 : -ENOENT;
f6dd5c310   Yinghai Lu   dmar: fix using e...
545
  }
aaa9d1dd6   Suresh Siddha   x64, x2apic/intr-...
546

c2a0b538d   Jiang Liu   iommu/vt-d: Intro...
547
548
549
/*
 * Walk the variable-length remapping structures in [@start, @start+@len)
 * and dispatch each one through @cb.  Stops early (returning an error)
 * on a record that overruns the table or a structure type that has no
 * handler when ignore_unhandled is false; a 0-length record terminates
 * the walk without error to avoid spinning forever on bad firmware.
 */
static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
				       size_t len, struct dmar_res_callback *cb)
{
	struct acpi_dmar_header *iter, *next;
	struct acpi_dmar_header *end = ((void *)start) + len;

	for (iter = start; iter < end; iter = next) {
		next = (void *)iter + iter->length;
		if (iter->length == 0) {
			/* Avoid looping forever on bad ACPI tables */
			pr_debug(FW_BUG "Invalid 0-length structure\n");
			break;
		} else if (next > end) {
			/* Avoid passing table end */
			pr_warn(FW_BUG "Record passes table end\n");
			return -EINVAL;
		}

		if (cb->print_entry)
			dmar_table_print_dmar_entry(iter);

		if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
			/* continue for forward compatibility */
			pr_debug("Unknown DMAR structure type %d\n",
				 iter->type);
		} else if (cb->cb[iter->type]) {
			int ret;

			/* Dispatch to the per-type handler with its own arg. */
			ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
			if (ret)
				return ret;
		} else if (!cb->ignore_unhandled) {
			pr_warn("No handler for DMAR structure type %d\n",
				iter->type);
			return -EINVAL;
		}
	}

	return 0;
}
  
  static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
  				       struct dmar_res_callback *cb)
  {
  	return dmar_walk_remapping_entries((void *)(dmar + 1),
  			dmar->header.length - sizeof(*dmar), cb);
  }
/**
 * parse_dmar_table - parses the DMA reporting table
 *
 * Re-detects the DMAR table (the early mapping may have used the fixmap),
 * lets tboot substitute its DMA-protected copy, sanity-checks the reported
 * host address width, then walks every remapping structure, dispatching
 * DRHD/RMRR/ATSR/RHSA/ANDD entries to their dedicated parsers.
 *
 * Returns 0 on success, -ENODEV if no table is present, -EINVAL for a
 * malformed table, or the first error from an entry parser.
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	int drhd_count = 0;	/* bumped by dmar_parse_one_drhd() via .arg */
	int ret;
	struct dmar_res_callback cb = {
		.print_entry = true,
		.ignore_unhandled = true,
		.arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
		.cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
		.cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
		.cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
		.cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
	};

	/*
	 * Do it again, earlier dmar_tbl mapping could be mapped with
	 * fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use DMAR copy
	 * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	/* width is haw-1; anything below the minimum page width is bogus */
	if (dmar->width < PAGE_SHIFT - 1) {
		pr_warn("Invalid DMAR haw\n");
		return -EINVAL;
	}

	pr_info("Host address width %d\n", dmar->width + 1);
	ret = dmar_walk_dmar_table(dmar, &cb);
	/* A DMAR table with no DRHD units is a firmware bug worth flagging */
	if (ret == 0 && drhd_count == 0)
		pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");

	return ret;
}
832bd8586   David Woodhouse   iommu/vt-d: Chang...
642
643
  static int dmar_pci_device_match(struct dmar_dev_scope devices[],
  				 int cnt, struct pci_dev *dev)
e61d98d8d   Suresh Siddha   x64, x2apic/intr-...
644
645
  {
  	int index;
832bd8586   David Woodhouse   iommu/vt-d: Chang...
646
  	struct device *tmp;
e61d98d8d   Suresh Siddha   x64, x2apic/intr-...
647
648
  
  	while (dev) {
b683b230a   Jiang Liu   iommu/vt-d: Intro...
649
  		for_each_active_dev_scope(devices, cnt, index, tmp)
832bd8586   David Woodhouse   iommu/vt-d: Chang...
650
  			if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
e61d98d8d   Suresh Siddha   x64, x2apic/intr-...
651
652
653
654
655
656
657
658
659
660
661
662
  				return 1;
  
  		/* Check our parent */
  		dev = dev->bus->self;
  	}
  
  	return 0;
  }
  
/*
 * Find the DRHD unit responsible for @dev.  A VF is mapped to its PF
 * first (pci_physfn), since scopes are expressed in terms of physical
 * functions.  A unit matches either because it is the INCLUDE_ALL unit
 * for the device's PCI segment, or because the device (or an ancestor
 * bridge) is listed in the unit's device scope.
 *
 * The drhd list is RCU-protected; the returned pointer is only
 * guaranteed stable while the list cannot change (units are not freed
 * at runtime in normal operation).  Returns NULL if no unit matches.
 */
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	rcu_read_lock();
	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		/* catch-all unit for this segment */
		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			goto out;

		/* explicit device-scope match (device or ancestor bridge) */
		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			goto out;
	}
	dmaru = NULL;
out:
	rcu_read_unlock();

	return dmaru;
}
ed40356b5   David Woodhouse   iommu/vt-d: Add A...
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
/*
 * Attach an ACPI namespace device (from an ANDD structure) to the DRHD
 * unit whose device scope references its enumeration ID.
 *
 * Scans every DRHD unit's trailing device-scope entries for a NAMESPACE
 * entry whose enumeration_id equals @device_number, then records the
 * scope's bus/devfn and a reference to @adev in the first free slot of
 * that unit's devices[] array.  BUG_ON fires if a matching scope exists
 * but the unit has no free slot (devices_cnt was sized from the table,
 * so this indicates corruption).  Logs a warning if no unit claims the
 * device at all.
 */
static void __init dmar_acpi_insert_dev_scope(u8 device_number,
					      struct acpi_device *adev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	struct device *tmp;
	int i;
	struct acpi_dmar_pci_path *path;

	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		/* device-scope entries follow the DRHD header, packed by length */
		for (scope = (void *)(drhd + 1);
		     (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
		     scope = ((void *)scope) + scope->length) {
			if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE)
				continue;
			if (scope->enumeration_id != device_number)
				continue;

			path = (void *)(scope + 1);
			pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
				dev_name(&adev->dev), dmaru->reg_base_addr,
				scope->bus, path->device, path->function);
			/* claim the first unused slot; hold a ref on adev */
			for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
				if (tmp == NULL) {
					dmaru->devices[i].bus = scope->bus;
					dmaru->devices[i].devfn = PCI_DEVFN(path->device,
									    path->function);
					rcu_assign_pointer(dmaru->devices[i].dev,
							   get_device(&adev->dev));
					return;
				}
			BUG_ON(i >= dmaru->devices_cnt);
		}
	}
	pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
		device_number, dev_name(&adev->dev));
}
  
/*
 * Walk the raw DMAR table for ANDD (ACPI namespace device declaration)
 * entries, resolve each declared object name to a struct acpi_device,
 * and insert it into the owning DRHD unit's device scope.
 *
 * Resolution failures are logged and skipped rather than treated as
 * fatal.  Returns -ENODEV if no DMAR table is mapped, 0 otherwise.
 */
static int __init dmar_acpi_dev_scope_init(void)
{
	struct acpi_dmar_andd *andd;

	if (dmar_tbl == NULL)
		return -ENODEV;

	/* entries follow the fixed DMAR header, each self-sized by header.length */
	for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
	     ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
	     andd = ((void *)andd) + andd->header.length) {
		if (andd->header.type == ACPI_DMAR_TYPE_NAMESPACE) {
			acpi_handle h;
			struct acpi_device *adev;

			/* map the ACPI pathname in the table to a handle */
			if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
							  andd->device_name,
							  &h))) {
				pr_err("Failed to find handle for ACPI object %s\n",
				       andd->device_name);
				continue;
			}
			if (acpi_bus_get_device(h, &adev)) {
				pr_err("Failed to get device for ACPI object %s\n",
				       andd->device_name);
				continue;
			}
			dmar_acpi_insert_dev_scope(andd->device_number, adev);
		}
	}
	return 0;
}
1886e8a90   Suresh Siddha   x64, x2apic/intr-...
763
764
  int __init dmar_dev_scope_init(void)
  {
2e4552893   Jiang Liu   iommu/vt-d: Unify...
765
766
  	struct pci_dev *dev = NULL;
  	struct dmar_pci_notify_info *info;
1886e8a90   Suresh Siddha   x64, x2apic/intr-...
767

2e4552893   Jiang Liu   iommu/vt-d: Unify...
768
769
  	if (dmar_dev_scope_status != 1)
  		return dmar_dev_scope_status;
c2c7286ac   Suresh Siddha   intr_remap: Call ...
770

2e4552893   Jiang Liu   iommu/vt-d: Unify...
771
772
773
774
  	if (list_empty(&dmar_drhd_units)) {
  		dmar_dev_scope_status = -ENODEV;
  	} else {
  		dmar_dev_scope_status = 0;
63b426247   David Woodhouse   iommu/vt-d: Only ...
775
  		dmar_acpi_dev_scope_init();
2e4552893   Jiang Liu   iommu/vt-d: Unify...
776
777
778
779
780
781
782
783
784
785
786
787
788
  		for_each_pci_dev(dev) {
  			if (dev->is_virtfn)
  				continue;
  
  			info = dmar_alloc_pci_notify_info(dev,
  					BUS_NOTIFY_ADD_DEVICE);
  			if (!info) {
  				return dmar_dev_scope_status;
  			} else {
  				dmar_pci_bus_add_dev(info);
  				dmar_free_pci_notify_info(info);
  			}
  		}
1886e8a90   Suresh Siddha   x64, x2apic/intr-...
789
  	}
2e4552893   Jiang Liu   iommu/vt-d: Unify...
790
  	return dmar_dev_scope_status;
1886e8a90   Suresh Siddha   x64, x2apic/intr-...
791
  }
d15a339ea   Dmitry Safonov   iommu/vt-d: Add _...
792
  void __init dmar_register_bus_notifier(void)
ec154bf56   Joerg Roedel   iommu/vt-d: Don't...
793
794
795
  {
  	bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
  }
10e5247f4   Keshavamurthy, Anil S   Intel IOMMU: DMAR...
796
797
798
  
  int __init dmar_table_init(void)
  {
1886e8a90   Suresh Siddha   x64, x2apic/intr-...
799
  	static int dmar_table_initialized;
093f87d27   Fenghua Yu   PCI: More Sanity ...
800
  	int ret;
cc05301fd   Jiang Liu   iommu/vt-d: fix w...
801
802
803
804
  	if (dmar_table_initialized == 0) {
  		ret = parse_dmar_table();
  		if (ret < 0) {
  			if (ret != -ENODEV)
9f10e5bf6   Joerg Roedel   iommu/vt-d: Clean...
805
806
  				pr_info("Parse DMAR table failure.
  ");
cc05301fd   Jiang Liu   iommu/vt-d: fix w...
807
808
809
810
811
  		} else  if (list_empty(&dmar_drhd_units)) {
  			pr_info("No DMAR devices found
  ");
  			ret = -ENODEV;
  		}
093f87d27   Fenghua Yu   PCI: More Sanity ...
812

cc05301fd   Jiang Liu   iommu/vt-d: fix w...
813
814
815
816
  		if (ret < 0)
  			dmar_table_initialized = ret;
  		else
  			dmar_table_initialized = 1;
10e5247f4   Keshavamurthy, Anil S   Intel IOMMU: DMAR...
817
  	}
093f87d27   Fenghua Yu   PCI: More Sanity ...
818

cc05301fd   Jiang Liu   iommu/vt-d: fix w...
819
  	return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
10e5247f4   Keshavamurthy, Anil S   Intel IOMMU: DMAR...
820
  }
3a8663ee6   Ben Hutchings   intel-iommu: Comb...
821
822
/*
 * Complain (once) about a bogus DRHD register address reported by the
 * BIOS, identifying the firmware via DMI strings, and taint the kernel
 * so bug reports show the firmware workaround flag.
 *
 * @addr:    the invalid address the table reported
 * @message: extra detail appended after the address (may be "")
 */
static void warn_invalid_dmar(u64 addr, const char *message)
{
	/* pr_warn_once: arguments are only evaluated on the first hit */
	pr_warn_once(FW_BUG
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
}
6ecbf01c7   David Woodhouse   intel-iommu: Appl...
834

c2a0b538d   Jiang Liu   iommu/vt-d: Intro...
835
836
/*
 * Validate one DRHD entry by probing its register block: the address
 * must be non-zero and the CAP/ECAP registers must not read back as all
 * ones (which would mean nothing decodes at that address).
 *
 * @arg selects the mapping primitive: non-NULL means the normal ioremap
 * path is usable; NULL means we are too early in boot and must use
 * early_ioremap/early_iounmap instead (hence __ref).
 *
 * Returns 0 if the unit looks sane, -EINVAL otherwise.
 */
static int __ref
dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
{
	struct acpi_dmar_hardware_unit *drhd;
	void __iomem *addr;
	u64 cap, ecap;

	drhd = (void *)entry;
	if (!drhd->address) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	if (arg)
		addr = ioremap(drhd->address, VTD_PAGE_SIZE);
	else
		addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
	if (!addr) {
		pr_warn("Can't validate DRHD address: %llx\n", drhd->address);
		return -EINVAL;
	}

	cap = dmar_readq(addr + DMAR_CAP_REG);
	ecap = dmar_readq(addr + DMAR_ECAP_REG);

	/* unmap with the matching primitive before judging the values */
	if (arg)
		iounmap(addr);
	else
		early_iounmap(addr, VTD_PAGE_SIZE);

	if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
		warn_invalid_dmar(drhd->address, " returns all ones");
		return -EINVAL;
	}

	return 0;
}
480125ba4   Konrad Rzeszutek Wilk   x86, iommu: Make ...
873
/*
 * Early boot probe for an Intel IOMMU: detect the DMAR table, validate
 * every DRHD register block, and if usable (and not disabled by
 * no_iommu/dmar_disabled, unless the platform opts in) mark the IOMMU
 * detected, request PCI ACS, and install intel_iommu_init as the x86
 * IOMMU init hook.
 *
 * The table reference taken by detection is dropped here; it will be
 * re-acquired by dmar_table_init() later.  Returns a negative errno on
 * failure, otherwise 1.
 */
int __init detect_intel_iommu(void)
{
	int ret;
	struct dmar_res_callback validate_drhd_cb = {
		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
		.ignore_unhandled = true,
	};

	down_write(&dmar_global_lock);
	ret = dmar_table_detect();
	if (!ret)
		ret = dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
					   &validate_drhd_cb);
	if (!ret && !no_iommu && !iommu_detected &&
	    (!dmar_disabled || dmar_platform_optin())) {
		iommu_detected = 1;
		/* Make sure ACS will be enabled */
		pci_request_acs();
	}

#ifdef CONFIG_X86
	if (!ret)
		x86_init.iommu.iommu_init = intel_iommu_init;
#endif

	/* drop the ACPI table reference; parse_dmar_table() re-detects it */
	if (dmar_tbl) {
		acpi_put_table(dmar_tbl);
		dmar_tbl = NULL;
	}
	up_write(&dmar_global_lock);

	return ret ? ret : 1;
}
6f5cf5211   Donald Dutile   iommu/dmar: Reser...
906
907
908
909
910
911
912
913
914
915
  static void unmap_iommu(struct intel_iommu *iommu)
  {
  	iounmap(iommu->reg);
  	release_mem_region(iommu->reg_phys, iommu->reg_size);
  }
  
/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers.  Start w/ a single page, and
 * possibly expand if that turns out to be insufficient: the CAP/ECAP
 * registers (readable from the first page) tell us how far the fault
 * and IOTLB register banks extend, and if they go past one page the
 * mapping is torn down and redone at the required size.
 *
 * On success iommu->reg/reg_phys/reg_size/cap/ecap are valid and 0 is
 * returned; on failure everything acquired here has been released and
 * a negative errno is returned.
 */
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
	int map_size, err=0;

	iommu->reg_phys = phys_addr;
	iommu->reg_size = VTD_PAGE_SIZE;

	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
		pr_err("Can't reserve memory\n");
		err = -EBUSY;
		goto out;
	}

	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
	if (!iommu->reg) {
		pr_err("Can't map the region\n");
		err = -ENOMEM;
		goto release;
	}

	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	/* all-ones means nothing decodes at this address — broken BIOS */
	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		err = -EINVAL;
		warn_invalid_dmar(phys_addr, " returns all ones");
		goto unmap;
	}

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > iommu->reg_size) {
		/* remap at the real size: tear down, then redo both steps */
		iounmap(iommu->reg);
		release_mem_region(iommu->reg_phys, iommu->reg_size);
		iommu->reg_size = map_size;
		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
					iommu->name)) {
			pr_err("Can't reserve memory\n");
			err = -EBUSY;
			goto out;
		}
		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
		if (!iommu->reg) {
			pr_err("Can't map the region\n");
			err = -ENOMEM;
			goto release;
		}
	}
	err = 0;
	goto out;

unmap:
	iounmap(iommu->reg);
release:
	release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
	return err;
}
78d8e7046   Jiang Liu   iommu/vt-d: Dynam...
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
  static int dmar_alloc_seq_id(struct intel_iommu *iommu)
  {
  	iommu->seq_id = find_first_zero_bit(dmar_seq_ids,
  					    DMAR_UNITS_SUPPORTED);
  	if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) {
  		iommu->seq_id = -1;
  	} else {
  		set_bit(iommu->seq_id, dmar_seq_ids);
  		sprintf(iommu->name, "dmar%d", iommu->seq_id);
  	}
  
  	return iommu->seq_id;
  }
  
  static void dmar_free_seq_id(struct intel_iommu *iommu)
  {
  	if (iommu->seq_id >= 0) {
  		clear_bit(iommu->seq_id, dmar_seq_ids);
  		iommu->seq_id = -1;
  	}
  }
694835dc2   Jiang Liu   iommu/vt-d: mark ...
1005
  static int alloc_iommu(struct dmar_drhd_unit *drhd)
e61d98d8d   Suresh Siddha   x64, x2apic/intr-...
1006
  {
c42d9f324   Suresh Siddha   x64, x2apic/intr-...
1007
  	struct intel_iommu *iommu;
3a93c841c   Takao Indoh   iommu/vt-d: Disab...
1008
  	u32 ver, sts;
43f7392ba   Joerg Roedel   intel-iommu: fix ...
1009
  	int agaw = 0;
4ed0d3e6c   Fenghua Yu   Intel IOMMU Pass ...
1010
  	int msagaw = 0;
6f5cf5211   Donald Dutile   iommu/dmar: Reser...
1011
  	int err;
c42d9f324   Suresh Siddha   x64, x2apic/intr-...
1012

6ecbf01c7   David Woodhouse   intel-iommu: Appl...
1013
  	if (!drhd->reg_base_addr) {
3a8663ee6   Ben Hutchings   intel-iommu: Comb...
1014
  		warn_invalid_dmar(0, "");
6ecbf01c7   David Woodhouse   intel-iommu: Appl...
1015
1016
  		return -EINVAL;
  	}
c42d9f324   Suresh Siddha   x64, x2apic/intr-...
1017
1018
  	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
  	if (!iommu)
1886e8a90   Suresh Siddha   x64, x2apic/intr-...
1019
  		return -ENOMEM;
c42d9f324   Suresh Siddha   x64, x2apic/intr-...
1020

78d8e7046   Jiang Liu   iommu/vt-d: Dynam...
1021
  	if (dmar_alloc_seq_id(iommu) < 0) {
9f10e5bf6   Joerg Roedel   iommu/vt-d: Clean...
1022
1023
  		pr_err("Failed to allocate seq_id
  ");
78d8e7046   Jiang Liu   iommu/vt-d: Dynam...
1024
1025
1026
  		err = -ENOSPC;
  		goto error;
  	}
e61d98d8d   Suresh Siddha   x64, x2apic/intr-...
1027

6f5cf5211   Donald Dutile   iommu/dmar: Reser...
1028
1029
  	err = map_iommu(iommu, drhd->reg_base_addr);
  	if (err) {
9f10e5bf6   Joerg Roedel   iommu/vt-d: Clean...
1030
1031
  		pr_err("Failed to map %s
  ", iommu->name);
78d8e7046   Jiang Liu   iommu/vt-d: Dynam...
1032
  		goto error_free_seq_id;
e61d98d8d   Suresh Siddha   x64, x2apic/intr-...
1033
  	}
0815565ad   David Woodhouse   intel-iommu: Cope...
1034

6f5cf5211   Donald Dutile   iommu/dmar: Reser...
1035
  	err = -EINVAL;
1b5736839   Weidong Han   calculate agaw fo...
1036
1037
  	agaw = iommu_calculate_agaw(iommu);
  	if (agaw < 0) {
bf947fcb7   Donald Dutile   iommu/dmar: Repla...
1038
1039
1040
  		pr_err("Cannot get a valid agaw for iommu (seq_id = %d)
  ",
  			iommu->seq_id);
0815565ad   David Woodhouse   intel-iommu: Cope...
1041
  		goto err_unmap;
4ed0d3e6c   Fenghua Yu   Intel IOMMU Pass ...
1042
1043
1044
  	}
  	msagaw = iommu_calculate_max_sagaw(iommu);
  	if (msagaw < 0) {
bf947fcb7   Donald Dutile   iommu/dmar: Repla...
1045
1046
  		pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)
  ",
1b5736839   Weidong Han   calculate agaw fo...
1047
  			iommu->seq_id);
0815565ad   David Woodhouse   intel-iommu: Cope...
1048
  		goto err_unmap;
1b5736839   Weidong Han   calculate agaw fo...
1049
1050
  	}
  	iommu->agaw = agaw;
4ed0d3e6c   Fenghua Yu   Intel IOMMU Pass ...
1051
  	iommu->msagaw = msagaw;
67ccac41f   David Woodhouse   iommu/vt-d: Store...
1052
  	iommu->segment = drhd->segment;
1b5736839   Weidong Han   calculate agaw fo...
1053

98fa15f34   Anshuman Khandual   mm: replace all o...
1054
  	iommu->node = NUMA_NO_NODE;
ee34b32d8   Suresh Siddha   dmar: support for...
1055

e61d98d8d   Suresh Siddha   x64, x2apic/intr-...
1056
  	ver = readl(iommu->reg + DMAR_VER_REG);
9f10e5bf6   Joerg Roedel   iommu/vt-d: Clean...
1057
1058
1059
  	pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx
  ",
  		iommu->name,
5b6985ce8   Fenghua Yu   intel-iommu: IA64...
1060
1061
1062
1063
  		(unsigned long long)drhd->reg_base_addr,
  		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
  		(unsigned long long)iommu->cap,
  		(unsigned long long)iommu->ecap);
e61d98d8d   Suresh Siddha   x64, x2apic/intr-...
1064

3a93c841c   Takao Indoh   iommu/vt-d: Disab...
1065
1066
1067
1068
1069
1070
1071
1072
  	/* Reflect status in gcmd */
  	sts = readl(iommu->reg + DMAR_GSTS_REG);
  	if (sts & DMA_GSTS_IRES)
  		iommu->gcmd |= DMA_GCMD_IRE;
  	if (sts & DMA_GSTS_TES)
  		iommu->gcmd |= DMA_GCMD_TE;
  	if (sts & DMA_GSTS_QIES)
  		iommu->gcmd |= DMA_GCMD_QIE;
1f5b3c3fd   Thomas Gleixner   locking, x86, iom...
1073
  	raw_spin_lock_init(&iommu->register_lock);
e61d98d8d   Suresh Siddha   x64, x2apic/intr-...
1074

bc8474549   Joerg Roedel   iommu/vt-d: Fix u...
1075
  	if (intel_iommu_enabled) {
39ab9555c   Joerg Roedel   iommu: Add sysfs ...
1076
1077
1078
1079
  		err = iommu_device_sysfs_add(&iommu->iommu, NULL,
  					     intel_iommu_groups,
  					     "%s", iommu->name);
  		if (err)
bc8474549   Joerg Roedel   iommu/vt-d: Fix u...
1080
  			goto err_unmap;
a5459cfec   Alex Williamson   iommu/vt-d: Make ...
1081

b0119e870   Joerg Roedel   iommu: Introduce ...
1082
1083
1084
1085
  		iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
  
  		err = iommu_device_register(&iommu->iommu);
  		if (err)
bc8474549   Joerg Roedel   iommu/vt-d: Fix u...
1086
  			goto err_unmap;
592033790   Nicholas Krause   iommu/vt-d: Check...
1087
  	}
bc8474549   Joerg Roedel   iommu/vt-d: Fix u...
1088
  	drhd->iommu = iommu;
1886e8a90   Suresh Siddha   x64, x2apic/intr-...
1089
  	return 0;
0815565ad   David Woodhouse   intel-iommu: Cope...
1090

78d8e7046   Jiang Liu   iommu/vt-d: Dynam...
1091
  err_unmap:
6f5cf5211   Donald Dutile   iommu/dmar: Reser...
1092
  	unmap_iommu(iommu);
78d8e7046   Jiang Liu   iommu/vt-d: Dynam...
1093
1094
1095
  error_free_seq_id:
  	dmar_free_seq_id(iommu);
  error:
e61d98d8d   Suresh Siddha   x64, x2apic/intr-...
1096
  	kfree(iommu);
6f5cf5211   Donald Dutile   iommu/dmar: Reser...
1097
  	return err;
e61d98d8d   Suresh Siddha   x64, x2apic/intr-...
1098
  }
/*
 * Tear down everything alloc_iommu() and later setup attached to
 * @iommu: core/sysfs registration, the main and page-request IRQs, the
 * queued-invalidation state, the register mapping, the sequence id, and
 * finally the structure itself.  Each step is guarded so a partially
 * initialized unit can be freed safely.
 */
static void free_iommu(struct intel_iommu *iommu)
{
	if (intel_iommu_enabled) {
		iommu_device_unregister(&iommu->iommu);
		iommu_device_sysfs_remove(&iommu->iommu);
	}

	if (iommu->irq) {
		/* page-request IRQ only exists if it was set up alongside irq */
		if (iommu->pr_irq) {
			free_irq(iommu->pr_irq, iommu);
			dmar_free_hwirq(iommu->pr_irq);
			iommu->pr_irq = 0;
		}
		free_irq(iommu->irq, iommu);
		dmar_free_hwirq(iommu->irq);
		iommu->irq = 0;
	}

	/* queued-invalidation descriptor page and status array */
	if (iommu->qi) {
		free_page((unsigned long)iommu->qi->desc);
		kfree(iommu->qi->desc_status);
		kfree(iommu->qi);
	}

	if (iommu->reg)
		unmap_iommu(iommu);

	dmar_free_seq_id(iommu);
	kfree(iommu);
}
/*
 * Reclaim all the submitted descriptors which have completed its work.
 *
 * Advances free_tail over every slot already marked QI_DONE or
 * QI_ABORT, returning those slots to QI_FREE and growing free_cnt.
 * Caller must hold qi->q_lock.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}
/*
 * Inspect the fault status register for errors affecting the queued
 * invalidation request at @index (whose wait descriptor sits at
 * index+1).
 *
 * Returns -EINVAL for an Invalidation Queue Error on our own
 * descriptor (after overwriting it with the wait descriptor so the
 * queue can drain), -EAGAIN when an Invalidation Time-out aborted our
 * wait descriptor (caller should resubmit), and 0 otherwise.
 * Invalidation Completion Errors are just acknowledged.
 * Caller holds qi->q_lock.
 */
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;
	int shift = qi_shift(iommu);	/* log2 of descriptor size */

	/* a previous time-out already aborted our wait descriptor */
	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> shift) == index) {
			struct qi_desc *desc = qi->desc + head;

			/*
			 * desc->qw2 and desc->qw3 are either reserved or
			 * used by software as private data. We won't print
			 * out these two qw's for security consideration.
			 */
			pr_err("VT-d detected invalid descriptor: qw0 = %llx, qw1 = %llx\n",
			       (unsigned long long)desc->qw0,
			       (unsigned long long)desc->qw1);
			/* replace the bad descriptor with our benign wait desc */
			memcpy(desc, qi->desc + (wait_index << shift),
			       1 << shift);
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> shift) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;	/* wait descriptors live in odd slots */
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> shift) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		/* mark every in-flight wait descriptor between head and tail */
		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}
fe962e90c   Suresh Siddha   x64, x2apic/intr-...
1204
1205
1206
1207
/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 *
 * Two queue slots are consumed per request: @desc itself, then a wait
 * descriptor whose status write-back lets us poll for completion.
 * Spins (with the queue lock dropped) until two free slots exist, then
 * busy-waits on the status word, checking the fault registers each
 * iteration.  On a time-out abort (-EAGAIN from qi_check_fault) the
 * whole submission is retried.  Returns 0 on success or a negative
 * errno from fault handling.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	int offset, shift, length;
	struct qi_desc wait_desc;
	int wait_index, index;
	unsigned long flags;

	/* queued invalidation not set up on this unit: nothing to do */
	if (!qi)
		return 0;

restart:
	rc = 0;
	raw_spin_lock_irqsave(&qi->q_lock, flags);
	/* need room for the request plus its wait descriptor */
	while (qi->free_cnt < 3) {
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		raw_spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;
	shift = qi_shift(iommu);	/* 128- vs 256-bit descriptors */
	length = 1 << shift;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	offset = index << shift;
	memcpy(qi->desc + offset, desc, length);
	/* hardware will write QI_DONE into desc_status[wait_index] */
	wait_desc.qw0 = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.qw1 = virt_to_phys(&qi->desc_status[wait_index]);
	wait_desc.qw2 = 0;
	wait_desc.qw3 = 0;

	offset = wait_index << shift;
	memcpy(qi->desc + offset, &wait_desc, length);

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << shift, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent interrupt
		 * context to queue another cmd while a cmd is already submitted
		 * and waiting for completion on this cpu. This is to avoid
		 * a deadlock where the interrupt context can wait indefinitely
		 * for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		raw_spin_unlock(&qi->q_lock);
		cpu_relax();
		raw_spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	raw_spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}
  
  /*
   * Flush the global interrupt entry cache.
   */
  void qi_global_iec(struct intel_iommu *iommu)
  {
  	struct qi_desc desc;
5d308fc1e   Lu Baolu   iommu/vt-d: Add 2...
1287
1288
1289
1290
  	desc.qw0 = QI_IEC_TYPE;
  	desc.qw1 = 0;
  	desc.qw2 = 0;
  	desc.qw3 = 0;
fe962e90c   Suresh Siddha   x64, x2apic/intr-...
1291

704126ad8   Yu Zhao   VT-d: handle Inva...
1292
  	/* should never fail */
fe962e90c   Suresh Siddha   x64, x2apic/intr-...
1293
1294
  	qi_submit_sync(&desc, iommu);
  }
4c25a2c1b   David Woodhouse   intel-iommu: Clea...
1295
1296
  void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
  		      u64 type)
3481f2109   Youquan Song   dmar: context cac...
1297
  {
3481f2109   Youquan Song   dmar: context cac...
1298
  	struct qi_desc desc;
5d308fc1e   Lu Baolu   iommu/vt-d: Add 2...
1299
  	desc.qw0 = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
3481f2109   Youquan Song   dmar: context cac...
1300
  			| QI_CC_GRAN(type) | QI_CC_TYPE;
5d308fc1e   Lu Baolu   iommu/vt-d: Add 2...
1301
1302
1303
  	desc.qw1 = 0;
  	desc.qw2 = 0;
  	desc.qw3 = 0;
3481f2109   Youquan Song   dmar: context cac...
1304

4c25a2c1b   David Woodhouse   intel-iommu: Clea...
1305
  	qi_submit_sync(&desc, iommu);
3481f2109   Youquan Song   dmar: context cac...
1306
  }
1f0ef2aa1   David Woodhouse   intel-iommu: Clea...
1307
1308
  void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
  		    unsigned int size_order, u64 type)
3481f2109   Youquan Song   dmar: context cac...
1309
1310
1311
1312
1313
  {
  	u8 dw = 0, dr = 0;
  
  	struct qi_desc desc;
  	int ih = 0;
3481f2109   Youquan Song   dmar: context cac...
1314
1315
1316
1317
1318
  	if (cap_write_drain(iommu->cap))
  		dw = 1;
  
  	if (cap_read_drain(iommu->cap))
  		dr = 1;
5d308fc1e   Lu Baolu   iommu/vt-d: Add 2...
1319
  	desc.qw0 = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
3481f2109   Youquan Song   dmar: context cac...
1320
  		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
5d308fc1e   Lu Baolu   iommu/vt-d: Add 2...
1321
  	desc.qw1 = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
3481f2109   Youquan Song   dmar: context cac...
1322
  		| QI_IOTLB_AM(size_order);
5d308fc1e   Lu Baolu   iommu/vt-d: Add 2...
1323
1324
  	desc.qw2 = 0;
  	desc.qw3 = 0;
3481f2109   Youquan Song   dmar: context cac...
1325

1f0ef2aa1   David Woodhouse   intel-iommu: Clea...
1326
  	qi_submit_sync(&desc, iommu);
3481f2109   Youquan Song   dmar: context cac...
1327
  }
1c48db449   Jacob Pan   iommu/vt-d: Fix d...
1328
1329
  void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
  			u16 qdep, u64 addr, unsigned mask)
6ba6c3a4c   Yu Zhao   VT-d: add device ...
1330
1331
1332
1333
  {
  	struct qi_desc desc;
  
  	if (mask) {
c8acb28b3   Joerg Roedel   iommu/vt-d: Allow...
1334
  		addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
5d308fc1e   Lu Baolu   iommu/vt-d: Add 2...
1335
  		desc.qw1 = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
6ba6c3a4c   Yu Zhao   VT-d: add device ...
1336
  	} else
5d308fc1e   Lu Baolu   iommu/vt-d: Add 2...
1337
  		desc.qw1 = QI_DEV_IOTLB_ADDR(addr);
6ba6c3a4c   Yu Zhao   VT-d: add device ...
1338
1339
1340
  
  	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
  		qdep = 0;
5d308fc1e   Lu Baolu   iommu/vt-d: Add 2...
1341
  	desc.qw0 = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
1c48db449   Jacob Pan   iommu/vt-d: Fix d...
1342
  		   QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);
5d308fc1e   Lu Baolu   iommu/vt-d: Add 2...
1343
1344
  	desc.qw2 = 0;
  	desc.qw3 = 0;
6ba6c3a4c   Yu Zhao   VT-d: add device ...
1345
1346
1347
  
  	qi_submit_sync(&desc, iommu);
  }
fe962e90c   Suresh Siddha   x64, x2apic/intr-...
1348
/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	/* Nothing to do if the HW has no queued-invalidation support. */
	if (!ecap_qis(iommu->ecap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* Already disabled? Global status tells whether QI is active. */
	sts =  readl(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	/* Clear the QI-enable bit and push the new command to the HW. */
	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Spin until the HW acknowledges that QI is off. */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
  
/*
 * Enable queued invalidation.
 *
 * Resets the software queue state and programs the invalidation queue
 * address/tail registers before setting the QI-enable bit.  Caller must
 * have already allocated iommu->qi (see dmar_enable_qi()).
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;
	/* Physical base of the descriptor queue, programmed into IQA_REG. */
	u64 val = virt_to_phys(qi->desc);

	/* Start with an empty queue. */
	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	/*
	 * Set DW=1 and QS=1 in IQA_REG when Scalable Mode capability
	 * is present.
	 */
	if (ecap_smts(iommu->ecap))
		val |= (1 << 11) | 1;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, val);

	/* Turn on queued invalidation and push the command to HW. */
	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
  
  /*
fe962e90c   Suresh Siddha   x64, x2apic/intr-...
1415
1416
1417
1418
1419
1420
   * Enable Queued Invalidation interface. This is a must to support
   * interrupt-remapping. Also used by DMA-remapping, which replaces
   * register based IOTLB invalidation.
   */
  int dmar_enable_qi(struct intel_iommu *iommu)
  {
fe962e90c   Suresh Siddha   x64, x2apic/intr-...
1421
  	struct q_inval *qi;
751cafe3a   Suresh Siddha   dmar: Allocate qu...
1422
  	struct page *desc_page;
fe962e90c   Suresh Siddha   x64, x2apic/intr-...
1423
1424
1425
1426
1427
1428
1429
1430
1431
  
  	if (!ecap_qis(iommu->ecap))
  		return -ENOENT;
  
  	/*
  	 * queued invalidation is already setup and enabled.
  	 */
  	if (iommu->qi)
  		return 0;
fa4b57cc0   Suresh Siddha   x86, dmar: use at...
1432
  	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
fe962e90c   Suresh Siddha   x64, x2apic/intr-...
1433
1434
1435
1436
  	if (!iommu->qi)
  		return -ENOMEM;
  
  	qi = iommu->qi;
5d308fc1e   Lu Baolu   iommu/vt-d: Add 2...
1437
1438
1439
1440
1441
1442
  	/*
  	 * Need two pages to accommodate 256 descriptors of 256 bits each
  	 * if the remapping hardware supports scalable mode translation.
  	 */
  	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
  				     !!ecap_smts(iommu->ecap));
751cafe3a   Suresh Siddha   dmar: Allocate qu...
1443
  	if (!desc_page) {
fe962e90c   Suresh Siddha   x64, x2apic/intr-...
1444
  		kfree(qi);
b707cb027   Jiang Liu   iommu/vt-d, trivi...
1445
  		iommu->qi = NULL;
fe962e90c   Suresh Siddha   x64, x2apic/intr-...
1446
1447
  		return -ENOMEM;
  	}
751cafe3a   Suresh Siddha   dmar: Allocate qu...
1448
  	qi->desc = page_address(desc_page);
6396bb221   Kees Cook   treewide: kzalloc...
1449
  	qi->desc_status = kcalloc(QI_LENGTH, sizeof(int), GFP_ATOMIC);
fe962e90c   Suresh Siddha   x64, x2apic/intr-...
1450
1451
1452
  	if (!qi->desc_status) {
  		free_page((unsigned long) qi->desc);
  		kfree(qi);
b707cb027   Jiang Liu   iommu/vt-d, trivi...
1453
  		iommu->qi = NULL;
fe962e90c   Suresh Siddha   x64, x2apic/intr-...
1454
1455
  		return -ENOMEM;
  	}
3b8f40481   Thomas Gleixner   locking, x86, iom...
1456
  	raw_spin_lock_init(&qi->q_lock);
fe962e90c   Suresh Siddha   x64, x2apic/intr-...
1457

eb4a52bc6   Fenghua Yu   Intel IOMMU Suspe...
1458
  	__dmar_enable_qi(iommu);
fe962e90c   Suresh Siddha   x64, x2apic/intr-...
1459
1460
1461
  
  	return 0;
  }
0ac2491f5   Suresh Siddha   x86, dmar: move p...
1462
1463
  
  /* iommu interrupt handling. Most stuff are MSI-like. */
9d783ba04   Suresh Siddha   x86, x2apic: enab...
1464
1465
1466
1467
1468
1469
1470
/* Category of fault reported by the remapping hardware. */
enum faulttype {
	DMA_REMAP,	/* DMA remapping fault */
	INTR_REMAP,	/* interrupt remapping fault */
	UNKNOWN,	/* reason code outside all known ranges */
};

/*
 * Human-readable strings for legacy-mode DMA remapping fault reasons,
 * indexed directly by the hardware fault reason code.
 */
static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
	"PCE for translation request specifies blocking",
};
9d783ba04   Suresh Siddha   x86, x2apic: enab...
1487

fd730007a   Kyung Min Park   iommu/vt-d: Add S...
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
  static const char * const dma_remap_sm_fault_reasons[] = {
  	"SM: Invalid Root Table Address",
  	"SM: TTM 0 for request with PASID",
  	"SM: TTM 0 for page group request",
  	"Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x33-0x37 */
  	"SM: Error attempting to access Root Entry",
  	"SM: Present bit in Root Entry is clear",
  	"SM: Non-zero reserved field set in Root Entry",
  	"Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x3B-0x3F */
  	"SM: Error attempting to access Context Entry",
  	"SM: Present bit in Context Entry is clear",
  	"SM: Non-zero reserved field set in the Context Entry",
  	"SM: Invalid Context Entry",
  	"SM: DTE field in Context Entry is clear",
  	"SM: PASID Enable field in Context Entry is clear",
  	"SM: PASID is larger than the max in Context Entry",
  	"SM: PRE field in Context-Entry is clear",
  	"SM: RID_PASID field error in Context-Entry",
  	"Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x49-0x4F */
  	"SM: Error attempting to access the PASID Directory Entry",
  	"SM: Present bit in Directory Entry is clear",
  	"SM: Non-zero reserved field set in PASID Directory Entry",
  	"Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x53-0x57 */
  	"SM: Error attempting to access PASID Table Entry",
  	"SM: Present bit in PASID Table Entry is clear",
  	"SM: Non-zero reserved field set in PASID Table Entry",
  	"SM: Invalid Scalable-Mode PASID Table Entry",
  	"SM: ERE field is clear in PASID Table Entry",
  	"SM: SRE field is clear in PASID Table Entry",
  	"Unknown", "Unknown",/* 0x5E-0x5F */
  	"Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x60-0x67 */
  	"Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x68-0x6F */
  	"SM: Error attempting to access first-level paging entry",
  	"SM: Present bit in first-level paging entry is clear",
  	"SM: Non-zero reserved field set in first-level paging entry",
  	"SM: Error attempting to access FL-PML4 entry",
  	"SM: First-level entry address beyond MGAW in Nested translation",
  	"SM: Read permission error in FL-PML4 entry in Nested translation",
  	"SM: Read permission error in first-level paging entry in Nested translation",
  	"SM: Write permission error in first-level paging entry in Nested translation",
  	"SM: Error attempting to access second-level paging entry",
  	"SM: Read/Write permission error in second-level paging entry",
  	"SM: Non-zero reserved field set in second-level paging entry",
  	"SM: Invalid second-level page table pointer",
  	"SM: A/D bit update needed in second-level entry when set up in no snoop",
  	"Unknown", "Unknown", "Unknown", /* 0x7D-0x7F */
  	"SM: Address in first-level translation is not canonical",
  	"SM: U/S set 0 for first-level translation with user privilege",
  	"SM: No execute permission for request with PASID and ER=1",
  	"SM: Address beyond the DMA hardware max",
  	"SM: Second-level entry address beyond the max",
  	"SM: No write permission for Write/AtomicOp request",
  	"SM: No read permission for Read/AtomicOp request",
  	"SM: Invalid address-interrupt address",
  	"Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x88-0x8F */
  	"SM: A/D bit update needed in first-level entry when set up in no snoop",
  };
95a02e976   Suresh Siddha   iommu: rename int...
1545
/*
 * Human-readable strings for interrupt-remapping fault reasons, indexed
 * by (fault reason code - 0x20).
 */
static const char *irq_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};
21004dcd3   Rashika Kheria   iommu/vt-d: Mark ...
1555
  static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
0ac2491f5   Suresh Siddha   x86, dmar: move p...
1556
  {
fefe1ed13   Dan Carpenter   iommu: Fix off by...
1557
1558
  	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
  					ARRAY_SIZE(irq_remap_fault_reasons))) {
9d783ba04   Suresh Siddha   x86, x2apic: enab...
1559
  		*fault_type = INTR_REMAP;
95a02e976   Suresh Siddha   iommu: rename int...
1560
  		return irq_remap_fault_reasons[fault_reason - 0x20];
fd730007a   Kyung Min Park   iommu/vt-d: Add S...
1561
1562
1563
1564
  	} else if (fault_reason >= 0x30 && (fault_reason - 0x30 <
  			ARRAY_SIZE(dma_remap_sm_fault_reasons))) {
  		*fault_type = DMA_REMAP;
  		return dma_remap_sm_fault_reasons[fault_reason - 0x30];
9d783ba04   Suresh Siddha   x86, x2apic: enab...
1565
1566
1567
1568
1569
  	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
  		*fault_type = DMA_REMAP;
  		return dma_remap_fault_reasons[fault_reason];
  	} else {
  		*fault_type = UNKNOWN;
0ac2491f5   Suresh Siddha   x86, dmar: move p...
1570
  		return "Unknown";
9d783ba04   Suresh Siddha   x86, x2apic: enab...
1571
  	}
0ac2491f5   Suresh Siddha   x86, dmar: move p...
1572
  }
1208225cf   David Woodhouse   iommu/vt-d: Gener...
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
  
  static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq)
  {
  	if (iommu->irq == irq)
  		return DMAR_FECTL_REG;
  	else if (iommu->pr_irq == irq)
  		return DMAR_PECTL_REG;
  	else
  		BUG();
  }
5c2837fba   Thomas Gleixner   dmar: Convert to ...
1583
  void dmar_msi_unmask(struct irq_data *data)
0ac2491f5   Suresh Siddha   x86, dmar: move p...
1584
  {
dced35aeb   Thomas Gleixner   drivers: Final ir...
1585
  	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1208225cf   David Woodhouse   iommu/vt-d: Gener...
1586
  	int reg = dmar_msi_reg(iommu, data->irq);
0ac2491f5   Suresh Siddha   x86, dmar: move p...
1587
1588
1589
  	unsigned long flag;
  
  	/* unmask it */
1f5b3c3fd   Thomas Gleixner   locking, x86, iom...
1590
  	raw_spin_lock_irqsave(&iommu->register_lock, flag);
1208225cf   David Woodhouse   iommu/vt-d: Gener...
1591
  	writel(0, iommu->reg + reg);
0ac2491f5   Suresh Siddha   x86, dmar: move p...
1592
  	/* Read a reg to force flush the post write */
1208225cf   David Woodhouse   iommu/vt-d: Gener...
1593
  	readl(iommu->reg + reg);
1f5b3c3fd   Thomas Gleixner   locking, x86, iom...
1594
  	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
0ac2491f5   Suresh Siddha   x86, dmar: move p...
1595
  }
5c2837fba   Thomas Gleixner   dmar: Convert to ...
1596
  void dmar_msi_mask(struct irq_data *data)
0ac2491f5   Suresh Siddha   x86, dmar: move p...
1597
  {
dced35aeb   Thomas Gleixner   drivers: Final ir...
1598
  	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1208225cf   David Woodhouse   iommu/vt-d: Gener...
1599
1600
  	int reg = dmar_msi_reg(iommu, data->irq);
  	unsigned long flag;
0ac2491f5   Suresh Siddha   x86, dmar: move p...
1601
1602
  
  	/* mask it */
1f5b3c3fd   Thomas Gleixner   locking, x86, iom...
1603
  	raw_spin_lock_irqsave(&iommu->register_lock, flag);
1208225cf   David Woodhouse   iommu/vt-d: Gener...
1604
  	writel(DMA_FECTL_IM, iommu->reg + reg);
0ac2491f5   Suresh Siddha   x86, dmar: move p...
1605
  	/* Read a reg to force flush the post write */
1208225cf   David Woodhouse   iommu/vt-d: Gener...
1606
  	readl(iommu->reg + reg);
1f5b3c3fd   Thomas Gleixner   locking, x86, iom...
1607
  	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
0ac2491f5   Suresh Siddha   x86, dmar: move p...
1608
1609
1610
1611
  }
  
/*
 * Program the MSI message for a DMAR event interrupt: the data and
 * address registers sit at fixed offsets (+4, +8, +12) from the
 * control register returned by dmar_msi_reg().
 */
void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	int reg = dmar_msi_reg(iommu, irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + reg + 4);
	writel(msg->address_lo, iommu->reg + reg + 8);
	writel(msg->address_hi, iommu->reg + reg + 12);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
  
/*
 * Read back the MSI message of a DMAR event interrupt; mirror of
 * dmar_msi_write() using the same register offsets.
 */
void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	int reg = dmar_msi_reg(iommu, irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + reg + 4);
	msg->address_lo = readl(iommu->reg + reg + 8);
	msg->address_hi = readl(iommu->reg + reg + 12);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
  
/*
 * Log one decoded fault record.  @source_id encodes bus/devfn; @pasid
 * is -1 when the record carried no PASID (see caller).  Always returns 0.
 */
static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, int pasid, u16 source_id,
		unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		/* For IR faults the high bits of @addr hold the interrupt index. */
		pr_err("[INTR-REMAP] Request device [%02x:%02x.%d] fault index %llx [fault reason %02d] %s\n",
			source_id >> 8, PCI_SLOT(source_id & 0xFF),
			PCI_FUNC(source_id & 0xFF), addr >> 48,
			fault_reason, reason);
	else
		pr_err("[%s] Request device [%02x:%02x.%d] PASID %x fault addr %llx [fault reason %02d] %s\n",
		       type ? "DMA Read" : "DMA Write",
		       source_id >> 8, PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), pasid, addr,
		       fault_reason, reason);
	return 0;
}
  
/* Size in bytes of one primary fault recording register. */
#define PRIMARY_FAULT_REG_LEN (16)

/*
 * Fault event interrupt handler: walk the ring of fault recording
 * registers, log (rate-limited) and clear each pending record, then
 * acknowledge the summary bits in the fault status register.
 */
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;
	static DEFINE_RATELIMIT_STATE(rs,
				      DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status && __ratelimit(&rs))
		pr_err("DRHD: handling fault status reg %x\n", fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto unlock_exit;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		/* Disable printing, simply clear the fault when ratelimited */
		bool ratelimited = !__ratelimit(&rs);
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type, pasid;
		u32 data;
		bool pasid_present;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		/* F bit clear means no further pending records. */
		if (!(data & DMA_FRCD_F))
			break;

		if (!ratelimited) {
			/* Decode the record only if we are going to print it. */
			fault_reason = dma_frcd_fault_reason(data);
			type = dma_frcd_type(data);

			pasid = dma_frcd_pasid_value(data);
			data = readl(iommu->reg + reg +
				     fault_index * PRIMARY_FAULT_REG_LEN + 8);
			source_id = dma_frcd_source_id(data);
			pasid_present = dma_frcd_pasid_present(data);
			guest_addr = dmar_readq(iommu->reg + reg +
					fault_index * PRIMARY_FAULT_REG_LEN);
			guest_addr = dma_frcd_page_addr(guest_addr);
		}

		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);
		/* Drop the lock while logging; the record is already cleared. */
		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

		if (!ratelimited)
			/* Using pasid -1 if pasid is not present */
			dmar_fault_do_one(iommu, type, fault_reason,
					  pasid_present ? pasid : -1,
					  source_id, guest_addr);

		/* Advance through the ring of fault recording registers. */
		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
	}

	/* Acknowledge overflow/pending/overflow-pending summary bits. */
	writel(DMA_FSTS_PFO | DMA_FSTS_PPF | DMA_FSTS_PRO,
	       iommu->reg + DMAR_FSTS_REG);

unlock_exit:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}
  
/*
 * Allocate and wire up the fault-event interrupt for @iommu.
 * Returns 0 on success (or if already set up), -EINVAL when no IRQ
 * vector is available, or the request_irq() error code.
 */
int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu);
	if (irq > 0) {
		iommu->irq = irq;
	} else {
		pr_err("No free IRQ vectors\n");
		return -EINVAL;
	}

	/* IRQF_NO_THREAD: the handler must run in hard-IRQ context. */
	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
	if (ret)
		pr_err("Can't request irq\n");
	return ret;
}
9d783ba04   Suresh Siddha   x86, x2apic: enab...
1757
1758
1759
1760
  
/*
 * Set up fault reporting on every DRHD unit: request the fault
 * interrupt, drain any faults recorded before the handler existed,
 * and acknowledge stale status bits.  Returns 0 on success, -1 if any
 * unit fails to get an interrupt.
 */
int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_iommu(iommu, drhd) {
		u32 fault_status;
		int ret = dmar_set_interrupt(iommu);

		if (ret) {
			pr_err("DRHD %Lx: failed to enable fault, interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(iommu->irq, iommu);
		/* Write back the status register to acknowledge stale bits. */
		fault_status = readl(iommu->reg + DMAR_FSTS_REG);
		writel(fault_status, iommu->reg + DMAR_FSTS_REG);
	}

	return 0;
}
eb4a52bc6   Fenghua Yu   Intel IOMMU Suspe...
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
  
  /*
   * Re-enable Queued Invalidation interface.
   */
  int dmar_reenable_qi(struct intel_iommu *iommu)
  {
  	if (!ecap_qis(iommu->ecap))
  		return -ENOENT;
  
  	if (!iommu->qi)
  		return -ENOENT;
  
  	/*
  	 * First disable queued invalidation.
  	 */
  	dmar_disable_qi(iommu);
  	/*
  	 * Then enable queued invalidation again. Since there is no pending
  	 * invalidation requests now, it's safe to re-enable queued
  	 * invalidation.
  	 */
  	__dmar_enable_qi(iommu);
  
  	return 0;
  }
074835f01   Youquan Song   intel-iommu: Fix ...
1812
1813
1814
1815
  
  /*
   * Check interrupt remapping support in DMAR table description.
   */
0b8973a81   Tony Luck   intel-iommu: Fix ...
1816
  int __init dmar_ir_support(void)
074835f01   Youquan Song   intel-iommu: Fix ...
1817
1818
1819
  {
  	struct acpi_table_dmar *dmar;
  	dmar = (struct acpi_table_dmar *)dmar_tbl;
4f506e07e   Arnaud Patard   intel-iommu: Fix ...
1820
1821
  	if (!dmar)
  		return 0;
074835f01   Youquan Song   intel-iommu: Fix ...
1822
1823
  	return dmar->flags & 0x1;
  }
694835dc2   Jiang Liu   iommu/vt-d: mark ...
1824

6b1972493   Jiang Liu   iommu/vt-d: Imple...
1825
1826
1827
1828
1829
  /* Check whether DMAR units are in use */
  static inline bool dmar_in_use(void)
  {
  	return irq_remapping_enabled || intel_iommu_enabled;
  }
a868e6b7b   Jiang Liu   iommu/vt-d: keep ...
1830
1831
1832
/*
 * Late initcall: if neither interrupt remapping nor the IOMMU ended up
 * in use, unregister the PCI bus notifier and free all parsed DRHD
 * structures.  Always returns 0 (initcall convention).
 */
static int __init dmar_free_unused_resources(void)
{
	struct dmar_drhd_unit *dmaru, *dmaru_n;

	/* Keep everything if any remapping feature is active. */
	if (dmar_in_use())
		return 0;

	if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
		bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);

	/* Writers of the DRHD list must hold the global lock. */
	down_write(&dmar_global_lock);
	list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
		list_del(&dmaru->list);
		dmar_free_drhd(dmaru);
	}
	up_write(&dmar_global_lock);

	return 0;
}
  
  late_initcall(dmar_free_unused_resources);
4db77ff32   Konrad Rzeszutek Wilk   x86, VT-d: Make I...
1849
  IOMMU_INIT_POST(detect_intel_iommu);
6b1972493   Jiang Liu   iommu/vt-d: Imple...
1850
1851
1852
1853
1854
1855
1856
  
/*
 * DMAR Hotplug Support
 * For more details, please refer to Intel(R) Virtualization Technology
 * for Directed-IO Architecture Specification, Rev 2.2, Section 8.8
 * "Remapping Hardware Unit Hot Plug".
 */
/* _DSM UUID under which firmware exposes the DMAR hotplug methods. */
static guid_t dmar_hp_guid =
	GUID_INIT(0xD8C1A3A6, 0xBE9B, 0x4C9B,
		  0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF);

/*
 * Currently there's only one revision and BIOS will not check the revision id,
 * so use 0 for safety.
 */
#define	DMAR_DSM_REV_ID			0
/* _DSM function indices; each returns a buffer of the matching DMAR structures. */
#define	DMAR_DSM_FUNC_DRHD		1
#define	DMAR_DSM_FUNC_ATSR		2
#define	DMAR_DSM_FUNC_RHSA		3
  
  static inline bool dmar_detect_dsm(acpi_handle handle, int func)
  {
94116f812   Andy Shevchenko   ACPI: Switch to u...
1872
  	return acpi_check_dsm(handle, &dmar_hp_guid, DMAR_DSM_REV_ID, 1 << func);
6b1972493   Jiang Liu   iommu/vt-d: Imple...
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
  }
  
  static int dmar_walk_dsm_resource(acpi_handle handle, int func,
  				  dmar_res_handler_t handler, void *arg)
  {
  	int ret = -ENODEV;
  	union acpi_object *obj;
  	struct acpi_dmar_header *start;
  	struct dmar_res_callback callback;
  	static int res_type[] = {
  		[DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
  		[DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS,
  		[DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY,
  	};
  
  	if (!dmar_detect_dsm(handle, func))
  		return 0;
94116f812   Andy Shevchenko   ACPI: Switch to u...
1890
  	obj = acpi_evaluate_dsm_typed(handle, &dmar_hp_guid, DMAR_DSM_REV_ID,
6b1972493   Jiang Liu   iommu/vt-d: Imple...
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
  				      func, NULL, ACPI_TYPE_BUFFER);
  	if (!obj)
  		return -ENODEV;
  
  	memset(&callback, 0, sizeof(callback));
  	callback.cb[res_type[func]] = handler;
  	callback.arg[res_type[func]] = arg;
  	start = (struct acpi_dmar_header *)obj->buffer.pointer;
  	ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback);
  
  	ACPI_FREE(obj);
  
  	return ret;
  }
  
  static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg)
  {
  	int ret;
  	struct dmar_drhd_unit *dmaru;
  
  	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
  	if (!dmaru)
  		return -ENODEV;
  
  	ret = dmar_ir_hotplug(dmaru, true);
  	if (ret == 0)
  		ret = dmar_iommu_hotplug(dmaru, true);
  
  	return ret;
  }
  
  static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
  {
  	int i, ret;
  	struct device *dev;
  	struct dmar_drhd_unit *dmaru;
  
  	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
  	if (!dmaru)
  		return 0;
  
  	/*
  	 * All PCI devices managed by this unit should have been destroyed.
  	 */
194dc870a   Linus Torvalds   Add braces to avo...
1935
  	if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) {
6b1972493   Jiang Liu   iommu/vt-d: Imple...
1936
1937
1938
  		for_each_active_dev_scope(dmaru->devices,
  					  dmaru->devices_cnt, i, dev)
  			return -EBUSY;
194dc870a   Linus Torvalds   Add braces to avo...
1939
  	}
6b1972493   Jiang Liu   iommu/vt-d: Imple...
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
  
  	ret = dmar_ir_hotplug(dmaru, false);
  	if (ret == 0)
  		ret = dmar_iommu_hotplug(dmaru, false);
  
  	return ret;
  }
  
  static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg)
  {
  	struct dmar_drhd_unit *dmaru;
  
  	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
  	if (dmaru) {
  		list_del_rcu(&dmaru->list);
  		synchronize_rcu();
  		dmar_free_drhd(dmaru);
  	}
  
  	return 0;
  }
  
/*
 * Hot-add the remapping hardware described by @handle's DMAR _DSM:
 * validate, parse and enable DRHD units plus their RHSA/ATSR structures.
 * On any failure, previously completed steps are rolled back in reverse
 * order via the goto chain below.
 */
static int dmar_hotplug_insert(acpi_handle handle)
{
	int ret;
	int drhd_count = 0;

	/*
	 * First pass: validate every DRHD structure before any global state
	 * is modified. NOTE(review): the (void *)1 arg flags hotplug-time
	 * validation to dmar_validate_one_drhd — confirm against its definition.
	 */
	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_validate_one_drhd, (void *)1);
	if (ret)
		goto out;

	/* Second pass: register the units, counting how many were parsed. */
	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_parse_one_drhd, (void *)&drhd_count);
	if (ret == 0 && drhd_count == 0) {
		/* Firmware bug: _DSM succeeded but described no hardware. */
		pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n");
		goto out;
	} else if (ret) {
		goto release_drhd;
	}

	/* Hardware affinity (RHSA) structures for the new units. */
	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA,
				     &dmar_parse_one_rhsa, NULL);
	if (ret)
		goto release_drhd;

	/* Root-port ATS (ATSR) structures. */
	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
				     &dmar_parse_one_atsr, NULL);
	if (ret)
		goto release_atsr;

	/* Finally bring interrupt and DMA remapping online on each unit. */
	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_hp_add_drhd, NULL);
	if (!ret)
		return 0;

	/* Enable failed part-way: take the units back offline... */
	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
			       &dmar_hp_remove_drhd, NULL);
release_atsr:
	/* ...release the ATSR structures... */
	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
			       &dmar_release_one_atsr, NULL);
release_drhd:
	/* ...and free the DRHD units that were registered above. */
	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
			       &dmar_hp_release_drhd, NULL);
out:
	return ret;
}
  
/*
 * Hot-remove the remapping hardware described by @handle's DMAR _DSM.
 * If taking any unit offline fails, the units already removed are
 * re-enabled so the system is left in its prior state.
 */
static int dmar_hotplug_remove(acpi_handle handle)
{
	int ret;

	/* Verify the ATSR structures are releasable before touching units. */
	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
				     &dmar_check_one_atsr, NULL);
	if (ret)
		return ret;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_hp_remove_drhd, NULL);
	if (ret == 0) {
		/*
		 * All units are offline; releasing their resources is not
		 * expected to fail, hence the WARN_ONs.
		 */
		WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
					       &dmar_release_one_atsr, NULL));
		WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
					       &dmar_hp_release_drhd, NULL));
	} else {
		/* Partial failure: bring already-removed units back online. */
		dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				       &dmar_hp_add_drhd, NULL);
	}

	return ret;
}
d35165a95   Jiang Liu   iommu/vt-d: Searc...
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
  static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl,
  				       void *context, void **retval)
  {
  	acpi_handle *phdl = retval;
  
  	if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
  		*phdl = handle;
  		return AE_CTRL_TERMINATE;
  	}
  
  	return AE_OK;
  }
6b1972493   Jiang Liu   iommu/vt-d: Imple...
2044
2045
2046
  static int dmar_device_hotplug(acpi_handle handle, bool insert)
  {
  	int ret;
d35165a95   Jiang Liu   iommu/vt-d: Searc...
2047
2048
  	acpi_handle tmp = NULL;
  	acpi_status status;
6b1972493   Jiang Liu   iommu/vt-d: Imple...
2049
2050
2051
  
  	if (!dmar_in_use())
  		return 0;
d35165a95   Jiang Liu   iommu/vt-d: Searc...
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065
  	if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
  		tmp = handle;
  	} else {
  		status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
  					     ACPI_UINT32_MAX,
  					     dmar_get_dsm_handle,
  					     NULL, NULL, &tmp);
  		if (ACPI_FAILURE(status)) {
  			pr_warn("Failed to locate _DSM method.
  ");
  			return -ENXIO;
  		}
  	}
  	if (tmp == NULL)
6b1972493   Jiang Liu   iommu/vt-d: Imple...
2066
2067
2068
2069
  		return 0;
  
  	down_write(&dmar_global_lock);
  	if (insert)
d35165a95   Jiang Liu   iommu/vt-d: Searc...
2070
  		ret = dmar_hotplug_insert(tmp);
6b1972493   Jiang Liu   iommu/vt-d: Imple...
2071
  	else
d35165a95   Jiang Liu   iommu/vt-d: Searc...
2072
  		ret = dmar_hotplug_remove(tmp);
6b1972493   Jiang Liu   iommu/vt-d: Imple...
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086
  	up_write(&dmar_global_lock);
  
  	return ret;
  }
  
/* Hot-add the DMAR unit(s) reachable from @handle; see dmar_device_hotplug(). */
int dmar_device_add(acpi_handle handle)
{
	return dmar_device_hotplug(handle, true);
}
  
/* Hot-remove the DMAR unit(s) reachable from @handle; see dmar_device_hotplug(). */
int dmar_device_remove(acpi_handle handle)
{
	return dmar_device_hotplug(handle, false);
}
89a6079df   Lu Baolu   iommu/vt-d: Force...
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
  
  /*
   * dmar_platform_optin - Is %DMA_CTRL_PLATFORM_OPT_IN_FLAG set in DMAR table
   *
   * Returns true if the platform has %DMA_CTRL_PLATFORM_OPT_IN_FLAG set in
   * the ACPI DMAR table. This means that the platform boot firmware has made
   * sure no device can issue DMA outside of RMRR regions.
   */
  bool dmar_platform_optin(void)
  {
  	struct acpi_table_dmar *dmar;
  	acpi_status status;
  	bool ret;
  
  	status = acpi_get_table(ACPI_SIG_DMAR, 0,
  				(struct acpi_table_header **)&dmar);
  	if (ACPI_FAILURE(status))
  		return false;
  
  	ret = !!(dmar->flags & DMAR_PLATFORM_OPT_IN);
  	acpi_put_table((struct acpi_table_header *)dmar);
  
  	return ret;
  }
  EXPORT_SYMBOL_GPL(dmar_platform_optin);