drivers/iommu/amd_iommu.c

/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/ratelimit.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/iommu-helper.h>
#include <linux/iommu.h>
#include <linux/delay.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/export.h>
#include <asm/msidef.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/dma.h>

#include "amd_iommu_proto.h"
#include "amd_iommu_types.h"

#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))

#define LOOP_TIMEOUT	100000

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is an order of a 4KiB page and that the
 * mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are an order of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */
#define AMD_IOMMU_PGSIZES	(~0xFFFUL)

static DEFINE_RWLOCK(amd_iommu_devtable_lock);

/* A list of preallocated protection domains */
static LIST_HEAD(iommu_pd_list);
static DEFINE_SPINLOCK(iommu_pd_list_lock);

/* List of all available dev_data structures */
static LIST_HEAD(dev_data_list);
static DEFINE_SPINLOCK(dev_data_list_lock);

/*
 * Domain for untranslated devices - only allocated
 * if iommu=pt passed on kernel cmd line.
 */
static struct protection_domain *pt_domain;

static struct iommu_ops amd_iommu_ops;

static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
int amd_iommu_max_glx_val = -1;

/*
 * general struct to manage commands sent to an IOMMU
 */
struct iommu_cmd {
	u32 data[4];
};

static void update_domain(struct protection_domain *domain);
static int __init alloc_passthrough_domain(void);

/****************************************************************************
 *
 * Helper functions
 *
 ****************************************************************************/
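
/* Allocate a dev_data entry for @devid and add it to the global list */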
static struct iommu_dev_data *alloc_dev_data(u16 devid)
{
	struct iommu_dev_data *dev_data;
	unsigned long flags;

	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
	if (!dev_data)
		return NULL;

	dev_data->devid = devid;
	atomic_set(&dev_data->bind, 0);

	spin_lock_irqsave(&dev_data_list_lock, flags);
	list_add_tail(&dev_data->dev_data_list, &dev_data_list);
	spin_unlock_irqrestore(&dev_data_list_lock, flags);

	return dev_data;
}

static void free_dev_data(struct iommu_dev_data *dev_data)
{
	unsigned long flags;

	spin_lock_irqsave(&dev_data_list_lock, flags);
	list_del(&dev_data->dev_data_list);
	spin_unlock_irqrestore(&dev_data_list_lock, flags);

	kfree(dev_data);
}
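
/* Look up the dev_data entry for @devid; returns NULL if none exists yet */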
static struct iommu_dev_data *search_dev_data(u16 devid)
{
	struct iommu_dev_data *dev_data;
	unsigned long flags;

	spin_lock_irqsave(&dev_data_list_lock, flags);
	list_for_each_entry(dev_data, &dev_data_list, dev_data_list) {
		if (dev_data->devid == devid)
			goto out_unlock;
	}

	dev_data = NULL;

out_unlock:
	spin_unlock_irqrestore(&dev_data_list_lock, flags);

	return dev_data;
}

static struct iommu_dev_data *find_dev_data(u16 devid)
{
	struct iommu_dev_data *dev_data;

	dev_data = search_dev_data(devid);

	if (dev_data == NULL)
		dev_data = alloc_dev_data(devid);

	return dev_data;
}
static inline u16 get_device_id(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return calc_devid(pdev->bus->number, pdev->devfn);
}

static struct iommu_dev_data *get_dev_data(struct device *dev)
{
	return dev->archdata.iommu;
}
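
/* IOMMUv2 requires the ATS, PRI and PASID PCIe extended capabilities */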
static bool pci_iommuv2_capable(struct pci_dev *pdev)
{
	static const int caps[] = {
		PCI_EXT_CAP_ID_ATS,
		PCI_EXT_CAP_ID_PRI,
		PCI_EXT_CAP_ID_PASID,
	};
	int i, pos;

	for (i = 0; i < 3; ++i) {
		pos = pci_find_ext_capability(pdev, caps[i]);
		if (pos == 0)
			return false;
	}

	return true;
}
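
/* Check whether the erratum bit @erratum is set in the device's errata mask */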
static bool pdev_pri_erratum(struct pci_dev *pdev, u32 erratum)
{
	struct iommu_dev_data *dev_data;

	dev_data = get_dev_data(&pdev->dev);

	return dev_data->errata & (1 << erratum) ? true : false;
}

/*
 * This function traverses the list of preallocated protection domains to
 * find the domain for a specific device.
 */
static struct dma_ops_domain *find_protection_domain(u16 devid)
{
	struct dma_ops_domain *entry, *ret = NULL;
	unsigned long flags;
	u16 alias = amd_iommu_alias_table[devid];

	if (list_empty(&iommu_pd_list))
		return NULL;

	spin_lock_irqsave(&iommu_pd_list_lock, flags);

	list_for_each_entry(entry, &iommu_pd_list, list) {
		if (entry->target_dev == devid ||
		    entry->target_dev == alias) {
			ret = entry;
			break;
		}
	}

	spin_unlock_irqrestore(&iommu_pd_list_lock, flags);

	return ret;
}

/*
 * This function checks if the driver got a valid device from the caller to
 * avoid dereferencing invalid pointers.
 */
static bool check_device(struct device *dev)
{
	u16 devid;

	if (!dev || !dev->dma_mask)
		return false;

	/* No device or no PCI device */
	if (dev->bus != &pci_bus_type)
		return false;

	devid = get_device_id(dev);

	/* Out of our scope? */
	if (devid > amd_iommu_last_bdf)
		return false;

	if (amd_iommu_rlookup_table[devid] == NULL)
		return false;

	return true;
}

static int iommu_init_device(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct iommu_dev_data *dev_data;
	u16 alias;

	if (dev->archdata.iommu)
		return 0;

	dev_data = find_dev_data(get_device_id(dev));
	if (!dev_data)
		return -ENOMEM;

	alias = amd_iommu_alias_table[dev_data->devid];
	if (alias != dev_data->devid) {
		struct iommu_dev_data *alias_data;

		alias_data = find_dev_data(alias);
		if (alias_data == NULL) {
			pr_err("AMD-Vi: Warning: Unhandled device %s\n",
					dev_name(dev));
			free_dev_data(dev_data);
			return -ENOTSUPP;
		}
		dev_data->alias_data = alias_data;
	}

	if (pci_iommuv2_capable(pdev)) {
		struct amd_iommu *iommu;

		iommu              = amd_iommu_rlookup_table[dev_data->devid];
		dev_data->iommu_v2 = iommu->is_iommu_v2;
	}

	dev->archdata.iommu = dev_data;

	return 0;
}
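
/* Clear the device table and rlookup entries for the device and its alias */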
static void iommu_ignore_device(struct device *dev)
{
	u16 devid, alias;

	devid = get_device_id(dev);
	alias = amd_iommu_alias_table[devid];

	memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
	memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));

	amd_iommu_rlookup_table[devid] = NULL;
	amd_iommu_rlookup_table[alias] = NULL;
}

static void iommu_uninit_device(struct device *dev)
{
	/*
	 * Nothing to do here - we keep dev_data around for unplugged devices
	 * and reuse it when the device is re-plugged - not doing so would
	 * introduce a ton of races.
	 */
}

void __init amd_iommu_uninit_devices(void)
{
	struct iommu_dev_data *dev_data, *n;
	struct pci_dev *pdev = NULL;

	for_each_pci_dev(pdev) {

		if (!check_device(&pdev->dev))
			continue;

		iommu_uninit_device(&pdev->dev);
	}

	/* Free all of our dev_data structures */
	list_for_each_entry_safe(dev_data, n, &dev_data_list, dev_data_list)
		free_dev_data(dev_data);
}

int __init amd_iommu_init_devices(void)
{
	struct pci_dev *pdev = NULL;
	int ret = 0;

	for_each_pci_dev(pdev) {

		if (!check_device(&pdev->dev))
			continue;

		ret = iommu_init_device(&pdev->dev);
		if (ret == -ENOTSUPP)
			iommu_ignore_device(&pdev->dev);
		else if (ret)
			goto out_free;
	}

	return 0;

out_free:

	amd_iommu_uninit_devices();

	return ret;
}

#ifdef CONFIG_AMD_IOMMU_STATS

/*
 * Initialization code for statistics collection
 */

DECLARE_STATS_COUNTER(compl_wait);
DECLARE_STATS_COUNTER(cnt_map_single);
DECLARE_STATS_COUNTER(cnt_unmap_single);
DECLARE_STATS_COUNTER(cnt_map_sg);
DECLARE_STATS_COUNTER(cnt_unmap_sg);
DECLARE_STATS_COUNTER(cnt_alloc_coherent);
DECLARE_STATS_COUNTER(cnt_free_coherent);
DECLARE_STATS_COUNTER(cross_page);
DECLARE_STATS_COUNTER(domain_flush_single);
DECLARE_STATS_COUNTER(domain_flush_all);
DECLARE_STATS_COUNTER(alloced_io_mem);
DECLARE_STATS_COUNTER(total_map_requests);
DECLARE_STATS_COUNTER(complete_ppr);
DECLARE_STATS_COUNTER(invalidate_iotlb);
DECLARE_STATS_COUNTER(invalidate_iotlb_all);
DECLARE_STATS_COUNTER(pri_requests);
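
/* Counters are exported as read-only u64 files under <debugfs>/amd-iommu/ */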
static struct dentry *stats_dir;
static struct dentry *de_fflush;

static void amd_iommu_stats_add(struct __iommu_counter *cnt)
{
	if (stats_dir == NULL)
		return;

	cnt->dent = debugfs_create_u64(cnt->name, 0444, stats_dir,
				       &cnt->value);
}

static void amd_iommu_stats_init(void)
{
	stats_dir = debugfs_create_dir("amd-iommu", NULL);
	if (stats_dir == NULL)
		return;

	de_fflush  = debugfs_create_bool("fullflush", 0444, stats_dir,
					 (u32 *)&amd_iommu_unmap_flush);

	amd_iommu_stats_add(&compl_wait);
	amd_iommu_stats_add(&cnt_map_single);
	amd_iommu_stats_add(&cnt_unmap_single);
	amd_iommu_stats_add(&cnt_map_sg);
	amd_iommu_stats_add(&cnt_unmap_sg);
	amd_iommu_stats_add(&cnt_alloc_coherent);
	amd_iommu_stats_add(&cnt_free_coherent);
	amd_iommu_stats_add(&cross_page);
	amd_iommu_stats_add(&domain_flush_single);
	amd_iommu_stats_add(&domain_flush_all);
	amd_iommu_stats_add(&alloced_io_mem);
	amd_iommu_stats_add(&total_map_requests);
	amd_iommu_stats_add(&complete_ppr);
	amd_iommu_stats_add(&invalidate_iotlb);
	amd_iommu_stats_add(&invalidate_iotlb_all);
	amd_iommu_stats_add(&pri_requests);
}

#endif

/****************************************************************************
 *
 * Interrupt handling functions
 *
 ****************************************************************************/

static void dump_dte_entry(u16 devid)
{
	int i;

	for (i = 0; i < 4; ++i)
		pr_err("AMD-Vi: DTE[%d]: %016llx\n", i,
			amd_iommu_dev_table[devid].data[i]);
}

static void dump_command(unsigned long phys_addr)
{
	struct iommu_cmd *cmd = phys_to_virt(phys_addr);
	int i;

	for (i = 0; i < 4; ++i)
		pr_err("AMD-Vi: CMD[%d]: %08x\n", i, cmd->data[i]);
}

static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
{
	u32 *event = __evt;
	int type  = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
	int devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
	int domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
	int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
	u64 address = (u64)(((u64)event[3]) << 32) | event[2];

	printk(KERN_ERR "AMD-Vi: Event logged [");

	switch (type) {
	case EVENT_TYPE_ILL_DEV:
		printk("ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
		       "address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address, flags);
		dump_dte_entry(devid);
		break;
	case EVENT_TYPE_IO_FAULT:
		printk("IO_PAGE_FAULT device=%02x:%02x.%x "
		       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       domid, address, flags);
		break;
	case EVENT_TYPE_DEV_TAB_ERR:
		printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
		       "address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address, flags);
		break;
	case EVENT_TYPE_PAGE_TAB_ERR:
		printk("PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
		       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       domid, address, flags);
		break;
	case EVENT_TYPE_ILL_CMD:
		printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
		dump_command(address);
		break;
	case EVENT_TYPE_CMD_HARD_ERR:
		printk("COMMAND_HARDWARE_ERROR address=0x%016llx "
		       "flags=0x%04x]\n", address, flags);
		break;
	case EVENT_TYPE_IOTLB_INV_TO:
		printk("IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
		       "address=0x%016llx]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address);
		break;
	case EVENT_TYPE_INV_DEV_REQ:
		printk("INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
		       "address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address, flags);
		break;
	default:
		printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
	}
}
static void iommu_poll_events(struct amd_iommu *iommu)
{
	u32 head, tail;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);

	head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	while (head != tail) {
		iommu_print_event(iommu, iommu->evt_buf + head);
		head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size;
	}

	writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
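
/* Read one PPR log entry at @head and forward the fault to the ppr_notifier chain */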
static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head)
{
	struct amd_iommu_fault fault;
	volatile u64 *raw;
	int i;

	INC_STATS_COUNTER(pri_requests);

	raw = (u64 *)(iommu->ppr_log + head);

	/*
	 * Hardware bug: Interrupt may arrive before the entry is written to
	 * memory. If this happens we need to wait for the entry to arrive.
	 */
	for (i = 0; i < LOOP_TIMEOUT; ++i) {
		if (PPR_REQ_TYPE(raw[0]) != 0)
			break;
		udelay(1);
	}

	if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
		pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n");
		return;
	}

	fault.address   = raw[1];
	fault.pasid     = PPR_PASID(raw[0]);
	fault.device_id = PPR_DEVID(raw[0]);
	fault.tag       = PPR_TAG(raw[0]);
	fault.flags     = PPR_FLAGS(raw[0]);

	/*
	 * To detect the hardware bug we need to clear the entry
	 * back to zero.
	 */
	raw[0] = raw[1] = 0;

	atomic_notifier_call_chain(&ppr_notifier, 0, &fault);
}

static void iommu_poll_ppr_log(struct amd_iommu *iommu)
{
	unsigned long flags;
	u32 head, tail;

	if (iommu->ppr_log == NULL)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
	tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

	while (head != tail) {

		/* Handle PPR entry */
		iommu_handle_ppr_entry(iommu, head);

		/* Update and refresh ring-buffer state */
		head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
		writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
		tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
	}

	/* enable ppr interrupts again */
	writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

irqreturn_t amd_iommu_int_thread(int irq, void *data)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		iommu_poll_events(iommu);
		iommu_poll_ppr_log(iommu);
	}

	return IRQ_HANDLED;
}
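
/* Hard interrupt handler - all real work is deferred to the irq thread above */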
irqreturn_t amd_iommu_int_handler(int irq, void *data)
{
	return IRQ_WAKE_THREAD;
}

/****************************************************************************
 *
 * IOMMU command queuing functions
 *
 ****************************************************************************/

static int wait_on_sem(volatile u64 *sem)
{
	int i = 0;

	while (*sem == 0 && i < LOOP_TIMEOUT) {
		udelay(1);
		i += 1;
	}

	if (i == LOOP_TIMEOUT) {
		pr_alert("AMD-Vi: Completion-Wait loop timed out\n");
		return -EIO;
	}

	return 0;
}
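
/* Write one command into the ring at @tail and move the hardware tail pointer past it */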
static void copy_cmd_to_buffer(struct amd_iommu *iommu,
			       struct iommu_cmd *cmd,
			       u32 tail)
{
	u8 *target;

	target = iommu->cmd_buf + tail;
	tail   = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;

	/* Copy command to buffer */
	memcpy(target, cmd, sizeof(*cmd));

	/* Tell the IOMMU about it */
	writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
}

static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
{
	WARN_ON(address & 0x7ULL);

	memset(cmd, 0, sizeof(*cmd));
	cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
	cmd->data[1] = upper_32_bits(__pa(address));
	cmd->data[2] = 1;
	CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
}

static void build_inv_dte(struct iommu_cmd *cmd, u16 devid)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->data[0] = devid;
	CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY);
}

static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
				  size_t size, u16 domid, int pde)
{
	u64 pages;
	int s;

	pages = iommu_num_pages(address, size, PAGE_SIZE);
	s     = 0;

	if (pages > 1) {
		/*
		 * If we have to flush more than one page, flush all
		 * TLB entries for this domain
		 */
		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
		s = 1;
	}

	address &= PAGE_MASK;

	memset(cmd, 0, sizeof(*cmd));
	cmd->data[1] |= domid;
	cmd->data[2]  = lower_32_bits(address);
	cmd->data[3]  = upper_32_bits(address);
	CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
	if (s) /* size bit - we flush more than one 4kb page */
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
	if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
}

static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
				  u64 address, size_t size)
{
	u64 pages;
	int s;

	pages = iommu_num_pages(address, size, PAGE_SIZE);
	s     = 0;

	if (pages > 1) {
		/*
		 * If we have to flush more than one page, flush all
		 * TLB entries for this domain
		 */
		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
		s = 1;
	}

	address &= PAGE_MASK;

	memset(cmd, 0, sizeof(*cmd));
	cmd->data[0]  = devid;
	cmd->data[0] |= (qdep & 0xff) << 24;
	cmd->data[1]  = devid;
	cmd->data[2]  = lower_32_bits(address);
	cmd->data[3]  = upper_32_bits(address);
	CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
	if (s)
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
}

static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, int pasid,
				  u64 address, bool size)
{
	memset(cmd, 0, sizeof(*cmd));

	address &= ~(0xfffULL);

	cmd->data[0]  = pasid & PASID_MASK;
	cmd->data[1]  = domid;
	cmd->data[2]  = lower_32_bits(address);
	cmd->data[3]  = upper_32_bits(address);
	cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
	cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
	if (size)
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
	CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
}

static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, int pasid,
				  int qdep, u64 address, bool size)
{
	memset(cmd, 0, sizeof(*cmd));

	address &= ~(0xfffULL);

	cmd->data[0]  = devid;
	cmd->data[0] |= (pasid & 0xff) << 16;
	cmd->data[0] |= (qdep  & 0xff) << 24;
	cmd->data[1]  = devid;
	cmd->data[1] |= ((pasid >> 8) & 0xfff) << 16;
	cmd->data[2]  = lower_32_bits(address);
	cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
	cmd->data[3]  = upper_32_bits(address);
	if (size)
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
	CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
}

static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, int pasid,
			       int status, int tag, bool gn)
{
	memset(cmd, 0, sizeof(*cmd));

	cmd->data[0]  = devid;
	if (gn) {
		cmd->data[1]  = pasid & PASID_MASK;
		cmd->data[2]  = CMD_INV_IOMMU_PAGES_GN_MASK;
	}
	cmd->data[3]  = tag & 0x1ff;
	cmd->data[3] |= (status & PPR_STATUS_MASK) << PPR_STATUS_SHIFT;

	CMD_SET_TYPE(cmd, CMD_COMPLETE_PPR);
}

static void build_inv_all(struct iommu_cmd *cmd)
{
	memset(cmd, 0, sizeof(*cmd));
	CMD_SET_TYPE(cmd, CMD_INV_ALL);
}

/*
 * Writes the command to the IOMMU's command buffer and informs the
 * hardware about the new command.
 */
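/*
 * If the command buffer is nearly full, a COMPLETION_WAIT is queued first
 * and we spin until the hardware has drained the buffer before retrying.
 */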
static int iommu_queue_command_sync(struct amd_iommu *iommu,
				    struct iommu_cmd *cmd,
				    bool sync)
{
	u32 left, tail, head, next_tail;
	unsigned long flags;

	WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED);

again:
	spin_lock_irqsave(&iommu->lock, flags);

	head      = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	tail      = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
	next_tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
	left      = (head - next_tail) % iommu->cmd_buf_size;

	if (left <= 2) {
		struct iommu_cmd sync_cmd;
		volatile u64 sem = 0;
		int ret;

		build_completion_wait(&sync_cmd, (u64)&sem);
		copy_cmd_to_buffer(iommu, &sync_cmd, tail);

		spin_unlock_irqrestore(&iommu->lock, flags);

		if ((ret = wait_on_sem(&sem)) != 0)
			return ret;

		goto again;
	}

	copy_cmd_to_buffer(iommu, cmd, tail);

	/* We need to sync now to make sure all commands are processed */
	iommu->need_sync = sync;

	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
{
	return iommu_queue_command_sync(iommu, cmd, true);
}

/*
 * This function queues a completion wait command into the command
 * buffer of an IOMMU
 */
static int iommu_completion_wait(struct amd_iommu *iommu)
{
	struct iommu_cmd cmd;
	volatile u64 sem = 0;
	int ret;

	if (!iommu->need_sync)
		return 0;

	build_completion_wait(&cmd, (u64)&sem);

	ret = iommu_queue_command_sync(iommu, &cmd, false);
	if (ret)
		return ret;

	return wait_on_sem(&sem);
}

static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
{
	struct iommu_cmd cmd;

	build_inv_dte(&cmd, devid);

	return iommu_queue_command(iommu, &cmd);
}

static void iommu_flush_dte_all(struct amd_iommu *iommu)
{
	u32 devid;

	for (devid = 0; devid <= 0xffff; ++devid)
		iommu_flush_dte(iommu, devid);

	iommu_completion_wait(iommu);
}

/*
 * This function uses heavy locking and may disable irqs for some time. But
 * this is no issue because it is only called during resume.
 */
static void iommu_flush_tlb_all(struct amd_iommu *iommu)
{
	u32 dom_id;

	for (dom_id = 0; dom_id <= 0xffff; ++dom_id) {
		struct iommu_cmd cmd;
		build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
				      dom_id, 1);
		iommu_queue_command(iommu, &cmd);
	}

	iommu_completion_wait(iommu);
}

static void iommu_flush_all(struct amd_iommu *iommu)
{
	struct iommu_cmd cmd;

	build_inv_all(&cmd);

	iommu_queue_command(iommu, &cmd);
	iommu_completion_wait(iommu);
}
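
/*
 * Flush everything the IOMMU caches: use INVALIDATE_ALL when the hardware
 * supports it, otherwise flush DTEs and TLBs separately.
 */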
void iommu_flush_all_caches(struct amd_iommu *iommu)
{
	if (iommu_feature(iommu, FEATURE_IA)) {
		iommu_flush_all(iommu);
	} else {
		iommu_flush_dte_all(iommu);
		iommu_flush_tlb_all(iommu);
	}
}

/*
 * Command send function for flushing on-device TLB
 */
static int device_flush_iotlb(struct iommu_dev_data *dev_data,
			      u64 address, size_t size)
{
	struct amd_iommu *iommu;
	struct iommu_cmd cmd;
	int qdep;

	qdep     = dev_data->ats.qdep;
	iommu    = amd_iommu_rlookup_table[dev_data->devid];

	build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);

	return iommu_queue_command(iommu, &cmd);
}

/*
 * Command send function for invalidating a device table entry
 */
static int device_flush_dte(struct iommu_dev_data *dev_data)
{
	struct amd_iommu *iommu;
	int ret;

	iommu = amd_iommu_rlookup_table[dev_data->devid];

	ret = iommu_flush_dte(iommu, dev_data->devid);
	if (ret)
		return ret;

	if (dev_data->ats.enabled)
		ret = device_flush_iotlb(dev_data, 0, ~0UL);

	return ret;
}

/*
 * TLB invalidation function which is called from the mapping functions.
 * It invalidates a single PTE if the range to flush is within a single
 * page. Otherwise it flushes the whole TLB of the IOMMU.
 */
static void __domain_flush_pages(struct protection_domain *domain,
				 u64 address, size_t size, int pde)
{
	struct iommu_dev_data *dev_data;
	struct iommu_cmd cmd;
	int ret = 0, i;

	build_inv_iommu_pages(&cmd, address, size, domain->id, pde);

	for (i = 0; i < amd_iommus_present; ++i) {
		if (!domain->dev_iommu[i])
			continue;

		/*
		 * Devices of this domain are behind this IOMMU
		 * We need a TLB flush
		 */
		ret |= iommu_queue_command(amd_iommus[i], &cmd);
	}

	list_for_each_entry(dev_data, &domain->dev_list, list) {

		if (!dev_data->ats.enabled)
			continue;

		ret |= device_flush_iotlb(dev_data, address, size);
	}

	WARN_ON(ret);
}

static void domain_flush_pages(struct protection_domain *domain,
			       u64 address, size_t size)
{
	__domain_flush_pages(domain, address, size, 0);
}

/* Flush the whole IO/TLB for a given protection domain */
static void domain_flush_tlb(struct protection_domain *domain)
{
	__domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0);
}

/* Flush the whole IO/TLB for a given protection domain - including PDE */
static void domain_flush_tlb_pde(struct protection_domain *domain)
{
	__domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
}
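
/* Wait until each IOMMU with devices in this domain has drained its command queue */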
static void domain_flush_complete(struct protection_domain *domain)
{
	int i;

	for (i = 0; i < amd_iommus_present; ++i) {
		if (!domain->dev_iommu[i])
			continue;

		/*
		 * Devices of this domain are behind this IOMMU
		 * We need to wait for completion of all commands.
		 */
		iommu_completion_wait(amd_iommus[i]);
	}
}

/*
 * This function flushes the DTEs for all devices in domain
 */
static void domain_flush_devices(struct protection_domain *domain)
{
	struct iommu_dev_data *dev_data;

	list_for_each_entry(dev_data, &domain->dev_list, list)
		device_flush_dte(dev_data);
}

/****************************************************************************
 *
 * The functions below are used to create the page table mappings for
 * unity mapped regions.
 *
 ****************************************************************************/

/*
 * This function is used to add another level to an IO page table. Adding
 * another level increases the size of the address space by 9 bits to a size up
 * to 64 bits.
 */
static bool increase_address_space(struct protection_domain *domain,
				   gfp_t gfp)
{
	u64 *pte;

	if (domain->mode == PAGE_MODE_6_LEVEL)
		/* address space already 64 bit large */
		return false;

	pte = (void *)get_zeroed_page(gfp);
	if (!pte)
		return false;

	*pte             = PM_LEVEL_PDE(domain->mode,
					virt_to_phys(domain->pt_root));
	domain->pt_root  = pte;
	domain->mode    += 1;
	domain->updated  = true;

	return true;
}
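
/*
 * Walk the page table for @address down to the level that maps @page_size
 * pages, allocating missing intermediate levels along the way. Returns a
 * pointer to the PTE, or NULL on allocation failure or level mismatch.
 */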
static u64 *alloc_pte(struct protection_domain *domain,
		      unsigned long address,
		      unsigned long page_size,
		      u64 **pte_page,
		      gfp_t gfp)
{
	int level, end_lvl;
	u64 *pte, *page;

	BUG_ON(!is_power_of_2(page_size));

	while (address > PM_LEVEL_SIZE(domain->mode))
		increase_address_space(domain, gfp);

	level   = domain->mode - 1;
	pte     = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
	address = PAGE_SIZE_ALIGN(address, page_size);
	end_lvl = PAGE_SIZE_LEVEL(page_size);

	while (level > end_lvl) {
		if (!IOMMU_PTE_PRESENT(*pte)) {
			page = (u64 *)get_zeroed_page(gfp);
			if (!page)
				return NULL;
			*pte = PM_LEVEL_PDE(level, virt_to_phys(page));
		}

		/* No level skipping support yet */
		if (PM_PTE_LEVEL(*pte) != level)
			return NULL;

		level -= 1;

		pte = IOMMU_PTE_PAGE(*pte);

		if (pte_page && level == end_lvl)
			*pte_page = pte;

		pte = &pte[PM_LEVEL_INDEX(level, address)];
	}

	return pte;
}

/*
 * This function checks if there is a PTE for a given dma address. If
 * there is one, it returns the pointer to it.
 */
static u64 *fetch_pte(struct protection_domain *domain, unsigned long address)
{
	int level;
	u64 *pte;

	if (address > PM_LEVEL_SIZE(domain->mode))
		return NULL;

	level   =  domain->mode - 1;
	pte     = &domain->pt_root[PM_LEVEL_INDEX(level, address)];

	while (level > 0) {

		/* Not Present */
		if (!IOMMU_PTE_PRESENT(*pte))
			return NULL;

		/* Large PTE */
		if (PM_PTE_LEVEL(*pte) == 0x07) {
			unsigned long pte_mask, __pte;

			/*
			 * If we have a series of large PTEs, make
			 * sure to return a pointer to the first one.
			 */
			pte_mask = PTE_PAGE_SIZE(*pte);
			pte_mask = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1);
			__pte    = ((unsigned long)pte) & pte_mask;

			return (u64 *)__pte;
		}

		/* No level skipping support yet */
		if (PM_PTE_LEVEL(*pte) != level)
			return NULL;

		level -= 1;

		/* Walk to the next level */
		pte = IOMMU_PTE_PAGE(*pte);
		pte = &pte[PM_LEVEL_INDEX(level, address)];
	}

	return pte;
}

/*
 * Generic mapping function. It maps a physical address into a DMA
 * address space. It allocates the page table pages if necessary.
 * In the future it can be extended to a generic mapping function
 * supporting all features of AMD IOMMU page tables like level skipping
 * and full 64 bit address spaces.
 */
static int iommu_map_page(struct protection_domain *dom,
			  unsigned long bus_addr,
			  unsigned long phys_addr,
			  int prot,
			  unsigned long page_size)
{
	u64 __pte, *pte;
	int i, count;

	if (!(prot & IOMMU_PROT_MASK))
		return -EINVAL;

	bus_addr  = PAGE_ALIGN(bus_addr);
	phys_addr = PAGE_ALIGN(phys_addr);
	count     = PAGE_SIZE_PTE_COUNT(page_size);
	pte       = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL);

	for (i = 0; i < count; ++i)
		if (IOMMU_PTE_PRESENT(pte[i]))
			return -EBUSY;

	if (page_size > PAGE_SIZE) {
		__pte = PAGE_SIZE_PTE(phys_addr, page_size);
		__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_P | IOMMU_PTE_FC;
	} else
		__pte = phys_addr | IOMMU_PTE_P | IOMMU_PTE_FC;

	if (prot & IOMMU_PROT_IR)
		__pte |= IOMMU_PTE_IR;
	if (prot & IOMMU_PROT_IW)
		__pte |= IOMMU_PTE_IW;

	for (i = 0; i < count; ++i)
		pte[i] = __pte;

	update_domain(dom);

	return 0;
}

static unsigned long iommu_unmap_page(struct protection_domain *dom,
				      unsigned long bus_addr,
				      unsigned long page_size)
{
	unsigned long long unmap_size, unmapped;
	u64 *pte;

	BUG_ON(!is_power_of_2(page_size));

	unmapped = 0;

	while (unmapped < page_size) {

		pte = fetch_pte(dom, bus_addr);

		if (!pte) {
			/*
			 * No PTE for this address
			 * move forward in 4kb steps
			 */
			unmap_size = PAGE_SIZE;
		} else if (PM_PTE_LEVEL(*pte) == 0) {
			/* 4kb PTE found for this address */
			unmap_size = PAGE_SIZE;
			*pte       = 0ULL;
		} else {
			int count, i;

			/* Large PTE found which maps this address */
			unmap_size = PTE_PAGE_SIZE(*pte);
			count      = PAGE_SIZE_PTE_COUNT(unmap_size);
			for (i = 0; i < count; i++)
				pte[i] = 0ULL;
		}

		bus_addr  = (bus_addr & ~(unmap_size - 1)) + unmap_size;
		unmapped += unmap_size;
	}

	BUG_ON(!is_power_of_2(unmapped));

	return unmapped;
}

/*
 * This function checks if a specific unity mapping entry is needed for
 * this specific IOMMU.
 */
static int iommu_for_unity_map(struct amd_iommu *iommu,
			       struct unity_map_entry *entry)
{
	u16 bdf, i;

	for (i = entry->devid_start; i <= entry->devid_end; ++i) {
		bdf = amd_iommu_alias_table[i];
		if (amd_iommu_rlookup_table[bdf] == iommu)
			return 1;
	}

	return 0;
}
  /*
   * This function actually applies the mapping to the page table of the
   * dma_ops domain.
   */
  static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
  			     struct unity_map_entry *e)
  {
  	u64 addr;
  	int ret;
  
  	for (addr = e->address_start; addr < e->address_end;
  	     addr += PAGE_SIZE) {
  		ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot,
  				     PAGE_SIZE);
  		if (ret)
  			return ret;
  		/*
  		 * if unity mapping is in aperture range mark the page
  		 * as allocated in the aperture
  		 */
  		if (addr < dma_dom->aperture_size)
  			__set_bit(addr >> PAGE_SHIFT,
  				  dma_dom->aperture[0]->bitmap);
  	}
  
  	return 0;
  }
  /*
   * Init the unity mappings for a specific IOMMU in the system
   *
   * Basically iterates over all unity mapping entries and applies them to
   * the default DMA domain of that IOMMU if necessary.
   */
  static int iommu_init_unity_mappings(struct amd_iommu *iommu)
  {
  	struct unity_map_entry *entry;
  	int ret;
  
  	list_for_each_entry(entry, &amd_iommu_unity_map, list) {
  		if (!iommu_for_unity_map(iommu, entry))
  			continue;
  		ret = dma_ops_unity_map(iommu->default_dom, entry);
  		if (ret)
  			return ret;
  	}
  
  	return 0;
  }
  
  /*
   * Inits the unity mappings required for a specific device
   */
  static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
  					  u16 devid)
  {
  	struct unity_map_entry *e;
  	int ret;
  
  	list_for_each_entry(e, &amd_iommu_unity_map, list) {
  		if (!(devid >= e->devid_start && devid <= e->devid_end))
  			continue;
  		ret = dma_ops_unity_map(dma_dom, e);
  		if (ret)
  			return ret;
  	}
  
  	return 0;
  }
  /****************************************************************************
   *
   * The next functions belong to the address allocator for the dma_ops
   * interface functions. They work like the allocators in the other IOMMU
   * drivers. It's basically a bitmap which marks the allocated pages in
   * the aperture. Maybe it could be enhanced in the future to a more
   * efficient allocator.
   *
   ****************************************************************************/
  
  /*
   * The address allocator core functions.
   *
   * called with domain->lock held
   */
  
  /*
   * Used to reserve address ranges in the aperture (e.g. for exclusion
   * ranges).
   */
  static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
  				      unsigned long start_page,
  				      unsigned int pages)
  {
  	unsigned int i, last_page = dom->aperture_size >> PAGE_SHIFT;
  
  	if (start_page + pages > last_page)
  		pages = last_page - start_page;
  
  	for (i = start_page; i < start_page + pages; ++i) {
  		int index = i / APERTURE_RANGE_PAGES;
  		int page  = i % APERTURE_RANGE_PAGES;
  		__set_bit(page, dom->aperture[index]->bitmap);
  	}
  }
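  
  /*
   * Illustrative sketch, not part of the driver: reserving the first
   * 16 MiB of the aperture would be
   *
   *	dma_ops_reserve_addresses(dom, 0, (16ul << 20) >> PAGE_SHIFT);
   *
   * Only the allocator bitmaps are marked; no page table entries are
   * touched.
   */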
  
  /*
   * This function is used to add a new aperture range to an existing
   * aperture in case of dma_ops domain allocation or address allocation
   * failure.
   */
  static int alloc_new_range(struct dma_ops_domain *dma_dom,
  			   bool populate, gfp_t gfp)
  {
  	int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
  	struct amd_iommu *iommu;
  	unsigned long i, old_size;
  
  #ifdef CONFIG_IOMMU_STRESS
  	populate = false;
  #endif
  
  	if (index >= APERTURE_MAX_RANGES)
  		return -ENOMEM;
  
  	dma_dom->aperture[index] = kzalloc(sizeof(struct aperture_range), gfp);
  	if (!dma_dom->aperture[index])
  		return -ENOMEM;
  
  	dma_dom->aperture[index]->bitmap = (void *)get_zeroed_page(gfp);
  	if (!dma_dom->aperture[index]->bitmap)
  		goto out_free;
  
  	dma_dom->aperture[index]->offset = dma_dom->aperture_size;
  
  	if (populate) {
  		unsigned long address = dma_dom->aperture_size;
  		int i, num_ptes = APERTURE_RANGE_PAGES / 512;
  		u64 *pte, *pte_page;
  
  		for (i = 0; i < num_ptes; ++i) {
  			pte = alloc_pte(&dma_dom->domain, address, PAGE_SIZE,
  					&pte_page, gfp);
  			if (!pte)
  				goto out_free;
  
  			dma_dom->aperture[index]->pte_pages[i] = pte_page;
  
  			address += APERTURE_RANGE_SIZE / 64;
  		}
  	}
  	old_size                = dma_dom->aperture_size;
  	dma_dom->aperture_size += APERTURE_RANGE_SIZE;
  
  	/* Reserve address range used for MSI messages */
  	if (old_size < MSI_ADDR_BASE_LO &&
  	    dma_dom->aperture_size > MSI_ADDR_BASE_LO) {
  		unsigned long spage;
  		int pages;
  
  		pages = iommu_num_pages(MSI_ADDR_BASE_LO, 0x10000, PAGE_SIZE);
  		spage = MSI_ADDR_BASE_LO >> PAGE_SHIFT;
  
  		dma_ops_reserve_addresses(dma_dom, spage, pages);
  	}
  	/* Initialize the exclusion range if necessary */
  	for_each_iommu(iommu) {
  		if (iommu->exclusion_start &&
  		    iommu->exclusion_start >= dma_dom->aperture[index]->offset
  		    && iommu->exclusion_start < dma_dom->aperture_size) {
  			unsigned long startpage;
  			int pages = iommu_num_pages(iommu->exclusion_start,
  						    iommu->exclusion_length,
  						    PAGE_SIZE);
  			startpage = iommu->exclusion_start >> PAGE_SHIFT;
  			dma_ops_reserve_addresses(dma_dom, startpage, pages);
  		}
  	}
  
  	/*
  	 * Check for areas already mapped as present in the new aperture
  	 * range and mark those pages as reserved in the allocator. Such
  	 * mappings may already exist as a result of requested unity
  	 * mappings for devices.
  	 */
  	for (i = dma_dom->aperture[index]->offset;
  	     i < dma_dom->aperture_size;
  	     i += PAGE_SIZE) {
  		u64 *pte = fetch_pte(&dma_dom->domain, i);
  		if (!pte || !IOMMU_PTE_PRESENT(*pte))
  			continue;
  
  		dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT, 1);
  	}
  
  	update_domain(&dma_dom->domain);
  
  	return 0;
  
  out_free:
  	update_domain(&dma_dom->domain);
  
  	free_page((unsigned long)dma_dom->aperture[index]->bitmap);
  
  	kfree(dma_dom->aperture[index]);
  	dma_dom->aperture[index] = NULL;
  
  	return -ENOMEM;
  }
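  
  /*
   * Note: every successful alloc_new_range() call grows the aperture by
   * APERTURE_RANGE_SIZE (128 MB of IO virtual address space), up to
   * APERTURE_MAX_RANGES ranges per domain.
   */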
  static unsigned long dma_ops_area_alloc(struct device *dev,
  					struct dma_ops_domain *dom,
  					unsigned int pages,
  					unsigned long align_mask,
  					u64 dma_mask,
  					unsigned long start)
  {
  	unsigned long next_bit = dom->next_address % APERTURE_RANGE_SIZE;
  	int max_index = dom->aperture_size >> APERTURE_RANGE_SHIFT;
  	int i = start >> APERTURE_RANGE_SHIFT;
  	unsigned long boundary_size;
  	unsigned long address = -1;
  	unsigned long limit;
  
  	next_bit >>= PAGE_SHIFT;
  
  	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
  			PAGE_SIZE) >> PAGE_SHIFT;
  
  	for (;i < max_index; ++i) {
  		unsigned long offset = dom->aperture[i]->offset >> PAGE_SHIFT;
  
  		if (dom->aperture[i]->offset >= dma_mask)
  			break;
  
  		limit = iommu_device_max_index(APERTURE_RANGE_PAGES, offset,
  					       dma_mask >> PAGE_SHIFT);
  
  		address = iommu_area_alloc(dom->aperture[i]->bitmap,
  					   limit, next_bit, pages, 0,
  					    boundary_size, align_mask);
  		if (address != -1) {
  			address = dom->aperture[i]->offset +
  				  (address << PAGE_SHIFT);
  			dom->next_address = address + (pages << PAGE_SHIFT);
  			break;
  		}
  
  		next_bit = 0;
  	}
  
  	return address;
  }
  static unsigned long dma_ops_alloc_addresses(struct device *dev,
  					     struct dma_ops_domain *dom,
  					     unsigned int pages,
  					     unsigned long align_mask,
  					     u64 dma_mask)
  {
  	unsigned long address;
  
  #ifdef CONFIG_IOMMU_STRESS
  	dom->next_address = 0;
  	dom->need_flush = true;
  #endif
  
  	address = dma_ops_area_alloc(dev, dom, pages, align_mask,
  				     dma_mask, dom->next_address);
  
  	if (address == -1) {
  		dom->next_address = 0;
  		address = dma_ops_area_alloc(dev, dom, pages, align_mask,
  					     dma_mask, 0);
  		dom->need_flush = true;
  	}
  
  	if (unlikely(address == -1))
  		address = DMA_ERROR_CODE;
  
  	WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);
  
  	return address;
  }
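  
  /*
   * Note: allocation is next-fit starting at dom->next_address; on
   * failure the allocator wraps around once and rescans from the start
   * of the aperture. need_flush is set on wrap-around because recycled
   * addresses may still be cached in IOTLBs.
   */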
  /*
   * The address free function.
   *
   * called with domain->lock held
   */
  static void dma_ops_free_addresses(struct dma_ops_domain *dom,
  				   unsigned long address,
  				   unsigned int pages)
  {
  	unsigned i = address >> APERTURE_RANGE_SHIFT;
  	struct aperture_range *range = dom->aperture[i];
  
  	BUG_ON(i >= APERTURE_MAX_RANGES || range == NULL);
  
  #ifdef CONFIG_IOMMU_STRESS
  	if (i < 4)
  		return;
  #endif
  
  	if (address >= dom->next_address)
  		dom->need_flush = true;
  
  	address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT;

  	bitmap_clear(range->bitmap, address, pages);
  
  }
  /****************************************************************************
   *
   * The next functions belong to the domain allocation. A domain is
   * allocated for every IOMMU as the default domain. If device isolation
   * is enabled, every device gets its own domain. The most important thing
   * about domains is the page table mapping the DMA address space they
   * contain.
   *
   ****************************************************************************/
  /*
   * This function adds a protection domain to the global protection domain list
   */
  static void add_domain_to_list(struct protection_domain *domain)
  {
  	unsigned long flags;
  
  	spin_lock_irqsave(&amd_iommu_pd_lock, flags);
  	list_add(&domain->list, &amd_iommu_pd_list);
  	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
  }
  
  /*
   * This function removes a protection domain from the global
   * protection domain list
   */
  static void del_domain_from_list(struct protection_domain *domain)
  {
  	unsigned long flags;
  
  	spin_lock_irqsave(&amd_iommu_pd_lock, flags);
  	list_del(&domain->list);
  	spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
  }
  static u16 domain_id_alloc(void)
  {
  	unsigned long flags;
  	int id;
  
  	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
  	id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
  	BUG_ON(id == 0);
  	if (id > 0 && id < MAX_DOMAIN_ID)
  		__set_bit(id, amd_iommu_pd_alloc_bitmap);
  	else
  		id = 0;
  	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
  
  	return id;
  }
  static void domain_id_free(int id)
  {
  	unsigned long flags;
  
  	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
  	if (id > 0 && id < MAX_DOMAIN_ID)
  		__clear_bit(id, amd_iommu_pd_alloc_bitmap);
  	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
  }
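  
  /*
   * Illustrative pairing of the two helpers above (error code is a
   * hypothetical choice): domain id 0 is never handed out, so it
   * doubles as the error value:
   *
   *	id = domain_id_alloc();
   *	if (id == 0)
   *		return -ENOSPC;
   *	...
   *	domain_id_free(id);
   */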

  static void free_pagetable(struct protection_domain *domain)
  {
  	int i, j;
  	u64 *p1, *p2, *p3;
  
  	p1 = domain->pt_root;
  
  	if (!p1)
  		return;
  
  	for (i = 0; i < 512; ++i) {
  		if (!IOMMU_PTE_PRESENT(p1[i]))
  			continue;
  
  		p2 = IOMMU_PTE_PAGE(p1[i]);
  		for (j = 0; j < 512; ++j) {
  			if (!IOMMU_PTE_PRESENT(p2[j]))
  				continue;
  			p3 = IOMMU_PTE_PAGE(p2[j]);
  			free_page((unsigned long)p3);
  		}
  
  		free_page((unsigned long)p2);
  	}
  
  	free_page((unsigned long)p1);
  
  	domain->pt_root = NULL;
  }
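  
  /*
   * Note: this walk frees at most three page-table levels of 512
   * entries each, i.e. it assumes no more than PAGE_MODE_3_LEVEL
   * (512 * 512 * 512 * 4kb = 512 GB of DMA address space).
   */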
  static void free_gcr3_tbl_level1(u64 *tbl)
  {
  	u64 *ptr;
  	int i;
  
  	for (i = 0; i < 512; ++i) {
  		if (!(tbl[i] & GCR3_VALID))
  			continue;
  
  		ptr = __va(tbl[i] & PAGE_MASK);
  
  		free_page((unsigned long)ptr);
  	}
  }
  
  static void free_gcr3_tbl_level2(u64 *tbl)
  {
  	u64 *ptr;
  	int i;
  
  	for (i = 0; i < 512; ++i) {
  		if (!(tbl[i] & GCR3_VALID))
  			continue;
  
  		ptr = __va(tbl[i] & PAGE_MASK);
  
  		free_gcr3_tbl_level1(ptr);
  	}
  }
  static void free_gcr3_table(struct protection_domain *domain)
  {
  	if (domain->glx == 2)
  		free_gcr3_tbl_level2(domain->gcr3_tbl);
  	else if (domain->glx == 1)
  		free_gcr3_tbl_level1(domain->gcr3_tbl);
  	else if (domain->glx != 0)
  		BUG();
  
  	free_page((unsigned long)domain->gcr3_tbl);
  }
  /*
   * Free a domain, only used if something went wrong in the
   * allocation path and we need to free an already allocated page table
   */
  static void dma_ops_domain_free(struct dma_ops_domain *dom)
  {
  	int i;
  
  	if (!dom)
  		return;
  
  	del_domain_from_list(&dom->domain);
  
  	free_pagetable(&dom->domain);

  	for (i = 0; i < APERTURE_MAX_RANGES; ++i) {
  		if (!dom->aperture[i])
  			continue;
  		free_page((unsigned long)dom->aperture[i]->bitmap);
  		kfree(dom->aperture[i]);
  	}
  
  	kfree(dom);
  }
  /*
   * Allocates a new protection domain usable for the dma_ops functions.
   * It also initializes the page table and the address allocator data
   * structures required for the dma_ops interface
   */
  static struct dma_ops_domain *dma_ops_domain_alloc(void)
  {
  	struct dma_ops_domain *dma_dom;
  
  	dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
  	if (!dma_dom)
  		return NULL;
  
  	spin_lock_init(&dma_dom->domain.lock);
  
  	dma_dom->domain.id = domain_id_alloc();
  	if (dma_dom->domain.id == 0)
  		goto free_dma_dom;
  	INIT_LIST_HEAD(&dma_dom->domain.dev_list);
  	dma_dom->domain.mode = PAGE_MODE_2_LEVEL;
  	dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
  	dma_dom->domain.flags = PD_DMA_OPS_MASK;
  	dma_dom->domain.priv = dma_dom;
  	if (!dma_dom->domain.pt_root)
  		goto free_dma_dom;
  
  	dma_dom->need_flush = false;
  	dma_dom->target_dev = 0xffff;
  
  	add_domain_to_list(&dma_dom->domain);
  	if (alloc_new_range(dma_dom, true, GFP_KERNEL))
  		goto free_dma_dom;
  
  	/*
  	 * mark the first page as allocated so we never return 0 as
  	 * a valid dma-address; this allows us to use 0 as the error value
  	 */
  	dma_dom->aperture[0]->bitmap[0] = 1;
  	dma_dom->next_address = 0;

  	return dma_dom;
  
  free_dma_dom:
  	dma_ops_domain_free(dma_dom);
  
  	return NULL;
  }
  /*
   * little helper function to check whether a given protection domain is a
   * dma_ops domain
   */
  static bool dma_ops_domain(struct protection_domain *domain)
  {
  	return domain->flags & PD_DMA_OPS_MASK;
  }
  static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
  {
  	u64 pte_root = 0;
  	u64 flags = 0;
  
  	if (domain->mode != PAGE_MODE_NONE)
  		pte_root = virt_to_phys(domain->pt_root);
  	pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
  		    << DEV_ENTRY_MODE_SHIFT;
  	pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
  
  	flags = amd_iommu_dev_table[devid].data[1];
  
  	if (ats)
  		flags |= DTE_FLAG_IOTLB;
  
  	if (domain->flags & PD_IOMMUV2_MASK) {
  		u64 gcr3 = __pa(domain->gcr3_tbl);
  		u64 glx  = domain->glx;
  		u64 tmp;
  
  		pte_root |= DTE_FLAG_GV;
  		pte_root |= (glx & DTE_GLX_MASK) << DTE_GLX_SHIFT;
  
  		/* First mask out possible old values for GCR3 table */
  		tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
  		flags    &= ~tmp;
  
  		tmp = DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
  		flags    &= ~tmp;
  
  		/* Encode GCR3 table into DTE */
  		tmp = DTE_GCR3_VAL_A(gcr3) << DTE_GCR3_SHIFT_A;
  		pte_root |= tmp;
  
  		tmp = DTE_GCR3_VAL_B(gcr3) << DTE_GCR3_SHIFT_B;
  		flags    |= tmp;
  
  		tmp = DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
  		flags    |= tmp;
  	}
  
  	flags &= ~(0xffffUL);
  	flags |= domain->id;
  
  	amd_iommu_dev_table[devid].data[1]  = flags;
  	amd_iommu_dev_table[devid].data[0]  = pte_root;
  }
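  
  /*
   * Note: data[0] of the DTE carries the page-table root pointer, the
   * paging mode and the IR/IW/P/TV bits; data[1] carries the domain id
   * in its low 16 bits plus the IOTLB flag. For IOMMUv2 domains, parts
   * of the GCR3 table pointer are scattered over both words.
   */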
  
  static void clear_dte_entry(u16 devid)
  {
  	/* remove entry from the device table seen by the hardware */
  	amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
  	amd_iommu_dev_table[devid].data[1] = 0;
  
  	amd_iommu_apply_erratum_63(devid);
  }
  
  static void do_attach(struct iommu_dev_data *dev_data,
  		      struct protection_domain *domain)
  {
  	struct amd_iommu *iommu;
  	bool ats;
  
  	iommu = amd_iommu_rlookup_table[dev_data->devid];
  	ats   = dev_data->ats.enabled;
  
  	/* Update data structures */
  	dev_data->domain = domain;
  	list_add(&dev_data->list, &domain->dev_list);
  	set_dte_entry(dev_data->devid, domain, ats);
  
  	/* Do reference counting */
  	domain->dev_iommu[iommu->index] += 1;
  	domain->dev_cnt                 += 1;
  
  	/* Flush the DTE entry */
  	device_flush_dte(dev_data);
  }
  
  static void do_detach(struct iommu_dev_data *dev_data)
  {
  	struct amd_iommu *iommu;
  
  	iommu = amd_iommu_rlookup_table[dev_data->devid];
  
  	/* decrease reference counters */
  	dev_data->domain->dev_iommu[iommu->index] -= 1;
  	dev_data->domain->dev_cnt                 -= 1;
  
  	/* Update data structures */
  	dev_data->domain = NULL;
  	list_del(&dev_data->list);
  	clear_dte_entry(dev_data->devid);
  
  	/* Flush the DTE entry */
  	device_flush_dte(dev_data);
  }
  
  /*
   * If a device is not yet associated with a domain, this function
   * assigns it to a domain and makes it visible to the hardware
   */
  static int __attach_device(struct iommu_dev_data *dev_data,
  			   struct protection_domain *domain)
  {
  	int ret;
  
  	/* lock domain */
  	spin_lock(&domain->lock);
  
  	if (dev_data->alias_data != NULL) {
  		struct iommu_dev_data *alias_data = dev_data->alias_data;
  
  		/* Some sanity checks */
  		ret = -EBUSY;
  		if (alias_data->domain != NULL &&
  				alias_data->domain != domain)
  			goto out_unlock;
  
  		if (dev_data->domain != NULL &&
  				dev_data->domain != domain)
  			goto out_unlock;
  
  		/* Do real assignment */
  		if (alias_data->domain == NULL)
  			do_attach(alias_data, domain);
  
  		atomic_inc(&alias_data->bind);
  	}
  
  	if (dev_data->domain == NULL)
  		do_attach(dev_data, domain);
  
  	atomic_inc(&dev_data->bind);
  
  	ret = 0;
  
  out_unlock:
  	/* ready */
  	spin_unlock(&domain->lock);
  
  	return ret;
  }
  
  static void pdev_iommuv2_disable(struct pci_dev *pdev)
  {
  	pci_disable_ats(pdev);
  	pci_disable_pri(pdev);
  	pci_disable_pasid(pdev);
  }
  
  /* FIXME: Change generic reset-function to do the same */
  static int pri_reset_while_enabled(struct pci_dev *pdev)
  {
  	u16 control;
  	int pos;
  
  	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
  	if (!pos)
  		return -EINVAL;
  
  	pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
  	control |= PCI_PRI_CTRL_RESET;
  	pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
  
  	return 0;
  }
  
  static int pdev_iommuv2_enable(struct pci_dev *pdev)
  {
  	bool reset_enable;
  	int reqs, ret;
  
  	/* FIXME: Hardcode number of outstanding requests for now */
  	reqs = 32;
  	if (pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE))
  		reqs = 1;
  	reset_enable = pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_ENABLE_RESET);
  
  	/* Only allow access to user-accessible pages */
  	ret = pci_enable_pasid(pdev, 0);
  	if (ret)
  		goto out_err;
  
  	/* First reset the PRI state of the device */
  	ret = pci_reset_pri(pdev);
  	if (ret)
  		goto out_err;
  
  	/* Enable PRI */
  	ret = pci_enable_pri(pdev, reqs);
  	if (ret)
  		goto out_err;
  
  	if (reset_enable) {
  		ret = pri_reset_while_enabled(pdev);
  		if (ret)
  			goto out_err;
  	}
  
  	ret = pci_enable_ats(pdev, PAGE_SHIFT);
  	if (ret)
  		goto out_err;
  
  	return 0;
  
  out_err:
  	pci_disable_pri(pdev);
  	pci_disable_pasid(pdev);
  
  	return ret;
  }
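  
  /*
   * Note: the enable order above (PASID, then PRI, then ATS) is
   * deliberate; pdev_iommuv2_disable() tears the features down in the
   * reverse order.
   */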
  
  /* FIXME: Move this to PCI code */
  #define PCI_PRI_TLP_OFF		(1 << 2)
  
  bool pci_pri_tlp_required(struct pci_dev *pdev)
  {
  	u16 control;
  	int pos;
  
  	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
  	if (!pos)
  		return false;
  
  	pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
  
  	return (control & PCI_PRI_TLP_OFF) ? true : false;
  }
  
  /*
   * If a device is not yet associated with a domain, this function
   * assigns it to a domain and makes it visible to the hardware
   */
  static int attach_device(struct device *dev,
  			 struct protection_domain *domain)
  {
  	struct pci_dev *pdev = to_pci_dev(dev);
  	struct iommu_dev_data *dev_data;
  	unsigned long flags;
  	int ret;
  
  	dev_data = get_dev_data(dev);
  
  	if (domain->flags & PD_IOMMUV2_MASK) {
  		if (!dev_data->iommu_v2 || !dev_data->passthrough)
  			return -EINVAL;
  
  		if (pdev_iommuv2_enable(pdev) != 0)
  			return -EINVAL;
  
  		dev_data->ats.enabled = true;
  		dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
  		dev_data->pri_tlp     = pci_pri_tlp_required(pdev);
  	} else if (amd_iommu_iotlb_sup &&
  		   pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
  		dev_data->ats.enabled = true;
  		dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
  	}
  
  	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
  	ret = __attach_device(dev_data, domain);
  	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
  
  	/*
  	 * We might boot into a crash-kernel here. The crashed kernel
  	 * left the caches in the IOMMU dirty. So we have to flush
  	 * here to evict all dirty stuff.
  	 */
  	domain_flush_tlb_pde(domain);
  
  	return ret;
  }
  
  /*
   * Removes a device from a protection domain (unlocked)
   */
  static void __detach_device(struct iommu_dev_data *dev_data)
  {
  	struct protection_domain *domain;
  	unsigned long flags;
  
  	BUG_ON(!dev_data->domain);
  
  	domain = dev_data->domain;
  
  	spin_lock_irqsave(&domain->lock, flags);
  
  	if (dev_data->alias_data != NULL) {
  		struct iommu_dev_data *alias_data = dev_data->alias_data;
  
  		if (atomic_dec_and_test(&alias_data->bind))
  			do_detach(alias_data);
  	}
  
  	if (atomic_dec_and_test(&dev_data->bind))
  		do_detach(dev_data);

  	spin_unlock_irqrestore(&domain->lock, flags);
  
  	/*
  	 * If we run in passthrough mode the device must be assigned to the
  	 * passthrough domain if it is detached from any other domain.
  	 * Make sure we can deassign from the pt_domain itself.
  	 */
  	if (dev_data->passthrough &&
  	    (dev_data->domain == NULL && domain != pt_domain))
  		__attach_device(dev_data, pt_domain);
  }
  
  /*
   * Removes a device from a protection domain (with devtable_lock held)
   */
  static void detach_device(struct device *dev)
  {
  	struct protection_domain *domain;
  	struct iommu_dev_data *dev_data;
  	unsigned long flags;
  
  	dev_data = get_dev_data(dev);
  	domain   = dev_data->domain;

  	/* lock device table */
  	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
  	__detach_device(dev_data);
  	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

  	if (domain->flags & PD_IOMMUV2_MASK)
  		pdev_iommuv2_disable(to_pci_dev(dev));
  	else if (dev_data->ats.enabled)
  		pci_disable_ats(to_pci_dev(dev));
  
  	dev_data->ats.enabled = false;
  }

  /*
   * Find out the protection domain structure for a given PCI device. This
   * will give us the pointer to the page table root for example.
   */
  static struct protection_domain *domain_for_device(struct device *dev)
  {
  	struct iommu_dev_data *dev_data;
  	struct protection_domain *dom = NULL;
  	unsigned long flags;

  	dev_data   = get_dev_data(dev);

  	if (dev_data->domain)
  		return dev_data->domain;

  	if (dev_data->alias_data != NULL) {
  		struct iommu_dev_data *alias_data = dev_data->alias_data;
  
  		read_lock_irqsave(&amd_iommu_devtable_lock, flags);
  		if (alias_data->domain != NULL) {
  			__attach_device(dev_data, alias_data->domain);
  			dom = alias_data->domain;
  		}
  		read_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
  	}
  
  	return dom;
  }
  
  static int device_change_notifier(struct notifier_block *nb,
  				  unsigned long action, void *data)
  {
  	struct dma_ops_domain *dma_domain;
  	struct protection_domain *domain;
  	struct iommu_dev_data *dev_data;
  	struct device *dev = data;
  	struct amd_iommu *iommu;
  	unsigned long flags;
  	u16 devid;

  	if (!check_device(dev))
  		return 0;

  	devid    = get_device_id(dev);
  	iommu    = amd_iommu_rlookup_table[devid];
  	dev_data = get_dev_data(dev);
  
  	switch (action) {
  	case BUS_NOTIFY_UNBOUND_DRIVER:
  
  		domain = domain_for_device(dev);
  		if (!domain)
  			goto out;
  		if (dev_data->passthrough)
  			break;
  		detach_device(dev);
  		break;
  	case BUS_NOTIFY_ADD_DEVICE:
  
  		iommu_init_device(dev);
  
  		domain = domain_for_device(dev);
  
  		/* allocate a protection domain if a device is added */
  		dma_domain = find_protection_domain(devid);
  		if (dma_domain)
  			goto out;
  		dma_domain = dma_ops_domain_alloc();
  		if (!dma_domain)
  			goto out;
  		dma_domain->target_dev = devid;
  
  		spin_lock_irqsave(&iommu_pd_list_lock, flags);
  		list_add_tail(&dma_domain->list, &iommu_pd_list);
  		spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
  
  		break;
  	case BUS_NOTIFY_DEL_DEVICE:
  
  		iommu_uninit_device(dev);
  
  	default:
  		goto out;
  	}
  
  	iommu_completion_wait(iommu);
  
  out:
  	return 0;
  }
  static struct notifier_block device_nb = {
  	.notifier_call = device_change_notifier,
  };

  void amd_iommu_init_notifier(void)
  {
  	bus_register_notifier(&pci_bus_type, &device_nb);
  }
  
  /*****************************************************************************
   *
   * The next functions belong to the dma_ops mapping/unmapping code.
   *
   *****************************************************************************/
  
  /*
   * In the dma_ops path we only have the struct device. This function
   * finds the corresponding IOMMU, the protection domain and the
   * requestor id for a given device.
   * If the device is not yet associated with a domain this is also done
   * in this function.
   */
  static struct protection_domain *get_domain(struct device *dev)
  {
  	struct protection_domain *domain;
  	struct dma_ops_domain *dma_dom;
  	u16 devid = get_device_id(dev);
  
  	if (!check_device(dev))
  		return ERR_PTR(-EINVAL);

  	domain = domain_for_device(dev);
  	if (domain != NULL && !dma_ops_domain(domain))
  		return ERR_PTR(-EBUSY);

  	if (domain != NULL)
  		return domain;

  	/* Device not bound yet - bind it */
  	dma_dom = find_protection_domain(devid);
  	if (!dma_dom)
  		dma_dom = amd_iommu_rlookup_table[devid]->default_dom;
  	attach_device(dev, &dma_dom->domain);
  
  	DUMP_printk("Using protection domain %d for device %s\n",
  		    dma_dom->domain.id, dev_name(dev));

  	return &dma_dom->domain;
  }
  
  static void update_device_table(struct protection_domain *domain)
  {
  	struct iommu_dev_data *dev_data;

  	list_for_each_entry(dev_data, &domain->dev_list, list)
  		set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled);
  }
  
  static void update_domain(struct protection_domain *domain)
  {
  	if (!domain->updated)
  		return;
  
  	update_device_table(domain);
  
  	domain_flush_devices(domain);
  	domain_flush_tlb_pde(domain);
  
  	domain->updated = false;
  }
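  
  /*
   * Note: update_domain() is a no-op unless something set
   * domain->updated (e.g. a grown page table); only then are all DTEs
   * rewritten and the devices and TLBs of the domain flushed.
   */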
  
  /*
   * This function fetches the PTE for a given address in the aperture
   */
  static u64* dma_ops_get_pte(struct dma_ops_domain *dom,
  			    unsigned long address)
  {
  	struct aperture_range *aperture;
  	u64 *pte, *pte_page;
  
  	aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
  	if (!aperture)
  		return NULL;
  
  	pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
  	if (!pte) {
  		pte = alloc_pte(&dom->domain, address, PAGE_SIZE, &pte_page,
  				GFP_ATOMIC);
  		aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page;
  	} else
  		pte += PM_LEVEL_INDEX(0, address);

  	update_domain(&dom->domain);
  
  	return pte;
  }
  
  /*
   * This is the generic map function. It maps one 4kb page at paddr to
   * the given address in the DMA address space for the domain.
   */
  static dma_addr_t dma_ops_domain_map(struct dma_ops_domain *dom,
  				     unsigned long address,
  				     phys_addr_t paddr,
  				     int direction)
  {
  	u64 *pte, __pte;
  
  	WARN_ON(address > dom->aperture_size);
  
  	paddr &= PAGE_MASK;
  	pte  = dma_ops_get_pte(dom, address);
  	if (!pte)
  		return DMA_ERROR_CODE;
  
  	__pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;
  
  	if (direction == DMA_TO_DEVICE)
  		__pte |= IOMMU_PTE_IR;
  	else if (direction == DMA_FROM_DEVICE)
  		__pte |= IOMMU_PTE_IW;
  	else if (direction == DMA_BIDIRECTIONAL)
  		__pte |= IOMMU_PTE_IR | IOMMU_PTE_IW;
  
  	WARN_ON(*pte);
  
  	*pte = __pte;
  
  	return (dma_addr_t)address;
  }
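  
  /*
   * Illustrative only: a DMA_TO_DEVICE buffer is only read by the
   * device, so its PTE gets IOMMU_PTE_IR; DMA_FROM_DEVICE grants
   * IOMMU_PTE_IW; DMA_BIDIRECTIONAL grants both.
   */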
  
  /*
   * The generic unmapping function for one page in the DMA address space.
   */
  static void dma_ops_domain_unmap(struct dma_ops_domain *dom,
  				 unsigned long address)
  {
  	struct aperture_range *aperture;
  	u64 *pte;
  
  	if (address >= dom->aperture_size)
  		return;
  
  	aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
  	if (!aperture)
  		return;
  
  	pte  = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
  	if (!pte)
  		return;

  	pte += PM_LEVEL_INDEX(0, address);
  
  	WARN_ON(!*pte);
  
  	*pte = 0ULL;
  }
  
  /*
   * This function contains common code for mapping of a physically
   * contiguous memory region into DMA address space. It is used by all
   * mapping functions provided with this IOMMU driver.
   * Must be called with the domain lock held.
   */
  static dma_addr_t __map_single(struct device *dev,
  			       struct dma_ops_domain *dma_dom,
  			       phys_addr_t paddr,
  			       size_t size,
  			       int dir,
  			       bool align,
  			       u64 dma_mask)
  {
  	dma_addr_t offset = paddr & ~PAGE_MASK;
  	dma_addr_t address, start, ret;
  	unsigned int pages;
  	unsigned long align_mask = 0;
  	int i;
  
  	pages = iommu_num_pages(paddr, size, PAGE_SIZE);
  	paddr &= PAGE_MASK;
  
  	INC_STATS_COUNTER(total_map_requests);
  
  	if (pages > 1)
  		INC_STATS_COUNTER(cross_page);
  
  	if (align)
  		align_mask = (1UL << get_order(size)) - 1;
  
  retry:
  	address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
  					  dma_mask);
  	if (unlikely(address == DMA_ERROR_CODE)) {
  		/*
  		 * setting next_address here will let the address
  		 * allocator only scan the new allocated range in the
  		 * first run. This is a small optimization.
  		 */
  		dma_dom->next_address = dma_dom->aperture_size;
  		if (alloc_new_range(dma_dom, false, GFP_ATOMIC))
  			goto out;
  
  		/*
  		 * aperture was successfully enlarged by 128 MB, try
  		 * allocation again
  		 */
  		goto retry;
  	}
  
  	start = address;
  	for (i = 0; i < pages; ++i) {
  		ret = dma_ops_domain_map(dma_dom, start, paddr, dir);
  		if (ret == DMA_ERROR_CODE)
  			goto out_unmap;
  		paddr += PAGE_SIZE;
  		start += PAGE_SIZE;
  	}
  	address += offset;
  
  	ADD_STATS_COUNTER(alloced_io_mem, size);
  
  	if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
  		domain_flush_tlb(&dma_dom->domain);
  		dma_dom->need_flush = false;
  	} else if (unlikely(amd_iommu_np_cache))
  		domain_flush_pages(&dma_dom->domain, address, size);

  out:
  	return address;
  
  out_unmap:
  
  	for (--i; i >= 0; --i) {
  		start -= PAGE_SIZE;
  		dma_ops_domain_unmap(dma_dom, start);
  	}
  
  	dma_ops_free_addresses(dma_dom, address, pages);
  
  	return DMA_ERROR_CODE;
  }
  
  /*
   * Does the reverse of the __map_single function. Must be called with
   * the domain lock held too
   */
  static void __unmap_single(struct dma_ops_domain *dma_dom,
  			   dma_addr_t dma_addr,
  			   size_t size,
  			   int dir)
  {
  	dma_addr_t flush_addr;
  	dma_addr_t i, start;
  	unsigned int pages;
  	if ((dma_addr == DMA_ERROR_CODE) ||
  	    (dma_addr + size > dma_dom->aperture_size))
  		return;
  	flush_addr = dma_addr;
  	pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
  	dma_addr &= PAGE_MASK;
  	start = dma_addr;
  
  	for (i = 0; i < pages; ++i) {
  		dma_ops_domain_unmap(dma_dom, start);
  		start += PAGE_SIZE;
  	}
  	SUB_STATS_COUNTER(alloced_io_mem, size);
  	dma_ops_free_addresses(dma_dom, dma_addr, pages);

  	if (amd_iommu_unmap_flush || dma_dom->need_flush) {
  		domain_flush_pages(&dma_dom->domain, flush_addr, size);
  		dma_dom->need_flush = false;
  	}
  }
  /*
   * The exported map_single function for dma_ops.
   */
  static dma_addr_t map_page(struct device *dev, struct page *page,
  			   unsigned long offset, size_t size,
  			   enum dma_data_direction dir,
  			   struct dma_attrs *attrs)
  {
  	unsigned long flags;
  	struct protection_domain *domain;
  	dma_addr_t addr;
  	u64 dma_mask;
  	phys_addr_t paddr = page_to_phys(page) + offset;

  	INC_STATS_COUNTER(cnt_map_single);
  	domain = get_domain(dev);
  	if (PTR_ERR(domain) == -EINVAL)
  		return (dma_addr_t)paddr;
  	else if (IS_ERR(domain))
  		return DMA_ERROR_CODE;

  	dma_mask = *dev->dma_mask;
  	spin_lock_irqsave(&domain->lock, flags);

  	addr = __map_single(dev, domain->priv, paddr, size, dir, false,
  			    dma_mask);
  	if (addr == DMA_ERROR_CODE)
  		goto out;
  	domain_flush_complete(domain);
  
  out:
  	spin_unlock_irqrestore(&domain->lock, flags);
  
  	return addr;
  }
  /*
   * The exported unmap_single function for dma_ops.
   */
  static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
  		       enum dma_data_direction dir, struct dma_attrs *attrs)
  {
  	unsigned long flags;
  	struct protection_domain *domain;

  	INC_STATS_COUNTER(cnt_unmap_single);
  	domain = get_domain(dev);
  	if (IS_ERR(domain))
  		return;
  	spin_lock_irqsave(&domain->lock, flags);
  	__unmap_single(domain->priv, dma_addr, size, dir);

  	domain_flush_complete(domain);
  
  	spin_unlock_irqrestore(&domain->lock, flags);
  }
  /*
   * This is a special map_sg function, used for devices that are not
   * handled by an AMD IOMMU in the system.
   */
  static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist,
  			   int nelems, int dir)
  {
  	struct scatterlist *s;
  	int i;
  
  	for_each_sg(sglist, s, nelems, i) {
  		s->dma_address = (dma_addr_t)sg_phys(s);
  		s->dma_length  = s->length;
  	}
  
  	return nelems;
  }
  /*
   * The exported map_sg function for dma_ops (handles scatter-gather
   * lists).
   */
  static int map_sg(struct device *dev, struct scatterlist *sglist,
  		  int nelems, enum dma_data_direction dir,
  		  struct dma_attrs *attrs)
  {
  	unsigned long flags;
  	struct protection_domain *domain;
  	int i;
  	struct scatterlist *s;
  	phys_addr_t paddr;
  	int mapped_elems = 0;
  	u64 dma_mask;

  	INC_STATS_COUNTER(cnt_map_sg);
  	domain = get_domain(dev);
  	if (PTR_ERR(domain) == -EINVAL)
  		return map_sg_no_iommu(dev, sglist, nelems, dir);
  	else if (IS_ERR(domain))
  		return 0;

  	dma_mask = *dev->dma_mask;

  	spin_lock_irqsave(&domain->lock, flags);
  
  	for_each_sg(sglist, s, nelems, i) {
  		paddr = sg_phys(s);
  		s->dma_address = __map_single(dev, domain->priv,
  					      paddr, s->length, dir, false,
  					      dma_mask);
  
  		if (s->dma_address) {
  			s->dma_length = s->length;
  			mapped_elems++;
  		} else
  			goto unmap;
  	}
  	domain_flush_complete(domain);
  
  out:
  	spin_unlock_irqrestore(&domain->lock, flags);
  
  	return mapped_elems;
  unmap:
  	for_each_sg(sglist, s, mapped_elems, i) {
  		if (s->dma_address)
  			__unmap_single(domain->priv, s->dma_address,
  				       s->dma_length, dir);
  		s->dma_address = s->dma_length = 0;
  	}
  
  	mapped_elems = 0;
  
  	goto out;
  }
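  
  /*
   * Illustrative sketch, not part of this driver: how a device driver
   * would consume the mappings created by map_sg() above. It only goes
   * through the generic DMA API; the example_* names are assumptions
   * made for this example.
   */
  static size_t example_program_sg(struct device *dev, struct scatterlist *sgl,
  				 int nelems)
  {
  	struct scatterlist *s;
  	size_t total = 0;
  	int i, mapped;
  
  	mapped = dma_map_sg(dev, sgl, nelems, DMA_TO_DEVICE); /* -> map_sg() */
  	if (!mapped)
  		return 0;
  
  	/* program sg_dma_address(s)/sg_dma_len(s) into the device here */
  	for_each_sg(sgl, s, mapped, i)
  		total += sg_dma_len(s);
  
  	dma_unmap_sg(dev, sgl, nelems, DMA_TO_DEVICE); /* -> unmap_sg() */
  
  	return total;
  }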
  /*
   * The exported unmap_sg function for dma_ops (handles scatter-gather
   * lists).
   */
  static void unmap_sg(struct device *dev, struct scatterlist *sglist,
  		     int nelems, enum dma_data_direction dir,
  		     struct dma_attrs *attrs)
  {
  	unsigned long flags;
  	struct protection_domain *domain;
  	struct scatterlist *s;
  	int i;
  	INC_STATS_COUNTER(cnt_unmap_sg);
  	domain = get_domain(dev);
  	if (IS_ERR(domain))
  		return;
  	spin_lock_irqsave(&domain->lock, flags);
  
  	for_each_sg(sglist, s, nelems, i) {
  		__unmap_single(domain->priv, s->dma_address,
  			       s->dma_length, dir);
  		s->dma_address = s->dma_length = 0;
  	}
  	domain_flush_complete(domain);
  
  	spin_unlock_irqrestore(&domain->lock, flags);
  }
  /*
   * The exported alloc_coherent function for dma_ops.
   */
  static void *alloc_coherent(struct device *dev, size_t size,
  			    dma_addr_t *dma_addr, gfp_t flag)
  {
  	unsigned long flags;
  	void *virt_addr;
  	struct protection_domain *domain;
  	phys_addr_t paddr;
  	u64 dma_mask = dev->coherent_dma_mask;

  	INC_STATS_COUNTER(cnt_alloc_coherent);
  	domain = get_domain(dev);
  	if (PTR_ERR(domain) == -EINVAL) {
  		virt_addr = (void *)__get_free_pages(flag, get_order(size));
  		*dma_addr = __pa(virt_addr);
  		return virt_addr;
  	} else if (IS_ERR(domain))
  		return NULL;

  	dma_mask  = dev->coherent_dma_mask;
  	flag     &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
  	flag     |= __GFP_ZERO;
  
  	virt_addr = (void *)__get_free_pages(flag, get_order(size));
  	if (!virt_addr)
  		return NULL;

  	paddr = virt_to_phys(virt_addr);
  	if (!dma_mask)
  		dma_mask = *dev->dma_mask;
  	spin_lock_irqsave(&domain->lock, flags);
  	*dma_addr = __map_single(dev, domain->priv, paddr,
  				 size, DMA_BIDIRECTIONAL, true, dma_mask);

  	if (*dma_addr == DMA_ERROR_CODE) {
  		spin_unlock_irqrestore(&domain->lock, flags);
  		goto out_free;
  	}

  	domain_flush_complete(domain);

  	spin_unlock_irqrestore(&domain->lock, flags);
  
  	return virt_addr;
  
  out_free:
  
  	free_pages((unsigned long)virt_addr, get_order(size));
  
  	return NULL;
  }
  /*
   * The exported free_coherent function for dma_ops.
   */
  static void free_coherent(struct device *dev, size_t size,
  			  void *virt_addr, dma_addr_t dma_addr)
  {
  	unsigned long flags;
  	struct protection_domain *domain;

  	INC_STATS_COUNTER(cnt_free_coherent);
  	domain = get_domain(dev);
  	if (IS_ERR(domain))
  		goto free_mem;
  	spin_lock_irqsave(&domain->lock, flags);
  	__unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);

  	domain_flush_complete(domain);
  
  	spin_unlock_irqrestore(&domain->lock, flags);
  
  free_mem:
  	free_pages((unsigned long)virt_addr, get_order(size));
  }
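  
  /*
   * Illustrative sketch, not part of this driver: a driver allocating a
   * descriptor ring through the coherent DMA API ends up in
   * alloc_coherent() above. The example_* name and the ring size are
   * assumptions made for this example.
   */
  static void *example_alloc_ring(struct pci_dev *pdev, dma_addr_t *dma_handle)
  {
  	/* dispatches to amd_iommu_dma_ops.alloc_coherent() */
  	return dma_alloc_coherent(&pdev->dev, PAGE_SIZE, dma_handle, GFP_KERNEL);
  }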
  /*
   * This function is called by the DMA layer to find out if we can handle a
   * particular device. It is part of the dma_ops.
   */
  static int amd_iommu_dma_supported(struct device *dev, u64 mask)
  {
  	return check_device(dev);
  }
  
  /*
   * The function for pre-allocating protection domains.
   *
   * Once the driver core informs the DMA layer when a driver grabs a
   * device, we will not need to preallocate the protection domains
   * anymore. For now we have to.
   */
  static void prealloc_protection_domains(void)
  {
  	struct iommu_dev_data *dev_data;
  	struct dma_ops_domain *dma_dom;
  	struct pci_dev *dev = NULL;
  	u16 devid;

  	for_each_pci_dev(dev) {
  
  		/* Do we handle this device? */
  		if (!check_device(&dev->dev))
  			continue;

  		dev_data = get_dev_data(&dev->dev);
  		if (!amd_iommu_force_isolation && dev_data->iommu_v2) {
  			/* Make sure passthrough domain is allocated */
  			alloc_passthrough_domain();
  			dev_data->passthrough = true;
  			attach_device(&dev->dev, pt_domain);
  			pr_info("AMD-Vi: Using passthrough domain for device %s\n",
  				dev_name(&dev->dev));
  		}
  		/* Is there already any domain for it? */
  		if (domain_for_device(&dev->dev))
  			continue;
  
  		devid = get_device_id(&dev->dev);
  		dma_dom = dma_ops_domain_alloc();
  		if (!dma_dom)
  			continue;
  		init_unity_mappings_for_device(dma_dom, devid);
  		dma_dom->target_dev = devid;
  		attach_device(&dev->dev, &dma_dom->domain);

  		list_add_tail(&dma_dom->list, &iommu_pd_list);
  	}
  }
  static struct dma_map_ops amd_iommu_dma_ops = {
  	.alloc_coherent = alloc_coherent,
  	.free_coherent = free_coherent,
  	.map_page = map_page,
  	.unmap_page = unmap_page,
  	.map_sg = map_sg,
  	.unmap_sg = unmap_sg,
  	.dma_supported = amd_iommu_dma_supported,
  };
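  
  /*
   * Illustrative sketch, not part of this driver: a device driver never
   * calls map_page()/unmap_page() above directly; it uses the generic
   * DMA API, which dispatches through the dma_map_ops installed below.
   * The example_* names are assumptions made for this example.
   */
  static int example_dma_roundtrip(struct pci_dev *pdev, void *buf, size_t len)
  {
  	dma_addr_t dma;
  
  	/* dispatches to amd_iommu_dma_ops.map_page() */
  	dma = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
  	if (dma_mapping_error(&pdev->dev, dma))
  		return -ENOMEM;
  
  	/* ... program the device with 'dma' and run the transfer ... */
  
  	/* dispatches to amd_iommu_dma_ops.unmap_page() */
  	dma_unmap_single(&pdev->dev, dma, len, DMA_TO_DEVICE);
  
  	return 0;
  }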
  static unsigned device_dma_ops_init(void)
  {
  	struct iommu_dev_data *dev_data;
  	struct pci_dev *pdev = NULL;
  	unsigned unhandled = 0;
  
  	for_each_pci_dev(pdev) {
  		if (!check_device(&pdev->dev)) {
  			unhandled += 1;
  			continue;
  		}
  		dev_data = get_dev_data(&pdev->dev);
  
  		if (!dev_data->passthrough)
  			pdev->dev.archdata.dma_ops = &amd_iommu_dma_ops;
  		else
  			pdev->dev.archdata.dma_ops = &nommu_dma_ops;
  	}
  
  	return unhandled;
  }
  /*
   * The function that hooks the AMD IOMMU driver into the dma_ops
   * interface.
   */
  
  void __init amd_iommu_init_api(void)
  {
  	bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
  }
  int __init amd_iommu_init_dma_ops(void)
  {
  	struct amd_iommu *iommu;
  	int ret, unhandled;

  	/*
  	 * first allocate a default protection domain for every IOMMU we
  	 * found in the system. Devices not assigned to any other
  	 * protection domain will be assigned to the default one.
  	 */
  	for_each_iommu(iommu) {
  		iommu->default_dom = dma_ops_domain_alloc();
  		if (iommu->default_dom == NULL)
  			return -ENOMEM;
  		iommu->default_dom->domain.flags |= PD_DEFAULT_MASK;
  		ret = iommu_init_unity_mappings(iommu);
  		if (ret)
  			goto free_domains;
  	}
  	/*
  	 * Pre-allocate the protection domains for each device.
  	 */
  	prealloc_protection_domains();
  
  	iommu_detected = 1;
  	swiotlb = 0;

  	/* Finally, make the IOMMU dma_ops visible to the device drivers */
  	unhandled = device_dma_ops_init();
  	if (unhandled && max_pfn > MAX_DMA32_PFN) {
  		/* There are unhandled devices - initialize swiotlb for them */
  		swiotlb = 1;
  	}

  	amd_iommu_stats_init();
  	return 0;
  
  free_domains:
  	for_each_iommu(iommu) {
  		if (iommu->default_dom)
  			dma_ops_domain_free(iommu->default_dom);
  	}
  
  	return ret;
  }
  
  /*****************************************************************************
   *
   * The following functions belong to the exported interface of AMD IOMMU
   *
   * This interface allows access to lower level functions of the IOMMU
   * like protection domain handling and assignment of devices to domains
   * which is not possible with the dma_ops interface.
   *
   *****************************************************************************/
  static void cleanup_domain(struct protection_domain *domain)
  {
  	struct iommu_dev_data *dev_data, *next;
  	unsigned long flags;
  
  	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
  	list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
  		__detach_device(dev_data);
  		atomic_set(&dev_data->bind, 0);
  	}
  
  	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
  }
  static void protection_domain_free(struct protection_domain *domain)
  {
  	if (!domain)
  		return;
  	del_domain_from_list(domain);
  	if (domain->id)
  		domain_id_free(domain->id);
  
  	kfree(domain);
  }
  
  static struct protection_domain *protection_domain_alloc(void)
  {
  	struct protection_domain *domain;
  
  	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
  	if (!domain)
  		return NULL;
  
  	spin_lock_init(&domain->lock);
  	mutex_init(&domain->api_lock);
  	domain->id = domain_id_alloc();
  	if (!domain->id)
  		goto out_err;
  	INIT_LIST_HEAD(&domain->dev_list);

  	add_domain_to_list(domain);
  	return domain;
  
  out_err:
  	kfree(domain);
  
  	return NULL;
  }
  static int __init alloc_passthrough_domain(void)
  {
  	if (pt_domain != NULL)
  		return 0;
  
  	/* allocate passthrough domain */
  	pt_domain = protection_domain_alloc();
  	if (!pt_domain)
  		return -ENOMEM;
  
  	pt_domain->mode = PAGE_MODE_NONE;
  
  	return 0;
  }
  static int amd_iommu_domain_init(struct iommu_domain *dom)
  {
  	struct protection_domain *domain;
  
  	domain = protection_domain_alloc();
  	if (!domain)
  		goto out_free;
  
  	domain->mode    = PAGE_MODE_3_LEVEL;
  	domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
  	if (!domain->pt_root)
  		goto out_free;
  	domain->iommu_domain = dom;
  	dom->priv = domain;
  
  	return 0;
  
  out_free:
  	protection_domain_free(domain);
  
  	return -ENOMEM;
  }
  static void amd_iommu_domain_destroy(struct iommu_domain *dom)
  {
  	struct protection_domain *domain = dom->priv;
  
  	if (!domain)
  		return;
  
  	if (domain->dev_cnt > 0)
  		cleanup_domain(domain);
  
  	BUG_ON(domain->dev_cnt != 0);
  	if (domain->mode != PAGE_MODE_NONE)
  		free_pagetable(domain);

  	if (domain->flags & PD_IOMMUV2_MASK)
  		free_gcr3_table(domain);
  	protection_domain_free(domain);
  
  	dom->priv = NULL;
  }
  static void amd_iommu_detach_device(struct iommu_domain *dom,
  				    struct device *dev)
  {
  	struct iommu_dev_data *dev_data = dev->archdata.iommu;
  	struct amd_iommu *iommu;
  	u16 devid;
  	if (!check_device(dev))
  		return;
  	devid = get_device_id(dev);

  	if (dev_data->domain != NULL)
  		detach_device(dev);
  
  	iommu = amd_iommu_rlookup_table[devid];
  	if (!iommu)
  		return;
  	iommu_completion_wait(iommu);
  }
  static int amd_iommu_attach_device(struct iommu_domain *dom,
  				   struct device *dev)
  {
  	struct protection_domain *domain = dom->priv;
  	struct iommu_dev_data *dev_data;
  	struct amd_iommu *iommu;
  	int ret;

  	if (!check_device(dev))
  		return -EINVAL;
  	dev_data = dev->archdata.iommu;
  	iommu = amd_iommu_rlookup_table[dev_data->devid];
  	if (!iommu)
  		return -EINVAL;
  	if (dev_data->domain)
  		detach_device(dev);

  	ret = attach_device(dev, domain);
  
  	iommu_completion_wait(iommu);
  	return ret;
  }
  static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
  			 phys_addr_t paddr, size_t page_size, int iommu_prot)
  {
  	struct protection_domain *domain = dom->priv;
  	int prot = 0;
  	int ret;
  	if (domain->mode == PAGE_MODE_NONE)
  		return -EINVAL;
  	if (iommu_prot & IOMMU_READ)
  		prot |= IOMMU_PROT_IR;
  	if (iommu_prot & IOMMU_WRITE)
  		prot |= IOMMU_PROT_IW;
  	mutex_lock(&domain->api_lock);
  	ret = iommu_map_page(domain, iova, paddr, prot, page_size);
  	mutex_unlock(&domain->api_lock);
  	return ret;
  }
  static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
  			   size_t page_size)
  {
  	struct protection_domain *domain = dom->priv;
  	size_t unmap_size;

  	if (domain->mode == PAGE_MODE_NONE)
  		return -EINVAL;
  	mutex_lock(&domain->api_lock);
  	unmap_size = iommu_unmap_page(domain, iova, page_size);
  	mutex_unlock(&domain->api_lock);

  	domain_flush_tlb_pde(domain);

  	return unmap_size;
  }
  static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
  					  unsigned long iova)
  {
  	struct protection_domain *domain = dom->priv;
  	unsigned long offset_mask;
  	phys_addr_t paddr;
  	u64 *pte, __pte;

  	if (domain->mode == PAGE_MODE_NONE)
  		return iova;
  	pte = fetch_pte(domain, iova);

  	if (!pte || !IOMMU_PTE_PRESENT(*pte))
  		return 0;
  	if (PM_PTE_LEVEL(*pte) == 0)
  		offset_mask = PAGE_SIZE - 1;
  	else
  		offset_mask = PTE_PAGE_SIZE(*pte) - 1;
  
  	__pte = *pte & PM_ADDR_MASK;
  	paddr = (__pte & ~offset_mask) | (iova & offset_mask);
  
  	return paddr;
  }
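  
  /*
   * Worked example for the offset handling above: if the iova is covered
   * by a 2 MiB large-page PTE, PTE_PAGE_SIZE(*pte) is 0x200000, so
   * offset_mask is 0x1fffff and the low 21 bits of the iova are carried
   * over into the returned physical address.
   */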
  static int amd_iommu_domain_has_cap(struct iommu_domain *domain,
  				    unsigned long cap)
  {
  	switch (cap) {
  	case IOMMU_CAP_CACHE_COHERENCY:
  		return 1;
  	}
  	return 0;
  }
  static int amd_iommu_device_group(struct device *dev, unsigned int *groupid)
  {
  	struct iommu_dev_data *dev_data = dev->archdata.iommu;
  	struct pci_dev *pdev = to_pci_dev(dev);
  	u16 devid;
  
  	if (!dev_data)
  		return -ENODEV;
  	if (pdev->is_virtfn || !iommu_group_mf)
  		devid = dev_data->devid;
  	else
  		devid = calc_devid(pdev->bus->number,
  				   PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
  
  	*groupid = amd_iommu_alias_table[devid];
  
  	return 0;
  }
  static struct iommu_ops amd_iommu_ops = {
  	.domain_init = amd_iommu_domain_init,
  	.domain_destroy = amd_iommu_domain_destroy,
  	.attach_dev = amd_iommu_attach_device,
  	.detach_dev = amd_iommu_detach_device,
  	.map = amd_iommu_map,
  	.unmap = amd_iommu_unmap,
  	.iova_to_phys = amd_iommu_iova_to_phys,
  	.domain_has_cap = amd_iommu_domain_has_cap,
  	.device_group = amd_iommu_device_group,
  	.pgsize_bitmap	= AMD_IOMMU_PGSIZES,
  };
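  
  /*
   * Illustrative sketch, not part of this driver: how a user of the
   * generic IOMMU API (KVM device assignment, for example) drives the
   * amd_iommu_ops callbacks above. 'dev' is an assumed IOMMU-managed
   * device; the example_* name is an assumption made for this example.
   */
  static int example_iommu_api_map(struct device *dev, unsigned long iova,
  				 phys_addr_t paddr)
  {
  	struct iommu_domain *dom;
  	int ret;
  
  	dom = iommu_domain_alloc(&pci_bus_type);  /* -> amd_iommu_domain_init() */
  	if (!dom)
  		return -ENOMEM;
  
  	ret = iommu_attach_device(dom, dev);      /* -> amd_iommu_attach_device() */
  	if (ret)
  		goto out_free;
  
  	/* -> amd_iommu_map(); the core splits this into supported page sizes */
  	ret = iommu_map(dom, iova, paddr, PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
  	if (ret)
  		goto out_detach;
  
  	return 0;
  
  out_detach:
  	iommu_detach_device(dom, dev);
  out_free:
  	iommu_domain_free(dom);
  	return ret;
  }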
  /*****************************************************************************
   *
   * The next functions do a basic initialization of IOMMU for pass through
   * mode
   *
   * In passthrough mode the IOMMU is initialized and enabled but not used for
   * DMA-API translation.
   *
   *****************************************************************************/
  
  int __init amd_iommu_init_passthrough(void)
  {
  	struct iommu_dev_data *dev_data;
  	struct pci_dev *dev = NULL;
  	struct amd_iommu *iommu;
  	u16 devid;
  	int ret;

  	ret = alloc_passthrough_domain();
  	if (ret)
  		return ret;

  	for_each_pci_dev(dev) {
  		if (!check_device(&dev->dev))
  			continue;
  		dev_data = get_dev_data(&dev->dev);
  		dev_data->passthrough = true;
  		devid = get_device_id(&dev->dev);
  		iommu = amd_iommu_rlookup_table[devid];
  		if (!iommu)
  			continue;
  		attach_device(&dev->dev, pt_domain);
  	}
  	amd_iommu_stats_init();
  	pr_info("AMD-Vi: Initialized for Passthrough Mode\n");
  
  	return 0;
  }
  
  /* IOMMUv2 specific functions */
  int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
  {
  	return atomic_notifier_chain_register(&ppr_notifier, nb);
  }
  EXPORT_SYMBOL(amd_iommu_register_ppr_notifier);
  
  int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb)
  {
  	return atomic_notifier_chain_unregister(&ppr_notifier, nb);
  }
  EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier);
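  
  /*
   * Illustrative sketch, not part of this driver: how the IOMMUv2 layer
   * might subscribe to PPR fault notifications through the interface
   * above. The example_* names are assumptions made for this example;
   * a real handler would resolve the fault described by 'data' and then
   * answer it via amd_iommu_complete_ppr().
   */
  static int example_ppr_handler(struct notifier_block *nb,
  			       unsigned long e, void *data)
  {
  	return NOTIFY_DONE;
  }
  
  static struct notifier_block example_ppr_nb = {
  	.notifier_call = example_ppr_handler,
  };
  
  static int __init example_ppr_init(void)
  {
  	return amd_iommu_register_ppr_notifier(&example_ppr_nb);
  }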
  
  void amd_iommu_domain_direct_map(struct iommu_domain *dom)
  {
  	struct protection_domain *domain = dom->priv;
  	unsigned long flags;
  
  	spin_lock_irqsave(&domain->lock, flags);
  
  	/* Update data structure */
  	domain->mode    = PAGE_MODE_NONE;
  	domain->updated = true;
  
  	/* Make changes visible to IOMMUs */
  	update_domain(domain);
  
  	/* Page-table is not visible to IOMMU anymore, so free it */
  	free_pagetable(domain);
  
  	spin_unlock_irqrestore(&domain->lock, flags);
  }
  EXPORT_SYMBOL(amd_iommu_domain_direct_map);
  
  int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
  {
  	struct protection_domain *domain = dom->priv;
  	unsigned long flags;
  	int levels, ret;
  
  	if (pasids <= 0 || pasids > (PASID_MASK + 1))
  		return -EINVAL;
  
  	/* Number of GCR3 table levels required */
  	for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
  		levels += 1;
  
  	if (levels > amd_iommu_max_glx_val)
  		return -EINVAL;
  
  	spin_lock_irqsave(&domain->lock, flags);
  
  	/*
  	 * Save us all sanity checks whether devices already in the
  	 * domain support IOMMUv2. Just force that the domain has no
  	 * devices attached when it is switched into IOMMUv2 mode.
  	 */
  	ret = -EBUSY;
  	if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK)
  		goto out;
  
  	ret = -ENOMEM;
  	domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
  	if (domain->gcr3_tbl == NULL)
  		goto out;
  
  	domain->glx      = levels;
  	domain->flags   |= PD_IOMMUV2_MASK;
  	domain->updated  = true;
  
  	update_domain(domain);
  
  	ret = 0;
  
  out:
  	spin_unlock_irqrestore(&domain->lock, flags);
  
  	return ret;
  }
  EXPORT_SYMBOL(amd_iommu_domain_enable_v2);
  
  static int __flush_pasid(struct protection_domain *domain, int pasid,
  			 u64 address, bool size)
  {
  	struct iommu_dev_data *dev_data;
  	struct iommu_cmd cmd;
  	int i, ret;
  
  	if (!(domain->flags & PD_IOMMUV2_MASK))
  		return -EINVAL;
  
  	build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size);
  
  	/*
  	 * IOMMU TLB needs to be flushed before Device TLB to
  	 * prevent device TLB refill from IOMMU TLB
  	 */
  	for (i = 0; i < amd_iommus_present; ++i) {
  		if (domain->dev_iommu[i] == 0)
  			continue;
  
  		ret = iommu_queue_command(amd_iommus[i], &cmd);
  		if (ret != 0)
  			goto out;
  	}
  
  	/* Wait until IOMMU TLB flushes are complete */
  	domain_flush_complete(domain);
  
  	/* Now flush device TLBs */
  	list_for_each_entry(dev_data, &domain->dev_list, list) {
  		struct amd_iommu *iommu;
  		int qdep;
  
  		BUG_ON(!dev_data->ats.enabled);
  
  		qdep  = dev_data->ats.qdep;
  		iommu = amd_iommu_rlookup_table[dev_data->devid];
  
  		build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
  				      qdep, address, size);
  
  		ret = iommu_queue_command(iommu, &cmd);
  		if (ret != 0)
  			goto out;
  	}
  
  	/* Wait until all device TLBs are flushed */
  	domain_flush_complete(domain);
  
  	ret = 0;
  
  out:
  
  	return ret;
  }
  
  static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid,
  				  u64 address)
  {
  	INC_STATS_COUNTER(invalidate_iotlb);
  	return __flush_pasid(domain, pasid, address, false);
  }
  
  int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
  			 u64 address)
  {
  	struct protection_domain *domain = dom->priv;
  	unsigned long flags;
  	int ret;
  
  	spin_lock_irqsave(&domain->lock, flags);
  	ret = __amd_iommu_flush_page(domain, pasid, address);
  	spin_unlock_irqrestore(&domain->lock, flags);
  
  	return ret;
  }
  EXPORT_SYMBOL(amd_iommu_flush_page);
  
  static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid)
  {
  	INC_STATS_COUNTER(invalidate_iotlb_all);
  	return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
  			     true);
  }
  
  int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid)
  {
  	struct protection_domain *domain = dom->priv;
  	unsigned long flags;
  	int ret;
  
  	spin_lock_irqsave(&domain->lock, flags);
  	ret = __amd_iommu_flush_tlb(domain, pasid);
  	spin_unlock_irqrestore(&domain->lock, flags);
  
  	return ret;
  }
  EXPORT_SYMBOL(amd_iommu_flush_tlb);
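  
  /*
   * Illustrative sketch, not part of this driver: when an IOMMUv2 user
   * tears down all mappings of a PASID, a single full flush through
   * amd_iommu_flush_tlb() is cheaper than per-page
   * amd_iommu_flush_page() calls. The example_* name is an assumption.
   */
  static void example_teardown_pasid(struct iommu_domain *dom, int pasid)
  {
  	/* flushes the IOMMU TLBs first, then the device TLBs */
  	amd_iommu_flush_tlb(dom, pasid);
  }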
  static u64 *__get_gcr3_pte(u64 *root, int level, int pasid, bool alloc)
  {
  	int index;
  	u64 *pte;
  
  	while (true) {
  
  		index = (pasid >> (9 * level)) & 0x1ff;
  		pte   = &root[index];
  
  		if (level == 0)
  			break;
  
  		if (!(*pte & GCR3_VALID)) {
  			if (!alloc)
  				return NULL;
  
  			root = (void *)get_zeroed_page(GFP_ATOMIC);
  			if (root == NULL)
  				return NULL;
  
  			*pte = __pa(root) | GCR3_VALID;
  		}
  
  		root = __va(*pte & PAGE_MASK);
  
  		level -= 1;
  	}
  
  	return pte;
  }
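  
  /*
   * Worked example for the walk above, assuming glx == 1 (a two-level
   * GCR3 table): for pasid 0x12345 the level-1 index is
   * (0x12345 >> 9) & 0x1ff = 0x91 and the level-0 index is
   * 0x12345 & 0x1ff = 0x145.
   */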
  
  static int __set_gcr3(struct protection_domain *domain, int pasid,
  		      unsigned long cr3)
  {
  	u64 *pte;
  
  	if (domain->mode != PAGE_MODE_NONE)
  		return -EINVAL;
  
  	pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true);
  	if (pte == NULL)
  		return -ENOMEM;
  
  	*pte = (cr3 & PAGE_MASK) | GCR3_VALID;
  
  	return __amd_iommu_flush_tlb(domain, pasid);
  }
  
  static int __clear_gcr3(struct protection_domain *domain, int pasid)
  {
  	u64 *pte;
  
  	if (domain->mode != PAGE_MODE_NONE)
  		return -EINVAL;
  
  	pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false);
  	if (pte == NULL)
  		return 0;
  
  	*pte = 0;
  
  	return __amd_iommu_flush_tlb(domain, pasid);
  }
  
  int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
  			      unsigned long cr3)
  {
  	struct protection_domain *domain = dom->priv;
  	unsigned long flags;
  	int ret;
  
  	spin_lock_irqsave(&domain->lock, flags);
  	ret = __set_gcr3(domain, pasid, cr3);
  	spin_unlock_irqrestore(&domain->lock, flags);
  
  	return ret;
  }
  EXPORT_SYMBOL(amd_iommu_domain_set_gcr3);
  
  int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid)
  {
  	struct protection_domain *domain = dom->priv;
  	unsigned long flags;
  	int ret;
  
  	spin_lock_irqsave(&domain->lock, flags);
  	ret = __clear_gcr3(domain, pasid);
  	spin_unlock_irqrestore(&domain->lock, flags);
  
  	return ret;
  }
  EXPORT_SYMBOL(amd_iommu_domain_clear_gcr3);
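  
  /*
   * Illustrative sketch, not part of this driver: the call order expected
   * by the IOMMUv2 interface above for switching a device-free domain
   * into IOMMUv2 mode. 'cr3' is an assumed process page-table root and
   * the PASID count is arbitrary; the example_* name is an assumption.
   */
  static int example_enable_v2(struct iommu_domain *dom, unsigned long cr3)
  {
  	int ret;
  
  	/* drop the host page-table (PAGE_MODE_NONE) */
  	amd_iommu_domain_direct_map(dom);
  
  	/* allocate GCR3 table levels for up to 16 PASIDs */
  	ret = amd_iommu_domain_enable_v2(dom, 16);
  	if (ret)
  		return ret;
  
  	/* bind PASID 0 to the process page-table */
  	return amd_iommu_domain_set_gcr3(dom, 0, cr3);
  }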
  
  int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
  			   int status, int tag)
  {
  	struct iommu_dev_data *dev_data;
  	struct amd_iommu *iommu;
  	struct iommu_cmd cmd;
  	INC_STATS_COUNTER(complete_ppr);
  	dev_data = get_dev_data(&pdev->dev);
  	iommu    = amd_iommu_rlookup_table[dev_data->devid];
  
  	build_complete_ppr(&cmd, dev_data->devid, pasid, status,
  			   tag, dev_data->pri_tlp);
  
  	return iommu_queue_command(iommu, &cmd);
  }
  EXPORT_SYMBOL(amd_iommu_complete_ppr);
  
  struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev)
  {
  	struct protection_domain *domain;
  
  	domain = get_domain(&pdev->dev);
  	if (IS_ERR(domain))
  		return NULL;
  
  	/* Only return IOMMUv2 domains */
  	if (!(domain->flags & PD_IOMMUV2_MASK))
  		return NULL;
  
  	return domain->iommu_domain;
  }
  EXPORT_SYMBOL(amd_iommu_get_v2_domain);
  
  void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum)
  {
  	struct iommu_dev_data *dev_data;
  
  	if (!amd_iommu_v2_supported())
  		return;
  
  	dev_data = get_dev_data(&pdev->dev);
  	dev_data->errata |= (1 << erratum);
  }
  EXPORT_SYMBOL(amd_iommu_enable_device_erratum);
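  
  /*
   * Illustrative sketch, not part of this driver: a device driver that
   * knows its hardware needs a PRI workaround would flag the erratum
   * before the IOMMUv2 layer enables PRI. The erratum constant is
   * assumed to be one of the AMD_PRI_DEV_ERRATUM_* values from
   * linux/amd-iommu.h; the example_* name is an assumption.
   */
  static void example_flag_erratum(struct pci_dev *pdev)
  {
  	amd_iommu_enable_device_erratum(pdev, AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE);
  }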
  
  int amd_iommu_device_info(struct pci_dev *pdev,
                            struct amd_iommu_device_info *info)
  {
  	int max_pasids;
  	int pos;
  
  	if (pdev == NULL || info == NULL)
  		return -EINVAL;
  
  	if (!amd_iommu_v2_supported())
  		return -EINVAL;
  
  	memset(info, 0, sizeof(*info));
  
  	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS);
  	if (pos)
  		info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
  
  	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
  	if (pos)
  		info->flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP;
  
  	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
  	if (pos) {
  		int features;
  
  		max_pasids = 1 << (9 * (amd_iommu_max_glx_val + 1));
  		max_pasids = min(max_pasids, (1 << 20));
  
  		info->flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
  		info->max_pasids = min(pci_max_pasids(pdev), max_pasids);
  
  		features = pci_pasid_features(pdev);
  		if (features & PCI_PASID_CAP_EXEC)
  			info->flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP;
  		if (features & PCI_PASID_CAP_PRIV)
  			info->flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP;
  	}
  
  	return 0;
  }
  EXPORT_SYMBOL(amd_iommu_device_info);
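  
  /*
   * Illustrative sketch, not part of this driver: how an IOMMUv2-aware
   * driver could use amd_iommu_device_info() above to check ATS/PRI/PASID
   * support before binding PASIDs. The example_* name is an assumption.
   */
  static bool example_device_is_v2_capable(struct pci_dev *pdev)
  {
  	struct amd_iommu_device_info info;
  
  	if (amd_iommu_device_info(pdev, &info))
  		return false;
  
  	return (info.flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP) &&
  	       (info.flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) &&
  	       (info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP);
  }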