drivers/iommu/s390-iommu.c

  // SPDX-License-Identifier: GPL-2.0
  /*
   * IOMMU API for s390 PCI devices
   *
   * Copyright IBM Corp. 2015
   * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
   */
  
  #include <linux/pci.h>
  #include <linux/iommu.h>
  #include <linux/iommu-helper.h>
  #include <linux/sizes.h>
  #include <asm/pci_dma.h>
  
  /*
 * Physically contiguous memory regions can be mapped with 4 KiB alignment.
 * We allow all page sizes that are a power-of-two multiple of 4 KiB (no
 * special large page support so far).
   */
  #define S390_IOMMU_PGSIZES	(~0xFFFUL)
  static const struct iommu_ops s390_iommu_ops;

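/*
 * A domain wraps one I/O translation table (dma_table) that is shared
 * by all attached devices; dma_table_lock serializes updates to the
 * table entries, list_lock protects the list of attached devices.
 */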
  struct s390_domain {
  	struct iommu_domain	domain;
  	struct list_head	devices;
  	unsigned long		*dma_table;
  	spinlock_t		dma_table_lock;
  	spinlock_t		list_lock;
  };
  
  struct s390_domain_device {
  	struct list_head	list;
  	struct zpci_dev		*zdev;
  };
  
  static struct s390_domain *to_s390_domain(struct iommu_domain *dom)
  {
  	return container_of(dom, struct s390_domain, domain);
  }
  
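/*
 * s390 PCI DMA is always coherent and MSIs are always delivered via
 * the machine's adapter interrupts, so report both capabilities
 * unconditionally.
 */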
  static bool s390_iommu_capable(enum iommu_cap cap)
  {
  	switch (cap) {
  	case IOMMU_CAP_CACHE_COHERENCY:
  		return true;
  	case IOMMU_CAP_INTR_REMAP:
  		return true;
  	default:
  		return false;
  	}
  }
  static struct iommu_domain *s390_domain_alloc(unsigned domain_type)
  {
  	struct s390_domain *s390_domain;
  
  	if (domain_type != IOMMU_DOMAIN_UNMANAGED)
  		return NULL;
  
  	s390_domain = kzalloc(sizeof(*s390_domain), GFP_KERNEL);
  	if (!s390_domain)
  		return NULL;
  
  	s390_domain->dma_table = dma_alloc_cpu_table();
  	if (!s390_domain->dma_table) {
  		kfree(s390_domain);
  		return NULL;
  	}
  
  	spin_lock_init(&s390_domain->dma_table_lock);
  	spin_lock_init(&s390_domain->list_lock);
  	INIT_LIST_HEAD(&s390_domain->devices);
  
  	return &s390_domain->domain;
  }
  static void s390_domain_free(struct iommu_domain *domain)
  {
  	struct s390_domain *s390_domain = to_s390_domain(domain);
  
  	dma_cleanup_tables(s390_domain->dma_table);
  	kfree(s390_domain);
  }
  
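/*
 * Attaching switches the device from its default DMA translation table
 * to the domain's shared table and registers that table with the
 * hardware (zpci_register_ioat). The first device attached to a domain
 * determines the DMA aperture, all later devices must match it.
 */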
  static int s390_iommu_attach_device(struct iommu_domain *domain,
  				    struct device *dev)
  {
  	struct s390_domain *s390_domain = to_s390_domain(domain);
  	struct zpci_dev *zdev = to_zpci_dev(dev);
  	struct s390_domain_device *domain_device;
  	unsigned long flags;
  	int rc;
  
  	if (!zdev)
  		return -ENODEV;
  
  	domain_device = kzalloc(sizeof(*domain_device), GFP_KERNEL);
  	if (!domain_device)
  		return -ENOMEM;
  
  	if (zdev->dma_table)
  		zpci_dma_exit_device(zdev);
  
  	zdev->dma_table = s390_domain->dma_table;
  	rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
  				(u64) zdev->dma_table);
  	if (rc)
  		goto out_restore;
  
  	spin_lock_irqsave(&s390_domain->list_lock, flags);
  	/* First device defines the DMA range limits */
  	if (list_empty(&s390_domain->devices)) {
  		domain->geometry.aperture_start = zdev->start_dma;
  		domain->geometry.aperture_end = zdev->end_dma;
  		domain->geometry.force_aperture = true;
  	/* Allow only devices with identical DMA range limits */
  	} else if (domain->geometry.aperture_start != zdev->start_dma ||
  		   domain->geometry.aperture_end != zdev->end_dma) {
  		rc = -EINVAL;
  		spin_unlock_irqrestore(&s390_domain->list_lock, flags);
  		goto out_restore;
  	}
  	domain_device->zdev = zdev;
  	zdev->s390_domain = s390_domain;
  	list_add(&domain_device->list, &s390_domain->devices);
  	spin_unlock_irqrestore(&s390_domain->list_lock, flags);
  
  	return 0;
  
  out_restore:
  	zpci_dma_init_device(zdev);
  	kfree(domain_device);
  
  	return rc;
  }
  
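/*
 * Detaching removes the device from the domain and restores its
 * default, per-device DMA translation table.
 */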
  static void s390_iommu_detach_device(struct iommu_domain *domain,
  				     struct device *dev)
  {
  	struct s390_domain *s390_domain = to_s390_domain(domain);
  	struct zpci_dev *zdev = to_zpci_dev(dev);
  	struct s390_domain_device *domain_device, *tmp;
  	unsigned long flags;
  	int found = 0;
  
  	if (!zdev)
  		return;
  
  	spin_lock_irqsave(&s390_domain->list_lock, flags);
  	list_for_each_entry_safe(domain_device, tmp, &s390_domain->devices,
  				 list) {
  		if (domain_device->zdev == zdev) {
  			list_del(&domain_device->list);
  			kfree(domain_device);
  			found = 1;
  			break;
  		}
  	}
  	spin_unlock_irqrestore(&s390_domain->list_lock, flags);
  
  	if (found) {
  		zdev->s390_domain = NULL;
  		zpci_unregister_ioat(zdev, 0);
  		zpci_dma_init_device(zdev);
  	}
  }
  static struct iommu_device *s390_iommu_probe_device(struct device *dev)
  {
  	struct zpci_dev *zdev = to_zpci_dev(dev);

  	return &zdev->iommu_dev;
  }
  static void s390_iommu_release_device(struct device *dev)
  {
  	struct zpci_dev *zdev = to_zpci_dev(dev);
  	struct iommu_domain *domain;
  
  	/*
  	 * This is a workaround for a scenario where the IOMMU API common code
  	 * "forgets" to call the detach_dev callback: After binding a device
  	 * to vfio-pci and completing the VFIO_SET_IOMMU ioctl (which triggers
  	 * the attach_dev), removing the device via
  	 * "echo 1 > /sys/bus/pci/devices/.../remove" won't trigger detach_dev,
  	 * only release_device will be called via the BUS_NOTIFY_REMOVED_DEVICE
  	 * notifier.
  	 *
  	 * So let's call detach_dev from here if it hasn't been called before.
  	 */
  	if (zdev && zdev->s390_domain) {
  		domain = iommu_get_domain_for_dev(dev);
  		if (domain)
  			s390_iommu_detach_device(domain, dev);
  	}
  }
  
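/*
 * Update the I/O translation table entries for a page range and
 * refresh the translations on all attached devices
 * (zpci_refresh_trans). If a table walk fails, the entries written so
 * far are invalidated again under the undo_cpu_trans label.
 */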
  static int s390_iommu_update_trans(struct s390_domain *s390_domain,
  				   unsigned long pa, dma_addr_t dma_addr,
  				   size_t size, int flags)
  {
  	struct s390_domain_device *domain_device;
  	u8 *page_addr = (u8 *) (pa & PAGE_MASK);
  	dma_addr_t start_dma_addr = dma_addr;
  	unsigned long irq_flags, nr_pages, i;
  	unsigned long *entry;
  	int rc = 0;
  
  	if (dma_addr < s390_domain->domain.geometry.aperture_start ||
  	    dma_addr + size > s390_domain->domain.geometry.aperture_end)
  		return -EINVAL;
  
  	nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
  	if (!nr_pages)
  		return 0;
  
  	spin_lock_irqsave(&s390_domain->dma_table_lock, irq_flags);
  	for (i = 0; i < nr_pages; i++) {
  		entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr);
  		if (!entry) {
  			rc = -ENOMEM;
  			goto undo_cpu_trans;
  		}
  		dma_update_cpu_trans(entry, page_addr, flags);
  		page_addr += PAGE_SIZE;
  		dma_addr += PAGE_SIZE;
  	}
  
  	spin_lock(&s390_domain->list_lock);
  	list_for_each_entry(domain_device, &s390_domain->devices, list) {
  		rc = zpci_refresh_trans((u64) domain_device->zdev->fh << 32,
  					start_dma_addr, nr_pages * PAGE_SIZE);
  		if (rc)
  			break;
  	}
  	spin_unlock(&s390_domain->list_lock);
  
  undo_cpu_trans:
  	if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
  		flags = ZPCI_PTE_INVALID;
  		while (i-- > 0) {
  			page_addr -= PAGE_SIZE;
  			dma_addr -= PAGE_SIZE;
  			entry = dma_walk_cpu_trans(s390_domain->dma_table,
  						   dma_addr);
  			if (!entry)
  				break;
  			dma_update_cpu_trans(entry, page_addr, flags);
  		}
  	}
  	spin_unlock_irqrestore(&s390_domain->dma_table_lock, irq_flags);
  
  	return rc;
  }
  
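/*
 * Mappings must at least be readable; a mapping without IOMMU_WRITE is
 * installed write-protected (ZPCI_TABLE_PROTECTED).
 */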
  static int s390_iommu_map(struct iommu_domain *domain, unsigned long iova,
  			  phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
  {
  	struct s390_domain *s390_domain = to_s390_domain(domain);
  	int flags = ZPCI_PTE_VALID, rc = 0;
  
  	if (!(prot & IOMMU_READ))
  		return -EINVAL;
  
  	if (!(prot & IOMMU_WRITE))
  		flags |= ZPCI_TABLE_PROTECTED;
  
  	rc = s390_iommu_update_trans(s390_domain, (unsigned long) paddr, iova,
  				     size, flags);
  
  	return rc;
  }
  
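/*
 * Translate an IOVA back to a physical address by walking the
 * region-third, segment and page tables by hand; returns 0 if no
 * valid translation exists.
 */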
  static phys_addr_t s390_iommu_iova_to_phys(struct iommu_domain *domain,
  					   dma_addr_t iova)
  {
  	struct s390_domain *s390_domain = to_s390_domain(domain);
  	unsigned long *sto, *pto, *rto, flags;
  	unsigned int rtx, sx, px;
  	phys_addr_t phys = 0;
  
  	if (iova < domain->geometry.aperture_start ||
  	    iova > domain->geometry.aperture_end)
  		return 0;
  
  	rtx = calc_rtx(iova);
  	sx = calc_sx(iova);
  	px = calc_px(iova);
  	rto = s390_domain->dma_table;
  
  	spin_lock_irqsave(&s390_domain->dma_table_lock, flags);
  	if (rto && reg_entry_isvalid(rto[rtx])) {
  		sto = get_rt_sto(rto[rtx]);
  		if (sto && reg_entry_isvalid(sto[sx])) {
  			pto = get_st_pto(sto[sx]);
  			if (pto && pt_entry_isvalid(pto[px]))
  				phys = pto[px] & ZPCI_PTE_ADDR_MASK;
  		}
  	}
  	spin_unlock_irqrestore(&s390_domain->dma_table_lock, flags);
  
  	return phys;
  }
  
  static size_t s390_iommu_unmap(struct iommu_domain *domain,
  			       unsigned long iova, size_t size,
  			       struct iommu_iotlb_gather *gather)
  {
  	struct s390_domain *s390_domain = to_s390_domain(domain);
  	int flags = ZPCI_PTE_INVALID;
  	phys_addr_t paddr;
  	int rc;
  
  	paddr = s390_iommu_iova_to_phys(domain, iova);
  	if (!paddr)
  		return 0;
  
  	rc = s390_iommu_update_trans(s390_domain, (unsigned long) paddr, iova,
  				     size, flags);
  	if (rc)
  		return 0;
  
  	return size;
  }
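/*
 * Called from the s390 PCI code when a zPCI device is set up: creates
 * the sysfs entry and registers the device with the IOMMU core.
 */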
  int zpci_init_iommu(struct zpci_dev *zdev)
  {
  	int rc = 0;
  
  	rc = iommu_device_sysfs_add(&zdev->iommu_dev, NULL, NULL,
  				    "s390-iommu.%08x", zdev->fid);
  	if (rc)
  		goto out_err;
  
  	iommu_device_set_ops(&zdev->iommu_dev, &s390_iommu_ops);
  
  	rc = iommu_device_register(&zdev->iommu_dev);
  	if (rc)
  		goto out_sysfs;
  
  	return 0;
  
  out_sysfs:
  	iommu_device_sysfs_remove(&zdev->iommu_dev);
  
  out_err:
  	return rc;
  }
  
  void zpci_destroy_iommu(struct zpci_dev *zdev)
  {
  	iommu_device_unregister(&zdev->iommu_dev);
  	iommu_device_sysfs_remove(&zdev->iommu_dev);
  }
  static const struct iommu_ops s390_iommu_ops = {
  	.capable = s390_iommu_capable,
  	.domain_alloc = s390_domain_alloc,
  	.domain_free = s390_domain_free,
  	.attach_dev = s390_iommu_attach_device,
  	.detach_dev = s390_iommu_detach_device,
  	.map = s390_iommu_map,
  	.unmap = s390_iommu_unmap,
  	.iova_to_phys = s390_iommu_iova_to_phys,
  	.probe_device = s390_iommu_probe_device,
  	.release_device = s390_iommu_release_device,
  	.device_group = generic_device_group,
  	.pgsize_bitmap = S390_IOMMU_PGSIZES,
  };
  
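/* Register the s390 IOMMU ops for all devices on the PCI bus. */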
  static int __init s390_iommu_init(void)
  {
  	return bus_set_iommu(&pci_bus_type, &s390_iommu_ops);
  }
  subsys_initcall(s390_iommu_init);