Blame view

drivers/iommu/dma-iommu.c 35.6 KB
// SPDX-License-Identifier: GPL-2.0-only
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 */
#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct iova_domain	iovad;
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;

	/* Domain for flush queue callback; NULL if flush queue not in use */
	struct iommu_domain		*fq_domain;
};

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);

/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);
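
/*
 * Illustrative sketch (not part of the original file): how a hypothetical
 * IOMMU driver might pair the cookie helpers above with its domain
 * callbacks, as described in the kernel-doc comments. The "foo_*" names
 * are placeholders for this example only.
 *
 *	static struct iommu_domain *foo_domain_alloc(unsigned int type)
 *	{
 *		struct iommu_domain *domain = foo_alloc_domain();
 *
 *		if (domain && type == IOMMU_DOMAIN_DMA &&
 *		    iommu_get_dma_cookie(domain)) {
 *			foo_free_domain(domain);
 *			return NULL;
 *		}
 *		return domain;
 *	}
 *
 *	static void foo_domain_free(struct iommu_domain *domain)
 *	{
 *		iommu_put_dma_cookie(domain);
 *		foo_free_domain(domain);
 *	}
 */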

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{

	if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
		iort_iommu_msi_get_resv_regions(dev, list);

}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);

static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	for (i = 0; i < num_pages; i++) {
		msi_page = kmalloc(sizeof(*msi_page), GFP_KERNEL);
		if (!msi_page)
			return -ENOMEM;

		msi_page->phys = start;
		msi_page->iova = start;
		INIT_LIST_HEAD(&msi_page->list);
		list_add(&msi_page->list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}

static int iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;
	phys_addr_t start = 0, end;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}

	/* Get reserved DMA windows from host bridge */
	resource_list_for_each_entry(window, &bridge->dma_ranges) {
		end = window->res->start - window->offset;
resv_iova:
		if (end > start) {
			lo = iova_pfn(iovad, start);
			hi = iova_pfn(iovad, end);
			reserve_iova(iovad, lo, hi);
		} else {
			/* dma_ranges list should be sorted */
			dev_err(&dev->dev, "Failed to reserve IOVA\n");
			return -EINVAL;
		}

		start = window->res->end - window->offset + 1;
		/* If window is last entry */
		if (window->node.next == &bridge->dma_ranges &&
		    end != ~(phys_addr_t)0) {
			end = ~(phys_addr_t)0;
			goto resv_iova;
		}
	}

	return 0;
}

static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev)) {
		ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
		if (ret)
			return ret;
	}

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}

static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
{
	struct iommu_dma_cookie *cookie;
	struct iommu_domain *domain;

	cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
	domain = cookie->fq_domain;
	/*
	 * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
	 * implies that ops->flush_iotlb_all must be non-NULL.
	 */
	domain->ops->flush_iotlb_all(domain);
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	unsigned long order, base_pfn;
	struct iova_domain *iovad;
	int attr;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	iovad = &cookie->iovad;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
	}
	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}

		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);

	if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
			DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
		if (init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all,
					NULL))
			pr_warn("iova flush queue initialization failed\n");
		else
			cookie->fq_domain = domain;
	}

	if (!dev)
		return 0;

	return iova_reserve_iommu_regions(dev, domain);
}

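/*
 * In a kdump (crash capture) kernel, an IOMMU driver may defer attaching a
 * device to its DMA domain; perform that deferred attach on first use of
 * the DMA API for the device. Outside a kdump kernel this is a no-op.
 */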
  static int iommu_dma_deferred_attach(struct device *dev,
  		struct iommu_domain *domain)
  {
  	const struct iommu_ops *ops = domain->ops;
  
  	if (!is_kdump_kernel())
  		return 0;
  
  	if (unlikely(ops->is_attach_deferred &&
  			ops->is_attach_deferred(domain, dev)))
  		return iommu_attach_device(domain, dev);
  
  	return 0;
  }
/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		     unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}
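
/*
 * For example (per the switch above): a cache-coherent device mapping a
 * buffer for DMA_TO_DEVICE ends up with IOMMU_READ | IOMMU_CACHE, while a
 * non-coherent device doing DMA_BIDIRECTIONAL gets IOMMU_READ | IOMMU_WRITE.
 */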

static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, u64 dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else if (cookie->fq_domain)	/* non-strict mode */
		queue_iova(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad), 0);
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}

static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);
	struct iommu_iotlb_gather iotlb_gather;
	size_t unmapped;

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);
	iommu_iotlb_gather_init(&iotlb_gather);

	unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
	WARN_ON(unmapped != size);

	if (!cookie->fq_domain)
		iommu_iotlb_sync(domain, &iotlb_gather);
	iommu_dma_free_iova(cookie, dma_addr, size);
}

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot, u64 dma_mask)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, phys);
	dma_addr_t iova;

	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
		return DMA_MAPPING_ERROR;

	size = iova_align(iovad, size + iova_off);

	iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
	if (!iova)
		return DMA_MAPPING_ERROR;

	if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size);
		return DMA_MAPPING_ERROR;
	}
	return iova + iova_off;
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(struct device *dev,
		unsigned int count, unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, nid = dev_to_node(dev);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
	/* It makes no sense to muck about with huge pages */
	gfp &= ~__GFP_COMP;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);
			gfp_t alloc_flags = gfp;

			order_size = 1U << order;
			if (order_mask > order_size)
				alloc_flags |= __GFP_NORETRY;
			page = alloc_pages_node(nid, alloc_flags, order);
			if (!page)
				continue;
			if (order)
				split_page(page, order);
			break;
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

/**
 * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @prot: pgprot_t to use for the remapped mapping
 * @attrs: DMA attributes for this allocation
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Mapped virtual address, or NULL on failure.
 */
static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot,
		unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t iova;
	void *vaddr;

	*dma_handle = DMA_MAPPING_ERROR;

	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
		return NULL;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
					gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(ioprot & IOMMU_CACHE)) {
		struct scatterlist *sg;
		int i;

		for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
			arch_dma_prep_coherent(sg_page(sg), sg->length);
	}

	if (iommu_map_sg_atomic(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
			< size)
		goto out_free_sg;

	vaddr = dma_common_pages_remap(pages, size, prot,
			__builtin_return_address(0));
	if (!vaddr)
		goto out_unmap;

	*dma_handle = iova;
	sg_free_table(&sgt);
	return vaddr;

out_unmap:
	__iommu_dma_unmap(dev, iova, size);
out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}
  
  /**
 * __iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from __iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
static int __iommu_dma_mmap(struct page **pages, size_t size,
		struct vm_area_struct *vma)
{
	return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
}

static void iommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	arch_sync_dma_for_cpu(phys, size, dir);
}

static void iommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	arch_sync_dma_for_device(phys, size, dir);
}

static void iommu_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
}

static void iommu_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}

static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	bool coherent = dev_is_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	dma_addr_t dma_handle;

	dma_handle = __iommu_dma_map(dev, phys, size, prot, dma_get_mask(dev));
	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    dma_handle != DMA_MAPPING_ERROR)
		arch_sync_dma_for_device(phys, size, dir);
	return dma_handle;
}

static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
	__iommu_dma_unmap(dev, dma_handle, size);
}
  
  /*
   * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (max_len - cur_len >= s_length)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}
  
  /*
   * If mapping failed, then just restore the original list,
   * but making sure the DMA fields are invalidated.
   */
  static void __invalidate_sg(struct scatterlist *sg, int nents)
  {
  	struct scatterlist *s;
  	int i;
  
  	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
		return 0;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
  		s->length = s_length;
  
  		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	if (iommu_map_sg_atomic(domain, iova, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}

static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);

	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(dev, start, end - start);
}

static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
			dma_get_mask(dev));
}

static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
}

static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
{
	size_t alloc_size = PAGE_ALIGN(size);
	int count = alloc_size >> PAGE_SHIFT;
	struct page *page = NULL, **pages = NULL;

	/* Non-coherent atomic allocation? Easy */
	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_free_from_pool(dev, cpu_addr, alloc_size))
		return;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		/*
		 * If the address is remapped, then it's either non-coherent
		 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
		 */
		pages = dma_common_find_pages(cpu_addr);
		if (!pages)
			page = vmalloc_to_page(cpu_addr);
		dma_common_free_remap(cpu_addr, alloc_size);
	} else {
		/* Lowmem means a coherent atomic or CMA allocation */
		page = virt_to_page(cpu_addr);
	}

	if (pages)
		__iommu_dma_free_pages(pages, count);
	if (page)
		dma_free_contiguous(dev, page, alloc_size);
}

static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t handle, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
	__iommu_dma_free(dev, size, cpu_addr);
}

static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
		struct page **pagep, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	size_t alloc_size = PAGE_ALIGN(size);
	int node = dev_to_node(dev);
	struct page *page = NULL;
	void *cpu_addr;

	page = dma_alloc_contiguous(dev, alloc_size, gfp);
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(alloc_size));
	if (!page)
		return NULL;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
				prot, __builtin_return_address(0));
		if (!cpu_addr)
			goto out_free_pages;

		if (!coherent)
			arch_dma_prep_coherent(page, size);
	} else {
		cpu_addr = page_address(page);
	}

	*pagep = page;
	memset(cpu_addr, 0, alloc_size);
	return cpu_addr;
out_free_pages:
	dma_free_contiguous(dev, page, alloc_size);
	return NULL;
}

static void *iommu_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	struct page *page = NULL;
	void *cpu_addr;

	gfp |= __GFP_ZERO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
		return iommu_dma_alloc_remap(dev, size, handle, gfp,
				dma_pgprot(dev, PAGE_KERNEL, attrs), attrs);
	}

	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    !gfpflags_allow_blocking(gfp) && !coherent)
		page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr,
					       gfp, NULL);
	else
		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
	if (!cpu_addr)
		return NULL;

	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot,
			dev->coherent_dma_mask);
	if (*handle == DMA_MAPPING_ERROR) {
		__iommu_dma_free(dev, size, cpu_addr);
		return NULL;
	}

	return cpu_addr;
}
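/*
 * The noncoherent alloc path can only use vmalloc-based remapping when the
 * caller is allowed to block; atomic callers fall back to
 * dma_common_alloc_pages() and use the linear-map address of those pages.
 */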
  #ifdef CONFIG_DMA_REMAP
  static void *iommu_dma_alloc_noncoherent(struct device *dev, size_t size,
  		dma_addr_t *handle, enum dma_data_direction dir, gfp_t gfp)
  {
  	if (!gfpflags_allow_blocking(gfp)) {
  		struct page *page;
  
  		page = dma_common_alloc_pages(dev, size, handle, dir, gfp);
  		if (!page)
  			return NULL;
  		return page_address(page);
  	}
  
  	return iommu_dma_alloc_remap(dev, size, handle, gfp | __GFP_ZERO,
  				     PAGE_KERNEL, 0);
  }
  
  static void iommu_dma_free_noncoherent(struct device *dev, size_t size,
  		void *cpu_addr, dma_addr_t handle, enum dma_data_direction dir)
  {
  	__iommu_dma_unmap(dev, handle, size);
  	__iommu_dma_free(dev, size, cpu_addr);
  }
  #else
  #define iommu_dma_alloc_noncoherent		NULL
  #define iommu_dma_free_noncoherent		NULL
  #endif /* CONFIG_DMA_REMAP */
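/*
 * mmap() a buffer obtained from iommu_dma_alloc() into userspace, covering
 * both the remapped (vmalloc) and physically contiguous cases.
 */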
  static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
  		void *cpu_addr, dma_addr_t dma_addr, size_t size,
  		unsigned long attrs)
  {
  	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn, off = vma->vm_pgoff;
	int ret;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
		return -ENXIO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);

		if (pages)
			return __iommu_dma_mmap(pages, size, vma);
		pfn = vmalloc_to_pfn(cpu_addr);
	} else {
		pfn = page_to_pfn(virt_to_page(cpu_addr));
	}

	return remap_pfn_range(vma, vma->vm_start, pfn + off,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
  }
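/*
 * Build a scatterlist describing a buffer from iommu_dma_alloc(): one entry
 * per page for remapped buffers, or a single entry for contiguous ones.
 */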
  static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
  		void *cpu_addr, dma_addr_t dma_addr, size_t size,
  		unsigned long attrs)
  {
	struct page *page;
	int ret;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);

		if (pages) {
			return sg_alloc_table_from_pages(sgt, pages,
					PAGE_ALIGN(size) >> PAGE_SHIFT,
					0, size, GFP_KERNEL);
		}

		page = vmalloc_to_page(cpu_addr);
	} else {
		page = virt_to_page(cpu_addr);
	}

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
  }
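/*
 * Expose the smallest IOMMU page size (minus one) as the merge boundary
 * mask, which the DMA API uses to decide whether adjacent segments may be
 * merged into a single mapping.
 */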
  static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
  {
  	struct iommu_domain *domain = iommu_get_dma_domain(dev);
  
  	return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
  }
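/*
 * The dma_map_ops installed by iommu_setup_dma_ops() below for devices that
 * end up with a DMA-managed IOMMU domain.
 */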
  static const struct dma_map_ops iommu_dma_ops = {
  	.alloc			= iommu_dma_alloc,
  	.free			= iommu_dma_free,
  	.alloc_pages		= dma_common_alloc_pages,
  	.free_pages		= dma_common_free_pages,
  	.alloc_noncoherent	= iommu_dma_alloc_noncoherent,
  	.free_noncoherent	= iommu_dma_free_noncoherent,
  	.mmap			= iommu_dma_mmap,
  	.get_sgtable		= iommu_dma_get_sgtable,
  	.map_page		= iommu_dma_map_page,
  	.unmap_page		= iommu_dma_unmap_page,
  	.map_sg			= iommu_dma_map_sg,
  	.unmap_sg		= iommu_dma_unmap_sg,
  	.sync_single_for_cpu	= iommu_dma_sync_single_for_cpu,
  	.sync_single_for_device	= iommu_dma_sync_single_for_device,
  	.sync_sg_for_cpu	= iommu_dma_sync_sg_for_cpu,
  	.sync_sg_for_device	= iommu_dma_sync_sg_for_device,
  	.map_resource		= iommu_dma_map_resource,
  	.unmap_resource		= iommu_dma_unmap_resource,
  	.get_merge_boundary	= iommu_dma_get_merge_boundary,
  };
  
  /*
   * The IOMMU core code allocates the default DMA domain, which the underlying
   * IOMMU driver needs to support via the dma-iommu layer.
   */
  void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
  {
  	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
  
  	if (!domain)
  		goto out_err;
  
  	/*
  	 * The IOMMU core code allocates the default DMA domain, which the
  	 * underlying IOMMU driver needs to support via the dma-iommu layer.
  	 */
  	if (domain->type == IOMMU_DOMAIN_DMA) {
  		if (iommu_dma_init_domain(domain, dma_base, size, dev))
  			goto out_err;
  		dev->dma_ops = &iommu_dma_ops;
  	}
  
  	return;
  out_err:
  	 pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops
  ",
  		 dev_name(dev));
  }
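/*
 * Find or create the IOVA mapping for an MSI doorbell address within the
 * device's IOMMU domain, reusing an existing entry from the cookie's
 * msi_page_list when that doorbell has already been mapped.
 */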
  static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
  		phys_addr_t msi_addr, struct iommu_domain *domain)
  {
  	struct iommu_dma_cookie *cookie = domain->iova_cookie;
  	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return NULL;

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		goto out_free_page;

	if (iommu_map(domain, iova, msi_addr, size, prot))
		goto out_free_iova;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
  out_free_page:
  	kfree(msi_page);
  	return NULL;
  }
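/**
 * iommu_dma_prepare_msi() - Map the MSI target address for @desc's device
 * @desc: MSI descriptor, will store the looked-up MSI page
 * @msi_addr: physical MSI target address to be mapped
 *
 * Return: 0 on success (or when no IOMMU translation is in use), -ENOMEM if
 * the MSI page could not be allocated or mapped.
 */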
int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_msi_page *msi_page;
	static DEFINE_MUTEX(msi_prepare_lock); /* see below */

	if (!domain || !domain->iova_cookie) {
		desc->iommu_cookie = NULL;
		return 0;
	}

	/*
	 * In fact the whole prepare operation should already be serialised by
	 * irq_domain_mutex further up the callchain, but that's pretty subtle
	 * on its own, so consider this locking as failsafe documentation...
	 */
	mutex_lock(&msi_prepare_lock);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	mutex_unlock(&msi_prepare_lock);

  	msi_desc_set_iommu_cookie(desc, msi_page);
  
  	if (!msi_page)
  		return -ENOMEM;
  	return 0;
  }
  
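/**
 * iommu_dma_compose_msi_msg() - Apply the IOMMU translation to an MSI message
 * @desc: MSI descriptor prepared by iommu_dma_prepare_msi()
 * @msg: MSI message whose target address is rewritten to the mapped IOVA
 */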
  void iommu_dma_compose_msi_msg(struct msi_desc *desc,
  			       struct msi_msg *msg)
  {
  	struct device *dev = msi_desc_to_dev(desc);
  	const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
  	const struct iommu_dma_msi_page *msi_page;
  
  	msi_page = msi_desc_get_iommu_cookie(desc);
  
  	if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
  		return;
  
  	msg->address_hi = upper_32_bits(msi_page->iova);
  	msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
  	msg->address_lo += lower_32_bits(msi_page->iova);
  }
  
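/*
 * Take a reference on the global IOVA cache at boot so it is ready before
 * the first DMA domain starts allocating IOVAs.
 */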
  static int iommu_dma_init(void)
  {
  	return iova_cache_get();
  }
  arch_initcall(iommu_dma_init);