kernel/dma/direct.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/scatterlist.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-noncoherent.h>
#include <linux/pfn.h>
#include <linux/set_memory.h>
#include <linux/swiotlb.h>

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but
 * some use it for entirely different regions:
 */
#ifndef ARCH_ZONE_DMA_BITS
#define ARCH_ZONE_DMA_BITS 24
#endif
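
/*
 * Editorial note (illustrative, not part of the original source): with the
 * default of 24 bits the ZONE_DMA limit works out to
 *
 *	DMA_BIT_MASK(24) = (1ULL << 24) - 1 = 0x00ffffff
 *
 * i.e. the first 16 MiB of physical address space, matching the comment
 * above.  DMA_BIT_MASK(32) = 0xffffffff likewise marks the 4 GiB boundary
 * covered by ZONE_DMA32.
 */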

static void report_addr(struct device *dev, dma_addr_t dma_addr, size_t size)
{
	if (!dev->dma_mask) {
		dev_err_once(dev, "DMA map on device without dma_mask\n");
	} else if (*dev->dma_mask >= DMA_BIT_MASK(32) || dev->bus_dma_mask) {
		dev_err_once(dev,
			"overflow %pad+%zu of DMA mask %llx bus mask %llx\n",
			&dma_addr, size, *dev->dma_mask, dev->bus_dma_mask);
	}
	WARN_ON_ONCE(1);
}

static inline dma_addr_t phys_to_dma_direct(struct device *dev,
		phys_addr_t phys)
{
	if (force_dma_unencrypted(dev))
		return __phys_to_dma(dev, phys);
	return phys_to_dma(dev, phys);
}

u64 dma_direct_get_required_mask(struct device *dev)
{
	u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT);

	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}
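
/*
 * Editorial sketch (not part of the original source): a worked example of
 * the calculation above.  If the highest RAM page maps to DMA address
 * 0x123456000 (just under 5 GiB), fls64() of that value is 33, so the
 * function returns
 *
 *	(1ULL << 32) * 2 - 1 = 0x1ffffffff
 *
 * i.e. the smallest all-ones mask that still covers the top of memory.
 */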

static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
		u64 *phys_mask)
{
	if (dev->bus_dma_mask && dev->bus_dma_mask < dma_mask)
		dma_mask = dev->bus_dma_mask;

	if (force_dma_unencrypted(dev))
		*phys_mask = __dma_to_phys(dev, dma_mask);
	else
		*phys_mask = dma_to_phys(dev, dma_mask);

	/*
	 * Optimistically try the zone that the physical address mask falls
	 * into first.  If that returns memory that isn't actually addressable
	 * we will fallback to the next lower zone and try again.
	 *
	 * Note that GFP_DMA32 and GFP_DMA are no ops without the corresponding
	 * zones.
	 */
	if (*phys_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
		return GFP_DMA;
	if (*phys_mask <= DMA_BIT_MASK(32))
		return GFP_DMA32;
	return 0;
}
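
/*
 * Editorial sketch (not part of the original source): for a device with
 * coherent_dma_mask = DMA_BIT_MASK(30) and an identity phys-to-DMA mapping,
 * *phys_mask is 0x3fffffff, which is above DMA_BIT_MASK(24) but within
 * DMA_BIT_MASK(32), so the first allocation attempt below uses GFP_DMA32;
 * only if the returned pages still fail dma_coherent_ok() does the
 * allocator retry with GFP_DMA.
 */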

static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	return phys_to_dma_direct(dev, phys) + size - 1 <=
			min_not_zero(dev->coherent_dma_mask, dev->bus_dma_mask);
}

struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	size_t alloc_size = PAGE_ALIGN(size);
	int node = dev_to_node(dev);
	struct page *page = NULL;
	u64 phys_mask;

	if (attrs & DMA_ATTR_NO_WARN)
		gfp |= __GFP_NOWARN;

	/* we always manually zero the memory once we are done: */
	gfp &= ~__GFP_ZERO;
	gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
			&phys_mask);
	page = dma_alloc_contiguous(dev, alloc_size, gfp);
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		dma_free_contiguous(dev, page, alloc_size);
		page = NULL;
	}
again:
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(alloc_size));
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		dma_free_contiguous(dev, page, size);
		page = NULL;

		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
		    phys_mask < DMA_BIT_MASK(64) &&
		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
			gfp |= GFP_DMA32;
			goto again;
		}

		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}

	return page;
}

void *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct page *page;
	void *ret;

	page = __dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
	if (!page)
		return NULL;

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev)) {
		/* remove any dirty cache lines on the kernel alias */
		if (!PageHighMem(page))
			arch_dma_prep_coherent(page, size);
		*dma_handle = phys_to_dma(dev, page_to_phys(page));
		/* return the page pointer as the opaque cookie */
		return page;
	}

	if (PageHighMem(page)) {
		/*
		 * Depending on the cma= arguments and per-arch setup
		 * dma_alloc_contiguous could return highmem pages.
		 * Without remapping there is no way to return them here,
		 * so log an error and fail.
		 */
		dev_info(dev, "Rejecting highmem page from CMA.\n");
		__dma_direct_free_pages(dev, size, page);
		return NULL;
	}

	ret = page_address(page);
	if (force_dma_unencrypted(dev)) {
		set_memory_decrypted((unsigned long)ret, 1 << get_order(size));
		*dma_handle = __phys_to_dma(dev, page_to_phys(page));
	} else {
		*dma_handle = phys_to_dma(dev, page_to_phys(page));
	}
	memset(ret, 0, size);

	if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
	    dma_alloc_need_uncached(dev, attrs)) {
		arch_dma_prep_coherent(page, size);
		ret = uncached_kernel_address(ret);
	}

	return ret;
}
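
/*
 * Editorial sketch (not part of the original source): how a driver normally
 * reaches dma_direct_alloc_pages() -- through the generic
 * dma_alloc_coherent() API from <linux/dma-mapping.h>.  The function name
 * and buffer size below are hypothetical.
 */
#if 0	/* example only */
static int example_coherent_alloc(struct device *dev)
{
	dma_addr_t dma_handle;
	void *cpu;

	cpu = dma_alloc_coherent(dev, PAGE_SIZE, &dma_handle, GFP_KERNEL);
	if (!cpu)
		return -ENOMEM;

	/* ... program dma_handle into the device, use cpu for CPU access ... */

	dma_free_coherent(dev, PAGE_SIZE, cpu, dma_handle);
	return 0;
}
#endif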

void __dma_direct_free_pages(struct device *dev, size_t size, struct page *page)
{
	dma_free_contiguous(dev, page, size);
}

void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int page_order = get_order(size);

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev)) {
		/* cpu_addr is a struct page cookie, not a kernel address */
		__dma_direct_free_pages(dev, size, cpu_addr);
		return;
	}

	if (force_dma_unencrypted(dev))
		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);

	if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
	    dma_alloc_need_uncached(dev, attrs))
		cpu_addr = cached_kernel_address(cpu_addr);
	__dma_direct_free_pages(dev, size, virt_to_page(cpu_addr));
}

void *dma_direct_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
	    dma_alloc_need_uncached(dev, attrs))
		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
	return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
}

void dma_direct_free(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
	if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
	    dma_alloc_need_uncached(dev, attrs))
		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
	else
		dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
}

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (unlikely(is_swiotlb_buffer(paddr)))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(dev, paddr, size, dir);
}
EXPORT_SYMBOL(dma_direct_sync_single_for_device);

void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (unlikely(is_swiotlb_buffer(paddr)))
			swiotlb_tbl_sync_single(dev, paddr, sg->length,
					dir, SYNC_FOR_DEVICE);

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_device(dev, paddr, sg->length,
					dir);
	}
}
EXPORT_SYMBOL(dma_direct_sync_sg_for_device);
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (!dev_is_dma_coherent(dev)) {
		arch_sync_dma_for_cpu(dev, paddr, size, dir);
		arch_sync_dma_for_cpu_all(dev);
	}

	if (unlikely(is_swiotlb_buffer(paddr)))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(dma_direct_sync_single_for_cpu);

void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_cpu(dev, paddr, sg->length, dir);

		if (unlikely(is_swiotlb_buffer(paddr)))
			swiotlb_tbl_sync_single(dev, paddr, sg->length, dir,
					SYNC_FOR_CPU);
	}

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu_all(dev);
}
EXPORT_SYMBOL(dma_direct_sync_sg_for_cpu);

void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = dma_to_phys(dev, addr);

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);

	if (unlikely(is_swiotlb_buffer(phys)))
		swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs);
}
EXPORT_SYMBOL(dma_direct_unmap_page);

void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		dma_direct_unmap_page(dev, sg->dma_address, sg_dma_len(sg), dir,
			     attrs);
}
EXPORT_SYMBOL(dma_direct_unmap_sg);
#endif
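
/*
 * Editorial sketch (not part of the original source): the helpers above back
 * dma_sync_single_for_cpu()/_for_device(), which a driver calls when it
 * touches a streaming mapping between device transfers.  "buf", "handle",
 * "len" and example_process_data() are hypothetical; the buffer is assumed
 * to have been mapped earlier with dma_map_single(dev, buf, len,
 * DMA_FROM_DEVICE).
 */
#if 0	/* example only */
static void example_rx_complete(struct device *dev, void *buf,
				dma_addr_t handle, size_t len)
{
	/* device finished writing; hand the buffer over to the CPU */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

	example_process_data(buf, len);

	/* give the buffer back to the device for the next transfer */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
}
#endif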

static inline bool dma_direct_possible(struct device *dev, dma_addr_t dma_addr,
		size_t size)
{
	return swiotlb_force != SWIOTLB_FORCE &&
		dma_capable(dev, dma_addr, size);
}

dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dma_addr = phys_to_dma(dev, phys);

	if (unlikely(!dma_direct_possible(dev, dma_addr, size)) &&
	    !swiotlb_map(dev, &phys, &dma_addr, size, dir, attrs)) {
		report_addr(dev, dma_addr, size);
		return DMA_MAPPING_ERROR;
	}

	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(dev, phys, size, dir);
	return dma_addr;
}
EXPORT_SYMBOL(dma_direct_map_page);
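
/*
 * Editorial sketch (not part of the original source): dma_direct_map_page()
 * is what dma_map_single()/dma_map_page() end up calling for devices that
 * use the direct mapping.  A typical streaming transmit path looks like
 * this; the function name, "data" and "len" are hypothetical.
 */
#if 0	/* example only */
static int example_tx(struct device *dev, void *data, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... point the hardware at "handle" and kick off the transfer ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}
#endif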

int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(sg) = sg->length;
	}

	return nents;

out_unmap:
	dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return 0;
}
EXPORT_SYMBOL(dma_direct_map_sg);
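
/*
 * Editorial sketch (not part of the original source): dma_map_sg() feeds
 * scatterlists into dma_direct_map_sg() for direct-mapped devices.  Note
 * that 0 is the error return and that the original nents (not the mapped
 * count) is passed back to dma_unmap_sg().  "sgl"/"nents" are assumed to
 * describe an initialised scatterlist and example_write_descriptor() is a
 * hypothetical hardware-specific helper.
 */
#if 0	/* example only */
static int example_map_sgl(struct device *dev, struct scatterlist *sgl,
			   int nents)
{
	struct scatterlist *sg;
	int mapped, i;

	mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (!mapped)
		return -ENOMEM;

	for_each_sg(sgl, sg, mapped, i)
		example_write_descriptor(sg_dma_address(sg), sg_dma_len(sg));

	/* ... once the transfer completes, pass the original nents back: */
	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}
#endif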

dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t dma_addr = paddr;

	if (unlikely(!dma_capable(dev, dma_addr, size))) {
		report_addr(dev, dma_addr, size);
		return DMA_MAPPING_ERROR;
	}

	return dma_addr;
}
EXPORT_SYMBOL(dma_direct_map_resource);

/*
 * Because 32-bit DMA masks are so common we expect every architecture to be
 * able to satisfy them - either by not supporting more physical memory, or by
 * providing a ZONE_DMA32.  If neither is the case, the architecture needs to
 * use an IOMMU instead of the direct mapping.
 */
int dma_direct_supported(struct device *dev, u64 mask)
{
	u64 min_mask;

	if (IS_ENABLED(CONFIG_ZONE_DMA))
		min_mask = DMA_BIT_MASK(ARCH_ZONE_DMA_BITS);
	else
		min_mask = DMA_BIT_MASK(32);

	min_mask = min_t(u64, min_mask, (max_pfn - 1) << PAGE_SHIFT);

	/*
	 * This check needs to be against the actual bit mask value, so
	 * use __phys_to_dma() here so that the SME encryption mask isn't
	 * part of the check.
	 */
	return mask >= __phys_to_dma(dev, min_mask);
}
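
/*
 * Editorial sketch (not part of the original source): dma_direct_supported()
 * is consulted when a driver declares its addressing capability, typically
 * from probe() via dma_set_mask_and_coherent().  The function name below is
 * hypothetical.
 */
#if 0	/* example only */
static int example_probe_set_mask(struct device *dev)
{
	int ret;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret)
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	return ret;	/* non-zero: not even 32-bit DMA is usable */
}
#endif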

size_t dma_direct_max_mapping_size(struct device *dev)
{
	/* If SWIOTLB is active, use its maximum mapping size */
	if (is_swiotlb_active() &&
	    (dma_addressing_limited(dev) || swiotlb_force == SWIOTLB_FORCE))
		return swiotlb_max_mapping_size(dev);
	return SIZE_MAX;
}
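
/*
 * Editorial note (not part of the original source): drivers see this limit
 * through dma_max_mapping_size(dev), which subsystems such as the block
 * layer use to cap per-segment transfer sizes so that a single mapping
 * never exceeds what swiotlb can bounce.
 */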