lib/swiotlb.c 29.7 KB
  /*
   * Dynamic DMA mapping support.
   *
   * This implementation is a fallback for platforms that do not support
   * I/O TLBs (aka DMA address translation hardware).
   * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
   * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
   * Copyright (C) 2000, 2003 Hewlett-Packard Co
   *	David Mosberger-Tang <davidm@hpl.hp.com>
   *
   * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
   * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
   *			unnecessary i-cache flushing.
   * 04/07/.. ak		Better overflow handling. Assorted fixes.
   * 05/09/10 linville	Add support for syncing ranges, support syncing for
   *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
   * 08/12/11 beckyb	Add highmem support
   */
  
  #include <linux/cache.h>
  #include <linux/dma-mapping.h>
  #include <linux/mm.h>
  #include <linux/export.h>
  #include <linux/spinlock.h>
  #include <linux/string.h>
  #include <linux/swiotlb.h>
  #include <linux/pfn.h>
  #include <linux/types.h>
  #include <linux/ctype.h>
  #include <linux/highmem.h>
  #include <linux/gfp.h>
  #include <linux/scatterlist.h>
  #include <linux/mem_encrypt.h>
  
  #include <asm/io.h>
  #include <asm/dma.h>
  
  #include <linux/init.h>
  #include <linux/bootmem.h>
  #include <linux/iommu-helper.h>
  
  #define CREATE_TRACE_POINTS
  #include <trace/events/swiotlb.h>
  
  #define OFFSET(val,align) ((unsigned long)	\
  	                   ( (val) & ( (align) - 1)))
  
  #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
  
  /*
   * Minimum IO TLB size to bother booting with.  Systems with mainly
   * 64bit capable cards will only lightly use the swiotlb.  If we can't
   * allocate a contiguous 1MB, we're probably in trouble anyway.
   */
  #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
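  
  /*
   * Sizing sketch (assuming the usual IO_TLB_SHIFT of 11, i.e. 2KB slabs,
   * from <linux/swiotlb.h>): IO_TLB_MIN_SLABS works out to (1MB >> 11) =
   * 512 slabs, and the 64MB default used below corresponds to 32768 slabs.
   */
  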
  enum swiotlb_force swiotlb_force;
  
  /*
   * Used to do a quick range check in swiotlb_tbl_unmap_single and
   * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
   * API.
   */
  static phys_addr_t io_tlb_start, io_tlb_end;
  
  /*
   * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
   * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
   */
  static unsigned long io_tlb_nslabs;
  
  /*
   * When the IOMMU overflows we return a fallback buffer. This sets the size.
   */
  static unsigned long io_tlb_overflow = 32*1024;
  static phys_addr_t io_tlb_overflow_buffer;
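  
  /*
   * The overflow buffer is handed back by swiotlb_map_page() when no bounce
   * slots are available, so drivers that fail to check for mapping errors
   * at least DMA to/from a harmless scratch area; swiotlb_dma_mapping_error()
   * treats its address as the error cookie.
   */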
  
  /*
   * This is a free list describing the number of free entries available from
   * each index
   */
  static unsigned int *io_tlb_list;
  static unsigned int io_tlb_index;
  
  /*
   * Max segment that we can provide which (if pages are contiguous) will
   * not be bounced (unless SWIOTLB_FORCE is set).
   */
  unsigned int max_segment;
  
  /*
   * We need to save away the original address corresponding to a mapped entry
   * for the sync operations.
   */
  #define INVALID_PHYS_ADDR (~(phys_addr_t)0)
  static phys_addr_t *io_tlb_orig_addr;
  
  /*
   * Protect the above data structures in the map and unmap calls
   */
  static DEFINE_SPINLOCK(io_tlb_lock);
  static int late_alloc;
  
  static int __init
  setup_io_tlb_npages(char *str)
  {
  	if (isdigit(*str)) {
  		io_tlb_nslabs = simple_strtoul(str, &str, 0);
  		/* avoid tail segment of size < IO_TLB_SEGSIZE */
  		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
  	}
  	if (*str == ',')
  		++str;
  	if (!strcmp(str, "force")) {
  		swiotlb_force = SWIOTLB_FORCE;
  	} else if (!strcmp(str, "noforce")) {
  		swiotlb_force = SWIOTLB_NO_FORCE;
  		io_tlb_nslabs = 1;
  	}
  
  	return 0;
  }
  early_param("swiotlb", setup_io_tlb_npages);
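  
  /*
   * Boot-time usage, as parsed above: "swiotlb=<nslabs>[,force|noforce]".
   * For example "swiotlb=65536,force" reserves 65536 slabs and bounces all
   * DMA through them, while "noforce" effectively disables the pool.
   */
  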
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
120
  /* make io_tlb_overflow tunable too? */
f21ffe9f6   Konrad Rzeszutek Wilk   swiotlb: Expose s...
121
  unsigned long swiotlb_nr_tbl(void)
5f98ecdbc   FUJITA Tomonori   swiotlb: Export s...
122
123
124
  {
  	return io_tlb_nslabs;
  }
f21ffe9f6   Konrad Rzeszutek Wilk   swiotlb: Expose s...
125
  EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);
c729de8fc   Yinghai Lu   x86, kdump: Set c...
126

7453c549f   Konrad Rzeszutek Wilk   swiotlb: Export s...
127
128
129
130
131
132
133
134
135
136
137
138
139
  unsigned int swiotlb_max_segment(void)
  {
  	return max_segment;
  }
  EXPORT_SYMBOL_GPL(swiotlb_max_segment);
  
  void swiotlb_set_max_segment(unsigned int val)
  {
  	if (swiotlb_force == SWIOTLB_FORCE)
  		max_segment = 1;
  	else
  		max_segment = rounddown(val, PAGE_SIZE);
  }
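  
  /*
   * Illustrative only: a scatter-gather user that must not build segments
   * larger than swiotlb can bounce in one go might clamp its limit with
   *
   *	max = min_t(unsigned int, max, swiotlb_max_segment());
   *
   * (a return value of 0 means swiotlb is not in use, so no clamp is needed).
   */
  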
  /* default to 64MB */
  #define IO_TLB_DEFAULT_SIZE (64UL<<20)
  unsigned long swiotlb_size_or_default(void)
  {
  	unsigned long size;
  
  	size = io_tlb_nslabs << IO_TLB_SHIFT;
  
  	return size ? size : (IO_TLB_DEFAULT_SIZE);
  }
  void __weak swiotlb_set_mem_attributes(void *vaddr, unsigned long size) { }
  
  /* For swiotlb, clear memory encryption mask from dma addresses */
  static dma_addr_t swiotlb_phys_to_dma(struct device *hwdev,
  				      phys_addr_t address)
  {
  	return __sme_clr(phys_to_dma(hwdev, address));
  }
  
  /* Note that this doesn't work with highmem pages */
  static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
  				      volatile void *address)
  {
  	return phys_to_dma(hwdev, virt_to_phys(address));
  }
  
  static bool no_iotlb_memory;
  
  void swiotlb_print_info(void)
  {
  	unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
  	unsigned char *vstart, *vend;
  
  	if (no_iotlb_memory) {
  		pr_warn("software IO TLB: No low mem\n");
  		return;
  	}
  
  	vstart = phys_to_virt(io_tlb_start);
  	vend = phys_to_virt(io_tlb_end);
  
  	printk(KERN_INFO "software IO TLB [mem %#010llx-%#010llx] (%luMB) mapped at [%p-%p]\n",
  	       (unsigned long long)io_tlb_start,
  	       (unsigned long long)io_tlb_end,
  	       bytes >> 20, vstart, vend - 1);
  }
  
  /*
   * Early SWIOTLB allocation may be too early to allow an architecture to
   * perform the desired operations.  This function allows the architecture to
   * call SWIOTLB when the operations are possible.  It needs to be called
   * before the SWIOTLB memory is used.
   */
  void __init swiotlb_update_mem_attributes(void)
  {
  	void *vaddr;
  	unsigned long bytes;
  
  	if (no_iotlb_memory || late_alloc)
  		return;
  
  	vaddr = phys_to_virt(io_tlb_start);
  	bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT);
  	swiotlb_set_mem_attributes(vaddr, bytes);
  	memset(vaddr, 0, bytes);
  
  	vaddr = phys_to_virt(io_tlb_overflow_buffer);
  	bytes = PAGE_ALIGN(io_tlb_overflow);
  	swiotlb_set_mem_attributes(vaddr, bytes);
  	memset(vaddr, 0, bytes);
  }
  
  int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
  {
  	void *v_overflow_buffer;
  	unsigned long i, bytes;
  
  	bytes = nslabs << IO_TLB_SHIFT;
  
  	io_tlb_nslabs = nslabs;
  	io_tlb_start = __pa(tlb);
  	io_tlb_end = io_tlb_start + bytes;
  
  	/*
  	 * Get the overflow emergency buffer
  	 */
  	v_overflow_buffer = memblock_virt_alloc_low_nopanic(
  						PAGE_ALIGN(io_tlb_overflow),
  						PAGE_SIZE);
  	if (!v_overflow_buffer)
  		return -ENOMEM;
  
  	io_tlb_overflow_buffer = __pa(v_overflow_buffer);
  
  	/*
  	 * Allocate and initialize the free list array.  This array is used
  	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
  	 * between io_tlb_start and io_tlb_end.
  	 */
  	io_tlb_list = memblock_virt_alloc(
  				PAGE_ALIGN(io_tlb_nslabs * sizeof(int)),
  				PAGE_SIZE);
  	io_tlb_orig_addr = memblock_virt_alloc(
  				PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)),
  				PAGE_SIZE);
  	for (i = 0; i < io_tlb_nslabs; i++) {
  		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
  		io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
  	}
  	io_tlb_index = 0;
  
  	if (verbose)
  		swiotlb_print_info();
  
  	swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
  	return 0;
  }
  
  /*
   * Statically reserve bounce buffer space and initialize bounce buffer data
   * structures for the software IO TLB used to implement the DMA API.
   */
  void  __init
  swiotlb_init(int verbose)
  {
  	size_t default_size = IO_TLB_DEFAULT_SIZE;
  	unsigned char *vstart;
  	unsigned long bytes;
  
  	if (!io_tlb_nslabs) {
  		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
  		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
  	}
  
  	bytes = io_tlb_nslabs << IO_TLB_SHIFT;
  	/* Get IO TLB memory from the low pages */
  	vstart = memblock_virt_alloc_low_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE);
  	if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
  		return;
  
  	if (io_tlb_start)
  		memblock_free_early(io_tlb_start,
  				    PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
  	pr_warn("Cannot allocate SWIOTLB buffer");
  	no_iotlb_memory = true;
  }
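  
  /*
   * Architecture setup code is expected to call swiotlb_init() once during
   * early boot, while memblock allocations are still possible; platforms
   * that cannot reserve the buffer that early can fall back to
   * swiotlb_late_init_with_default_size() below.
   */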
  
  /*
   * Systems with larger DMA zones (those that don't support ISA) can
   * initialize the swiotlb later using the slab allocator if needed.
   * This should be just like above, but with some error catching.
   */
  int
  swiotlb_late_init_with_default_size(size_t default_size)
  {
  	unsigned long bytes, req_nslabs = io_tlb_nslabs;
  	unsigned char *vstart = NULL;
  	unsigned int order;
  	int rc = 0;
  
  	if (!io_tlb_nslabs) {
  		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
  		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
  	}
  
  	/*
  	 * Get IO TLB memory from the low pages
  	 */
  	order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
  	io_tlb_nslabs = SLABS_PER_PAGE << order;
  	bytes = io_tlb_nslabs << IO_TLB_SHIFT;
  
  	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
  		vstart = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
  						  order);
  		if (vstart)
  			break;
  		order--;
  	}
  	if (!vstart) {
  		io_tlb_nslabs = req_nslabs;
  		return -ENOMEM;
  	}
  	if (order != get_order(bytes)) {
  		printk(KERN_WARNING "Warning: only able to allocate %ld MB "
  		       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
  		io_tlb_nslabs = SLABS_PER_PAGE << order;
  	}
  	rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs);
  	if (rc)
  		free_pages((unsigned long)vstart, order);
  
  	return rc;
  }
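  
  /*
   * Set up the bounce pool in a buffer the caller has already allocated;
   * this is the workhorse for the late (post-bootmem) initialisation path
   * above.
   */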
  
  int
  swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
  {
  	unsigned long i, bytes;
  	unsigned char *v_overflow_buffer;
  
  	bytes = nslabs << IO_TLB_SHIFT;
  
  	io_tlb_nslabs = nslabs;
  	io_tlb_start = virt_to_phys(tlb);
  	io_tlb_end = io_tlb_start + bytes;
  
  	swiotlb_set_mem_attributes(tlb, bytes);
  	memset(tlb, 0, bytes);
  
  	/*
  	 * Get the overflow emergency buffer
  	 */
  	v_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
  						     get_order(io_tlb_overflow));
  	if (!v_overflow_buffer)
  		goto cleanup2;
  
  	swiotlb_set_mem_attributes(v_overflow_buffer, io_tlb_overflow);
  	memset(v_overflow_buffer, 0, io_tlb_overflow);
  	io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer);
  
  	/*
  	 * Allocate and initialize the free list array.  This array is used
  	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
  	 * between io_tlb_start and io_tlb_end.
  	 */
  	io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
  	                              get_order(io_tlb_nslabs * sizeof(int)));
  	if (!io_tlb_list)
  		goto cleanup3;
  
  	io_tlb_orig_addr = (phys_addr_t *)
  		__get_free_pages(GFP_KERNEL,
  				 get_order(io_tlb_nslabs *
  					   sizeof(phys_addr_t)));
  	if (!io_tlb_orig_addr)
  		goto cleanup4;
  
  	for (i = 0; i < io_tlb_nslabs; i++) {
  		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
  		io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
  	}
  	io_tlb_index = 0;
  
  	swiotlb_print_info();
  
  	late_alloc = 1;
  	swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
  
  	return 0;
  
  cleanup4:
  	free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
  	                                                 sizeof(int)));
  	io_tlb_list = NULL;
  cleanup3:
  	free_pages((unsigned long)v_overflow_buffer,
  		   get_order(io_tlb_overflow));
  	io_tlb_overflow_buffer = 0;
  cleanup2:
  	io_tlb_end = 0;
  	io_tlb_start = 0;
  	io_tlb_nslabs = 0;
  	max_segment = 0;
  	return -ENOMEM;
  }
  
  void __init swiotlb_free(void)
  {
  	if (!io_tlb_orig_addr)
  		return;
  
  	if (late_alloc) {
  		free_pages((unsigned long)phys_to_virt(io_tlb_overflow_buffer),
  			   get_order(io_tlb_overflow));
  		free_pages((unsigned long)io_tlb_orig_addr,
  			   get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
  		free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
  								 sizeof(int)));
  		free_pages((unsigned long)phys_to_virt(io_tlb_start),
  			   get_order(io_tlb_nslabs << IO_TLB_SHIFT));
  	} else {
  		memblock_free_late(io_tlb_overflow_buffer,
  				   PAGE_ALIGN(io_tlb_overflow));
  		memblock_free_late(__pa(io_tlb_orig_addr),
  				   PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
  		memblock_free_late(__pa(io_tlb_list),
  				   PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
  		memblock_free_late(io_tlb_start,
  				   PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
  	}
  	io_tlb_nslabs = 0;
  	max_segment = 0;
  }
  
  int is_swiotlb_buffer(phys_addr_t paddr)
  {
  	return paddr >= io_tlb_start && paddr < io_tlb_end;
  }
  
  /*
   * Bounce: copy the swiotlb buffer from or back to the original dma location
   */
  static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
  			   size_t size, enum dma_data_direction dir)
  {
  	unsigned long pfn = PFN_DOWN(orig_addr);
  	unsigned char *vaddr = phys_to_virt(tlb_addr);
  
  	if (PageHighMem(pfn_to_page(pfn))) {
  		/* The buffer does not have a mapping.  Map it in and copy */
  		unsigned int offset = orig_addr & ~PAGE_MASK;
  		char *buffer;
  		unsigned int sz = 0;
  		unsigned long flags;
  
  		while (size) {
  			sz = min_t(size_t, PAGE_SIZE - offset, size);
  
  			local_irq_save(flags);
  			buffer = kmap_atomic(pfn_to_page(pfn));
  			if (dir == DMA_TO_DEVICE)
  				memcpy(vaddr, buffer + offset, sz);
  			else
  				memcpy(buffer + offset, vaddr, sz);
  			kunmap_atomic(buffer);
  			local_irq_restore(flags);
  
  			size -= sz;
  			pfn++;
  			vaddr += sz;
  			offset = 0;
  		}
  	} else if (dir == DMA_TO_DEVICE) {
  		memcpy(vaddr, phys_to_virt(orig_addr), size);
  	} else {
  		memcpy(phys_to_virt(orig_addr), vaddr, size);
  	}
  }
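  
  /*
   * Note on the highmem path above: the original buffer may have no
   * permanent kernel mapping, so it is copied one page at a time through a
   * temporary kmap_atomic() mapping, with interrupts disabled while each
   * per-page mapping is in use.
   */
  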
  phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
  				   dma_addr_t tbl_dma_addr,
  				   phys_addr_t orig_addr, size_t size,
  				   enum dma_data_direction dir,
  				   unsigned long attrs)
  {
  	unsigned long flags;
  	phys_addr_t tlb_addr;
  	unsigned int nslots, stride, index, wrap;
  	int i;
  	unsigned long mask;
  	unsigned long offset_slots;
  	unsigned long max_slots;
  
  	if (no_iotlb_memory)
  		panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
  
  	if (sme_active())
  		pr_warn_once("SME is active and system is using DMA bounce buffers\n");
  
  	mask = dma_get_seg_boundary(hwdev);
  
  	tbl_dma_addr &= mask;
  
  	offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
  
  	/*
  	 * Carefully handle integer overflow which can occur when mask == ~0UL.
  	 */
  	max_slots = mask + 1
  		    ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
  		    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
  
  	/*
  	 * For mappings greater than or equal to a page, we limit the stride
  	 * (and hence alignment) to a page size.
  	 */
  	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
  	if (size >= PAGE_SIZE)
  		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
  	else
  		stride = 1;
  
  	BUG_ON(!nslots);
  
  	/*
  	 * Find suitable number of IO TLB entries size that will fit this
  	 * request and allocate a buffer from that IO TLB pool.
  	 */
  	spin_lock_irqsave(&io_tlb_lock, flags);
  	index = ALIGN(io_tlb_index, stride);
  	if (index >= io_tlb_nslabs)
  		index = 0;
  	wrap = index;
  
  	do {
  		while (iommu_is_span_boundary(index, nslots, offset_slots,
  					      max_slots)) {
  			index += stride;
  			if (index >= io_tlb_nslabs)
  				index = 0;
  			if (index == wrap)
  				goto not_found;
  		}
  
  		/*
  		 * If we find a slot that indicates we have 'nslots' number of
  		 * contiguous buffers, we allocate the buffers from that slot
  		 * and mark the entries as '0' indicating unavailable.
  		 */
  		if (io_tlb_list[index] >= nslots) {
  			int count = 0;
  
  			for (i = index; i < (int) (index + nslots); i++)
  				io_tlb_list[i] = 0;
  			for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
  				io_tlb_list[i] = ++count;
  			tlb_addr = io_tlb_start + (index << IO_TLB_SHIFT);
  
  			/*
  			 * Update the indices to avoid searching in the next
  			 * round.
  			 */
  			io_tlb_index = ((index + nslots) < io_tlb_nslabs
  					? (index + nslots) : 0);
  
  			goto found;
  		}
  		index += stride;
  		if (index >= io_tlb_nslabs)
  			index = 0;
  	} while (index != wrap);
  
  not_found:
  	spin_unlock_irqrestore(&io_tlb_lock, flags);
  	if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
  		dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n", size);
  	return SWIOTLB_MAP_ERROR;
  found:
  	spin_unlock_irqrestore(&io_tlb_lock, flags);
  
  	/*
  	 * Save away the mapping from the original address to the DMA address.
  	 * This is needed when we sync the memory.  Then we sync the buffer if
  	 * needed.
  	 */
  	for (i = 0; i < nslots; i++)
  		io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
  	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
  	    (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
  		swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);
  
  	return tlb_addr;
  }
  EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
  
  /*
   * Allocates a bounce buffer and returns its physical address.
   */
  static phys_addr_t
  map_single(struct device *hwdev, phys_addr_t phys, size_t size,
  	   enum dma_data_direction dir, unsigned long attrs)
  {
  	dma_addr_t start_dma_addr;
  
  	if (swiotlb_force == SWIOTLB_NO_FORCE) {
  		dev_warn_ratelimited(hwdev, "Cannot do DMA to address %pa\n",
  				     &phys);
  		return SWIOTLB_MAP_ERROR;
  	}
  
  	start_dma_addr = swiotlb_phys_to_dma(hwdev, io_tlb_start);
  	return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size,
  				      dir, attrs);
  }
  
  /*
   * tlb_addr is the physical address of the bounce buffer to unmap.
   */
  void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
  			      size_t size, enum dma_data_direction dir,
  			      unsigned long attrs)
  {
  	unsigned long flags;
  	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
  	int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
  	phys_addr_t orig_addr = io_tlb_orig_addr[index];
  
  	/*
  	 * First, sync the memory before unmapping the entry
  	 */
  	if (orig_addr != INVALID_PHYS_ADDR &&
  	    !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
  	    ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
  		swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);
  
  	/*
  	 * Return the buffer to the free list by setting the corresponding
  	 * entries to indicate the number of contiguous entries available.
  	 * While returning the entries to the free list, we merge the entries
  	 * with slots below and above the pool being returned.
  	 */
  	spin_lock_irqsave(&io_tlb_lock, flags);
  	{
  		count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
  			 io_tlb_list[index + nslots] : 0);
  		/*
  		 * Step 1: return the slots to the free list, merging the
  		 * slots with succeeding slots
  		 */
  		for (i = index + nslots - 1; i >= index; i--) {
  			io_tlb_list[i] = ++count;
  			io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
  		}
  		/*
  		 * Step 2: merge the returned slots with the preceding slots,
  		 * if available (non zero)
  		 */
  		for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--)
  			io_tlb_list[i] = ++count;
  	}
  	spin_unlock_irqrestore(&io_tlb_lock, flags);
  }
  EXPORT_SYMBOL_GPL(swiotlb_tbl_unmap_single);
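  
  /*
   * Worked example of the accounting above (illustrative numbers): freeing
   * two slots at index 4 when io_tlb_list[6] already reads 3 (three free
   * slots follow) gives io_tlb_list[5] = 4 and io_tlb_list[4] = 5 in step 1;
   * step 2 then walks backwards through any already-free slots below index 4,
   * bumping their counts so each slot records how many contiguous free slots
   * start at it within its IO_TLB_SEGSIZE segment.
   */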
  
  void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
  			     size_t size, enum dma_data_direction dir,
  			     enum dma_sync_target target)
  {
  	int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
  	phys_addr_t orig_addr = io_tlb_orig_addr[index];
  
  	if (orig_addr == INVALID_PHYS_ADDR)
  		return;
  	orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1);
  
  	switch (target) {
  	case SYNC_FOR_CPU:
  		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
  			swiotlb_bounce(orig_addr, tlb_addr,
  				       size, DMA_FROM_DEVICE);
  		else
  			BUG_ON(dir != DMA_TO_DEVICE);
  		break;
  	case SYNC_FOR_DEVICE:
  		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
  			swiotlb_bounce(orig_addr, tlb_addr,
  				       size, DMA_TO_DEVICE);
  		else
  			BUG_ON(dir != DMA_FROM_DEVICE);
  		break;
  	default:
  		BUG();
  	}
  }
  EXPORT_SYMBOL_GPL(swiotlb_tbl_sync_single);
  
  void *
  swiotlb_alloc_coherent(struct device *hwdev, size_t size,
  		       dma_addr_t *dma_handle, gfp_t flags)
  {
  	bool warn = !(flags & __GFP_NOWARN);
  	dma_addr_t dev_addr;
  	void *ret;
  	int order = get_order(size);
  	u64 dma_mask = DMA_BIT_MASK(32);
  
  	if (hwdev && hwdev->coherent_dma_mask)
  		dma_mask = hwdev->coherent_dma_mask;
  
  	ret = (void *)__get_free_pages(flags, order);
  	if (ret) {
  		dev_addr = swiotlb_virt_to_bus(hwdev, ret);
  		if (dev_addr + size - 1 > dma_mask) {
  			/*
  			 * The allocated memory isn't reachable by the device.
  			 */
  			free_pages((unsigned long) ret, order);
  			ret = NULL;
  		}
  	}
  	if (!ret) {
  		/*
  		 * We are either out of memory or the device can't DMA to
  		 * GFP_DMA memory; fall back on map_single(), which
  		 * will grab memory from the lowest available address range.
  		 */
  		phys_addr_t paddr = map_single(hwdev, 0, size, DMA_FROM_DEVICE,
  					       warn ? 0 : DMA_ATTR_NO_WARN);
  		if (paddr == SWIOTLB_MAP_ERROR)
  			goto err_warn;
  
  		ret = phys_to_virt(paddr);
  		dev_addr = swiotlb_phys_to_dma(hwdev, paddr);
  
  		/* Confirm address can be DMA'd by device */
  		if (dev_addr + size - 1 > dma_mask) {
  			printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
  			       (unsigned long long)dma_mask,
  			       (unsigned long long)dev_addr);
  
  			/*
  			 * DMA_TO_DEVICE to avoid memcpy in unmap_single.
  			 * The DMA_ATTR_SKIP_CPU_SYNC is optional.
  			 */
  			swiotlb_tbl_unmap_single(hwdev, paddr,
  						 size, DMA_TO_DEVICE,
  						 DMA_ATTR_SKIP_CPU_SYNC);
  			goto err_warn;
  		}
  	}
  
  	*dma_handle = dev_addr;
  	memset(ret, 0, size);
  
  	return ret;
  
  err_warn:
  	if (warn && printk_ratelimit()) {
  		pr_warn("swiotlb: coherent allocation failed for device %s size=%zu\n",
  			dev_name(hwdev), size);
  		dump_stack();
  	}
  
  	return NULL;
  }
  EXPORT_SYMBOL(swiotlb_alloc_coherent);
  
  void
  swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
  		      dma_addr_t dev_addr)
  {
  	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
  
  	WARN_ON(irqs_disabled());
  	if (!is_swiotlb_buffer(paddr))
  		free_pages((unsigned long)vaddr, get_order(size));
  	else
  		/*
  		 * DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single.
  		 * DMA_ATTR_SKIP_CPU_SYNC is optional.
  		 */
  		swiotlb_tbl_unmap_single(hwdev, paddr, size, DMA_TO_DEVICE,
  					 DMA_ATTR_SKIP_CPU_SYNC);
  }
  EXPORT_SYMBOL(swiotlb_free_coherent);
  
  static void
  swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
  	     int do_panic)
  {
  	if (swiotlb_force == SWIOTLB_NO_FORCE)
  		return;
  
  	/*
  	 * Ran out of IOMMU space for this operation. This is very bad.
  	 * Unfortunately the drivers cannot handle this operation properly
  	 * unless they check for dma_mapping_error (most don't).
  	 * When the mapping is small enough return a static buffer to limit
  	 * the damage, or panic when the transfer is too big.
  	 */
  	dev_err_ratelimited(dev, "DMA: Out of SW-IOMMU space for %zu bytes\n",
  			    size);
  
  	if (size <= io_tlb_overflow || !do_panic)
  		return;
  
  	if (dir == DMA_BIDIRECTIONAL)
  		panic("DMA: Random memory could be DMA accessed\n");
  	if (dir == DMA_FROM_DEVICE)
  		panic("DMA: Random memory could be DMA written\n");
  	if (dir == DMA_TO_DEVICE)
  		panic("DMA: Random memory could be DMA read\n");
  }
  
  /*
   * Map a single buffer of the indicated size for DMA in streaming mode.  The
   * physical address to use is returned.
   *
   * Once the device is given the dma address, the device owns this memory until
   * either swiotlb_unmap_page or swiotlb_dma_sync_single is performed.
   */
  dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
  			    unsigned long offset, size_t size,
  			    enum dma_data_direction dir,
  			    unsigned long attrs)
  {
  	phys_addr_t map, phys = page_to_phys(page) + offset;
  	dma_addr_t dev_addr = phys_to_dma(dev, phys);
  
  	BUG_ON(dir == DMA_NONE);
  	/*
  	 * If the address happens to be in the device's DMA window,
  	 * we can safely return the device addr and not worry about bounce
  	 * buffering it.
  	 */
  	if (dma_capable(dev, dev_addr, size) && swiotlb_force != SWIOTLB_FORCE)
  		return dev_addr;
  
  	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
  
  	/* Oh well, have to allocate and map a bounce buffer. */
  	map = map_single(dev, phys, size, dir, attrs);
  	if (map == SWIOTLB_MAP_ERROR) {
  		swiotlb_full(dev, size, dir, 1);
  		return swiotlb_phys_to_dma(dev, io_tlb_overflow_buffer);
  	}
  	dev_addr = swiotlb_phys_to_dma(dev, map);
  
  	/* Ensure that the address returned is DMA'ble */
  	if (dma_capable(dev, dev_addr, size))
  		return dev_addr;
  
  	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
  	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
  
  	return swiotlb_phys_to_dma(dev, io_tlb_overflow_buffer);
  }
  EXPORT_SYMBOL_GPL(swiotlb_map_page);
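  
  /*
   * Illustrative only: drivers normally reach the paths above through the
   * generic DMA API rather than by calling swiotlb directly, e.g.
   *
   *	dma_addr_t handle = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
   *
   *	if (dma_mapping_error(dev, handle))
   *		return -ENOMEM;
   *	...
   *	dma_unmap_page(dev, handle, len, DMA_TO_DEVICE);
   *
   * dma_map_page() ends up in swiotlb_map_page() when an architecture
   * installs swiotlb-backed dma_map_ops for the device.
   */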
  
  /*
   * Unmap a single streaming mode DMA translation.  The dma_addr and size must
   * match what was provided for in a previous swiotlb_map_page call.  All
   * other usages are undefined.
   *
   * After this call, reads by the cpu to the buffer are guaranteed to see
   * whatever the device wrote there.
   */
  static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
  			 size_t size, enum dma_data_direction dir,
  			 unsigned long attrs)
  {
  	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
  
  	BUG_ON(dir == DMA_NONE);
  
  	if (is_swiotlb_buffer(paddr)) {
  		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
  		return;
  	}
  
  	if (dir != DMA_FROM_DEVICE)
  		return;
  
  	/*
  	 * phys_to_virt doesn't work with highmem pages but we could
  	 * call dma_mark_clean() with a highmem page here. However, we
  	 * are fine since dma_mark_clean() is null on POWERPC. We can
  	 * make dma_mark_clean() take a physical address if necessary.
  	 */
  	dma_mark_clean(phys_to_virt(paddr), size);
  }
  
  void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
  			size_t size, enum dma_data_direction dir,
  			unsigned long attrs)
  {
  	unmap_single(hwdev, dev_addr, size, dir, attrs);
  }
  EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
  
  /*
   * Make physical memory consistent for a single streaming mode DMA translation
   * after a transfer.
   *
   * If you perform a swiotlb_map_page() but wish to interrogate the buffer
   * using the cpu, yet do not wish to tear down the dma mapping, you must
   * call this function before doing so.  At the next point you give the dma
   * address back to the card, you must first perform a
   * swiotlb_dma_sync_for_device, and then the device again owns the buffer
   */
  static void
  swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
  		    size_t size, enum dma_data_direction dir,
  		    enum dma_sync_target target)
  {
  	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
  
  	BUG_ON(dir == DMA_NONE);
  
  	if (is_swiotlb_buffer(paddr)) {
  		swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
  		return;
  	}
  
  	if (dir != DMA_FROM_DEVICE)
  		return;
  	dma_mark_clean(phys_to_virt(paddr), size);
  }
  
  void
  swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
  			    size_t size, enum dma_data_direction dir)
  {
  	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
  }
  EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
  
  void
  swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
  			       size_t size, enum dma_data_direction dir)
  {
  	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
  }
  EXPORT_SYMBOL(swiotlb_sync_single_for_device);
  
  /*
   * Map a set of buffers described by scatterlist in streaming mode for DMA.
   * This is the scatter-gather version of the above swiotlb_map_page
   * interface.  Here the scatter gather list elements are each tagged with the
   * appropriate dma address and length.  They are obtained via
   * sg_dma_{address,length}(SG).
   *
   * NOTE: An implementation may be able to use a smaller number of
   *       DMA address/length pairs than there are SG table elements.
   *       (for example via virtual mapping capabilities)
   *       The routine returns the number of addr/length pairs actually
   *       used, at most nents.
   *
   * Device ownership issues as mentioned above for swiotlb_map_page are the
   * same here.
   */
  int
  swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
  		     enum dma_data_direction dir, unsigned long attrs)
  {
  	struct scatterlist *sg;
  	int i;
  	BUG_ON(dir == DMA_NONE);
  
  	for_each_sg(sgl, sg, nelems, i) {
  		phys_addr_t paddr = sg_phys(sg);
  		dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);
  
  		if (swiotlb_force == SWIOTLB_FORCE ||
  		    !dma_capable(hwdev, dev_addr, sg->length)) {
  			phys_addr_t map = map_single(hwdev, sg_phys(sg),
  						     sg->length, dir, attrs);
  			if (map == SWIOTLB_MAP_ERROR) {
  				/* Don't panic here, we expect map_sg users
  				   to do proper error handling. */
  				swiotlb_full(hwdev, sg->length, dir, 0);
  				attrs |= DMA_ATTR_SKIP_CPU_SYNC;
  				swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
  						       attrs);
  				sg_dma_len(sgl) = 0;
  				return 0;
  			}
  			sg->dma_address = swiotlb_phys_to_dma(hwdev, map);
  		} else
  			sg->dma_address = dev_addr;
  		sg_dma_len(sg) = sg->length;
  	}
  	return nelems;
  }
  EXPORT_SYMBOL(swiotlb_map_sg_attrs);
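  
  /*
   * Illustrative only: as with the single-page path, callers normally use
   * dma_map_sg()/dma_unmap_sg(). On failure the loop above unwinds whatever
   * it had mapped so far and returns 0, the DMA API convention for "nothing
   * was mapped".
   */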
  
  /*
   * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
   * concerning calls here are the same as for swiotlb_unmap_page() above.
   */
  void
  swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
  		       int nelems, enum dma_data_direction dir,
  		       unsigned long attrs)
  {
  	struct scatterlist *sg;
  	int i;
  	BUG_ON(dir == DMA_NONE);
  
  	for_each_sg(sgl, sg, nelems, i)
  		unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir,
  			     attrs);
  }
  EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
  
  /*
   * Make physical memory consistent for a set of streaming mode DMA translations
   * after a transfer.
   *
   * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
   * and usage.
   */
  static void
  swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
  		int nelems, enum dma_data_direction dir,
  		enum dma_sync_target target)
  {
  	struct scatterlist *sg;
  	int i;
  
  	for_each_sg(sgl, sg, nelems, i)
  		swiotlb_sync_single(hwdev, sg->dma_address,
  				    sg_dma_len(sg), dir, target);
  }
  
  void
  swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
  			int nelems, enum dma_data_direction dir)
  {
  	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
  }
  EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
  
  void
  swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
  			   int nelems, enum dma_data_direction dir)
  {
  	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
  }
  EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
  
  int
  swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
  {
  	return (dma_addr == swiotlb_phys_to_dma(hwdev, io_tlb_overflow_buffer));
  }
  EXPORT_SYMBOL(swiotlb_dma_mapping_error);
  
  /*
   * Return whether the given device DMA address mask can be supported
   * properly.  For example, if your device can only drive the low 24-bits
   * during bus mastering, then you would pass 0x00ffffff as the mask to
   * this function.
   */
  int
  swiotlb_dma_supported(struct device *hwdev, u64 mask)
  {
  	return swiotlb_phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
  }
  EXPORT_SYMBOL(swiotlb_dma_supported);