lib/swiotlb.c

/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
 * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
 *			unnecessary i-cache flushing.
 * 04/07/.. ak		Better overflow handling. Assorted fixes.
 * 05/09/10 linville	Add support for syncing ranges, support syncing for
 *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
 * 08/12/11 beckyb	Add highmem support
 */

#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/pfn.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/gfp.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/scatterlist.h>

#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/iommu-helper.h>

#define CREATE_TRACE_POINTS
#include <trace/events/swiotlb.h>

#define OFFSET(val,align) ((unsigned long)	\
	                   ( (val) & ( (align) - 1)))

#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

/*
 * Minimum IO TLB size to bother booting with.  Systems with mainly
 * 64bit capable cards will only lightly use the swiotlb.  If we can't
 * allocate a contiguous 1MB, we're probably in trouble anyway.
 */
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)

int swiotlb_force;

/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */
static phys_addr_t io_tlb_start, io_tlb_end;

/*
 * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
 * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
 */
static unsigned long io_tlb_nslabs;

/*
 * When the IOMMU overflows we return a fallback buffer. This sets the size.
 */
static unsigned long io_tlb_overflow = 32*1024;
static phys_addr_t io_tlb_overflow_buffer;

/*
 * This is a free list describing the number of free entries available from
 * each index
 */
static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;
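
/*
 * Illustrative note (not in the original file): the free list encodes, for
 * every slot index, how many contiguous free slots start there without
 * crossing a segment boundary.  Assuming IO_TLB_SEGSIZE == 128 (as defined
 * in <linux/swiotlb.h>), a freshly initialised segment looks like
 *
 *	io_tlb_list[k..k+127] = 128, 127, 126, ..., 2, 1
 *
 * Allocating nslots at index k zeroes io_tlb_list[k..k+nslots-1] and
 * renumbers the free run immediately below it, which is exactly what
 * swiotlb_tbl_map_single() and swiotlb_tbl_unmap_single() do further down.
 */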
  
/*
 * We need to save away the original address corresponding to a mapped entry
 * for the sync operations.
 */
static phys_addr_t *io_tlb_orig_addr;

/*
 * Protect the above data structures in the map and unmap calls
 */
static DEFINE_SPINLOCK(io_tlb_lock);
static int late_alloc;

static int __init
setup_io_tlb_npages(char *str)
{
	if (isdigit(*str)) {
		io_tlb_nslabs = simple_strtoul(str, &str, 0);
		/* avoid tail segment of size < IO_TLB_SEGSIZE */
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}
	if (*str == ',')
		++str;
	if (!strcmp(str, "force"))
		swiotlb_force = 1;

	return 0;
}
early_param("swiotlb", setup_io_tlb_npages);
/* make io_tlb_overflow tunable too? */

unsigned long swiotlb_nr_tbl(void)
{
	return io_tlb_nslabs;
}
EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);

/* default to 64MB */
#define IO_TLB_DEFAULT_SIZE (64UL<<20)
unsigned long swiotlb_size_or_default(void)
{
	unsigned long size;

	size = io_tlb_nslabs << IO_TLB_SHIFT;

	return size ? size : (IO_TLB_DEFAULT_SIZE);
}
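
/*
 * Worked example (illustrative only): with no "swiotlb=" override,
 * swiotlb_init() below converts IO_TLB_DEFAULT_SIZE into slabs, i.e.
 * (64UL << 20) >> IO_TLB_SHIFT = 32768 slabs of 2KB each (assuming
 * IO_TLB_SHIFT == 11), already a multiple of IO_TLB_SEGSIZE, so the
 * ALIGN() there is a no-op.
 */
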
/* Note that this doesn't work with highmem page */
static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
				      volatile void *address)
{
	return phys_to_dma(hwdev, virt_to_phys(address));
}

static bool no_iotlb_memory;

void swiotlb_print_info(void)
{
	unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
	unsigned char *vstart, *vend;

	if (no_iotlb_memory) {
		pr_warn("software IO TLB: No low mem\n");
		return;
	}

	vstart = phys_to_virt(io_tlb_start);
	vend = phys_to_virt(io_tlb_end);

	printk(KERN_INFO "software IO TLB [mem %#010llx-%#010llx] (%luMB) mapped at [%p-%p]\n",
	       (unsigned long long)io_tlb_start,
	       (unsigned long long)io_tlb_end,
	       bytes >> 20, vstart, vend - 1);
}

int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
{
	void *v_overflow_buffer;
	unsigned long i, bytes;

	bytes = nslabs << IO_TLB_SHIFT;

	io_tlb_nslabs = nslabs;
	io_tlb_start = __pa(tlb);
	io_tlb_end = io_tlb_start + bytes;

	/*
	 * Get the overflow emergency buffer
	 */
	v_overflow_buffer = memblock_virt_alloc_low_nopanic(
						PAGE_ALIGN(io_tlb_overflow),
						PAGE_SIZE);
	if (!v_overflow_buffer)
		return -ENOMEM;

	io_tlb_overflow_buffer = __pa(v_overflow_buffer);

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = memblock_virt_alloc(
				PAGE_ALIGN(io_tlb_nslabs * sizeof(int)),
				PAGE_SIZE);
	for (i = 0; i < io_tlb_nslabs; i++)
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
	io_tlb_index = 0;
	io_tlb_orig_addr = memblock_virt_alloc(
				PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)),
				PAGE_SIZE);

	if (verbose)
		swiotlb_print_info();

	return 0;
}

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
void  __init
swiotlb_init(int verbose)
{
	size_t default_size = IO_TLB_DEFAULT_SIZE;
	unsigned char *vstart;
	unsigned long bytes;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	/* Get IO TLB memory from the low pages */
	vstart = memblock_virt_alloc_low_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE);
	if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
		return;

	if (io_tlb_start)
		memblock_free_early(io_tlb_start,
				    PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
	pr_warn("Cannot allocate SWIOTLB buffer");
	no_iotlb_memory = true;
}

/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the slab allocator if needed.
 * This should be just like above, but with some error catching.
 */
int
swiotlb_late_init_with_default_size(size_t default_size)
{
	unsigned long bytes, req_nslabs = io_tlb_nslabs;
	unsigned char *vstart = NULL;
	unsigned int order;
	int rc = 0;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	/*
	 * Get IO TLB memory from the low pages
	 */
	order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
	io_tlb_nslabs = SLABS_PER_PAGE << order;
	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
		vstart = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
						  order);
		if (vstart)
			break;
		order--;
	}
	if (!vstart) {
		io_tlb_nslabs = req_nslabs;
		return -ENOMEM;
	}
	if (order != get_order(bytes)) {
		printk(KERN_WARNING "Warning: only able to allocate %ld MB "
		       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
		io_tlb_nslabs = SLABS_PER_PAGE << order;
	}
	rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs);
	if (rc)
		free_pages((unsigned long)vstart, order);
	return rc;
}

int
swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
{
	unsigned long i, bytes;
	unsigned char *v_overflow_buffer;

	bytes = nslabs << IO_TLB_SHIFT;

	io_tlb_nslabs = nslabs;
	io_tlb_start = virt_to_phys(tlb);
	io_tlb_end = io_tlb_start + bytes;

	memset(tlb, 0, bytes);

	/*
	 * Get the overflow emergency buffer
	 */
	v_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
						     get_order(io_tlb_overflow));
	if (!v_overflow_buffer)
		goto cleanup2;

	io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer);

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
	                              get_order(io_tlb_nslabs * sizeof(int)));
	if (!io_tlb_list)
		goto cleanup3;

	for (i = 0; i < io_tlb_nslabs; i++)
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
	io_tlb_index = 0;
	io_tlb_orig_addr = (phys_addr_t *)
		__get_free_pages(GFP_KERNEL,
				 get_order(io_tlb_nslabs *
					   sizeof(phys_addr_t)));
	if (!io_tlb_orig_addr)
		goto cleanup4;

	memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(phys_addr_t));

	swiotlb_print_info();

	late_alloc = 1;
	return 0;

cleanup4:
	free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
	                                                 sizeof(int)));
	io_tlb_list = NULL;
cleanup3:
	free_pages((unsigned long)v_overflow_buffer,
		   get_order(io_tlb_overflow));
	io_tlb_overflow_buffer = 0;
cleanup2:
	io_tlb_end = 0;
	io_tlb_start = 0;
	io_tlb_nslabs = 0;
	return -ENOMEM;
}

void __init swiotlb_free(void)
{
	if (!io_tlb_orig_addr)
		return;

	if (late_alloc) {
		free_pages((unsigned long)phys_to_virt(io_tlb_overflow_buffer),
			   get_order(io_tlb_overflow));
		free_pages((unsigned long)io_tlb_orig_addr,
			   get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
		free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
								 sizeof(int)));
		free_pages((unsigned long)phys_to_virt(io_tlb_start),
			   get_order(io_tlb_nslabs << IO_TLB_SHIFT));
	} else {
		memblock_free_late(io_tlb_overflow_buffer,
				   PAGE_ALIGN(io_tlb_overflow));
		memblock_free_late(__pa(io_tlb_orig_addr),
				   PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
		memblock_free_late(__pa(io_tlb_list),
				   PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
		memblock_free_late(io_tlb_start,
				   PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
	}
	io_tlb_nslabs = 0;
}

static int is_swiotlb_buffer(phys_addr_t paddr)
{
	return paddr >= io_tlb_start && paddr < io_tlb_end;
}

/*
 * Bounce: copy the swiotlb buffer back to the original dma location
 */
static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
			   size_t size, enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(orig_addr);
	unsigned char *vaddr = phys_to_virt(tlb_addr);

	if (PageHighMem(pfn_to_page(pfn))) {
		/* The buffer does not have a mapping.  Map it in and copy */
		unsigned int offset = orig_addr & ~PAGE_MASK;
		char *buffer;
		unsigned int sz = 0;
		unsigned long flags;

		while (size) {
			sz = min_t(size_t, PAGE_SIZE - offset, size);

			local_irq_save(flags);
			buffer = kmap_atomic(pfn_to_page(pfn));
			if (dir == DMA_TO_DEVICE)
				memcpy(vaddr, buffer + offset, sz);
			else
				memcpy(buffer + offset, vaddr, sz);
			kunmap_atomic(buffer);
			local_irq_restore(flags);

			size -= sz;
			pfn++;
			vaddr += sz;
			offset = 0;
		}
	} else if (dir == DMA_TO_DEVICE) {
		memcpy(vaddr, phys_to_virt(orig_addr), size);
	} else {
		memcpy(phys_to_virt(orig_addr), vaddr, size);
	}
}

phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
				   dma_addr_t tbl_dma_addr,
				   phys_addr_t orig_addr, size_t size,
				   enum dma_data_direction dir)
{
	unsigned long flags;
	phys_addr_t tlb_addr;
	unsigned int nslots, stride, index, wrap;
	int i;
	unsigned long mask;
	unsigned long offset_slots;
	unsigned long max_slots;

	if (no_iotlb_memory)
		panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");

	mask = dma_get_seg_boundary(hwdev);

	tbl_dma_addr &= mask;

	offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;

	/*
	 * Carefully handle integer overflow which can occur when mask == ~0UL.
	 */
	max_slots = mask + 1
		    ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
		    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);

	/*
	 * For mappings greater than a page, we limit the stride (and
	 * hence alignment) to a page size.
	 */
	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	if (size > PAGE_SIZE)
		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
	else
		stride = 1;
	BUG_ON(!nslots);

	/*
	 * Find suitable number of IO TLB entries size that will fit this
	 * request and allocate a buffer from that IO TLB pool.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	index = ALIGN(io_tlb_index, stride);
	if (index >= io_tlb_nslabs)
		index = 0;
	wrap = index;

	do {
		while (iommu_is_span_boundary(index, nslots, offset_slots,
					      max_slots)) {
			index += stride;
			if (index >= io_tlb_nslabs)
				index = 0;
			if (index == wrap)
				goto not_found;
		}

		/*
		 * If we find a slot that indicates we have 'nslots' number of
		 * contiguous buffers, we allocate the buffers from that slot
		 * and mark the entries as '0' indicating unavailable.
		 */
		if (io_tlb_list[index] >= nslots) {
			int count = 0;

			for (i = index; i < (int) (index + nslots); i++)
				io_tlb_list[i] = 0;
			for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
				io_tlb_list[i] = ++count;
			tlb_addr = io_tlb_start + (index << IO_TLB_SHIFT);

			/*
			 * Update the indices to avoid searching in the next
			 * round.
			 */
			io_tlb_index = ((index + nslots) < io_tlb_nslabs
					? (index + nslots) : 0);

			goto found;
		}
		index += stride;
		if (index >= io_tlb_nslabs)
			index = 0;
	} while (index != wrap);

not_found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);
	if (printk_ratelimit())
		dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n", size);
	return SWIOTLB_MAP_ERROR;
found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);

	/*
	 * Save away the mapping from the original address to the DMA address.
	 * This is needed when we sync the memory.  Then we sync the buffer if
	 * needed.
	 */
	for (i = 0; i < nslots; i++)
		io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);

	return tlb_addr;
}
EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
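
/*
 * Worked example (illustrative only, assuming 4KB pages and
 * IO_TLB_SHIFT == 11): a 9000 byte mapping gives
 *
 *	nslots = ALIGN(9000, 1 << 11) >> 11 = 5
 *	stride = 1 << (PAGE_SHIFT - IO_TLB_SHIFT) = 2  (since size > PAGE_SIZE)
 *
 * so candidate start indices stay page aligned inside the aperture and the
 * search above claims five consecutive 2KB slots.
 */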

/*
 * Allocates bounce buffer and returns its kernel virtual address.
 */
phys_addr_t map_single(struct device *hwdev, phys_addr_t phys, size_t size,
		       enum dma_data_direction dir)
{
	dma_addr_t start_dma_addr = phys_to_dma(hwdev, io_tlb_start);

	return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, dir);
}

/*
 * tlb_addr is the physical address of the bounce buffer to unmap.
 */
void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
			      size_t size, enum dma_data_direction dir)
{
	unsigned long flags;
	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
	phys_addr_t orig_addr = io_tlb_orig_addr[index];

	/*
	 * First, sync the memory before unmapping the entry
	 */
	if (orig_addr && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
		swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);

	/*
	 * Return the buffer to the free list by setting the corresponding
	 * entries to indicate the number of contiguous entries available.
	 * While returning the entries to the free list, we merge the entries
	 * with slots below and above the pool being returned.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	{
		count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
			 io_tlb_list[index + nslots] : 0);
		/*
		 * Step 1: return the slots to the free list, merging the
		 * slots with succeeding slots
		 */
		for (i = index + nslots - 1; i >= index; i--)
			io_tlb_list[i] = ++count;
		/*
		 * Step 2: merge the returned slots with the preceding slots,
		 * if available (non zero)
		 */
		for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--)
			io_tlb_list[i] = ++count;
	}
	spin_unlock_irqrestore(&io_tlb_lock, flags);
}
EXPORT_SYMBOL_GPL(swiotlb_tbl_unmap_single);

void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
			     size_t size, enum dma_data_direction dir,
			     enum dma_sync_target target)
{
	int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
	phys_addr_t orig_addr = io_tlb_orig_addr[index];

	orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1);

	switch (target) {
	case SYNC_FOR_CPU:
		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
			swiotlb_bounce(orig_addr, tlb_addr,
				       size, DMA_FROM_DEVICE);
		else
			BUG_ON(dir != DMA_TO_DEVICE);
		break;
	case SYNC_FOR_DEVICE:
		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
			swiotlb_bounce(orig_addr, tlb_addr,
				       size, DMA_TO_DEVICE);
		else
			BUG_ON(dir != DMA_FROM_DEVICE);
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL_GPL(swiotlb_tbl_sync_single);

void *
swiotlb_alloc_coherent(struct device *hwdev, size_t size,
		       dma_addr_t *dma_handle, gfp_t flags)
{
	dma_addr_t dev_addr;
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_BIT_MASK(32);

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	ret = (void *)__get_free_pages(flags, order);
	if (ret) {
		dev_addr = swiotlb_virt_to_bus(hwdev, ret);
		if (dev_addr + size - 1 > dma_mask) {
			/*
			 * The allocated memory isn't reachable by the device.
			 */
			free_pages((unsigned long) ret, order);
			ret = NULL;
		}
	}
	if (!ret) {
		/*
		 * We are either out of memory or the device can't DMA to
		 * GFP_DMA memory; fall back on map_single(), which
		 * will grab memory from the lowest available address range.
		 */
		phys_addr_t paddr = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
		if (paddr == SWIOTLB_MAP_ERROR)
			return NULL;

		ret = phys_to_virt(paddr);
		dev_addr = phys_to_dma(hwdev, paddr);

		/* Confirm address can be DMA'd by device */
		if (dev_addr + size - 1 > dma_mask) {
			printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
			       (unsigned long long)dma_mask,
			       (unsigned long long)dev_addr);

			/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
			swiotlb_tbl_unmap_single(hwdev, paddr,
						 size, DMA_TO_DEVICE);
			return NULL;
		}
	}

	*dma_handle = dev_addr;
	memset(ret, 0, size);
	return ret;
}
EXPORT_SYMBOL(swiotlb_alloc_coherent);
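
/*
 * Illustrative caller-side sketch (not part of the original file): coherent
 * allocations normally arrive here through the generic DMA API rather than
 * by calling swiotlb_alloc_coherent() directly.  The helper below is purely
 * an example; its name and the PAGE_SIZE buffer are arbitrary choices.
 */
static inline void *example_alloc_shared_page(struct device *dev,
					      dma_addr_t *bus_addr)
{
	/* dma_alloc_coherent() ends up in swiotlb_alloc_coherent() on
	 * swiotlb-backed platforms; pair it with dma_free_coherent(). */
	return dma_alloc_coherent(dev, PAGE_SIZE, bus_addr, GFP_KERNEL);
}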
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
652
653
654
  
  void
  swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
02ca646e7   FUJITA Tomonori   swiotlb: remove u...
655
  		      dma_addr_t dev_addr)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
656
  {
862d196b2   FUJITA Tomonori   swiotlb: use phys...
657
  	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
02ca646e7   FUJITA Tomonori   swiotlb: remove u...
658

aa24886e3   David Brownell   dma_free_coherent...
659
  	WARN_ON(irqs_disabled());
02ca646e7   FUJITA Tomonori   swiotlb: remove u...
660
661
  	if (!is_swiotlb_buffer(paddr))
  		free_pages((unsigned long)vaddr, get_order(size));
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
662
  	else
bfc5501f6   Konrad Rzeszutek Wilk   swiotlb: Make int...
663
  		/* DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single */
61ca08c32   Alexander Duyck   swiotlb: Use phys...
664
  		swiotlb_tbl_unmap_single(hwdev, paddr, size, DMA_TO_DEVICE);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
665
  }
874d6a955   FUJITA Tomonori   swiotlb: clean up...
666
  EXPORT_SYMBOL(swiotlb_free_coherent);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
667
668
  
  static void
22d482699   Konrad Rzeszutek Wilk   swiotlb: search a...
669
670
  swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
  	     int do_panic)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
671
672
673
674
  {
  	/*
  	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly
	 * unless they check for dma_mapping_error (most don't).
	 * When the mapping is small enough return a static buffer to limit
	 * the damage, or panic when the transfer is too big.
	 */
	printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
	       "device %s\n", size, dev ? dev_name(dev) : "?");

	if (size <= io_tlb_overflow || !do_panic)
		return;

	if (dir == DMA_BIDIRECTIONAL)
		panic("DMA: Random memory could be DMA accessed\n");
	if (dir == DMA_FROM_DEVICE)
		panic("DMA: Random memory could be DMA written\n");
	if (dir == DMA_TO_DEVICE)
		panic("DMA: Random memory could be DMA read\n");
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either swiotlb_unmap_page or swiotlb_dma_sync_single is performed.
 */
dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
			    unsigned long offset, size_t size,
			    enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	phys_addr_t map, phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = phys_to_dma(dev, phys);

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
		return dev_addr;

	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);

	/* Oh well, have to allocate and map a bounce buffer. */
	map = map_single(dev, phys, size, dir);
	if (map == SWIOTLB_MAP_ERROR) {
		swiotlb_full(dev, size, dir, 1);
		return phys_to_dma(dev, io_tlb_overflow_buffer);
	}
	dev_addr = phys_to_dma(dev, map);

	/* Ensure that the address returned is DMA'ble */
	if (!dma_capable(dev, dev_addr, size)) {
		swiotlb_tbl_unmap_single(dev, map, size, dir);
		return phys_to_dma(dev, io_tlb_overflow_buffer);
	}

	return dev_addr;
}
EXPORT_SYMBOL_GPL(swiotlb_map_page);
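
/*
 * Illustrative caller-side sketch (not part of the original file): because a
 * full io_tlb resolves to the overflow buffer instead of failing outright, a
 * driver must check dma_mapping_error() before programming the hardware.
 * The helper name and parameters below are hypothetical.
 */
static inline int example_map_for_device(struct device *dev, struct page *page,
					 size_t len, dma_addr_t *bus_addr)
{
	*bus_addr = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *bus_addr))
		return -ENOMEM;	/* never hand this address to the device */
	/* later: dma_unmap_page(dev, *bus_addr, len, DMA_TO_DEVICE) */
	return 0;
}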

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
			 size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (is_swiotlb_buffer(paddr)) {
		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	/*
	 * phys_to_virt doesn't work with highmem page but we could
	 * call dma_mark_clean() with highmem page here. However, we
	 * are fine since dma_mark_clean() is null on POWERPC. We can
	 * make dma_mark_clean() take a physical address if necessary.
	 */
	dma_mark_clean(phys_to_virt(paddr), size);
}

void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
			size_t size, enum dma_data_direction dir,
			struct dma_attrs *attrs)
{
	unmap_single(hwdev, dev_addr, size, dir);
}
EXPORT_SYMBOL_GPL(swiotlb_unmap_page);

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to teardown the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * swiotlb_dma_sync_for_device, and then the device again owns the buffer
 */
static void
swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
		    size_t size, enum dma_data_direction dir,
		    enum dma_sync_target target)
{
	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (is_swiotlb_buffer(paddr)) {
		swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	dma_mark_clean(phys_to_virt(paddr), size);
}

void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, enum dma_data_direction dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);

void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_device);
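
/*
 * Illustrative caller-side sketch (not part of the original file): a driver
 * that keeps one streaming mapping alive across transfers brackets each CPU
 * access with the sync calls above instead of remapping.  Names are
 * hypothetical.
 */
static inline void example_reuse_rx_buffer(struct device *dev,
					   dma_addr_t bus_addr, size_t len)
{
	/* Give the buffer to the CPU to inspect what the device wrote ... */
	dma_sync_single_for_cpu(dev, bus_addr, len, DMA_FROM_DEVICE);
	/* ... then hand it back to the device for the next transfer. */
	dma_sync_single_for_device(dev, bus_addr, len, DMA_FROM_DEVICE);
}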

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for swiotlb_map_page are the
 * same here.
 */
int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
		     enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		phys_addr_t paddr = sg_phys(sg);
		dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);

		if (swiotlb_force ||
		    !dma_capable(hwdev, dev_addr, sg->length)) {
			phys_addr_t map = map_single(hwdev, sg_phys(sg),
						     sg->length, dir);
			if (map == SWIOTLB_MAP_ERROR) {
				/* Don't panic here, we expect map_sg users
				   to do proper error handling. */
				swiotlb_full(hwdev, sg->length, dir, 0);
				swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
						       attrs);
				sg_dma_len(sgl) = 0;
				return 0;
			}
			sg->dma_address = phys_to_dma(hwdev, map);
		} else
			sg->dma_address = dev_addr;
		sg_dma_len(sg) = sg->length;
	}
	return nelems;
}
EXPORT_SYMBOL(swiotlb_map_sg_attrs);

int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
	       enum dma_data_direction dir)
{
	return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL(swiotlb_map_sg);

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
void
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
		       int nelems, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir);

}
EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);

void
swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		 enum dma_data_direction dir)
{
	return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL(swiotlb_unmap_sg);

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
		int nelems, enum dma_data_direction dir,
		enum dma_sync_target target)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		swiotlb_sync_single(hwdev, sg->dma_address,
				    sg_dma_len(sg), dir, target);
}

void
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			int nelems, enum dma_data_direction dir)
{
	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);

void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			   int nelems, enum dma_data_direction dir)
{
	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL(swiotlb_sync_sg_for_device);

int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return (dma_addr == phys_to_dma(hwdev, io_tlb_overflow_buffer));
}
EXPORT_SYMBOL(swiotlb_dma_mapping_error);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL(swiotlb_dma_supported);