Blame view

lib/swiotlb.c 24.1 KB
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1
2
3
  /*
   * Dynamic DMA mapping support.
   *
563aaf064   Jan Beulich   [IA64] swiotlb cl...
4
   * This implementation is a fallback for platforms that do not support
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
5
6
7
8
9
10
11
12
13
   * I/O TLBs (aka DMA address translation hardware).
   * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
   * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
   * Copyright (C) 2000, 2003 Hewlett-Packard Co
   *	David Mosberger-Tang <davidm@hpl.hp.com>
   *
   * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
   * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
   *			unnecessary i-cache flushing.
569c8bf5d   John W. Linville   [PATCH] swiotlb: ...
14
15
16
   * 04/07/.. ak		Better overflow handling. Assorted fixes.
   * 05/09/10 linville	Add support for syncing ranges, support syncing for
   *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
17
18
19
   */
  
  #include <linux/cache.h>
17e5ad6c0   Tony Luck   [PATCH] Removed r...
20
  #include <linux/dma-mapping.h>
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
21
22
  #include <linux/mm.h>
  #include <linux/module.h>
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
23
24
25
26
27
28
  #include <linux/spinlock.h>
  #include <linux/string.h>
  #include <linux/types.h>
  #include <linux/ctype.h>
  
  #include <asm/io.h>
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
29
  #include <asm/dma.h>
17e5ad6c0   Tony Luck   [PATCH] Removed r...
30
  #include <asm/scatterlist.h>
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
31
32
33
  
  #include <linux/init.h>
  #include <linux/bootmem.h>
a85225092   FUJITA Tomonori   swiotlb: use iomm...
34
  #include <linux/iommu-helper.h>
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
35
36
37
  
/* Byte offset of 'val' within an 'align'-sized unit; align must be a power of 2. */
#define OFFSET(val,align) ((unsigned long)	\
	                   ( (val) & ( (align) - 1)))

/* Kernel virtual / bus address of a scatterlist entry's data. */
#define SG_ENT_VIRT_ADDRESS(sg)	(sg_virt((sg)))
#define SG_ENT_PHYS_ADDRESS(sg)	virt_to_bus(SG_ENT_VIRT_ADDRESS(sg))

/*
 * Maximum allowable number of contiguous slabs to map,
 * must be a power of 2.  What is the appropriate value ?
 * The complexity of {map,unmap}_single is linearly dependent on this value.
 */
#define IO_TLB_SEGSIZE	128

/*
 * log of the size of each IO TLB slab.  The number of slabs is command line
 * controllable.
 */
#define IO_TLB_SHIFT 11

/* Number of IO TLB slabs that fit in one page. */
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

/*
 * Minimum IO TLB size to bother booting with.  Systems with mainly
 * 64bit capable cards will only lightly use the swiotlb.  If we can't
 * allocate a contiguous 1MB, we're probably in trouble anyway.
 */
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)

/*
 * Enumeration for sync targets: which side (CPU or device) must see a
 * coherent view of the buffer after the sync.
 */
enum dma_sync_target {
	SYNC_FOR_CPU = 0,
	SYNC_FOR_DEVICE = 1,
};
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
/* Non-zero when "swiotlb=...,force" was given: bounce every mapping. */
int swiotlb_force;

/*
 * Used to do a quick range check in swiotlb_unmap_single and
 * swiotlb_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */
static char *io_tlb_start, *io_tlb_end;

/*
 * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
 * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
 */
static unsigned long io_tlb_nslabs;

/*
 * When the IOMMU overflows we return a fallback buffer. This sets the size.
 */
static unsigned long io_tlb_overflow = 32*1024;

/* Emergency bounce buffer handed out when the pool is exhausted. */
void *io_tlb_overflow_buffer;

/*
 * This is a free list describing the number of free entries available from
 * each index
 */
static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;

/*
 * We need to save away the original address corresponding to a mapped entry
 * for the sync operations.
 */
static unsigned char **io_tlb_orig_addr;

/*
 * Protect the above data structures in the map and unmap calls
 */
static DEFINE_SPINLOCK(io_tlb_lock);
  
/*
 * Parse the "swiotlb=" boot parameter: an optional slab count, optionally
 * followed by ",force" to bounce all DMA through the swiotlb.
 */
static int __init
setup_io_tlb_npages(char *str)
{
	if (isdigit(*str)) {
		io_tlb_nslabs = simple_strtoul(str, &str, 0);
		/* avoid tail segment of size < IO_TLB_SEGSIZE */
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}
	if (*str == ',')
		++str;
	if (!strcmp(str, "force"))
		swiotlb_force = 1;
	return 1;
}
__setup("swiotlb=", setup_io_tlb_npages);
/* make io_tlb_overflow tunable too? */
  
/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 *
 * Called at boot; allocates from bootmem and panics on failure since the
 * system cannot do 32-bit DMA without the pool.
 */
void __init
swiotlb_init_with_default_size(size_t default_size)
{
	unsigned long i, bytes;

	/* Honor a "swiotlb=" override; otherwise size from default_size. */
	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	/*
	 * Get IO TLB memory from the low pages
	 */
	io_tlb_start = alloc_bootmem_low_pages(bytes);
	if (!io_tlb_start)
		panic("Cannot allocate SWIOTLB buffer");
	io_tlb_end = io_tlb_start + bytes;

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
	for (i = 0; i < io_tlb_nslabs; i++)
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
	io_tlb_index = 0;
	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *));

	/*
	 * Get the overflow emergency buffer
	 */
	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
	if (!io_tlb_overflow_buffer)
		panic("Cannot allocate SWIOTLB overflow buffer!\n");

	printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n",
	       virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
}
563aaf064   Jan Beulich   [IA64] swiotlb cl...
169
170
  void __init
  swiotlb_init(void)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
171
  {
25667d675   Tony Luck   Revert "[IA64] sw...
172
  	swiotlb_init_with_default_size(64 * (1<<20));	/* default to 64MB */
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
173
  }
0b9afede3   Alex Williamson   [IA64] more robus...
174
175
176
177
178
179
/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the slab allocator if needed.
 * This should be just like above, but with some error catching.
 *
 * Returns 0 on success, -ENOMEM on failure; on failure all partially
 * allocated resources are released (goto cleanup chain) and
 * io_tlb_nslabs is restored to the value requested on entry.
 */
int
swiotlb_late_init_with_default_size(size_t default_size)
{
	unsigned long i, bytes, req_nslabs = io_tlb_nslabs;
	unsigned int order;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	/*
	 * Get IO TLB memory from the low pages
	 */
	order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
	io_tlb_nslabs = SLABS_PER_PAGE << order;
	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	/*
	 * Retry with progressively smaller orders, but refuse to shrink
	 * below IO_TLB_MIN_SLABS worth of pool.
	 */
	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
		io_tlb_start = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
		                                        order);
		if (io_tlb_start)
			break;
		order--;
	}

	if (!io_tlb_start)
		goto cleanup1;
	if (order != get_order(bytes)) {
		/* We got less than asked for; recompute the pool geometry. */
		printk(KERN_WARNING "Warning: only able to allocate %ld MB "
		       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
		io_tlb_nslabs = SLABS_PER_PAGE << order;
		bytes = io_tlb_nslabs << IO_TLB_SHIFT;
	}
	io_tlb_end = io_tlb_start + bytes;
	memset(io_tlb_start, 0, bytes);

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
	                              get_order(io_tlb_nslabs * sizeof(int)));
	if (!io_tlb_list)
		goto cleanup2;

	for (i = 0; i < io_tlb_nslabs; i++)
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
	io_tlb_index = 0;

	io_tlb_orig_addr = (unsigned char **)__get_free_pages(GFP_KERNEL,
	                           get_order(io_tlb_nslabs * sizeof(char *)));
	if (!io_tlb_orig_addr)
		goto cleanup3;
	memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(char *));

	/*
	 * Get the overflow emergency buffer
	 */
	io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
	                                          get_order(io_tlb_overflow));
	if (!io_tlb_overflow_buffer)
		goto cleanup4;

	printk(KERN_INFO "Placing %luMB software IO TLB between 0x%lx - "
	       "0x%lx\n", bytes >> 20,
	       virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));

	return 0;

/* Unwind in reverse order of allocation. */
cleanup4:
	free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs *
	                                                      sizeof(char *)));
	io_tlb_orig_addr = NULL;
cleanup3:
	free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
	                                                 sizeof(int)));
	io_tlb_list = NULL;
cleanup2:
	io_tlb_end = NULL;
	free_pages((unsigned long)io_tlb_start, order);
	io_tlb_start = NULL;
cleanup1:
	io_tlb_nslabs = req_nslabs;
	return -ENOMEM;
}
be6b02678   Andrew Morton   [PATCH] swiotlb u...
266
  static int
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
267
268
269
270
271
272
273
274
275
276
277
278
279
  address_needs_mapping(struct device *hwdev, dma_addr_t addr)
  {
  	dma_addr_t mask = 0xffffffff;
  	/* If the device has a mask, use it, otherwise default to 32 bits */
  	if (hwdev && hwdev->dma_mask)
  		mask = *hwdev->dma_mask;
  	return (addr & ~mask) != 0;
  }
  
/*
 * Allocates bounce buffer and returns its kernel virtual address.
 *
 * Searches the free list for 'size' worth of contiguous slots that do
 * not cross the device's segment boundary, records the original buffer
 * address for later syncs, and (for writes to the device) copies the
 * caller's data into the bounce buffer.  Returns NULL when no suitable
 * run of slots is free.
 */
static void *
map_single(struct device *hwdev, char *buffer, size_t size, int dir)
{
	unsigned long flags;
	char *dma_addr;
	unsigned int nslots, stride, index, wrap;
	int i;
	unsigned long start_dma_addr;
	unsigned long mask;
	unsigned long offset_slots;
	unsigned long max_slots;

	mask = dma_get_seg_boundary(hwdev);
	start_dma_addr = virt_to_bus(io_tlb_start) & mask;

	offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	/* mask + 1 overflows to 0 when the mask covers the whole word. */
	max_slots = mask + 1
		    ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
		    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);

	/*
	 * For mappings greater than a page, we limit the stride (and
	 * hence alignment) to a page size.
	 */
	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	if (size > PAGE_SIZE)
		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
	else
		stride = 1;

	BUG_ON(!nslots);

	/*
	 * Find suitable number of IO TLB entries size that will fit this
	 * request and allocate a buffer from that IO TLB pool.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	/* Start the scan where the previous allocation left off. */
	index = ALIGN(io_tlb_index, stride);
	if (index >= io_tlb_nslabs)
		index = 0;
	wrap = index;

	do {
		/* Skip candidates that would span the segment boundary. */
		while (iommu_is_span_boundary(index, nslots, offset_slots,
					      max_slots)) {
			index += stride;
			if (index >= io_tlb_nslabs)
				index = 0;
			if (index == wrap)
				goto not_found;
		}

		/*
		 * If we find a slot that indicates we have 'nslots' number of
		 * contiguous buffers, we allocate the buffers from that slot
		 * and mark the entries as '0' indicating unavailable.
		 */
		if (io_tlb_list[index] >= nslots) {
			int count = 0;

			for (i = index; i < (int) (index + nslots); i++)
				io_tlb_list[i] = 0;
			/* Fix up free-run lengths of preceding free slots. */
			for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
				io_tlb_list[i] = ++count;
			dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);

			/*
			 * Update the indices to avoid searching in the next
			 * round.
			 */
			io_tlb_index = ((index + nslots) < io_tlb_nslabs
					? (index + nslots) : 0);

			goto found;
		}
		index += stride;
		if (index >= io_tlb_nslabs)
			index = 0;
	} while (index != wrap);

not_found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);
	return NULL;
found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);

	/*
	 * Save away the mapping from the original address to the DMA address.
	 * This is needed when we sync the memory.  Then we sync the buffer if
	 * needed.
	 */
	for (i = 0; i < nslots; i++)
		io_tlb_orig_addr[index+i] = buffer + (i << IO_TLB_SHIFT);
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		memcpy(dma_addr, buffer, size);

	return dma_addr;
}
  
/*
 * dma_addr is the kernel virtual address of the bounce buffer to unmap.
 *
 * Copies device-written data back to the original buffer (for FROM_DEVICE
 * and BIDIRECTIONAL mappings) and returns the slots to the free list,
 * merging them with adjacent free runs.
 */
static void
unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
{
	unsigned long flags;
	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
	char *buffer = io_tlb_orig_addr[index];

	/*
	 * First, sync the memory before unmapping the entry
	 */
	if (buffer && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
		/*
		 * bounce... copy the data back into the original buffer * and
		 * delete the bounce buffer.
		 */
		memcpy(buffer, dma_addr, size);

	/*
	 * Return the buffer to the free list by setting the corresponding
	 * entries to indicate the number of contiguous entries available.
	 * While returning the entries to the free list, we merge the entries
	 * with slots below and above the pool being returned.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	{
		/* Length of the free run immediately after this mapping,
		 * but never merge across an IO_TLB_SEGSIZE boundary. */
		count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
			 io_tlb_list[index + nslots] : 0);
		/*
		 * Step 1: return the slots to the free list, merging the
		 * slots with succeeding slots
		 */
		for (i = index + nslots - 1; i >= index; i--)
			io_tlb_list[i] = ++count;
		/*
		 * Step 2: merge the returned slots with the preceding slots,
		 * if available (non zero)
		 */
		for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--)
			io_tlb_list[i] = ++count;
	}
	spin_unlock_irqrestore(&io_tlb_lock, flags);
}
  
/*
 * Copy between a bounce buffer and its original buffer so that the
 * requested side (CPU or device) sees current data.  dma_addr is the
 * kernel virtual address inside the bounce pool; the original address is
 * recovered from io_tlb_orig_addr plus the sub-slab offset.
 */
static void
sync_single(struct device *hwdev, char *dma_addr, size_t size,
	    int dir, int target)
{
	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
	char *buffer = io_tlb_orig_addr[index];

	/* Preserve the offset of dma_addr within its slab. */
	buffer += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1));

	switch (target) {
	case SYNC_FOR_CPU:
		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
			memcpy(buffer, dma_addr, size);
		else
			BUG_ON(dir != DMA_TO_DEVICE);
		break;
	case SYNC_FOR_DEVICE:
		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
			memcpy(dma_addr, buffer, size);
		else
			BUG_ON(dir != DMA_FROM_DEVICE);
		break;
	default:
		BUG();
	}
}
  
/*
 * Allocate a coherent DMA buffer: try the page allocator first, and fall
 * back to the swiotlb pool when the pages are not addressable by the
 * device.  Returns the zeroed kernel virtual address and fills in
 * *dma_handle with the bus address; returns NULL on failure.
 */
void *
swiotlb_alloc_coherent(struct device *hwdev, size_t size,
		       dma_addr_t *dma_handle, gfp_t flags)
{
	dma_addr_t dev_addr;
	void *ret;
	int order = get_order(size);

	/*
	 * XXX fix me: the DMA API should pass us an explicit DMA mask
	 * instead, or use ZONE_DMA32 (ia64 overloads ZONE_DMA to be a ~32
	 * bit range instead of a 16MB one).
	 */
	flags |= GFP_DMA;

	ret = (void *)__get_free_pages(flags, order);
	if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) {
		/*
		 * The allocated memory isn't reachable by the device.
		 * Fall back on swiotlb_map_single().
		 */
		free_pages((unsigned long) ret, order);
		ret = NULL;
	}
	if (!ret) {
		/*
		 * We are either out of memory or the device can't DMA
		 * to GFP_DMA memory; fall back on
		 * swiotlb_map_single(), which will grab memory from
		 * the lowest available address range.
		 */
		dma_addr_t handle;
		handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE);
		if (swiotlb_dma_mapping_error(hwdev, handle))
			return NULL;

		ret = bus_to_virt(handle);
	}

	memset(ret, 0, size);
	dev_addr = virt_to_bus(ret);

	/* Confirm address can be DMA'd by device */
	if (address_needs_mapping(hwdev, dev_addr)) {
		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
		       (unsigned long long)*hwdev->dma_mask,
		       (unsigned long long)dev_addr);
		panic("swiotlb_alloc_coherent: allocated memory is out of "
		      "range for device");
	}
	*dma_handle = dev_addr;
	return ret;
}
  
  void
  swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
  		      dma_addr_t dma_handle)
  {
aa24886e3   David Brownell   dma_free_coherent...
507
  	WARN_ON(irqs_disabled());
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
508
509
510
511
512
513
514
515
516
517
518
519
520
521
  	if (!(vaddr >= (void *)io_tlb_start
                      && vaddr < (void *)io_tlb_end))
  		free_pages((unsigned long) vaddr, get_order(size));
  	else
  		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
  		swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE);
  }
  
/*
 * Report a bounce-pool exhaustion for a 'size'-byte mapping in direction
 * 'dir'; panics when the transfer is larger than the overflow buffer and
 * do_panic is set.
 */
static void
swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * unless they check for dma_mapping_error (most don't)
	 * When the mapping is small enough return a static buffer to limit
	 * the damage, or panic when the transfer is too big.
	 */
	printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
	       "device %s\n", size, dev ? dev->bus_id : "?");

	if (size > io_tlb_overflow && do_panic) {
		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
			panic("DMA: Memory would be corrupted\n");
		if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
			panic("DMA: Random memory would be DMAed\n");
	}
}
  
/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
 */
dma_addr_t
swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
			 int dir, struct dma_attrs *attrs)
{
	dma_addr_t dev_addr = virt_to_bus(ptr);
	void *map;

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the pointer passed in happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force)
		return dev_addr;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	map = map_single(hwdev, ptr, size, dir);
	if (!map) {
		/* Pool exhausted: report and hand back the overflow buffer. */
		swiotlb_full(hwdev, size, dir, 1);
		map = io_tlb_overflow_buffer;
	}

	dev_addr = virt_to_bus(map);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (address_needs_mapping(hwdev, dev_addr))
		panic("map_single: bounce buffer is not DMA'ble");

	return dev_addr;
}
EXPORT_SYMBOL(swiotlb_map_single_attrs);
  
/* Legacy entry point: map with no DMA attributes. */
dma_addr_t
swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
{
	return swiotlb_map_single_attrs(hwdev, ptr, size, dir, NULL);
}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
587
588
  
/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous swiotlb_map_single call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
void
swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
			   size_t size, int dir, struct dma_attrs *attrs)
{
	char *dma_addr = bus_to_virt(dev_addr);

	BUG_ON(dir == DMA_NONE);
	/* Only bounce-pool addresses need the copy-back/free path. */
	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
		unmap_single(hwdev, dma_addr, size, dir);
	else if (dir == DMA_FROM_DEVICE)
		dma_mark_clean(dma_addr, size);
}
EXPORT_SYMBOL(swiotlb_unmap_single_attrs);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
609

309df0c50   Arthur Kepner   dma/ia64: update ...
610
611
612
613
614
615
  void
  swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
  		     int dir)
  {
  	return swiotlb_unmap_single_attrs(hwdev, dev_addr, size, dir, NULL);
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
616
617
618
619
620
/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a swiotlb_map_single() but wish to interrogate the buffer
 * using the cpu, yet do not wish to teardown the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * swiotlb_dma_sync_for_device, and then the device again owns the buffer
 */
static void
swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
		    size_t size, int dir, int target)
{
	char *dma_addr = bus_to_virt(dev_addr);

	BUG_ON(dir == DMA_NONE);
	/* Only bounce-pool addresses require an explicit copy. */
	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
		sync_single(hwdev, dma_addr, size, dir, target);
	else if (dir == DMA_FROM_DEVICE)
		dma_mark_clean(dma_addr, size);
}
  
/* Sync a mapping so the CPU sees data the device has written. */
void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, int dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
  
/* Sync a mapping so the device sees data the CPU has written. */
void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
			       size_t size, int dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
  
/*
 * Same as above, but for a sub-range of the mapping: only 'size' bytes
 * starting 'offset' bytes into the mapped region are synced.
 */
static void
swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
			  unsigned long offset, size_t size,
			  int dir, int target)
{
	char *dma_addr = bus_to_virt(dev_addr) + offset;

	BUG_ON(dir == DMA_NONE);
	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
		sync_single(hwdev, dma_addr, size, dir, target);
	else if (dir == DMA_FROM_DEVICE)
		dma_mark_clean(dma_addr, size);
}
  
  void
  swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
  				  unsigned long offset, size_t size, int dir)
  {
de69e0f0b   John W. Linville   [PATCH] swiotlb: ...
674
675
  	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
  				  SYNC_FOR_CPU);
878a97cfd   John W. Linville   [PATCH] swiotlb: ...
676
677
678
679
680
681
  }
  
  void
  swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
  				     unsigned long offset, size_t size, int dir)
  {
de69e0f0b   John W. Linville   [PATCH] swiotlb: ...
682
683
  	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
  				  SYNC_FOR_DEVICE);
878a97cfd   John W. Linville   [PATCH] swiotlb: ...
684
  }
/* Forward declaration: the error path in swiotlb_map_sg_attrs() needs it. */
void swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
			    int nelems, int dir, struct dma_attrs *attrs);
878a97cfd   John W. Linville   [PATCH] swiotlb: ...
687
  /*
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
   * Map a set of buffers described by scatterlist in streaming mode for DMA.
   * This is the scatter-gather version of the above swiotlb_map_single
   * interface.  Here the scatter gather list elements are each tagged with the
   * appropriate dma address and length.  They are obtained via
   * sg_dma_{address,length}(SG).
   *
   * NOTE: An implementation may be able to use a smaller number of
   *       DMA address/length pairs than there are SG table elements.
   *       (for example via virtual mapping capabilities)
   *       The routine returns the number of addr/length pairs actually
   *       used, at most nents.
   *
   * Device ownership issues as mentioned above for swiotlb_map_single are the
   * same here.
   */
  int
309df0c50   Arthur Kepner   dma/ia64: update ...
704
705
  swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
  		     int dir, struct dma_attrs *attrs)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
706
  {
dbfd49fe9   Jens Axboe   swiotlb: sg chain...
707
  	struct scatterlist *sg;
25667d675   Tony Luck   Revert "[IA64] sw...
708
  	void *addr;
563aaf064   Jan Beulich   [IA64] swiotlb cl...
709
  	dma_addr_t dev_addr;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
710
  	int i;
348145458   Eric Sesterhenn   BUG_ON() Conversi...
711
  	BUG_ON(dir == DMA_NONE);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
712

dbfd49fe9   Jens Axboe   swiotlb: sg chain...
713
  	for_each_sg(sgl, sg, nelems, i) {
25667d675   Tony Luck   Revert "[IA64] sw...
714
715
716
717
  		addr = SG_ENT_VIRT_ADDRESS(sg);
  		dev_addr = virt_to_bus(addr);
  		if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
  			void *map = map_single(hwdev, addr, sg->length, dir);
7e8702334   Andi Kleen   [PATCH] Fix swiot...
718
  			if (!map) {
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
719
720
721
  				/* Don't panic here, we expect map_sg users
  				   to do proper error handling. */
  				swiotlb_full(hwdev, sg->length, dir, 0);
309df0c50   Arthur Kepner   dma/ia64: update ...
722
723
  				swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
  						       attrs);
dbfd49fe9   Jens Axboe   swiotlb: sg chain...
724
  				sgl[0].dma_length = 0;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
725
726
  				return 0;
  			}
cde14bbfb   Jan Beulich   [IA64] swiotlb bu...
727
  			sg->dma_address = virt_to_bus(map);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
728
729
730
731
732
733
  		} else
  			sg->dma_address = dev_addr;
  		sg->dma_length = sg->length;
  	}
  	return nelems;
  }
309df0c50   Arthur Kepner   dma/ia64: update ...
734
735
736
737
738
739
740
741
  EXPORT_SYMBOL(swiotlb_map_sg_attrs);
  
  int
  swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
  	       int dir)
  {
  	return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
742
743
744
745
746
747
  
  /*
   * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
   * concerning calls here are the same as for swiotlb_unmap_single() above.
   */
  void
309df0c50   Arthur Kepner   dma/ia64: update ...
748
749
  swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
  		       int nelems, int dir, struct dma_attrs *attrs)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
750
  {
dbfd49fe9   Jens Axboe   swiotlb: sg chain...
751
  	struct scatterlist *sg;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
752
  	int i;
348145458   Eric Sesterhenn   BUG_ON() Conversi...
753
  	BUG_ON(dir == DMA_NONE);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
754

dbfd49fe9   Jens Axboe   swiotlb: sg chain...
755
  	for_each_sg(sgl, sg, nelems, i) {
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
756
  		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
93fbff63e   Jan Beulich   [IA64] make swiot...
757
758
  			unmap_single(hwdev, bus_to_virt(sg->dma_address),
  				     sg->dma_length, dir);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
759
  		else if (dir == DMA_FROM_DEVICE)
cde14bbfb   Jan Beulich   [IA64] swiotlb bu...
760
  			dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
dbfd49fe9   Jens Axboe   swiotlb: sg chain...
761
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
762
  }
309df0c50   Arthur Kepner   dma/ia64: update ...
763
764
765
766
767
768
769
770
  EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
  
  void
  swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
  		 int dir)
  {
  	return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
771
772
773
774
775
776
777
778
  
  /*
   * Make physical memory consistent for a set of streaming mode DMA translations
   * after a transfer.
   *
   * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
   * and usage.
   */
be6b02678   Andrew Morton   [PATCH] swiotlb u...
779
  static void
dbfd49fe9   Jens Axboe   swiotlb: sg chain...
780
  swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
de69e0f0b   John W. Linville   [PATCH] swiotlb: ...
781
  		int nelems, int dir, int target)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
782
  {
dbfd49fe9   Jens Axboe   swiotlb: sg chain...
783
  	struct scatterlist *sg;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
784
  	int i;
348145458   Eric Sesterhenn   BUG_ON() Conversi...
785
  	BUG_ON(dir == DMA_NONE);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
786

dbfd49fe9   Jens Axboe   swiotlb: sg chain...
787
  	for_each_sg(sgl, sg, nelems, i) {
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
788
  		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
93fbff63e   Jan Beulich   [IA64] make swiot...
789
  			sync_single(hwdev, bus_to_virt(sg->dma_address),
de69e0f0b   John W. Linville   [PATCH] swiotlb: ...
790
  				    sg->dma_length, dir, target);
cde14bbfb   Jan Beulich   [IA64] swiotlb bu...
791
792
  		else if (dir == DMA_FROM_DEVICE)
  			dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
dbfd49fe9   Jens Axboe   swiotlb: sg chain...
793
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
794
795
796
  }
  
  void
8270f3f1a   John W. Linville   [PATCH] swiotlb: ...
797
798
799
  swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
  			int nelems, int dir)
  {
de69e0f0b   John W. Linville   [PATCH] swiotlb: ...
800
  	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
8270f3f1a   John W. Linville   [PATCH] swiotlb: ...
801
802
803
  }
  
  void
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
804
805
806
  swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
  			   int nelems, int dir)
  {
de69e0f0b   John W. Linville   [PATCH] swiotlb: ...
807
  	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
808
809
810
  }
  
  int
8d8bb39b9   FUJITA Tomonori   dma-mapping: add ...
811
  swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
812
  {
93fbff63e   Jan Beulich   [IA64] make swiot...
813
  	return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
814
815
816
  }
  
  /*
17e5ad6c0   Tony Luck   [PATCH] Removed r...
817
   * Return whether the given device DMA address mask can be supported
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
818
   * properly.  For example, if your device can only drive the low 24-bits
17e5ad6c0   Tony Luck   [PATCH] Removed r...
819
   * during bus mastering, then you would pass 0x00ffffff as the mask to
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
820
821
822
   * this function.
   */
  int
563aaf064   Jan Beulich   [IA64] swiotlb cl...
823
  swiotlb_dma_supported(struct device *hwdev, u64 mask)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
824
  {
25667d675   Tony Luck   Revert "[IA64] sw...
825
  	return virt_to_bus(io_tlb_end - 1) <= mask;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
826
  }
/* Single-buffer map/unmap and scatter-gather map/unmap. */
EXPORT_SYMBOL(swiotlb_map_single);
EXPORT_SYMBOL(swiotlb_unmap_single);
EXPORT_SYMBOL(swiotlb_map_sg);
EXPORT_SYMBOL(swiotlb_unmap_sg);
/* Sync helpers (the range variants are GPL-only exports). */
EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
EXPORT_SYMBOL(swiotlb_sync_single_for_device);
EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);
EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
/* Error reporting, coherent allocation, and mask support queries. */
EXPORT_SYMBOL(swiotlb_dma_mapping_error);
EXPORT_SYMBOL(swiotlb_alloc_coherent);
EXPORT_SYMBOL(swiotlb_free_coherent);
EXPORT_SYMBOL(swiotlb_dma_supported);