lib/swiotlb.c

  /*
   * Dynamic DMA mapping support.
   *
   * This implementation is a fallback for platforms that do not support
   * I/O TLBs (aka DMA address translation hardware).
   * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
   * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
   * Copyright (C) 2000, 2003 Hewlett-Packard Co
   *	David Mosberger-Tang <davidm@hpl.hp.com>
   *
   * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
   * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
   *			unnecessary i-cache flushing.
   * 04/07/.. ak		Better overflow handling. Assorted fixes.
   * 05/09/10 linville	Add support for syncing ranges, support syncing for
   *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
   * 08/12/11 beckyb	Add highmem support
   */
  
  #include <linux/cache.h>
  #include <linux/dma-mapping.h>
  #include <linux/mm.h>
  #include <linux/module.h>
  #include <linux/spinlock.h>
  #include <linux/string.h>
  #include <linux/swiotlb.h>
  #include <linux/pfn.h>
  #include <linux/types.h>
  #include <linux/ctype.h>
  #include <linux/highmem.h>
  
  #include <asm/io.h>
  #include <asm/dma.h>
  #include <asm/scatterlist.h>
  
  #include <linux/init.h>
  #include <linux/bootmem.h>
  #include <linux/iommu-helper.h>
  
  #define OFFSET(val,align) ((unsigned long)	\
  	                   ( (val) & ( (align) - 1)))
  #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
  
  /*
   * Minimum IO TLB size to bother booting with.  Systems with mainly
   * 64bit capable cards will only lightly use the swiotlb.  If we can't
   * allocate a contiguous 1MB, we're probably in trouble anyway.
   */
  #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
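
  /*
   * Illustrative numbers, assuming the IO_TLB_SHIFT of 11 defined in
   * <linux/swiotlb.h> in this era: each IO TLB slab is then 2KB, a 4KB page
   * holds SLABS_PER_PAGE == 2 slabs, and IO_TLB_MIN_SLABS works out to
   * (1 << 20) >> 11 == 512 slabs.
   */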
  /*
   * Enumeration for sync targets
   */
  enum dma_sync_target {
  	SYNC_FOR_CPU = 0,
  	SYNC_FOR_DEVICE = 1,
  };
  int swiotlb_force;
  
  /*
   * Used to do a quick range check in unmap_single and
   * sync_single_*, to see if the memory was in fact allocated by this
   * API.
   */
  static char *io_tlb_start, *io_tlb_end;
  
  /*
   * The number of IO TLB blocks (in groups of IO_TLB_SEGSIZE) between io_tlb_start and
   * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
   */
  static unsigned long io_tlb_nslabs;
  
  /*
   * When the IOMMU overflows we return a fallback buffer. This sets the size.
   */
  static unsigned long io_tlb_overflow = 32*1024;
  
  void *io_tlb_overflow_buffer;
  
  /*
   * This is a free list describing the number of free entries available from
   * each index
   */
  static unsigned int *io_tlb_list;
  static unsigned int io_tlb_index;
  
  /*
   * We need to save away the original address corresponding to a mapped entry
   * for the sync operations.
   */
  static phys_addr_t *io_tlb_orig_addr;
  
  /*
   * Protect the above data structures in the map and unmap calls
   */
  static DEFINE_SPINLOCK(io_tlb_lock);
  
  static int __init
  setup_io_tlb_npages(char *str)
  {
  	if (isdigit(*str)) {
  		io_tlb_nslabs = simple_strtoul(str, &str, 0);
  		/* avoid tail segment of size < IO_TLB_SEGSIZE */
  		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
  	}
  	if (*str == ',')
  		++str;
  	if (!strcmp(str, "force"))
  		swiotlb_force = 1;
  	return 1;
  }
  __setup("swiotlb=", setup_io_tlb_npages);
  /* make io_tlb_overflow tunable too? */
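
  /*
   * Example command-line usage (values are illustrative only):
   *
   *	swiotlb=32768		reserve 32768 slabs for bouncing
   *	swiotlb=32768,force	additionally bounce every mapping, even when
   *				the device could reach the buffer directly
   *	swiotlb=force		keep the default size but force bouncing
   */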
  /* Note that this doesn't work with highmem pages */
  static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
  				      volatile void *address)
  {
  	return phys_to_dma(hwdev, virt_to_phys(address));
  }
  static void swiotlb_print_info(unsigned long bytes)
  {
  	phys_addr_t pstart, pend;
  
  	pstart = virt_to_phys(io_tlb_start);
  	pend = virt_to_phys(io_tlb_end);
  	printk(KERN_INFO "Placing %luMB software IO TLB between %p - %p\n",
  	       bytes >> 20, io_tlb_start, io_tlb_end);
  	printk(KERN_INFO "software IO TLB at phys %#llx - %#llx\n",
  	       (unsigned long long)pstart,
  	       (unsigned long long)pend);
  }
  /*
   * Statically reserve bounce buffer space and initialize bounce buffer data
   * structures for the software IO TLB used to implement the DMA API.
   */
  void __init
  swiotlb_init_with_default_size(size_t default_size)
  {
  	unsigned long i, bytes;
  
  	if (!io_tlb_nslabs) {
  		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
  		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
  	}
  	bytes = io_tlb_nslabs << IO_TLB_SHIFT;
  	/*
  	 * Get IO TLB memory from the low pages
  	 */
  	io_tlb_start = alloc_bootmem_low_pages(bytes);
  	if (!io_tlb_start)
  		panic("Cannot allocate SWIOTLB buffer");
  	io_tlb_end = io_tlb_start + bytes;
  
  	/*
  	 * Allocate and initialize the free list array.  This array is used
  	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
  	 * between io_tlb_start and io_tlb_end.
  	 */
  	io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
  	for (i = 0; i < io_tlb_nslabs; i++)
   		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
  	io_tlb_index = 0;
  	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(phys_addr_t));
  
  	/*
  	 * Get the overflow emergency buffer
  	 */
  	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
  	if (!io_tlb_overflow_buffer)
  		panic("Cannot allocate SWIOTLB overflow buffer!\n");
  	swiotlb_print_info(bytes);
  }
  void __init
  swiotlb_init(void)
  {
  	swiotlb_init_with_default_size(64 * (1<<20));	/* default to 64MB */
  }
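
  /*
   * For scale (assuming IO_TLB_SHIFT == 11 and IO_TLB_SEGSIZE == 128, as in
   * <linux/swiotlb.h> of this era): the 64MB default above corresponds to
   * (64 << 20) >> 11 == 32768 slabs, i.e. 256 segments of 128 slabs each.
   */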
  /*
   * Systems with larger DMA zones (those that don't support ISA) can
   * initialize the swiotlb later using the slab allocator if needed.
   * This should be just like above, but with some error catching.
   */
  int
  swiotlb_late_init_with_default_size(size_t default_size)
  {
  	unsigned long i, bytes, req_nslabs = io_tlb_nslabs;
  	unsigned int order;
  
  	if (!io_tlb_nslabs) {
  		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
  		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
  	}
  
  	/*
  	 * Get IO TLB memory from the low pages
  	 */
  	order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
  	io_tlb_nslabs = SLABS_PER_PAGE << order;
  	bytes = io_tlb_nslabs << IO_TLB_SHIFT;
  
  	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
  		io_tlb_start = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
  							order);
  		if (io_tlb_start)
  			break;
  		order--;
  	}
  
  	if (!io_tlb_start)
  		goto cleanup1;
  	if (order != get_order(bytes)) {
  		printk(KERN_WARNING "Warning: only able to allocate %ld MB "
  		       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
  		io_tlb_nslabs = SLABS_PER_PAGE << order;
  		bytes = io_tlb_nslabs << IO_TLB_SHIFT;
  	}
  	io_tlb_end = io_tlb_start + bytes;
  	memset(io_tlb_start, 0, bytes);
  
  	/*
  	 * Allocate and initialize the free list array.  This array is used
  	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
  	 * between io_tlb_start and io_tlb_end.
  	 */
  	io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
  	                              get_order(io_tlb_nslabs * sizeof(int)));
  	if (!io_tlb_list)
  		goto cleanup2;
  
  	for (i = 0; i < io_tlb_nslabs; i++)
   		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
  	io_tlb_index = 0;
  	io_tlb_orig_addr = (phys_addr_t *)
  		__get_free_pages(GFP_KERNEL,
  				 get_order(io_tlb_nslabs *
  					   sizeof(phys_addr_t)));
  	if (!io_tlb_orig_addr)
  		goto cleanup3;
  	memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(phys_addr_t));
  
  	/*
  	 * Get the overflow emergency buffer
  	 */
  	io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
  	                                          get_order(io_tlb_overflow));
  	if (!io_tlb_overflow_buffer)
  		goto cleanup4;
  	swiotlb_print_info(bytes);
  
  	return 0;
  
  cleanup4:
  	free_pages((unsigned long)io_tlb_orig_addr,
  		   get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
  	io_tlb_orig_addr = NULL;
  cleanup3:
  	free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
  	                                                 sizeof(int)));
  	io_tlb_list = NULL;
  cleanup2:
  	io_tlb_end = NULL;
  	free_pages((unsigned long)io_tlb_start, order);
  	io_tlb_start = NULL;
  cleanup1:
  	io_tlb_nslabs = req_nslabs;
  	return -ENOMEM;
  }
  static int is_swiotlb_buffer(phys_addr_t paddr)
  {
  	return paddr >= virt_to_phys(io_tlb_start) &&
  		paddr < virt_to_phys(io_tlb_end);
  }
  /*
   * Bounce: copy the swiotlb buffer back to the original dma location
   */
  static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
  			   enum dma_data_direction dir)
  {
  	unsigned long pfn = PFN_DOWN(phys);
  
  	if (PageHighMem(pfn_to_page(pfn))) {
  		/* The buffer does not have a mapping.  Map it in and copy */
  		unsigned int offset = phys & ~PAGE_MASK;
  		char *buffer;
  		unsigned int sz = 0;
  		unsigned long flags;
  
  		while (size) {
  			sz = min_t(size_t, PAGE_SIZE - offset, size);
  
  			local_irq_save(flags);
  			buffer = kmap_atomic(pfn_to_page(pfn),
  					     KM_BOUNCE_READ);
  			if (dir == DMA_TO_DEVICE)
  				memcpy(dma_addr, buffer + offset, sz);
  			else
  				memcpy(buffer + offset, dma_addr, sz);
  			kunmap_atomic(buffer, KM_BOUNCE_READ);
  			local_irq_restore(flags);
  
  			size -= sz;
  			pfn++;
  			dma_addr += sz;
  			offset = 0;
  		}
  	} else {
  		if (dir == DMA_TO_DEVICE)
  			memcpy(dma_addr, phys_to_virt(phys), size);
  		else
  			memcpy(phys_to_virt(phys), dma_addr, size);
  	}
  }
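
  /*
   * Worked example of the highmem path above (illustrative, assuming 4KB
   * pages): for a buffer that starts 3KB into a highmem page with
   * size == 2KB, the first pass copies PAGE_SIZE - offset == 1KB through a
   * temporary kmap_atomic() mapping, then pfn is advanced, offset is reset
   * to 0, and the second pass copies the remaining 1KB from the next page.
   */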
  /*
   * Allocates bounce buffer and returns its kernel virtual address.
   */
  static void *
  map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir)
  {
  	unsigned long flags;
  	char *dma_addr;
  	unsigned int nslots, stride, index, wrap;
  	int i;
  	unsigned long start_dma_addr;
  	unsigned long mask;
  	unsigned long offset_slots;
  	unsigned long max_slots;
  
  	mask = dma_get_seg_boundary(hwdev);
  	start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start) & mask;
  
  	offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
  
  	/*
   	 * Carefully handle integer overflow which can occur when mask == ~0UL.
   	 */
  	max_slots = mask + 1
  		    ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
  		    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
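  	/*
  	 * (When mask == ~0UL, "mask + 1" wraps to 0 above, so the second arm
  	 * of the conditional supplies the largest representable slot count
  	 * instead of computing it from a wrapped-around window size.)
  	 */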
  
  	/*
  	 * For mappings greater than a page, we limit the stride (and
  	 * hence alignment) to a page size.
  	 */
  	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
  	if (size > PAGE_SIZE)
  		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
  	else
  		stride = 1;
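  	/*
  	 * E.g. (illustrative, with 2KB slabs and 4KB pages): a 6KB request
  	 * needs nslots == 3 and, being larger than a page, uses stride == 2
  	 * so that the candidate slots searched below stay page aligned.
  	 */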
  	BUG_ON(!nslots);
  
  	/*
  	 * Find suitable number of IO TLB entries size that will fit this
  	 * request and allocate a buffer from that IO TLB pool.
  	 */
  	spin_lock_irqsave(&io_tlb_lock, flags);
  	index = ALIGN(io_tlb_index, stride);
  	if (index >= io_tlb_nslabs)
  		index = 0;
  	wrap = index;
  
  	do {
  		while (iommu_is_span_boundary(index, nslots, offset_slots,
  					      max_slots)) {
  			index += stride;
  			if (index >= io_tlb_nslabs)
  				index = 0;
  			if (index == wrap)
  				goto not_found;
  		}
  
  		/*
  		 * If we find a slot that indicates we have 'nslots' number of
  		 * contiguous buffers, we allocate the buffers from that slot
  		 * and mark the entries as '0' indicating unavailable.
  		 */
  		if (io_tlb_list[index] >= nslots) {
  			int count = 0;
  
  			for (i = index; i < (int) (index + nslots); i++)
  				io_tlb_list[i] = 0;
  			for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
  				io_tlb_list[i] = ++count;
  			dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);

  			/*
  			 * Update the indices to avoid searching in the next
  			 * round.
  			 */
  			io_tlb_index = ((index + nslots) < io_tlb_nslabs
  					? (index + nslots) : 0);
  
  			goto found;
  		}
  		index += stride;
  		if (index >= io_tlb_nslabs)
  			index = 0;
  	} while (index != wrap);
  
  not_found:
  	spin_unlock_irqrestore(&io_tlb_lock, flags);
  	return NULL;
  found:
  	spin_unlock_irqrestore(&io_tlb_lock, flags);
  
  	/*
  	 * Save away the mapping from the original address to the DMA address.
  	 * This is needed when we sync the memory.  Then we sync the buffer if
  	 * needed.
  	 */
  	for (i = 0; i < nslots; i++)
  		io_tlb_orig_addr[index+i] = phys + (i << IO_TLB_SHIFT);
  	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
  		swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);
  
  	return dma_addr;
  }
  
  /*
   * dma_addr is the kernel virtual address of the bounce buffer to unmap.
   */
  static void
  do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
  {
  	unsigned long flags;
  	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
  	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
  	phys_addr_t phys = io_tlb_orig_addr[index];
  
  	/*
  	 * First, sync the memory before unmapping the entry
  	 */
  	if (phys && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
  		swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);
  
  	/*
  	 * Return the buffer to the free list by setting the corresponding
  	 * entries to indicate the number of contiguous entries available.
  	 * While returning the entries to the free list, we merge the entries
  	 * with slots below and above the pool being returned.
  	 */
  	spin_lock_irqsave(&io_tlb_lock, flags);
  	{
  		count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
  			 io_tlb_list[index + nslots] : 0);
  		/*
  		 * Step 1: return the slots to the free list, merging the
  		 * slots with succeeding slots.
  		 */
  		for (i = index + nslots - 1; i >= index; i--)
  			io_tlb_list[i] = ++count;
  		/*
  		 * Step 2: merge the returned slots with the preceding slots,
  		 * if available (non zero)
  		 */
  		for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--)
  			io_tlb_list[i] = ++count;
  	}
  	spin_unlock_irqrestore(&io_tlb_lock, flags);
  }
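
  /*
   * Free-list invariant used by map_single() and do_unmap_single() above
   * (worked example, assuming IO_TLB_SEGSIZE == 128): io_tlb_list[i] holds
   * the number of contiguous free slots starting at slot i, counted only up
   * to the next IO_TLB_SEGSIZE boundary.  A fully free segment reads
   * 128, 127, ..., 2, 1; allocating three slots at its start leaves
   * 0, 0, 0, 125, 124, ..., 1; freeing them restores the original counts.
   */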
  
  static void
  sync_single(struct device *hwdev, char *dma_addr, size_t size,
  	    int dir, int target)
  {
  	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
  	phys_addr_t phys = io_tlb_orig_addr[index];
  
  	phys += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1));

  	switch (target) {
  	case SYNC_FOR_CPU:
  		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
  			swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);
  		else
  			BUG_ON(dir != DMA_TO_DEVICE);
  		break;
  	case SYNC_FOR_DEVICE:
  		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
  			swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);
  		else
  			BUG_ON(dir != DMA_FROM_DEVICE);
  		break;
  	default:
  		BUG();
  	}
  }
  
  void *
  swiotlb_alloc_coherent(struct device *hwdev, size_t size,
  		       dma_addr_t *dma_handle, gfp_t flags)
  {
  	dma_addr_t dev_addr;
  	void *ret;
  	int order = get_order(size);
  	u64 dma_mask = DMA_BIT_MASK(32);
  
  	if (hwdev && hwdev->coherent_dma_mask)
  		dma_mask = hwdev->coherent_dma_mask;

  	ret = (void *)__get_free_pages(flags, order);
  	if (ret && swiotlb_virt_to_bus(hwdev, ret) + size > dma_mask) {
  		/*
  		 * The allocated memory isn't reachable by the device.
  		 */
  		free_pages((unsigned long) ret, order);
  		ret = NULL;
  	}
  	if (!ret) {
  		/*
  		 * We are either out of memory or the device can't DMA
  		 * to GFP_DMA memory; fall back on map_single(), which
  		 * will grab memory from the lowest available address range.
  		 */
  		ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
  		if (!ret)
  			return NULL;
  	}
  
  	memset(ret, 0, size);
  	dev_addr = swiotlb_virt_to_bus(hwdev, ret);
  
  	/* Confirm address can be DMA'd by device */
  	if (dev_addr + size > dma_mask) {
  	printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
  		       (unsigned long long)dma_mask,
  		       (unsigned long long)dev_addr);
  
  		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
  		do_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
  		return NULL;
  	}
  	*dma_handle = dev_addr;
  	return ret;
  }
  EXPORT_SYMBOL(swiotlb_alloc_coherent);
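
  /*
   * Note on the fallback path above: when the page allocator cannot provide
   * memory the device can reach, the coherent buffer is carved out of the
   * bounce pool via map_single().  Because the free-list counters never
   * exceed IO_TLB_SEGSIZE, such an allocation is limited to one segment
   * (256KB, assuming the usual IO_TLB_SEGSIZE of 128 and 2KB slabs).
   */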
  
  void
  swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
  		      dma_addr_t dev_addr)
  {
  	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

  	WARN_ON(irqs_disabled());
  	if (!is_swiotlb_buffer(paddr))
  		free_pages((unsigned long)vaddr, get_order(size));
  	else
  		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
  		do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
  }
  EXPORT_SYMBOL(swiotlb_free_coherent);
  
  static void
  swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
  {
  	/*
  	 * Ran out of IOMMU space for this operation. This is very bad.
  	 * Unfortunately the drivers cannot handle this operation properly
  	 * unless they check for dma_mapping_error() (most don't).
  	 * When the mapping is small enough, return a static buffer to limit
  	 * the damage, or panic when the transfer is too big.
  	 */
  	printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
  	       "device %s\n", size, dev ? dev_name(dev) : "?");

  	if (size <= io_tlb_overflow || !do_panic)
  		return;
  
  	if (dir == DMA_BIDIRECTIONAL)
  		panic("DMA: Random memory could be DMA accessed\n");
  	if (dir == DMA_FROM_DEVICE)
  		panic("DMA: Random memory could be DMA written\n");
  	if (dir == DMA_TO_DEVICE)
  		panic("DMA: Random memory could be DMA read\n");
  }
  
  /*
   * Map a single buffer of the indicated size for DMA in streaming mode.  The
   * physical address to use is returned.
   *
   * Once the device is given the dma address, the device owns this memory until
   * either swiotlb_unmap_page or swiotlb_dma_sync_single is performed.
   */
  dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
  			    unsigned long offset, size_t size,
  			    enum dma_data_direction dir,
  			    struct dma_attrs *attrs)
  {
  	phys_addr_t phys = page_to_phys(page) + offset;
  	dma_addr_t dev_addr = phys_to_dma(dev, phys);
  	void *map;
  	BUG_ON(dir == DMA_NONE);
  	/*
  	 * If the address happens to be in the device's DMA window,
  	 * we can safely return the device addr and not worry about bounce
  	 * buffering it.
  	 */
  	if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
  		return dev_addr;
  
  	/*
  	 * Oh well, have to allocate and map a bounce buffer.
  	 */
  	map = map_single(dev, phys, size, dir);
  	if (!map) {
  		swiotlb_full(dev, size, dir, 1);
  		map = io_tlb_overflow_buffer;
  	}
  	dev_addr = swiotlb_virt_to_bus(dev, map);
  
  	/*
  	 * Ensure that the address returned is DMA'ble
  	 */
  	if (!dma_capable(dev, dev_addr, size))
  		panic("map_single: bounce buffer is not DMA'ble");
  
  	return dev_addr;
  }
  EXPORT_SYMBOL_GPL(swiotlb_map_page);
  
  /*
   * Unmap a single streaming mode DMA translation.  The dma_addr and size must
   * match what was provided for in a previous swiotlb_map_page call.  All
   * other usages are undefined.
   *
   * After this call, reads by the cpu to the buffer are guaranteed to see
   * whatever the device wrote there.
   */
  static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
  			 size_t size, int dir)
  {
  	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

  	BUG_ON(dir == DMA_NONE);

  	if (is_swiotlb_buffer(paddr)) {
  		do_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
  		return;
  	}
  
  	if (dir != DMA_FROM_DEVICE)
  		return;
  	/*
  	 * phys_to_virt doesn't work with highmem pages, but we could
  	 * call dma_mark_clean() with a highmem page here. However, we
  	 * are fine since dma_mark_clean() is a no-op on POWERPC. We can
  	 * make dma_mark_clean() take a physical address if necessary.
  	 */
  	dma_mark_clean(phys_to_virt(paddr), size);
  }
  
  void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
  			size_t size, enum dma_data_direction dir,
  			struct dma_attrs *attrs)
  {
  	unmap_single(hwdev, dev_addr, size, dir);
  }
  EXPORT_SYMBOL_GPL(swiotlb_unmap_page);

  /*
   * Make physical memory consistent for a single streaming mode DMA translation
   * after a transfer.
   *
   * If you perform a swiotlb_map_page() but wish to interrogate the buffer
   * using the cpu, yet do not wish to tear down the dma mapping, you must
   * call this function before doing so.  At the next point you give the dma
   * address back to the card, you must first perform a
   * swiotlb_dma_sync_for_device, and then the device again owns the buffer
   */
  static void
  swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
  		    size_t size, int dir, int target)
  {
  	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

  	BUG_ON(dir == DMA_NONE);

  	if (is_swiotlb_buffer(paddr)) {
  		sync_single(hwdev, phys_to_virt(paddr), size, dir, target);
  		return;
  	}
  
  	if (dir != DMA_FROM_DEVICE)
  		return;
  	dma_mark_clean(phys_to_virt(paddr), size);
  }
  
  void
  swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
  			    size_t size, enum dma_data_direction dir)
  {
  	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
  }
  EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
  
  void
  swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
  			       size_t size, enum dma_data_direction dir)
  {
  	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
  }
  EXPORT_SYMBOL(swiotlb_sync_single_for_device);
  
  /*
   * Same as above, but for a sub-range of the mapping.
   */
  static void
  swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
  			  unsigned long offset, size_t size,
  			  int dir, int target)
  {
  	swiotlb_sync_single(hwdev, dev_addr + offset, size, dir, target);
  }
  
  void
  swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
  				  unsigned long offset, size_t size,
  				  enum dma_data_direction dir)
  {
  	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
  				  SYNC_FOR_CPU);
  }
  EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);
  
  void
  swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
  				     unsigned long offset, size_t size,
  				     enum dma_data_direction dir)
  {
  	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
  				  SYNC_FOR_DEVICE);
  }
  EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
  
  /*
   * Map a set of buffers described by scatterlist in streaming mode for DMA.
   * This is the scatter-gather version of the above swiotlb_map_page
   * interface.  Here the scatter gather list elements are each tagged with the
   * appropriate dma address and length.  They are obtained via
   * sg_dma_{address,length}(SG).
   *
   * NOTE: An implementation may be able to use a smaller number of
   *       DMA address/length pairs than there are SG table elements.
   *       (for example via virtual mapping capabilities)
   *       The routine returns the number of addr/length pairs actually
   *       used, at most nents.
   *
   * Device ownership issues as mentioned above for swiotlb_map_page are the
   * same here.
   */
  int
  swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
  		     enum dma_data_direction dir, struct dma_attrs *attrs)
  {
  	struct scatterlist *sg;
  	int i;
  	BUG_ON(dir == DMA_NONE);

  	for_each_sg(sgl, sg, nelems, i) {
  		phys_addr_t paddr = sg_phys(sg);
  		dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);

  		if (swiotlb_force ||
  		    !dma_capable(hwdev, dev_addr, sg->length)) {
  			void *map = map_single(hwdev, sg_phys(sg),
  					       sg->length, dir);
  			if (!map) {
  				/* Don't panic here, we expect map_sg users
  				   to do proper error handling. */
  				swiotlb_full(hwdev, sg->length, dir, 0);
  				swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
  						       attrs);
  				sgl[0].dma_length = 0;
  				return 0;
  			}
  			sg->dma_address = swiotlb_virt_to_bus(hwdev, map);
  		} else
  			sg->dma_address = dev_addr;
  		sg->dma_length = sg->length;
  	}
  	return nelems;
  }
  EXPORT_SYMBOL(swiotlb_map_sg_attrs);
  
  int
  swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
  	       int dir)
  {
  	return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
  }
  EXPORT_SYMBOL(swiotlb_map_sg);
  
  /*
   * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
   * concerning calls here are the same as for swiotlb_unmap_page() above.
   */
  void
  swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
  		       int nelems, enum dma_data_direction dir, struct dma_attrs *attrs)
  {
  	struct scatterlist *sg;
  	int i;
  	BUG_ON(dir == DMA_NONE);

  	for_each_sg(sgl, sg, nelems, i)
  		unmap_single(hwdev, sg->dma_address, sg->dma_length, dir);
  }
  EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
  
  void
  swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
  		 int dir)
  {
  	return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
  }
  EXPORT_SYMBOL(swiotlb_unmap_sg);
  
  /*
   * Make physical memory consistent for a set of streaming mode DMA translations
   * after a transfer.
   *
   * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
   * and usage.
   */
  static void
  swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
  		int nelems, int dir, int target)
  {
  	struct scatterlist *sg;
  	int i;
  	for_each_sg(sgl, sg, nelems, i)
  		swiotlb_sync_single(hwdev, sg->dma_address,
  				    sg->dma_length, dir, target);
  }
  
  void
  swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
  			int nelems, enum dma_data_direction dir)
  {
  	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
  }
  EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
  
  void
  swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
  			   int nelems, enum dma_data_direction dir)
  {
  	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
  }
  EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
  
  int
  swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
  {
  	return (dma_addr == swiotlb_virt_to_bus(hwdev, io_tlb_overflow_buffer));
  }
  EXPORT_SYMBOL(swiotlb_dma_mapping_error);
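
  /*
   * This works because swiotlb_map_page() hands back the bus address of
   * io_tlb_overflow_buffer when it cannot find a bounce slot, so a driver
   * that checks dma_mapping_error() on the returned handle can detect the
   * failure instead of letting the device DMA into the emergency buffer.
   */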
  
  /*
   * Return whether the given device DMA address mask can be supported
   * properly.  For example, if your device can only drive the low 24-bits
   * during bus mastering, then you would pass 0x00ffffff as the mask to
   * this function.
   */
  int
  swiotlb_dma_supported(struct device *hwdev, u64 mask)
  {
  	return swiotlb_virt_to_bus(hwdev, io_tlb_end - 1) <= mask;
  }
  EXPORT_SYMBOL(swiotlb_dma_supported);