Commit 681cc5cd3efbeafca6386114070e0bfb5012e249
Committed by: Linus Torvalds
1 parent: 59fc67dedb
Exists in: master and 7 other branches
iommu sg merging: swiotlb: respect the segment boundary limits
This patch makes swiotlb not allocate a memory area that spans the
low-level driver's (LLD's) segment boundary.  is_span_boundary() judges
whether a memory area spans the LLD's segment boundary.  If map_single()
finds such an area, it tries the next available memory area instead.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: James Bottomley <James.Bottomley@steeleye.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Greg KH <greg@kroah.com>
Cc: Jeff Garzik <jeff@garzik.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
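To see what the new helper computes, here is a minimal, self-contained
sketch (not the kernel code itself; the 64 KB boundary and the slot
numbers are assumed example values, with IO_TLB_SHIFT = 11, i.e. 2 KB
slots, as in lib/swiotlb.c):

#include <stdio.h>

/* Mirrors the helper added by this patch: with max_slots a power of
 * two, (offset_slots + index) & (max_slots - 1) is the slot offset
 * within the current segment; the allocation crosses a boundary if
 * that offset plus the allocation size runs past max_slots. */
static inline unsigned int is_span_boundary(unsigned int index,
					    unsigned int nslots,
					    unsigned long offset_slots,
					    unsigned long max_slots)
{
	unsigned long offset = (offset_slots + index) & (max_slots - 1);
	return offset + nslots > max_slots;
}

int main(void)
{
	/* Assumed example: 2 KB slots and a 64 KB segment boundary,
	 * so max_slots = 64 KB / 2 KB = 32 slots per segment. */
	unsigned long max_slots = 32;
	unsigned long offset_slots = 0; /* buffer starts on a boundary */

	/* Slots 28..35 would straddle the boundary at slot 32 ... */
	printf("%u\n", is_span_boundary(28, 8, offset_slots, max_slots)); /* 1 */
	/* ... while slots 24..31 fit within one segment. */
	printf("%u\n", is_span_boundary(24, 8, offset_slots, max_slots)); /* 0 */
	return 0;
}

Because segment boundary masks have the form 2^n - 1, max_slots is a
power of two and the offset within a segment reduces to a single AND,
so no division is needed on each probe.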
1 file changed, 35 insertions(+), 6 deletions(-)
lib/swiotlb.c
@@ -282,6 +282,15 @@
 	return (addr & ~mask) != 0;
 }
 
+static inline unsigned int is_span_boundary(unsigned int index,
+					    unsigned int nslots,
+					    unsigned long offset_slots,
+					    unsigned long max_slots)
+{
+	unsigned long offset = (offset_slots + index) & (max_slots - 1);
+	return offset + nslots > max_slots;
+}
+
 /*
  * Allocates bounce buffer and returns its kernel virtual address.
  */
@@ -292,7 +301,17 @@
 	char *dma_addr;
 	unsigned int nslots, stride, index, wrap;
 	int i;
+	unsigned long start_dma_addr;
+	unsigned long mask;
+	unsigned long offset_slots;
+	unsigned long max_slots;
 
+	mask = dma_get_seg_boundary(hwdev);
+	start_dma_addr = virt_to_bus(io_tlb_start) & mask;
+
+	offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+	max_slots = ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+
 	/*
 	 * For mappings greater than a page, we limit the stride (and
 	 * hence alignment) to a page size.
@@ -311,11 +330,18 @@
 	 */
 	spin_lock_irqsave(&io_tlb_lock, flags);
 	{
-		wrap = index = ALIGN(io_tlb_index, stride);
-
+		index = ALIGN(io_tlb_index, stride);
 		if (index >= io_tlb_nslabs)
-			wrap = index = 0;
+			index = 0;
 
+		while (is_span_boundary(index, nslots, offset_slots,
+					max_slots)) {
+			index += stride;
+			if (index >= io_tlb_nslabs)
+				index = 0;
+		}
+		wrap = index;
+
 		do {
 			/*
 			 * If we find a slot that indicates we have 'nslots'
@@ -341,9 +367,12 @@
 
 				goto found;
 			}
-			index += stride;
-			if (index >= io_tlb_nslabs)
-				index = 0;
+			do {
+				index += stride;
+				if (index >= io_tlb_nslabs)
+					index = 0;
+			} while (is_span_boundary(index, nslots, offset_slots,
+						  max_slots));
 		} while (index != wrap);
 
 		spin_unlock_irqrestore(&io_tlb_lock, flags);
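For a concrete feel for the two derived quantities, here is a small
standalone sketch of the offset_slots/max_slots arithmetic (the
boundary mask and bounce-buffer address are assumed example values,
not taken from a real machine):

#include <stdio.h>

#define IO_TLB_SHIFT 11			/* 2 KB slots, as in lib/swiotlb.c */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	/* Assumed example: a 64 KB segment boundary (mask 0xffff) and a
	 * bounce buffer at bus address 0x12345000. */
	unsigned long mask = 0xffff;
	unsigned long start_dma_addr = 0x12345000UL & mask;	/* 0x5000 */

	unsigned long offset_slots =
		ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	unsigned long max_slots =
		ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;

	/* offset_slots: slot offset of the bounce buffer within its
	 * segment (0x5000 / 2 KB = 10); max_slots: slots per segment
	 * (64 KB / 2 KB = 32). */
	printf("offset_slots = %lu\n", offset_slots);	/* 10 */
	printf("max_slots    = %lu\n", max_slots);	/* 32 */
	return 0;
}

With these values, the search loops above skip any candidate index
for which slot (10 + index) mod 32 plus the allocation size would run
past a 32-slot segment, which is exactly the condition
is_span_boundary() tests.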