Commit f94181da7192f4ed8ccb1b633ea4ce56954df130
Merge branch 'core-fixes-for-linus' of
git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  rcu: fix rcutorture bug
  rcu: eliminate synchronize_rcu_xxx macro
  rcu: make treercu safe for suspend and resume
  rcu: fix rcutree grace-period-latency bug on small systems
  futex: catch certain assymetric (get|put)_futex_key calls
  futex: make futex_(get|put)_key() calls symmetric
  locking, percpu counters: introduce separate lock classes
  swiotlb: clean up EXPORT_SYMBOL usage
  swiotlb: remove unnecessary declaration
  swiotlb: replace architecture-specific swiotlb.h with linux/swiotlb.h
  swiotlb: add support for systems with highmem
  swiotlb: store phys address in io_tlb_orig_addr array
  swiotlb: add hwdev to swiotlb_phys_to_bus() / swiotlb_sg_to_bus()
Showing 15 changed files:
- arch/ia64/include/asm/swiotlb.h
- arch/x86/include/asm/swiotlb.h
- arch/x86/kernel/pci-swiotlb_64.c
- include/linux/percpu_counter.h
- include/linux/rcupdate.h
- include/linux/swiotlb.h
- kernel/futex.c
- kernel/rcupdate.c
- kernel/rcupreempt.c
- kernel/rcutorture.c
- kernel/rcutree.c
- lib/percpu_counter.c
- lib/proportions.c
- lib/swiotlb.c
- mm/backing-dev.c
arch/ia64/include/asm/swiotlb.h
... | ... | @@ -2,44 +2,7 @@ |
2 | 2 | #define ASM_IA64__SWIOTLB_H |
3 | 3 | |
4 | 4 | #include <linux/dma-mapping.h> |
5 | - | |
6 | -/* SWIOTLB interface */ | |
7 | - | |
8 | -extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr, | |
9 | - size_t size, int dir); | |
10 | -extern void *swiotlb_alloc_coherent(struct device *hwdev, size_t size, | |
11 | - dma_addr_t *dma_handle, gfp_t flags); | |
12 | -extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, | |
13 | - size_t size, int dir); | |
14 | -extern void swiotlb_sync_single_for_cpu(struct device *hwdev, | |
15 | - dma_addr_t dev_addr, | |
16 | - size_t size, int dir); | |
17 | -extern void swiotlb_sync_single_for_device(struct device *hwdev, | |
18 | - dma_addr_t dev_addr, | |
19 | - size_t size, int dir); | |
20 | -extern void swiotlb_sync_single_range_for_cpu(struct device *hwdev, | |
21 | - dma_addr_t dev_addr, | |
22 | - unsigned long offset, | |
23 | - size_t size, int dir); | |
24 | -extern void swiotlb_sync_single_range_for_device(struct device *hwdev, | |
25 | - dma_addr_t dev_addr, | |
26 | - unsigned long offset, | |
27 | - size_t size, int dir); | |
28 | -extern void swiotlb_sync_sg_for_cpu(struct device *hwdev, | |
29 | - struct scatterlist *sg, int nelems, | |
30 | - int dir); | |
31 | -extern void swiotlb_sync_sg_for_device(struct device *hwdev, | |
32 | - struct scatterlist *sg, int nelems, | |
33 | - int dir); | |
34 | -extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, | |
35 | - int nents, int direction); | |
36 | -extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, | |
37 | - int nents, int direction); | |
38 | -extern int swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr); | |
39 | -extern void swiotlb_free_coherent(struct device *hwdev, size_t size, | |
40 | - void *vaddr, dma_addr_t dma_handle); | |
41 | -extern int swiotlb_dma_supported(struct device *hwdev, u64 mask); | |
42 | -extern void swiotlb_init(void); | |
5 | +#include <linux/swiotlb.h> | |
43 | 6 | |
44 | 7 | extern int swiotlb_force; |
45 | 8 |
arch/x86/include/asm/swiotlb.h
1 | 1 | #ifndef _ASM_X86_SWIOTLB_H |
2 | 2 | #define _ASM_X86_SWIOTLB_H |
3 | 3 | |
4 | -#include <asm/dma-mapping.h> | |
4 | +#include <linux/swiotlb.h> | |
5 | 5 | |
6 | 6 | /* SWIOTLB interface */ |
7 | - | |
8 | -extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr, | |
9 | - size_t size, int dir); | |
10 | -extern void *swiotlb_alloc_coherent(struct device *hwdev, size_t size, | |
11 | - dma_addr_t *dma_handle, gfp_t flags); | |
12 | -extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, | |
13 | - size_t size, int dir); | |
14 | -extern void swiotlb_sync_single_for_cpu(struct device *hwdev, | |
15 | - dma_addr_t dev_addr, | |
16 | - size_t size, int dir); | |
17 | -extern void swiotlb_sync_single_for_device(struct device *hwdev, | |
18 | - dma_addr_t dev_addr, | |
19 | - size_t size, int dir); | |
20 | -extern void swiotlb_sync_single_range_for_cpu(struct device *hwdev, | |
21 | - dma_addr_t dev_addr, | |
22 | - unsigned long offset, | |
23 | - size_t size, int dir); | |
24 | -extern void swiotlb_sync_single_range_for_device(struct device *hwdev, | |
25 | - dma_addr_t dev_addr, | |
26 | - unsigned long offset, | |
27 | - size_t size, int dir); | |
28 | -extern void swiotlb_sync_sg_for_cpu(struct device *hwdev, | |
29 | - struct scatterlist *sg, int nelems, | |
30 | - int dir); | |
31 | -extern void swiotlb_sync_sg_for_device(struct device *hwdev, | |
32 | - struct scatterlist *sg, int nelems, | |
33 | - int dir); | |
34 | -extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, | |
35 | - int nents, int direction); | |
36 | -extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, | |
37 | - int nents, int direction); | |
38 | -extern int swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr); | |
39 | -extern void swiotlb_free_coherent(struct device *hwdev, size_t size, | |
40 | - void *vaddr, dma_addr_t dma_handle); | |
41 | -extern int swiotlb_dma_supported(struct device *hwdev, u64 mask); | |
42 | -extern void swiotlb_init(void); | |
43 | 7 | |
44 | 8 | extern int swiotlb_force; |
45 | 9 |
arch/x86/kernel/pci-swiotlb_64.c
include/linux/percpu_counter.h
... | ... | @@ -26,8 +26,16 @@ |
26 | 26 | |
27 | 27 | extern int percpu_counter_batch; |
28 | 28 | |
29 | -int percpu_counter_init(struct percpu_counter *fbc, s64 amount); | |
30 | -int percpu_counter_init_irq(struct percpu_counter *fbc, s64 amount); | |
29 | +int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, | |
30 | + struct lock_class_key *key); | |
31 | + | |
32 | +#define percpu_counter_init(fbc, value) \ | |
33 | + ({ \ | |
34 | + static struct lock_class_key __key; \ | |
35 | + \ | |
36 | + __percpu_counter_init(fbc, value, &__key); \ | |
37 | + }) | |
38 | + | |
31 | 39 | void percpu_counter_destroy(struct percpu_counter *fbc); |
32 | 40 | void percpu_counter_set(struct percpu_counter *fbc, s64 amount); |
33 | 41 | void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch); |
... | ... | @@ -80,8 +88,6 @@ |
80 | 88 | fbc->count = amount; |
81 | 89 | return 0; |
82 | 90 | } |
83 | - | |
84 | -#define percpu_counter_init_irq percpu_counter_init | |
85 | 91 | |
86 | 92 | static inline void percpu_counter_destroy(struct percpu_counter *fbc) |
87 | 93 | { |
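
A note on the percpu_counter_init() macro above: because the static struct lock_class_key is declared inside the macro body, each invocation site gets its own key, which is what lets lockdep distinguish counters whose locks are taken from IRQ context from those that are not (and why the separate percpu_counter_init_irq() variant can be dropped). A minimal sketch of the same idiom, with hypothetical names:

	/* Sketch of the per-call-site lock class idiom; all names hypothetical. */
	#include <linux/spinlock.h>
	#include <linux/lockdep.h>

	struct my_obj {
		spinlock_t lock;
	};

	#define my_obj_init(obj)				\
		({						\
			static struct lock_class_key __key;	\
								\
			spin_lock_init(&(obj)->lock);		\
			lockdep_set_class(&(obj)->lock, &__key);\
		})

Every textual use of my_obj_init() expands to its own static key, so two subsystems initializing the same structure type still get distinct lock classes.
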
include/linux/rcupdate.h
... | ... | @@ -204,18 +204,6 @@ |
204 | 204 | |
205 | 205 | extern void wakeme_after_rcu(struct rcu_head *head); |
206 | 206 | |
207 | -#define synchronize_rcu_xxx(name, func) \ | |
208 | -void name(void) \ | |
209 | -{ \ | |
210 | - struct rcu_synchronize rcu; \ | |
211 | - \ | |
212 | - init_completion(&rcu.completion); \ | |
213 | - /* Will wake me after RCU finished. */ \ | |
214 | - func(&rcu.head, wakeme_after_rcu); \ | |
215 | - /* Wait for it. */ \ | |
216 | - wait_for_completion(&rcu.completion); \ | |
217 | -} | |
218 | - | |
219 | 207 | /** |
220 | 208 | * synchronize_sched - block until all CPUs have exited any non-preemptive |
221 | 209 | * kernel code sequences. |
include/linux/swiotlb.h
... | ... | @@ -27,7 +27,8 @@ |
27 | 27 | extern void *swiotlb_alloc_boot(size_t bytes, unsigned long nslabs); |
28 | 28 | extern void *swiotlb_alloc(unsigned order, unsigned long nslabs); |
29 | 29 | |
30 | -extern dma_addr_t swiotlb_phys_to_bus(phys_addr_t address); | |
30 | +extern dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, | |
31 | + phys_addr_t address); | |
31 | 32 | extern phys_addr_t swiotlb_bus_to_phys(dma_addr_t address); |
32 | 33 | |
33 | 34 | extern int swiotlb_arch_range_needs_mapping(void *ptr, size_t size); |
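
swiotlb_phys_to_bus() has a __weak identity default in lib/swiotlb.c (see below), so passing hwdev lets an architecture whose physical-to-bus translation depends on the device override it. A hedged sketch of such an override; the per-device offset helper is invented for illustration:

	/* Hypothetical arch override of the __weak default in lib/swiotlb.c;
	 * my_bus_offset() is invented for illustration. */
	dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
	{
		return paddr + my_bus_offset(hwdev);
	}
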
kernel/futex.c
... | ... | @@ -170,8 +170,11 @@ |
170 | 170 | */ |
171 | 171 | static void drop_futex_key_refs(union futex_key *key) |
172 | 172 | { |
173 | - if (!key->both.ptr) | |
173 | + if (!key->both.ptr) { | |
174 | + /* If we're here then we tried to put a key we failed to get */ | |
175 | + WARN_ON_ONCE(1); | |
174 | 176 | return; |
177 | + } | |
175 | 178 | |
176 | 179 | switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) { |
177 | 180 | case FUT_OFF_INODE: |
178 | 181 | |
... | ... | @@ -730,8 +733,8 @@ |
730 | 733 | } |
731 | 734 | |
732 | 735 | spin_unlock(&hb->lock); |
733 | -out: | |
734 | 736 | put_futex_key(fshared, &key); |
737 | +out: | |
735 | 738 | return ret; |
736 | 739 | } |
737 | 740 | |
... | ... | @@ -755,7 +758,7 @@ |
755 | 758 | goto out; |
756 | 759 | ret = get_futex_key(uaddr2, fshared, &key2); |
757 | 760 | if (unlikely(ret != 0)) |
758 | - goto out; | |
761 | + goto out_put_key1; | |
759 | 762 | |
760 | 763 | hb1 = hash_futex(&key1); |
761 | 764 | hb2 = hash_futex(&key2); |
762 | 765 | |
... | ... | @@ -777,12 +780,12 @@ |
777 | 780 | * but we might get them from range checking |
778 | 781 | */ |
779 | 782 | ret = op_ret; |
780 | - goto out; | |
783 | + goto out_put_keys; | |
781 | 784 | #endif |
782 | 785 | |
783 | 786 | if (unlikely(op_ret != -EFAULT)) { |
784 | 787 | ret = op_ret; |
785 | - goto out; | |
788 | + goto out_put_keys; | |
786 | 789 | } |
787 | 790 | |
788 | 791 | /* |
... | ... | @@ -796,7 +799,7 @@ |
796 | 799 | ret = futex_handle_fault((unsigned long)uaddr2, |
797 | 800 | attempt); |
798 | 801 | if (ret) |
799 | - goto out; | |
802 | + goto out_put_keys; | |
800 | 803 | goto retry; |
801 | 804 | } |
802 | 805 | |
... | ... | @@ -834,10 +837,11 @@ |
834 | 837 | spin_unlock(&hb1->lock); |
835 | 838 | if (hb1 != hb2) |
836 | 839 | spin_unlock(&hb2->lock); |
837 | -out: | |
840 | +out_put_keys: | |
838 | 841 | put_futex_key(fshared, &key2); |
842 | +out_put_key1: | |
839 | 843 | put_futex_key(fshared, &key1); |
840 | - | |
844 | +out: | |
841 | 845 | return ret; |
842 | 846 | } |
843 | 847 | |
844 | 848 | |
... | ... | @@ -854,13 +858,13 @@ |
854 | 858 | struct futex_q *this, *next; |
855 | 859 | int ret, drop_count = 0; |
856 | 860 | |
857 | - retry: | |
861 | +retry: | |
858 | 862 | ret = get_futex_key(uaddr1, fshared, &key1); |
859 | 863 | if (unlikely(ret != 0)) |
860 | 864 | goto out; |
861 | 865 | ret = get_futex_key(uaddr2, fshared, &key2); |
862 | 866 | if (unlikely(ret != 0)) |
863 | - goto out; | |
867 | + goto out_put_key1; | |
864 | 868 | |
865 | 869 | hb1 = hash_futex(&key1); |
866 | 870 | hb2 = hash_futex(&key2); |
... | ... | @@ -882,7 +886,7 @@ |
882 | 886 | if (!ret) |
883 | 887 | goto retry; |
884 | 888 | |
885 | - return ret; | |
889 | + goto out_put_keys; | |
886 | 890 | } |
887 | 891 | if (curval != *cmpval) { |
888 | 892 | ret = -EAGAIN; |
... | ... | @@ -927,9 +931,11 @@ |
927 | 931 | while (--drop_count >= 0) |
928 | 932 | drop_futex_key_refs(&key1); |
929 | 933 | |
930 | -out: | |
934 | +out_put_keys: | |
931 | 935 | put_futex_key(fshared, &key2); |
936 | +out_put_key1: | |
932 | 937 | put_futex_key(fshared, &key1); |
938 | +out: | |
933 | 939 | return ret; |
934 | 940 | } |
935 | 941 | |
... | ... | @@ -990,7 +996,7 @@ |
990 | 996 | int ret = 0; |
991 | 997 | |
992 | 998 | /* In the common case we don't take the spinlock, which is nice. */ |
993 | - retry: | |
999 | +retry: | |
994 | 1000 | lock_ptr = q->lock_ptr; |
995 | 1001 | barrier(); |
996 | 1002 | if (lock_ptr != NULL) { |
997 | 1003 | |
... | ... | @@ -1172,11 +1178,11 @@ |
1172 | 1178 | |
1173 | 1179 | q.pi_state = NULL; |
1174 | 1180 | q.bitset = bitset; |
1175 | - retry: | |
1181 | +retry: | |
1176 | 1182 | q.key = FUTEX_KEY_INIT; |
1177 | 1183 | ret = get_futex_key(uaddr, fshared, &q.key); |
1178 | 1184 | if (unlikely(ret != 0)) |
1179 | - goto out_release_sem; | |
1185 | + goto out; | |
1180 | 1186 | |
1181 | 1187 | hb = queue_lock(&q); |
1182 | 1188 | |
... | ... | @@ -1204,6 +1210,7 @@ |
1204 | 1210 | |
1205 | 1211 | if (unlikely(ret)) { |
1206 | 1212 | queue_unlock(&q, hb); |
1213 | + put_futex_key(fshared, &q.key); | |
1207 | 1214 | |
1208 | 1215 | ret = get_user(uval, uaddr); |
1209 | 1216 | |
... | ... | @@ -1213,7 +1220,7 @@ |
1213 | 1220 | } |
1214 | 1221 | ret = -EWOULDBLOCK; |
1215 | 1222 | if (uval != val) |
1216 | - goto out_unlock_release_sem; | |
1223 | + goto out_unlock_put_key; | |
1217 | 1224 | |
1218 | 1225 | /* Only actually queue if *uaddr contained val. */ |
1219 | 1226 | queue_me(&q, hb); |
... | ... | @@ -1305,11 +1312,11 @@ |
1305 | 1312 | return -ERESTART_RESTARTBLOCK; |
1306 | 1313 | } |
1307 | 1314 | |
1308 | - out_unlock_release_sem: | |
1315 | +out_unlock_put_key: | |
1309 | 1316 | queue_unlock(&q, hb); |
1310 | - | |
1311 | - out_release_sem: | |
1312 | 1317 | put_futex_key(fshared, &q.key); |
1318 | + | |
1319 | +out: | |
1313 | 1320 | return ret; |
1314 | 1321 | } |
1315 | 1322 | |
... | ... | @@ -1358,16 +1365,16 @@ |
1358 | 1365 | } |
1359 | 1366 | |
1360 | 1367 | q.pi_state = NULL; |
1361 | - retry: | |
1368 | +retry: | |
1362 | 1369 | q.key = FUTEX_KEY_INIT; |
1363 | 1370 | ret = get_futex_key(uaddr, fshared, &q.key); |
1364 | 1371 | if (unlikely(ret != 0)) |
1365 | - goto out_release_sem; | |
1372 | + goto out; | |
1366 | 1373 | |
1367 | - retry_unlocked: | |
1374 | +retry_unlocked: | |
1368 | 1375 | hb = queue_lock(&q); |
1369 | 1376 | |
1370 | - retry_locked: | |
1377 | +retry_locked: | |
1371 | 1378 | ret = lock_taken = 0; |
1372 | 1379 | |
1373 | 1380 | /* |
1374 | 1381 | |
... | ... | @@ -1388,14 +1395,14 @@ |
1388 | 1395 | */ |
1389 | 1396 | if (unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(current))) { |
1390 | 1397 | ret = -EDEADLK; |
1391 | - goto out_unlock_release_sem; | |
1398 | + goto out_unlock_put_key; | |
1392 | 1399 | } |
1393 | 1400 | |
1394 | 1401 | /* |
1395 | 1402 | * Surprise - we got the lock. Just return to userspace: |
1396 | 1403 | */ |
1397 | 1404 | if (unlikely(!curval)) |
1398 | - goto out_unlock_release_sem; | |
1405 | + goto out_unlock_put_key; | |
1399 | 1406 | |
1400 | 1407 | uval = curval; |
1401 | 1408 | |
... | ... | @@ -1431,7 +1438,7 @@ |
1431 | 1438 | * We took the lock due to owner died take over. |
1432 | 1439 | */ |
1433 | 1440 | if (unlikely(lock_taken)) |
1434 | - goto out_unlock_release_sem; | |
1441 | + goto out_unlock_put_key; | |
1435 | 1442 | |
1436 | 1443 | /* |
1437 | 1444 | * We dont have the lock. Look up the PI state (or create it if |
... | ... | @@ -1470,7 +1477,7 @@ |
1470 | 1477 | goto retry_locked; |
1471 | 1478 | } |
1472 | 1479 | default: |
1473 | - goto out_unlock_release_sem; | |
1480 | + goto out_unlock_put_key; | |
1474 | 1481 | } |
1475 | 1482 | } |
1476 | 1483 | |
... | ... | @@ -1561,16 +1568,17 @@ |
1561 | 1568 | destroy_hrtimer_on_stack(&to->timer); |
1562 | 1569 | return ret != -EINTR ? ret : -ERESTARTNOINTR; |
1563 | 1570 | |
1564 | - out_unlock_release_sem: | |
1571 | +out_unlock_put_key: | |
1565 | 1572 | queue_unlock(&q, hb); |
1566 | 1573 | |
1567 | - out_release_sem: | |
1574 | +out_put_key: | |
1568 | 1575 | put_futex_key(fshared, &q.key); |
1576 | +out: | |
1569 | 1577 | if (to) |
1570 | 1578 | destroy_hrtimer_on_stack(&to->timer); |
1571 | 1579 | return ret; |
1572 | 1580 | |
1573 | - uaddr_faulted: | |
1581 | +uaddr_faulted: | |
1574 | 1582 | /* |
1575 | 1583 | * We have to r/w *(int __user *)uaddr, and we have to modify it |
1576 | 1584 | * atomically. Therefore, if we continue to fault after get_user() |
... | ... | @@ -1583,7 +1591,7 @@ |
1583 | 1591 | if (attempt++) { |
1584 | 1592 | ret = futex_handle_fault((unsigned long)uaddr, attempt); |
1585 | 1593 | if (ret) |
1586 | - goto out_release_sem; | |
1594 | + goto out_put_key; | |
1587 | 1595 | goto retry_unlocked; |
1588 | 1596 | } |
1589 | 1597 | |
1590 | 1598 | |
... | ... | @@ -1675,9 +1683,9 @@ |
1675 | 1683 | |
1676 | 1684 | out_unlock: |
1677 | 1685 | spin_unlock(&hb->lock); |
1678 | -out: | |
1679 | 1686 | put_futex_key(fshared, &key); |
1680 | 1687 | |
1688 | +out: | |
1681 | 1689 | return ret; |
1682 | 1690 | |
1683 | 1691 | pi_faulted: |
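
The theme of the futex.c changes above: every successful get_futex_key() is now paired with exactly one put_futex_key(), with error paths unwinding in reverse acquisition order through dedicated labels (and drop_futex_key_refs() now warning if a never-acquired key is put). A condensed sketch of the two-key unwind shape used by the wake-op and requeue paths; two_key_op() itself is hypothetical:

	/* Condensed sketch of the symmetric get/put unwind used above. */
	static int two_key_op(u32 __user *uaddr1, u32 __user *uaddr2, int fshared)
	{
		union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
		int ret;

		ret = get_futex_key(uaddr1, fshared, &key1);
		if (unlikely(ret != 0))
			goto out;		/* key1 was never taken */
		ret = get_futex_key(uaddr2, fshared, &key2);
		if (unlikely(ret != 0))
			goto out_put_key1;	/* release key1 only */

		/* ... operation on both hash buckets ... */

		put_futex_key(fshared, &key2);
	out_put_key1:
		put_futex_key(fshared, &key1);
	out:
		return ret;
	}
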
kernel/rcupdate.c
... | ... | @@ -77,8 +77,15 @@ |
77 | 77 | * sections are delimited by rcu_read_lock() and rcu_read_unlock(), |
78 | 78 | * and may be nested. |
79 | 79 | */ |
80 | -void synchronize_rcu(void); /* Makes kernel-doc tools happy */ | |
81 | -synchronize_rcu_xxx(synchronize_rcu, call_rcu) | |
80 | +void synchronize_rcu(void) | |
81 | +{ | |
82 | + struct rcu_synchronize rcu; | |
83 | + init_completion(&rcu.completion); | |
84 | + /* Will wake me after RCU finished. */ | |
85 | + call_rcu(&rcu.head, wakeme_after_rcu); | |
86 | + /* Wait for it. */ | |
87 | + wait_for_completion(&rcu.completion); | |
88 | +} | |
82 | 89 | EXPORT_SYMBOL_GPL(synchronize_rcu); |
83 | 90 | |
84 | 91 | static void rcu_barrier_callback(struct rcu_head *notused) |
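
With the macro gone, synchronize_rcu() above reads as the plain completion pattern: post a callback that runs after a grace period, then sleep until it fires. For reference, the canonical updater-side use it serves, sketched with a hypothetical protected pointer:

	/* Classic RCU update pattern served by synchronize_rcu();
	 * struct foo and gp are hypothetical. */
	struct foo {
		int a;
	};

	static struct foo *gp;		/* readers access under rcu_read_lock() */

	static void update_foo(struct foo *newp)
	{
		struct foo *oldp = gp;	/* caller holds the update-side lock */

		rcu_assign_pointer(gp, newp);
		synchronize_rcu();	/* wait out all pre-existing readers */
		kfree(oldp);
	}
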
kernel/rcupreempt.c
... | ... | @@ -1177,7 +1177,16 @@ |
1177 | 1177 | * in -rt this does -not- necessarily result in all currently executing |
1178 | 1178 | * interrupt -handlers- having completed. |
1179 | 1179 | */ |
1180 | -synchronize_rcu_xxx(__synchronize_sched, call_rcu_sched) | |
1180 | +void __synchronize_sched(void) | |
1181 | +{ | |
1182 | + struct rcu_synchronize rcu; | |
1183 | + | |
1184 | + init_completion(&rcu.completion); | |
1185 | + /* Will wake me after RCU finished. */ | |
1186 | + call_rcu_sched(&rcu.head, wakeme_after_rcu); | |
1187 | + /* Wait for it. */ | |
1188 | + wait_for_completion(&rcu.completion); | |
1189 | +} | |
1181 | 1190 | EXPORT_SYMBOL_GPL(__synchronize_sched); |
1182 | 1191 | |
1183 | 1192 | /* |
kernel/rcutorture.c
... | ... | @@ -136,7 +136,7 @@ |
136 | 136 | #endif |
137 | 137 | int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT; |
138 | 138 | |
139 | -#define FULLSTOP_SIGNALED 1 /* Bail due to signal. */ | |
139 | +#define FULLSTOP_SHUTDOWN 1 /* Bail due to system shutdown/panic. */ | |
140 | 140 | #define FULLSTOP_CLEANUP 2 /* Orderly shutdown. */ |
141 | 141 | static int fullstop; /* stop generating callbacks at test end. */ |
142 | 142 | DEFINE_MUTEX(fullstop_mutex); /* protect fullstop transitions and */ |
... | ... | @@ -151,12 +151,10 @@ |
151 | 151 | { |
152 | 152 | if (fullstop) |
153 | 153 | return NOTIFY_DONE; |
154 | - if (signal_pending(current)) { | |
155 | - mutex_lock(&fullstop_mutex); | |
156 | - if (!ACCESS_ONCE(fullstop)) | |
157 | - fullstop = FULLSTOP_SIGNALED; | |
158 | - mutex_unlock(&fullstop_mutex); | |
159 | - } | |
154 | + mutex_lock(&fullstop_mutex); | |
155 | + if (!fullstop) | |
156 | + fullstop = FULLSTOP_SHUTDOWN; | |
157 | + mutex_unlock(&fullstop_mutex); | |
160 | 158 | return NOTIFY_DONE; |
161 | 159 | } |
162 | 160 | |
... | ... | @@ -624,7 +622,7 @@ |
624 | 622 | rcu_stutter_wait(); |
625 | 623 | } while (!kthread_should_stop() && !fullstop); |
626 | 624 | VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping"); |
627 | - while (!kthread_should_stop() && fullstop != FULLSTOP_SIGNALED) | |
625 | + while (!kthread_should_stop() && fullstop != FULLSTOP_SHUTDOWN) | |
628 | 626 | schedule_timeout_uninterruptible(1); |
629 | 627 | return 0; |
630 | 628 | } |
... | ... | @@ -649,7 +647,7 @@ |
649 | 647 | } while (!kthread_should_stop() && !fullstop); |
650 | 648 | |
651 | 649 | VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping"); |
652 | - while (!kthread_should_stop() && fullstop != FULLSTOP_SIGNALED) | |
650 | + while (!kthread_should_stop() && fullstop != FULLSTOP_SHUTDOWN) | |
653 | 651 | schedule_timeout_uninterruptible(1); |
654 | 652 | return 0; |
655 | 653 | } |
... | ... | @@ -759,7 +757,7 @@ |
759 | 757 | VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping"); |
760 | 758 | if (irqreader && cur_ops->irqcapable) |
761 | 759 | del_timer_sync(&t); |
762 | - while (!kthread_should_stop() && fullstop != FULLSTOP_SIGNALED) | |
760 | + while (!kthread_should_stop() && fullstop != FULLSTOP_SHUTDOWN) | |
763 | 761 | schedule_timeout_uninterruptible(1); |
764 | 762 | return 0; |
765 | 763 | } |
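
The notifier hunk above replaces the signal-based bail-out with an unconditional one: any invocation of the notifier now means the system is shutting down or panicking, so FULLSTOP_SHUTDOWN is set regardless of pending signals. For context, a hedged sketch of how such a hook is registered (everything except the notifier API is hypothetical):

	#include <linux/notifier.h>
	#include <linux/reboot.h>

	/* Hypothetical shutdown hook in the style of rcutorture's. */
	static int my_shutdown_notify(struct notifier_block *this,
				      unsigned long code, void *unused)
	{
		/* stop generating new work here */
		return NOTIFY_DONE;
	}

	static struct notifier_block my_shutdown_nb = {
		.notifier_call = my_shutdown_notify,
	};

	static int __init my_module_init(void)
	{
		return register_reboot_notifier(&my_shutdown_nb);
	}
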
kernel/rcutree.c
... | ... | @@ -79,7 +79,10 @@ |
79 | 79 | DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); |
80 | 80 | |
81 | 81 | #ifdef CONFIG_NO_HZ |
82 | -DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks); | |
82 | +DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = { | |
83 | + .dynticks_nesting = 1, | |
84 | + .dynticks = 1, | |
85 | +}; | |
83 | 86 | #endif /* #ifdef CONFIG_NO_HZ */ |
84 | 87 | |
85 | 88 | static int blimit = 10; /* Maximum callbacks per softirq. */ |
... | ... | @@ -572,6 +575,7 @@ |
572 | 575 | /* Special-case the common single-level case. */ |
573 | 576 | if (NUM_RCU_NODES == 1) { |
574 | 577 | rnp->qsmask = rnp->qsmaskinit; |
578 | + rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */ | |
575 | 579 | spin_unlock_irqrestore(&rnp->lock, flags); |
576 | 580 | return; |
577 | 581 | } |
... | ... | @@ -1379,13 +1383,6 @@ |
1379 | 1383 | |
1380 | 1384 | static void __cpuinit rcu_online_cpu(int cpu) |
1381 | 1385 | { |
1382 | -#ifdef CONFIG_NO_HZ | |
1383 | - struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); | |
1384 | - | |
1385 | - rdtp->dynticks_nesting = 1; | |
1386 | - rdtp->dynticks |= 1; /* need consecutive #s even for hotplug. */ | |
1387 | - rdtp->dynticks_nmi = (rdtp->dynticks_nmi + 1) & ~0x1; | |
1388 | -#endif /* #ifdef CONFIG_NO_HZ */ | |
1389 | 1386 | rcu_init_percpu_data(cpu, &rcu_state); |
1390 | 1387 | rcu_init_percpu_data(cpu, &rcu_bh_state); |
1391 | 1388 | open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); |
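
Moving the rcu_dynticks setup from rcu_online_cpu() into a static DEFINE_PER_CPU initializer means the dynticks counters are valid from boot onward, not only after the CPU-online notifier has run; that appears to be what makes treercu behave across suspend/resume, where CPUs come and go without replaying the full online path. The same idiom, sketched with hypothetical names:

	#include <linux/percpu.h>

	/* Hypothetical per-CPU state with a static initializer, as
	 * rcu_dynticks now uses: valid from boot, before any hotplug
	 * callback runs. */
	struct my_dynstate {
		int nesting;
		int seq;
	};

	static DEFINE_PER_CPU(struct my_dynstate, my_dynstate) = {
		.nesting = 1,
		.seq = 1,
	};
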
lib/percpu_counter.c
... | ... | @@ -66,11 +66,11 @@ |
66 | 66 | } |
67 | 67 | EXPORT_SYMBOL(__percpu_counter_sum); |
68 | 68 | |
69 | -static struct lock_class_key percpu_counter_irqsafe; | |
70 | - | |
71 | -int percpu_counter_init(struct percpu_counter *fbc, s64 amount) | |
69 | +int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, | |
70 | + struct lock_class_key *key) | |
72 | 71 | { |
73 | 72 | spin_lock_init(&fbc->lock); |
73 | + lockdep_set_class(&fbc->lock, key); | |
74 | 74 | fbc->count = amount; |
75 | 75 | fbc->counters = alloc_percpu(s32); |
76 | 76 | if (!fbc->counters) |
... | ... | @@ -82,17 +82,7 @@ |
82 | 82 | #endif |
83 | 83 | return 0; |
84 | 84 | } |
85 | -EXPORT_SYMBOL(percpu_counter_init); | |
86 | - | |
87 | -int percpu_counter_init_irq(struct percpu_counter *fbc, s64 amount) | |
88 | -{ | |
89 | - int err; | |
90 | - | |
91 | - err = percpu_counter_init(fbc, amount); | |
92 | - if (!err) | |
93 | - lockdep_set_class(&fbc->lock, &percpu_counter_irqsafe); | |
94 | - return err; | |
95 | -} | |
85 | +EXPORT_SYMBOL(__percpu_counter_init); | |
96 | 86 | |
97 | 87 | void percpu_counter_destroy(struct percpu_counter *fbc) |
98 | 88 | { |
lib/proportions.c
... | ... | @@ -83,11 +83,11 @@ |
83 | 83 | pd->index = 0; |
84 | 84 | pd->pg[0].shift = shift; |
85 | 85 | mutex_init(&pd->mutex); |
86 | - err = percpu_counter_init_irq(&pd->pg[0].events, 0); | |
86 | + err = percpu_counter_init(&pd->pg[0].events, 0); | |
87 | 87 | if (err) |
88 | 88 | goto out; |
89 | 89 | |
90 | - err = percpu_counter_init_irq(&pd->pg[1].events, 0); | |
90 | + err = percpu_counter_init(&pd->pg[1].events, 0); | |
91 | 91 | if (err) |
92 | 92 | percpu_counter_destroy(&pd->pg[0].events); |
93 | 93 | |
... | ... | @@ -193,7 +193,7 @@ |
193 | 193 | spin_lock_init(&pl->lock); |
194 | 194 | pl->shift = 0; |
195 | 195 | pl->period = 0; |
196 | - return percpu_counter_init_irq(&pl->events, 0); | |
196 | + return percpu_counter_init(&pl->events, 0); | |
197 | 197 | } |
198 | 198 | |
199 | 199 | void prop_local_destroy_percpu(struct prop_local_percpu *pl) |
lib/swiotlb.c
... | ... | @@ -14,6 +14,7 @@ |
14 | 14 | * 04/07/.. ak Better overflow handling. Assorted fixes. |
15 | 15 | * 05/09/10 linville Add support for syncing ranges, support syncing for |
16 | 16 | * DMA_BIDIRECTIONAL mappings, miscellaneous cleanup. |
17 | + * 08/12/11 beckyb Add highmem support | |
17 | 18 | */ |
18 | 19 | |
19 | 20 | #include <linux/cache.h> |
20 | 21 | |
... | ... | @@ -21,8 +22,9 @@ |
21 | 22 | #include <linux/mm.h> |
22 | 23 | #include <linux/module.h> |
23 | 24 | #include <linux/spinlock.h> |
24 | -#include <linux/swiotlb.h> | |
25 | 25 | #include <linux/string.h> |
26 | +#include <linux/swiotlb.h> | |
27 | +#include <linux/pfn.h> | |
26 | 28 | #include <linux/types.h> |
27 | 29 | #include <linux/ctype.h> |
28 | 30 | #include <linux/highmem.h> |
... | ... | @@ -88,10 +90,7 @@ |
88 | 90 | * We need to save away the original address corresponding to a mapped entry |
89 | 91 | * for the sync operations. |
90 | 92 | */ |
91 | -static struct swiotlb_phys_addr { | |
92 | - struct page *page; | |
93 | - unsigned int offset; | |
94 | -} *io_tlb_orig_addr; | |
93 | +static phys_addr_t *io_tlb_orig_addr; | |
95 | 94 | |
96 | 95 | /* |
97 | 96 | * Protect the above data structures in the map and unmap calls |
... | ... | @@ -125,7 +124,7 @@ |
125 | 124 | return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order); |
126 | 125 | } |
127 | 126 | |
128 | -dma_addr_t __weak swiotlb_phys_to_bus(phys_addr_t paddr) | |
127 | +dma_addr_t __weak swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr) | |
129 | 128 | { |
130 | 129 | return paddr; |
131 | 130 | } |
132 | 131 | |
... | ... | @@ -135,9 +134,10 @@ |
135 | 134 | return baddr; |
136 | 135 | } |
137 | 136 | |
138 | -static dma_addr_t swiotlb_virt_to_bus(volatile void *address) | |
137 | +static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev, | |
138 | + volatile void *address) | |
139 | 139 | { |
140 | - return swiotlb_phys_to_bus(virt_to_phys(address)); | |
140 | + return swiotlb_phys_to_bus(hwdev, virt_to_phys(address)); | |
141 | 141 | } |
142 | 142 | |
143 | 143 | static void *swiotlb_bus_to_virt(dma_addr_t address) |
... | ... | @@ -150,35 +150,18 @@ |
150 | 150 | return 0; |
151 | 151 | } |
152 | 152 | |
153 | -static dma_addr_t swiotlb_sg_to_bus(struct scatterlist *sg) | |
154 | -{ | |
155 | - return swiotlb_phys_to_bus(page_to_phys(sg_page(sg)) + sg->offset); | |
156 | -} | |
157 | - | |
158 | 153 | static void swiotlb_print_info(unsigned long bytes) |
159 | 154 | { |
160 | 155 | phys_addr_t pstart, pend; |
161 | - dma_addr_t bstart, bend; | |
162 | 156 | |
163 | 157 | pstart = virt_to_phys(io_tlb_start); |
164 | 158 | pend = virt_to_phys(io_tlb_end); |
165 | 159 | |
166 | - bstart = swiotlb_phys_to_bus(pstart); | |
167 | - bend = swiotlb_phys_to_bus(pend); | |
168 | - | |
169 | 160 | printk(KERN_INFO "Placing %luMB software IO TLB between %p - %p\n", |
170 | 161 | bytes >> 20, io_tlb_start, io_tlb_end); |
171 | - if (pstart != bstart || pend != bend) | |
172 | - printk(KERN_INFO "software IO TLB at phys %#llx - %#llx" | |
173 | - " bus %#llx - %#llx\n", | |
174 | - (unsigned long long)pstart, | |
175 | - (unsigned long long)pend, | |
176 | - (unsigned long long)bstart, | |
177 | - (unsigned long long)bend); | |
178 | - else | |
179 | - printk(KERN_INFO "software IO TLB at phys %#llx - %#llx\n", | |
180 | - (unsigned long long)pstart, | |
181 | - (unsigned long long)pend); | |
162 | + printk(KERN_INFO "software IO TLB at phys %#llx - %#llx\n", | |
163 | + (unsigned long long)pstart, | |
164 | + (unsigned long long)pend); | |
182 | 165 | } |
183 | 166 | |
184 | 167 | /* |
... | ... | @@ -214,7 +197,7 @@ |
214 | 197 | for (i = 0; i < io_tlb_nslabs; i++) |
215 | 198 | io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); |
216 | 199 | io_tlb_index = 0; |
217 | - io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(struct swiotlb_phys_addr)); | |
200 | + io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(phys_addr_t)); | |
218 | 201 | |
219 | 202 | /* |
220 | 203 | * Get the overflow emergency buffer |
221 | 204 | |
... | ... | @@ -288,12 +271,14 @@ |
288 | 271 | io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); |
289 | 272 | io_tlb_index = 0; |
290 | 273 | |
291 | - io_tlb_orig_addr = (struct swiotlb_phys_addr *)__get_free_pages(GFP_KERNEL, | |
292 | - get_order(io_tlb_nslabs * sizeof(struct swiotlb_phys_addr))); | |
274 | + io_tlb_orig_addr = (phys_addr_t *) | |
275 | + __get_free_pages(GFP_KERNEL, | |
276 | + get_order(io_tlb_nslabs * | |
277 | + sizeof(phys_addr_t))); | |
293 | 278 | if (!io_tlb_orig_addr) |
294 | 279 | goto cleanup3; |
295 | 280 | |
296 | - memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(struct swiotlb_phys_addr)); | |
281 | + memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(phys_addr_t)); | |
297 | 282 | |
298 | 283 | /* |
299 | 284 | * Get the overflow emergency buffer |
... | ... | @@ -308,8 +293,8 @@ |
308 | 293 | return 0; |
309 | 294 | |
310 | 295 | cleanup4: |
311 | - free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs * | |
312 | - sizeof(char *))); | |
296 | + free_pages((unsigned long)io_tlb_orig_addr, | |
297 | + get_order(io_tlb_nslabs * sizeof(phys_addr_t))); | |
313 | 298 | io_tlb_orig_addr = NULL; |
314 | 299 | cleanup3: |
315 | 300 | free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs * |
... | ... | @@ -340,51 +325,44 @@ |
340 | 325 | return addr >= io_tlb_start && addr < io_tlb_end; |
341 | 326 | } |
342 | 327 | |
343 | -static struct swiotlb_phys_addr swiotlb_bus_to_phys_addr(char *dma_addr) | |
328 | +/* | |
329 | + * Bounce: copy the swiotlb buffer back to the original dma location | |
330 | + */ | |
331 | +static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size, | |
332 | + enum dma_data_direction dir) | |
344 | 333 | { |
345 | - int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT; | |
346 | - struct swiotlb_phys_addr buffer = io_tlb_orig_addr[index]; | |
347 | - buffer.offset += (long)dma_addr & ((1 << IO_TLB_SHIFT) - 1); | |
348 | - buffer.page += buffer.offset >> PAGE_SHIFT; | |
349 | - buffer.offset &= PAGE_SIZE - 1; | |
350 | - return buffer; | |
351 | -} | |
334 | + unsigned long pfn = PFN_DOWN(phys); | |
352 | 335 | |
353 | -static void | |
354 | -__sync_single(struct swiotlb_phys_addr buffer, char *dma_addr, size_t size, int dir) | |
355 | -{ | |
356 | - if (PageHighMem(buffer.page)) { | |
357 | - size_t len, bytes; | |
358 | - char *dev, *host, *kmp; | |
336 | + if (PageHighMem(pfn_to_page(pfn))) { | |
337 | + /* The buffer does not have a mapping. Map it in and copy */ | |
338 | + unsigned int offset = phys & ~PAGE_MASK; | |
339 | + char *buffer; | |
340 | + unsigned int sz = 0; | |
341 | + unsigned long flags; | |
359 | 342 | |
360 | - len = size; | |
361 | - while (len != 0) { | |
362 | - unsigned long flags; | |
343 | + while (size) { | |
344 | + sz = min(PAGE_SIZE - offset, size); | |
363 | 345 | |
364 | - bytes = len; | |
365 | - if ((bytes + buffer.offset) > PAGE_SIZE) | |
366 | - bytes = PAGE_SIZE - buffer.offset; | |
367 | - local_irq_save(flags); /* protects KM_BOUNCE_READ */ | |
368 | - kmp = kmap_atomic(buffer.page, KM_BOUNCE_READ); | |
369 | - dev = dma_addr + size - len; | |
370 | - host = kmp + buffer.offset; | |
371 | - if (dir == DMA_FROM_DEVICE) | |
372 | - memcpy(host, dev, bytes); | |
346 | + local_irq_save(flags); | |
347 | + buffer = kmap_atomic(pfn_to_page(pfn), | |
348 | + KM_BOUNCE_READ); | |
349 | + if (dir == DMA_TO_DEVICE) | |
350 | + memcpy(dma_addr, buffer + offset, sz); | |
373 | 351 | else |
374 | - memcpy(dev, host, bytes); | |
375 | - kunmap_atomic(kmp, KM_BOUNCE_READ); | |
352 | + memcpy(buffer + offset, dma_addr, sz); | |
353 | + kunmap_atomic(buffer, KM_BOUNCE_READ); | |
376 | 354 | local_irq_restore(flags); |
377 | - len -= bytes; | |
378 | - buffer.page++; | |
379 | - buffer.offset = 0; | |
355 | + | |
356 | + size -= sz; | |
357 | + pfn++; | |
358 | + dma_addr += sz; | |
359 | + offset = 0; | |
380 | 360 | } |
381 | 361 | } else { |
382 | - void *v = page_address(buffer.page) + buffer.offset; | |
383 | - | |
384 | 362 | if (dir == DMA_TO_DEVICE) |
385 | - memcpy(dma_addr, v, size); | |
363 | + memcpy(dma_addr, phys_to_virt(phys), size); | |
386 | 364 | else |
387 | - memcpy(v, dma_addr, size); | |
365 | + memcpy(phys_to_virt(phys), dma_addr, size); | |
388 | 366 | } |
389 | 367 | } |
390 | 368 | |
... | ... | @@ -392,7 +370,7 @@ |
392 | 370 | * Allocates bounce buffer and returns its kernel virtual address. |
393 | 371 | */ |
394 | 372 | static void * |
395 | -map_single(struct device *hwdev, struct swiotlb_phys_addr buffer, size_t size, int dir) | |
373 | +map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir) | |
396 | 374 | { |
397 | 375 | unsigned long flags; |
398 | 376 | char *dma_addr; |
399 | 377 | |
... | ... | @@ -402,10 +380,9 @@ |
402 | 380 | unsigned long mask; |
403 | 381 | unsigned long offset_slots; |
404 | 382 | unsigned long max_slots; |
405 | - struct swiotlb_phys_addr slot_buf; | |
406 | 383 | |
407 | 384 | mask = dma_get_seg_boundary(hwdev); |
408 | - start_dma_addr = swiotlb_virt_to_bus(io_tlb_start) & mask; | |
385 | + start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start) & mask; | |
409 | 386 | |
410 | 387 | offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; |
411 | 388 | |
412 | 389 | |
... | ... | @@ -487,15 +464,10 @@ |
487 | 464 | * This is needed when we sync the memory. Then we sync the buffer if |
488 | 465 | * needed. |
489 | 466 | */ |
490 | - slot_buf = buffer; | |
491 | - for (i = 0; i < nslots; i++) { | |
492 | - slot_buf.page += slot_buf.offset >> PAGE_SHIFT; | |
493 | - slot_buf.offset &= PAGE_SIZE - 1; | |
494 | - io_tlb_orig_addr[index+i] = slot_buf; | |
495 | - slot_buf.offset += 1 << IO_TLB_SHIFT; | |
496 | - } | |
467 | + for (i = 0; i < nslots; i++) | |
468 | + io_tlb_orig_addr[index+i] = phys + (i << IO_TLB_SHIFT); | |
497 | 469 | if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) |
498 | - __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE); | |
470 | + swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE); | |
499 | 471 | |
500 | 472 | return dma_addr; |
501 | 473 | } |
502 | 474 | |
... | ... | @@ -509,17 +481,13 @@ |
509 | 481 | unsigned long flags; |
510 | 482 | int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; |
511 | 483 | int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT; |
512 | - struct swiotlb_phys_addr buffer = swiotlb_bus_to_phys_addr(dma_addr); | |
484 | + phys_addr_t phys = io_tlb_orig_addr[index]; | |
513 | 485 | |
514 | 486 | /* |
515 | 487 | * First, sync the memory before unmapping the entry |
516 | 488 | */ |
517 | - if ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)) | |
518 | - /* | |
519 | - * bounce... copy the data back into the original buffer * and | |
520 | - * delete the bounce buffer. | |
521 | - */ | |
522 | - __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE); | |
489 | + if (phys && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))) | |
490 | + swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE); | |
523 | 491 | |
524 | 492 | /* |
525 | 493 | * Return the buffer to the free list by setting the corresponding |
... | ... | @@ -551,18 +519,21 @@ |
551 | 519 | sync_single(struct device *hwdev, char *dma_addr, size_t size, |
552 | 520 | int dir, int target) |
553 | 521 | { |
554 | - struct swiotlb_phys_addr buffer = swiotlb_bus_to_phys_addr(dma_addr); | |
522 | + int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT; | |
523 | + phys_addr_t phys = io_tlb_orig_addr[index]; | |
555 | 524 | |
525 | + phys += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1)); | |
526 | + | |
556 | 527 | switch (target) { |
557 | 528 | case SYNC_FOR_CPU: |
558 | 529 | if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)) |
559 | - __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE); | |
530 | + swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE); | |
560 | 531 | else |
561 | 532 | BUG_ON(dir != DMA_TO_DEVICE); |
562 | 533 | break; |
563 | 534 | case SYNC_FOR_DEVICE: |
564 | 535 | if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) |
565 | - __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE); | |
536 | + swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE); | |
566 | 537 | else |
567 | 538 | BUG_ON(dir != DMA_FROM_DEVICE); |
568 | 539 | break; |
... | ... | @@ -584,7 +555,9 @@ |
584 | 555 | dma_mask = hwdev->coherent_dma_mask; |
585 | 556 | |
586 | 557 | ret = (void *)__get_free_pages(flags, order); |
587 | - if (ret && !is_buffer_dma_capable(dma_mask, swiotlb_virt_to_bus(ret), size)) { | |
558 | + if (ret && | |
559 | + !is_buffer_dma_capable(dma_mask, swiotlb_virt_to_bus(hwdev, ret), | |
560 | + size)) { | |
588 | 561 | /* |
589 | 562 | * The allocated memory isn't reachable by the device. |
590 | 563 | * Fall back on swiotlb_map_single(). |
591 | 564 | |
... | ... | @@ -599,16 +572,13 @@ |
599 | 572 | * swiotlb_map_single(), which will grab memory from |
600 | 573 | * the lowest available address range. |
601 | 574 | */ |
602 | - struct swiotlb_phys_addr buffer; | |
603 | - buffer.page = virt_to_page(NULL); | |
604 | - buffer.offset = 0; | |
605 | - ret = map_single(hwdev, buffer, size, DMA_FROM_DEVICE); | |
575 | + ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE); | |
606 | 576 | if (!ret) |
607 | 577 | return NULL; |
608 | 578 | } |
609 | 579 | |
610 | 580 | memset(ret, 0, size); |
611 | - dev_addr = swiotlb_virt_to_bus(ret); | |
581 | + dev_addr = swiotlb_virt_to_bus(hwdev, ret); | |
612 | 582 | |
613 | 583 | /* Confirm address can be DMA'd by device */ |
614 | 584 | if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) { |
... | ... | @@ -623,6 +593,7 @@ |
623 | 593 | *dma_handle = dev_addr; |
624 | 594 | return ret; |
625 | 595 | } |
596 | +EXPORT_SYMBOL(swiotlb_alloc_coherent); | |
626 | 597 | |
627 | 598 | void |
628 | 599 | swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, |
... | ... | @@ -635,6 +606,7 @@ |
635 | 606 | /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ |
636 | 607 | unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE); |
637 | 608 | } |
609 | +EXPORT_SYMBOL(swiotlb_free_coherent); | |
638 | 610 | |
639 | 611 | static void |
640 | 612 | swiotlb_full(struct device *dev, size_t size, int dir, int do_panic) |
641 | 613 | |
... | ... | @@ -668,9 +640,8 @@ |
668 | 640 | swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size, |
669 | 641 | int dir, struct dma_attrs *attrs) |
670 | 642 | { |
671 | - dma_addr_t dev_addr = swiotlb_virt_to_bus(ptr); | |
643 | + dma_addr_t dev_addr = swiotlb_virt_to_bus(hwdev, ptr); | |
672 | 644 | void *map; |
673 | - struct swiotlb_phys_addr buffer; | |
674 | 645 | |
675 | 646 | BUG_ON(dir == DMA_NONE); |
676 | 647 | /* |
677 | 648 | |
... | ... | @@ -685,15 +656,13 @@ |
685 | 656 | /* |
686 | 657 | * Oh well, have to allocate and map a bounce buffer. |
687 | 658 | */ |
688 | - buffer.page = virt_to_page(ptr); | |
689 | - buffer.offset = (unsigned long)ptr & ~PAGE_MASK; | |
690 | - map = map_single(hwdev, buffer, size, dir); | |
659 | + map = map_single(hwdev, virt_to_phys(ptr), size, dir); | |
691 | 660 | if (!map) { |
692 | 661 | swiotlb_full(hwdev, size, dir, 1); |
693 | 662 | map = io_tlb_overflow_buffer; |
694 | 663 | } |
695 | 664 | |
696 | - dev_addr = swiotlb_virt_to_bus(map); | |
665 | + dev_addr = swiotlb_virt_to_bus(hwdev, map); | |
697 | 666 | |
698 | 667 | /* |
699 | 668 | * Ensure that the address returned is DMA'ble |
... | ... | @@ -710,6 +679,7 @@ |
710 | 679 | { |
711 | 680 | return swiotlb_map_single_attrs(hwdev, ptr, size, dir, NULL); |
712 | 681 | } |
682 | +EXPORT_SYMBOL(swiotlb_map_single); | |
713 | 683 | |
714 | 684 | /* |
715 | 685 | * Unmap a single streaming mode DMA translation. The dma_addr and size must |
... | ... | @@ -739,6 +709,8 @@ |
739 | 709 | { |
740 | 710 | return swiotlb_unmap_single_attrs(hwdev, dev_addr, size, dir, NULL); |
741 | 711 | } |
712 | +EXPORT_SYMBOL(swiotlb_unmap_single); | |
713 | + | |
742 | 714 | /* |
743 | 715 | * Make physical memory consistent for a single streaming mode DMA translation |
744 | 716 | * after a transfer. |
... | ... | @@ -768,6 +740,7 @@ |
768 | 740 | { |
769 | 741 | swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU); |
770 | 742 | } |
743 | +EXPORT_SYMBOL(swiotlb_sync_single_for_cpu); | |
771 | 744 | |
772 | 745 | void |
773 | 746 | swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, |
... | ... | @@ -775,6 +748,7 @@ |
775 | 748 | { |
776 | 749 | swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE); |
777 | 750 | } |
751 | +EXPORT_SYMBOL(swiotlb_sync_single_for_device); | |
778 | 752 | |
779 | 753 | /* |
780 | 754 | * Same as above, but for a sub-range of the mapping. |
... | ... | @@ -800,6 +774,7 @@ |
800 | 774 | swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir, |
801 | 775 | SYNC_FOR_CPU); |
802 | 776 | } |
777 | +EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu); | |
803 | 778 | |
804 | 779 | void |
805 | 780 | swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr, |
806 | 781 | |
... | ... | @@ -808,9 +783,8 @@ |
808 | 783 | swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir, |
809 | 784 | SYNC_FOR_DEVICE); |
810 | 785 | } |
786 | +EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device); | |
811 | 787 | |
812 | -void swiotlb_unmap_sg_attrs(struct device *, struct scatterlist *, int, int, | |
813 | - struct dma_attrs *); | |
814 | 788 | /* |
815 | 789 | * Map a set of buffers described by scatterlist in streaming mode for DMA. |
816 | 790 | * This is the scatter-gather version of the above swiotlb_map_single |
... | ... | @@ -832,20 +806,18 @@ |
832 | 806 | int dir, struct dma_attrs *attrs) |
833 | 807 | { |
834 | 808 | struct scatterlist *sg; |
835 | - struct swiotlb_phys_addr buffer; | |
836 | - dma_addr_t dev_addr; | |
837 | 809 | int i; |
838 | 810 | |
839 | 811 | BUG_ON(dir == DMA_NONE); |
840 | 812 | |
841 | 813 | for_each_sg(sgl, sg, nelems, i) { |
842 | - dev_addr = swiotlb_sg_to_bus(sg); | |
843 | - if (range_needs_mapping(sg_virt(sg), sg->length) || | |
814 | + void *addr = sg_virt(sg); | |
815 | + dma_addr_t dev_addr = swiotlb_virt_to_bus(hwdev, addr); | |
816 | + | |
817 | + if (range_needs_mapping(addr, sg->length) || | |
844 | 818 | address_needs_mapping(hwdev, dev_addr, sg->length)) { |
845 | - void *map; | |
846 | - buffer.page = sg_page(sg); | |
847 | - buffer.offset = sg->offset; | |
848 | - map = map_single(hwdev, buffer, sg->length, dir); | |
819 | + void *map = map_single(hwdev, sg_phys(sg), | |
820 | + sg->length, dir); | |
849 | 821 | if (!map) { |
850 | 822 | /* Don't panic here, we expect map_sg users |
851 | 823 | to do proper error handling. */ |
... | ... | @@ -855,7 +827,7 @@ |
855 | 827 | sgl[0].dma_length = 0; |
856 | 828 | return 0; |
857 | 829 | } |
858 | - sg->dma_address = swiotlb_virt_to_bus(map); | |
830 | + sg->dma_address = swiotlb_virt_to_bus(hwdev, map); | |
859 | 831 | } else |
860 | 832 | sg->dma_address = dev_addr; |
861 | 833 | sg->dma_length = sg->length; |
... | ... | @@ -870,6 +842,7 @@ |
870 | 842 | { |
871 | 843 | return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL); |
872 | 844 | } |
845 | +EXPORT_SYMBOL(swiotlb_map_sg); | |
873 | 846 | |
874 | 847 | /* |
875 | 848 | * Unmap a set of streaming mode DMA translations. Again, cpu read rules |
876 | 849 | |
... | ... | @@ -885,11 +858,11 @@ |
885 | 858 | BUG_ON(dir == DMA_NONE); |
886 | 859 | |
887 | 860 | for_each_sg(sgl, sg, nelems, i) { |
888 | - if (sg->dma_address != swiotlb_sg_to_bus(sg)) | |
861 | + if (sg->dma_address != swiotlb_virt_to_bus(hwdev, sg_virt(sg))) | |
889 | 862 | unmap_single(hwdev, swiotlb_bus_to_virt(sg->dma_address), |
890 | 863 | sg->dma_length, dir); |
891 | 864 | else if (dir == DMA_FROM_DEVICE) |
892 | - dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length); | |
865 | + dma_mark_clean(sg_virt(sg), sg->dma_length); | |
893 | 866 | } |
894 | 867 | } |
895 | 868 | EXPORT_SYMBOL(swiotlb_unmap_sg_attrs); |
... | ... | @@ -900,6 +873,7 @@ |
900 | 873 | { |
901 | 874 | return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL); |
902 | 875 | } |
876 | +EXPORT_SYMBOL(swiotlb_unmap_sg); | |
903 | 877 | |
904 | 878 | /* |
905 | 879 | * Make physical memory consistent for a set of streaming mode DMA translations |
906 | 880 | |
... | ... | @@ -918,11 +892,11 @@ |
918 | 892 | BUG_ON(dir == DMA_NONE); |
919 | 893 | |
920 | 894 | for_each_sg(sgl, sg, nelems, i) { |
921 | - if (sg->dma_address != swiotlb_sg_to_bus(sg)) | |
895 | + if (sg->dma_address != swiotlb_virt_to_bus(hwdev, sg_virt(sg))) | |
922 | 896 | sync_single(hwdev, swiotlb_bus_to_virt(sg->dma_address), |
923 | 897 | sg->dma_length, dir, target); |
924 | 898 | else if (dir == DMA_FROM_DEVICE) |
925 | - dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length); | |
899 | + dma_mark_clean(sg_virt(sg), sg->dma_length); | |
926 | 900 | } |
927 | 901 | } |
928 | 902 | |
... | ... | @@ -932,6 +906,7 @@ |
932 | 906 | { |
933 | 907 | swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU); |
934 | 908 | } |
909 | +EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu); | |
935 | 910 | |
936 | 911 | void |
937 | 912 | swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, |
938 | 913 | |
939 | 914 | |
... | ... | @@ -939,12 +914,14 @@ |
939 | 914 | { |
940 | 915 | swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE); |
941 | 916 | } |
917 | +EXPORT_SYMBOL(swiotlb_sync_sg_for_device); | |
942 | 918 | |
943 | 919 | int |
944 | 920 | swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr) |
945 | 921 | { |
946 | - return (dma_addr == swiotlb_virt_to_bus(io_tlb_overflow_buffer)); | |
922 | + return (dma_addr == swiotlb_virt_to_bus(hwdev, io_tlb_overflow_buffer)); | |
947 | 923 | } |
924 | +EXPORT_SYMBOL(swiotlb_dma_mapping_error); | |
948 | 925 | |
949 | 926 | /* |
950 | 927 | * Return whether the given device DMA address mask can be supported |
951 | 928 | |
... | ... | @@ -955,21 +932,7 @@ |
955 | 932 | int |
956 | 933 | swiotlb_dma_supported(struct device *hwdev, u64 mask) |
957 | 934 | { |
958 | - return swiotlb_virt_to_bus(io_tlb_end - 1) <= mask; | |
935 | + return swiotlb_virt_to_bus(hwdev, io_tlb_end - 1) <= mask; | |
959 | 936 | } |
960 | - | |
961 | -EXPORT_SYMBOL(swiotlb_map_single); | |
962 | -EXPORT_SYMBOL(swiotlb_unmap_single); | |
963 | -EXPORT_SYMBOL(swiotlb_map_sg); | |
964 | -EXPORT_SYMBOL(swiotlb_unmap_sg); | |
965 | -EXPORT_SYMBOL(swiotlb_sync_single_for_cpu); | |
966 | -EXPORT_SYMBOL(swiotlb_sync_single_for_device); | |
967 | -EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu); | |
968 | -EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device); | |
969 | -EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu); | |
970 | -EXPORT_SYMBOL(swiotlb_sync_sg_for_device); | |
971 | -EXPORT_SYMBOL(swiotlb_dma_mapping_error); | |
972 | -EXPORT_SYMBOL(swiotlb_alloc_coherent); | |
973 | -EXPORT_SYMBOL(swiotlb_free_coherent); | |
974 | 937 | EXPORT_SYMBOL(swiotlb_dma_supported); |
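
The core of the highmem support is swiotlb_bounce() above: a highmem page has no permanent kernel mapping, so the copy walks the buffer one page at a time, mapping each page with kmap_atomic() under local_irq_save() (the KM_BOUNCE_READ slot is per-CPU and may also be used from interrupt context). A stripped-down sketch of the to-device direction of that loop, assuming the era's kmap_atomic(page, type) API:

	/* Stripped-down sketch of swiotlb_bounce()'s highmem path
	 * (to-device direction only); bounce_to_device() is hypothetical. */
	static void bounce_to_device(phys_addr_t phys, char *dma_addr, size_t size)
	{
		unsigned long pfn = PFN_DOWN(phys);
		unsigned int offset = phys & ~PAGE_MASK;

		while (size) {
			size_t sz = min_t(size_t, PAGE_SIZE - offset, size);
			unsigned long flags;
			char *vaddr;

			local_irq_save(flags);	/* KM_BOUNCE_READ is per-CPU */
			vaddr = kmap_atomic(pfn_to_page(pfn), KM_BOUNCE_READ);
			memcpy(dma_addr, vaddr + offset, sz);
			kunmap_atomic(vaddr, KM_BOUNCE_READ);
			local_irq_restore(flags);

			size -= sz;
			dma_addr += sz;
			offset = 0;
			pfn++;
		}
	}
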
mm/backing-dev.c