Commit 715b49ef2de6fcead0776d9349071670282faf65
Committed by Linus Torvalds
1 parent 3213e913b0
Exists in master and in 4 other branches
[PATCH] EDAC: atomic scrub operations
EDAC requires a way to scrub memory if an ECC error is found and the chipset does not do the work automatically. That means rewriting memory locations atomically with respect to all CPUs _and_ bus masters. That in turn means we can't use atomic_add(foo, 0), as it gets optimised for non-SMP and so gives no guarantee against other bus masters.

This adds a function to include/asm-foo/atomic.h for the currently supported platforms which implements a scrub of a mapped block. It also adjusts the include order in a few other files where atomic.h was included before types.h, since that now causes an error because atomic_scrub uses u32.

Signed-off-by: Alan Cox <alan@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
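The patch only adds the per-architecture primitive; the EDAC core is expected to map the faulty page and run atomic_scrub() over it. The sketch below is a hypothetical caller, assuming a kmap_atomic()-based mapping of the affected page; the function name example_scrub_block and its parameters are illustrative and not part of this commit.

#include <linux/mm.h>
#include <linux/highmem.h>
#include <asm/atomic.h>

/* Hypothetical sketch: scrub "size" bytes of a faulty page once an EDAC
 * driver has resolved the error to a page frame number and offset.
 * Not part of this commit. */
static void example_scrub_block(unsigned long pfn, unsigned long offset, u32 size)
{
	struct page *pg = pfn_to_page(pfn);
	void *virt_addr;

	/* Map the page so the CPU can rewrite it in place. */
	virt_addr = kmap_atomic(pg, KM_BOUNCE_READ);

	/* Each locked read-modify-write pushes corrected data (and fresh
	 * ECC check bits) back out to memory.  Note the i386 variant of
	 * atomic_scrub() added here takes unsigned long *, the x86_64 one
	 * takes u32 *. */
	atomic_scrub((unsigned long *)(virt_addr + offset), size);

	kunmap_atomic(virt_addr, KM_BOUNCE_READ);
}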
Showing 7 changed files with 29 additions and 3 deletions
drivers/md/kcopyd.c
fs/nfsctl.c
include/asm-i386/atomic.h
@@ -255,6 +255,18 @@
 #define smp_mb__before_atomic_inc()	barrier()
 #define smp_mb__after_atomic_inc()	barrier()
 
+/* ECC atomic, DMA, SMP and interrupt safe scrub function */
+
+static __inline__ void atomic_scrub(unsigned long *virt_addr, u32 size)
+{
+	u32 i;
+	for (i = 0; i < size / 4; i++, virt_addr++)
+		/* Very carefully read and write to memory atomically
+		 * so we are interrupt, DMA and SMP safe.
+		 */
+		__asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr));
+}
+
 #include <asm-generic/atomic.h>
 #endif
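For context on the commit message's point about atomic_add(): on uniprocessor i386 builds of this era the lock prefix is compiled out of the atomic operations, so a dummy atomic_add() of zero is not a bus-locked read-modify-write and gives no guarantee against a bus master racing with the scrub. Roughly paraphrased from the asm-i386/atomic.h of the period (an approximation for illustration, not part of this diff):

#ifdef CONFIG_SMP
#define LOCK "lock ; "
#else
/* On UP kernels the prefix vanishes: atomic only w.r.t. this CPU,
 * not w.r.t. DMA or other bus masters. */
#define LOCK ""
#endif

static __inline__ void atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__(
		LOCK "addl %1,%0"
		: "=m" (v->counter)
		: "ir" (i), "m" (v->counter));
}

The new atomic_scrub() hard-codes the lock prefix instead, so the rewrite stays bus-locked even on uniprocessor kernels.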
include/asm-x86_64/atomic.h
@@ -426,6 +426,18 @@
 #define smp_mb__before_atomic_inc()	barrier()
 #define smp_mb__after_atomic_inc()	barrier()
 
+/* ECC atomic, DMA, SMP and interrupt safe scrub function */
+
+static __inline__ void atomic_scrub(u32 *virt_addr, u32 size)
+{
+	u32 i;
+	for (i = 0; i < size / 4; i++, virt_addr++)
+		/* Very carefully read and write to memory atomically
+		 * so we are interrupt, DMA and SMP safe.
+		 */
+		__asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr));
+}
+
 #include <asm-generic/atomic.h>
 #endif
kernel/audit.c
kernel/auditsc.c
net/ipv4/raw.c
@@ -40,12 +40,12 @@
  */
 
 #include <linux/config.h>
+#include <linux/types.h>
 #include <asm/atomic.h>
 #include <asm/byteorder.h>
 #include <asm/current.h>
 #include <asm/uaccess.h>
 #include <asm/ioctls.h>
-#include <linux/types.h>
 #include <linux/stddef.h>
 #include <linux/slab.h>
 #include <linux/errno.h>