Commit 933393f58fef9963eac61db8093689544e29a600

Authored by Christoph Lameter
Committed by Tejun Heo
1 parent ecefc36b41

percpu: Remove irqsafe_cpu_xxx variants

We simply require that regular this_cpu use be safe regardless of
preemption and interrupt state.  This is no material change for the x86
and s390 implementations of the this_cpu operations.  However, arches that
do not provide their own implementation of the this_cpu operations will
now get generated code that disables interrupts instead of preemption.
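
For illustration, the net effect on the generic fallback (see the
include/linux/percpu.h hunk below) is roughly the following; x86 and s390
already provide their own interrupt-safe implementations, so only the
generic versions change:

    /* before: atomic only w.r.t. preemption */
    #define _this_cpu_generic_to_op(pcp, val, op)           \
    do {                                                     \
            preempt_disable();                               \
            *__this_cpu_ptr(&(pcp)) op val;                  \
            preempt_enable();                                \
    } while (0)

    /* after: atomic w.r.t. preemption and interrupts */
    #define _this_cpu_generic_to_op(pcp, val, op)           \
    do {                                                     \
            unsigned long flags;                             \
            local_irq_save(flags);                           \
            *__this_cpu_ptr(&(pcp)) op val;                  \
            local_irq_restore(flags);                        \
    } while (0)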

-tj: This is part of ongoing percpu API cleanup.  For detailed
     discussion of the subject, please refer to the following thread.

     http://thread.gmane.org/gmane.linux.kernel/1222078

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
LKML-Reference: <alpine.DEB.2.00.1112221154380.11787@router.home>

Showing 9 changed files with 62 additions and 236 deletions

arch/s390/include/asm/percpu.h
... ... @@ -19,7 +19,7 @@
19 19 #define ARCH_NEEDS_WEAK_PER_CPU
20 20 #endif
21 21  
22   -#define arch_irqsafe_cpu_to_op(pcp, val, op) \
  22 +#define arch_this_cpu_to_op(pcp, val, op) \
23 23 do { \
24 24 typedef typeof(pcp) pcp_op_T__; \
25 25 pcp_op_T__ old__, new__, prev__; \
... ... @@ -41,27 +41,27 @@
41 41 preempt_enable(); \
42 42 } while (0)
43 43  
44   -#define irqsafe_cpu_add_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
45   -#define irqsafe_cpu_add_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
46   -#define irqsafe_cpu_add_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
47   -#define irqsafe_cpu_add_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
  44 +#define this_cpu_add_1(pcp, val) arch_this_cpu_to_op(pcp, val, +)
  45 +#define this_cpu_add_2(pcp, val) arch_this_cpu_to_op(pcp, val, +)
  46 +#define this_cpu_add_4(pcp, val) arch_this_cpu_to_op(pcp, val, +)
  47 +#define this_cpu_add_8(pcp, val) arch_this_cpu_to_op(pcp, val, +)
48 48  
49   -#define irqsafe_cpu_and_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
50   -#define irqsafe_cpu_and_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
51   -#define irqsafe_cpu_and_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
52   -#define irqsafe_cpu_and_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
  49 +#define this_cpu_and_1(pcp, val) arch_this_cpu_to_op(pcp, val, &)
  50 +#define this_cpu_and_2(pcp, val) arch_this_cpu_to_op(pcp, val, &)
  51 +#define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, &)
  52 +#define this_cpu_and_8(pcp, val) arch_this_cpu_to_op(pcp, val, &)
53 53  
54   -#define irqsafe_cpu_or_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
55   -#define irqsafe_cpu_or_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
56   -#define irqsafe_cpu_or_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
57   -#define irqsafe_cpu_or_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
  54 +#define this_cpu_or_1(pcp, val) arch_this_cpu_to_op(pcp, val, |)
  55 +#define this_cpu_or_2(pcp, val) arch_this_cpu_to_op(pcp, val, |)
  56 +#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op(pcp, val, |)
  57 +#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op(pcp, val, |)
58 58  
59   -#define irqsafe_cpu_xor_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
60   -#define irqsafe_cpu_xor_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
61   -#define irqsafe_cpu_xor_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
62   -#define irqsafe_cpu_xor_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
  59 +#define this_cpu_xor_1(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
  60 +#define this_cpu_xor_2(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
  61 +#define this_cpu_xor_4(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
  62 +#define this_cpu_xor_8(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
63 63  
64   -#define arch_irqsafe_cpu_cmpxchg(pcp, oval, nval) \
  64 +#define arch_this_cpu_cmpxchg(pcp, oval, nval) \
65 65 ({ \
66 66 typedef typeof(pcp) pcp_op_T__; \
67 67 pcp_op_T__ ret__; \
... ... @@ -79,10 +79,10 @@
79 79 ret__; \
80 80 })
81 81  
82   -#define irqsafe_cpu_cmpxchg_1(pcp, oval, nval) arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
83   -#define irqsafe_cpu_cmpxchg_2(pcp, oval, nval) arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
84   -#define irqsafe_cpu_cmpxchg_4(pcp, oval, nval) arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
85   -#define irqsafe_cpu_cmpxchg_8(pcp, oval, nval) arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
  82 +#define this_cpu_cmpxchg_1(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
  83 +#define this_cpu_cmpxchg_2(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
  84 +#define this_cpu_cmpxchg_4(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
  85 +#define this_cpu_cmpxchg_8(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
86 86  
87 87 #include <asm-generic/percpu.h>
88 88  
arch/x86/include/asm/percpu.h
... ... @@ -414,22 +414,6 @@
414 414 #define this_cpu_xchg_2(pcp, nval) percpu_xchg_op(pcp, nval)
415 415 #define this_cpu_xchg_4(pcp, nval) percpu_xchg_op(pcp, nval)
416 416  
417   -#define irqsafe_cpu_add_1(pcp, val) percpu_add_op((pcp), val)
418   -#define irqsafe_cpu_add_2(pcp, val) percpu_add_op((pcp), val)
419   -#define irqsafe_cpu_add_4(pcp, val) percpu_add_op((pcp), val)
420   -#define irqsafe_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val)
421   -#define irqsafe_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val)
422   -#define irqsafe_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val)
423   -#define irqsafe_cpu_or_1(pcp, val) percpu_to_op("or", (pcp), val)
424   -#define irqsafe_cpu_or_2(pcp, val) percpu_to_op("or", (pcp), val)
425   -#define irqsafe_cpu_or_4(pcp, val) percpu_to_op("or", (pcp), val)
426   -#define irqsafe_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val)
427   -#define irqsafe_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val)
428   -#define irqsafe_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val)
429   -#define irqsafe_cpu_xchg_1(pcp, nval) percpu_xchg_op(pcp, nval)
430   -#define irqsafe_cpu_xchg_2(pcp, nval) percpu_xchg_op(pcp, nval)
431   -#define irqsafe_cpu_xchg_4(pcp, nval) percpu_xchg_op(pcp, nval)
432   -
433 417 #ifndef CONFIG_M386
434 418 #define __this_cpu_add_return_1(pcp, val) percpu_add_return_op(pcp, val)
435 419 #define __this_cpu_add_return_2(pcp, val) percpu_add_return_op(pcp, val)
... ... @@ -445,9 +429,6 @@
445 429 #define this_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
446 430 #define this_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
447 431  
448   -#define irqsafe_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
449   -#define irqsafe_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
450   -#define irqsafe_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
451 432 #endif /* !CONFIG_M386 */
452 433  
453 434 #ifdef CONFIG_X86_CMPXCHG64
... ... @@ -467,7 +448,6 @@
467 448  
468 449 #define __this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
469 450 #define this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
470   -#define irqsafe_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
471 451 #endif /* CONFIG_X86_CMPXCHG64 */
472 452  
473 453 /*
... ... @@ -495,13 +475,6 @@
495 475 #define this_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval)
496 476 #define this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
497 477  
498   -#define irqsafe_cpu_add_8(pcp, val) percpu_add_op((pcp), val)
499   -#define irqsafe_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
500   -#define irqsafe_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
501   -#define irqsafe_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
502   -#define irqsafe_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval)
503   -#define irqsafe_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
504   -
505 478 /*
506 479 * Pretty complex macro to generate cmpxchg16 instruction. The instruction
507 480 * is not supported on early AMD64 processors so we must be able to emulate
... ... @@ -532,7 +505,6 @@
532 505  
533 506 #define __this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
534 507 #define this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
535   -#define irqsafe_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
536 508  
537 509 #endif
538 510  
include/linux/netdevice.h
... ... @@ -2115,7 +2115,7 @@
2115 2115 */
2116 2116 static inline void dev_put(struct net_device *dev)
2117 2117 {
2118   - irqsafe_cpu_dec(*dev->pcpu_refcnt);
  2118 + this_cpu_dec(*dev->pcpu_refcnt);
2119 2119 }
2120 2120  
2121 2121 /**
... ... @@ -2126,7 +2126,7 @@
2126 2126 */
2127 2127 static inline void dev_hold(struct net_device *dev)
2128 2128 {
2129   - irqsafe_cpu_inc(*dev->pcpu_refcnt);
  2129 + this_cpu_inc(*dev->pcpu_refcnt);
2130 2130 }
2131 2131  
2132 2132 /* Carrier loss detection, dial on demand. The functions netif_carrier_on
include/linux/netfilter/x_tables.h
... ... @@ -471,7 +471,7 @@
471 471 *
472 472 * Begin packet processing : all readers must wait the end
473 473 * 1) Must be called with preemption disabled
474   - * 2) softirqs must be disabled too (or we should use irqsafe_cpu_add())
  474 + * 2) softirqs must be disabled too (or we should use this_cpu_add())
475 475 * Returns :
476 476 * 1 if no recursion on this cpu
477 477 * 0 if recursion detected
... ... @@ -503,7 +503,7 @@
503 503 *
504 504 * End packet processing : all readers can proceed
505 505 * 1) Must be called with preemption disabled
506   - * 2) softirqs must be disabled too (or we should use irqsafe_cpu_add())
  506 + * 2) softirqs must be disabled too (or we should use this_cpu_add())
507 507 */
508 508 static inline void xt_write_recseq_end(unsigned int addend)
509 509 {
include/linux/percpu.h
... ... @@ -172,10 +172,10 @@
172 172 * equal char, int or long. percpu_read() evaluates to a lvalue and
173 173 * all others to void.
174 174 *
175   - * These operations are guaranteed to be atomic w.r.t. preemption.
176   - * The generic versions use plain get/put_cpu_var(). Archs are
  175 + * These operations are guaranteed to be atomic.
  176 + * The generic versions disable interrupts. Archs are
177 177 * encouraged to implement single-instruction alternatives which don't
178   - * require preemption protection.
  178 + * require protection.
179 179 */
180 180 #ifndef percpu_read
181 181 # define percpu_read(var) \
... ... @@ -347,9 +347,10 @@
347 347  
348 348 #define _this_cpu_generic_to_op(pcp, val, op) \
349 349 do { \
350   - preempt_disable(); \
  350 + unsigned long flags; \
  351 + local_irq_save(flags); \
351 352 *__this_cpu_ptr(&(pcp)) op val; \
352   - preempt_enable(); \
  353 + local_irq_restore(flags); \
353 354 } while (0)
354 355  
355 356 #ifndef this_cpu_write
... ... @@ -447,10 +448,11 @@
447 448 #define _this_cpu_generic_add_return(pcp, val) \
448 449 ({ \
449 450 typeof(pcp) ret__; \
450   - preempt_disable(); \
  451 + unsigned long flags; \
  452 + local_irq_save(flags); \
451 453 __this_cpu_add(pcp, val); \
452 454 ret__ = __this_cpu_read(pcp); \
453   - preempt_enable(); \
  455 + local_irq_restore(flags); \
454 456 ret__; \
455 457 })
456 458  
... ... @@ -476,10 +478,11 @@
476 478  
477 479 #define _this_cpu_generic_xchg(pcp, nval) \
478 480 ({ typeof(pcp) ret__; \
479   - preempt_disable(); \
  481 + unsigned long flags; \
  482 + local_irq_save(flags); \
480 483 ret__ = __this_cpu_read(pcp); \
481 484 __this_cpu_write(pcp, nval); \
482   - preempt_enable(); \
  485 + local_irq_restore(flags); \
483 486 ret__; \
484 487 })
485 488  
... ... @@ -501,12 +504,14 @@
501 504 #endif
502 505  
503 506 #define _this_cpu_generic_cmpxchg(pcp, oval, nval) \
504   -({ typeof(pcp) ret__; \
505   - preempt_disable(); \
  507 +({ \
  508 + typeof(pcp) ret__; \
  509 + unsigned long flags; \
  510 + local_irq_save(flags); \
506 511 ret__ = __this_cpu_read(pcp); \
507 512 if (ret__ == (oval)) \
508 513 __this_cpu_write(pcp, nval); \
509   - preempt_enable(); \
  514 + local_irq_restore(flags); \
510 515 ret__; \
511 516 })
512 517  
... ... @@ -538,10 +543,11 @@
538 543 #define _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
539 544 ({ \
540 545 int ret__; \
541   - preempt_disable(); \
  546 + unsigned long flags; \
  547 + local_irq_save(flags); \
542 548 ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2, \
543 549 oval1, oval2, nval1, nval2); \
544   - preempt_enable(); \
  550 + local_irq_restore(flags); \
545 551 ret__; \
546 552 })
547 553  
... ... @@ -567,9 +573,9 @@
567 573 #endif
568 574  
569 575 /*
570   - * Generic percpu operations that do not require preemption handling.
  576 + * Generic percpu operations for context that are safe from preemption/interrupts.
571 577 * Either we do not care about races or the caller has the
572   - * responsibility of handling preemptions issues. Arch code can still
  578 + * responsibility of handling preemption/interrupt issues. Arch code can still
573 579 * override these instructions since the arch per cpu code may be more
574 580 * efficient and may actually get race freeness for free (that is the
575 581 * case for x86 for example).
... ... @@ -800,158 +806,6 @@
800 806 # endif
801 807 # define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
802 808 __pcpu_double_call_return_bool(__this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
803   -#endif
804   -
805   -/*
806   - * IRQ safe versions of the per cpu RMW operations. Note that these operations
807   - * are *not* safe against modification of the same variable from another
808   - * processors (which one gets when using regular atomic operations)
809   - * They are guaranteed to be atomic vs. local interrupts and
810   - * preemption only.
811   - */
812   -#define irqsafe_cpu_generic_to_op(pcp, val, op) \
813   -do { \
814   - unsigned long flags; \
815   - local_irq_save(flags); \
816   - *__this_cpu_ptr(&(pcp)) op val; \
817   - local_irq_restore(flags); \
818   -} while (0)
819   -
820   -#ifndef irqsafe_cpu_add
821   -# ifndef irqsafe_cpu_add_1
822   -# define irqsafe_cpu_add_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
823   -# endif
824   -# ifndef irqsafe_cpu_add_2
825   -# define irqsafe_cpu_add_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
826   -# endif
827   -# ifndef irqsafe_cpu_add_4
828   -# define irqsafe_cpu_add_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
829   -# endif
830   -# ifndef irqsafe_cpu_add_8
831   -# define irqsafe_cpu_add_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
832   -# endif
833   -# define irqsafe_cpu_add(pcp, val) __pcpu_size_call(irqsafe_cpu_add_, (pcp), (val))
834   -#endif
835   -
836   -#ifndef irqsafe_cpu_sub
837   -# define irqsafe_cpu_sub(pcp, val) irqsafe_cpu_add((pcp), -(val))
838   -#endif
839   -
840   -#ifndef irqsafe_cpu_inc
841   -# define irqsafe_cpu_inc(pcp) irqsafe_cpu_add((pcp), 1)
842   -#endif
843   -
844   -#ifndef irqsafe_cpu_dec
845   -# define irqsafe_cpu_dec(pcp) irqsafe_cpu_sub((pcp), 1)
846   -#endif
847   -
848   -#ifndef irqsafe_cpu_and
849   -# ifndef irqsafe_cpu_and_1
850   -# define irqsafe_cpu_and_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
851   -# endif
852   -# ifndef irqsafe_cpu_and_2
853   -# define irqsafe_cpu_and_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
854   -# endif
855   -# ifndef irqsafe_cpu_and_4
856   -# define irqsafe_cpu_and_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
857   -# endif
858   -# ifndef irqsafe_cpu_and_8
859   -# define irqsafe_cpu_and_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
860   -# endif
861   -# define irqsafe_cpu_and(pcp, val) __pcpu_size_call(irqsafe_cpu_and_, (val))
862   -#endif
863   -
864   -#ifndef irqsafe_cpu_or
865   -# ifndef irqsafe_cpu_or_1
866   -# define irqsafe_cpu_or_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
867   -# endif
868   -# ifndef irqsafe_cpu_or_2
869   -# define irqsafe_cpu_or_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
870   -# endif
871   -# ifndef irqsafe_cpu_or_4
872   -# define irqsafe_cpu_or_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
873   -# endif
874   -# ifndef irqsafe_cpu_or_8
875   -# define irqsafe_cpu_or_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
876   -# endif
877   -# define irqsafe_cpu_or(pcp, val) __pcpu_size_call(irqsafe_cpu_or_, (val))
878   -#endif
879   -
880   -#ifndef irqsafe_cpu_xor
881   -# ifndef irqsafe_cpu_xor_1
882   -# define irqsafe_cpu_xor_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
883   -# endif
884   -# ifndef irqsafe_cpu_xor_2
885   -# define irqsafe_cpu_xor_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
886   -# endif
887   -# ifndef irqsafe_cpu_xor_4
888   -# define irqsafe_cpu_xor_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
889   -# endif
890   -# ifndef irqsafe_cpu_xor_8
891   -# define irqsafe_cpu_xor_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
892   -# endif
893   -# define irqsafe_cpu_xor(pcp, val) __pcpu_size_call(irqsafe_cpu_xor_, (val))
894   -#endif
895   -
896   -#define irqsafe_cpu_generic_cmpxchg(pcp, oval, nval) \
897   -({ \
898   - typeof(pcp) ret__; \
899   - unsigned long flags; \
900   - local_irq_save(flags); \
901   - ret__ = __this_cpu_read(pcp); \
902   - if (ret__ == (oval)) \
903   - __this_cpu_write(pcp, nval); \
904   - local_irq_restore(flags); \
905   - ret__; \
906   -})
907   -
908   -#ifndef irqsafe_cpu_cmpxchg
909   -# ifndef irqsafe_cpu_cmpxchg_1
910   -# define irqsafe_cpu_cmpxchg_1(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
911   -# endif
912   -# ifndef irqsafe_cpu_cmpxchg_2
913   -# define irqsafe_cpu_cmpxchg_2(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
914   -# endif
915   -# ifndef irqsafe_cpu_cmpxchg_4
916   -# define irqsafe_cpu_cmpxchg_4(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
917   -# endif
918   -# ifndef irqsafe_cpu_cmpxchg_8
919   -# define irqsafe_cpu_cmpxchg_8(pcp, oval, nval) irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
920   -# endif
921   -# define irqsafe_cpu_cmpxchg(pcp, oval, nval) \
922   - __pcpu_size_call_return2(irqsafe_cpu_cmpxchg_, (pcp), oval, nval)
923   -#endif
924   -
925   -#define irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
926   -({ \
927   - int ret__; \
928   - unsigned long flags; \
929   - local_irq_save(flags); \
930   - ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2, \
931   - oval1, oval2, nval1, nval2); \
932   - local_irq_restore(flags); \
933   - ret__; \
934   -})
935   -
936   -#ifndef irqsafe_cpu_cmpxchg_double
937   -# ifndef irqsafe_cpu_cmpxchg_double_1
938   -# define irqsafe_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
939   - irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
940   -# endif
941   -# ifndef irqsafe_cpu_cmpxchg_double_2
942   -# define irqsafe_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
943   - irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
944   -# endif
945   -# ifndef irqsafe_cpu_cmpxchg_double_4
946   -# define irqsafe_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
947   - irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
948   -# endif
949   -# ifndef irqsafe_cpu_cmpxchg_double_8
950   -# define irqsafe_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
951   - irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
952   -# endif
953   -# define irqsafe_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
954   - __pcpu_double_call_return_bool(irqsafe_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
955 809 #endif
956 810  
957 811 #endif /* __LINUX_PERCPU_H */
include/net/snmp.h
... ... @@ -129,33 +129,33 @@
129 129 __this_cpu_inc(mib[0]->mibs[field])
130 130  
131 131 #define SNMP_INC_STATS_USER(mib, field) \
132   - irqsafe_cpu_inc(mib[0]->mibs[field])
  132 + this_cpu_inc(mib[0]->mibs[field])
133 133  
134 134 #define SNMP_INC_STATS_ATOMIC_LONG(mib, field) \
135 135 atomic_long_inc(&mib->mibs[field])
136 136  
137 137 #define SNMP_INC_STATS(mib, field) \
138   - irqsafe_cpu_inc(mib[0]->mibs[field])
  138 + this_cpu_inc(mib[0]->mibs[field])
139 139  
140 140 #define SNMP_DEC_STATS(mib, field) \
141   - irqsafe_cpu_dec(mib[0]->mibs[field])
  141 + this_cpu_dec(mib[0]->mibs[field])
142 142  
143 143 #define SNMP_ADD_STATS_BH(mib, field, addend) \
144 144 __this_cpu_add(mib[0]->mibs[field], addend)
145 145  
146 146 #define SNMP_ADD_STATS_USER(mib, field, addend) \
147   - irqsafe_cpu_add(mib[0]->mibs[field], addend)
  147 + this_cpu_add(mib[0]->mibs[field], addend)
148 148  
149 149 #define SNMP_ADD_STATS(mib, field, addend) \
150   - irqsafe_cpu_add(mib[0]->mibs[field], addend)
  150 + this_cpu_add(mib[0]->mibs[field], addend)
151 151 /*
152 152 * Use "__typeof__(*mib[0]) *ptr" instead of "__typeof__(mib[0]) ptr"
153 153 * to make @ptr a non-percpu pointer.
154 154 */
155 155 #define SNMP_UPD_PO_STATS(mib, basefield, addend) \
156 156 do { \
157   - irqsafe_cpu_inc(mib[0]->mibs[basefield##PKTS]); \
158   - irqsafe_cpu_add(mib[0]->mibs[basefield##OCTETS], addend); \
  157 + this_cpu_inc(mib[0]->mibs[basefield##PKTS]); \
  158 + this_cpu_add(mib[0]->mibs[basefield##OCTETS], addend); \
159 159 } while (0)
160 160 #define SNMP_UPD_PO_STATS_BH(mib, basefield, addend) \
161 161 do { \
mm/slub.c
... ... @@ -1978,7 +1978,7 @@
1978 1978 page->pobjects = pobjects;
1979 1979 page->next = oldpage;
1980 1980  
1981   - } while (irqsafe_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
  1981 + } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
1982 1982 stat(s, CPU_PARTIAL_FREE);
1983 1983 return pobjects;
1984 1984 }
... ... @@ -2304,7 +2304,7 @@
2304 2304 * Since this is without lock semantics the protection is only against
2305 2305 * code executing on this cpu *not* from access by other cpus.
2306 2306 */
2307   - if (unlikely(!irqsafe_cpu_cmpxchg_double(
  2307 + if (unlikely(!this_cpu_cmpxchg_double(
2308 2308 s->cpu_slab->freelist, s->cpu_slab->tid,
2309 2309 object, tid,
2310 2310 get_freepointer_safe(s, object), next_tid(tid)))) {
... ... @@ -2534,7 +2534,7 @@
2534 2534 if (likely(page == c->page)) {
2535 2535 set_freepointer(s, object, c->freelist);
2536 2536  
2537   - if (unlikely(!irqsafe_cpu_cmpxchg_double(
  2537 + if (unlikely(!this_cpu_cmpxchg_double(
2538 2538 s->cpu_slab->freelist, s->cpu_slab->tid,
2539 2539 c->freelist, tid,
2540 2540 object, next_tid(tid)))) {
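
A sketch of the retry pattern the slub conversion above relies on:
this_cpu_cmpxchg() returns the previous value, so the caller loops until
the value it read is still the one being replaced, and the update is now
guaranteed to be atomic against interrupts as well as preemption.  The
per-cpu variable and helper below are illustrative only:

    #include <linux/percpu.h>

    static DEFINE_PER_CPU(unsigned long, hits);     /* illustrative counter */

    static void add_hits(unsigned long n)
    {
            unsigned long old, new;

            do {
                    old = this_cpu_read(hits);      /* snapshot this cpu's value */
                    new = old + n;
                    /* retry if another update (e.g. from an interrupt) slipped in */
            } while (this_cpu_cmpxchg(hits, old, new) != old);
    }
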
net/caif/caif_dev.c
... ... @@ -69,12 +69,12 @@
69 69  
70 70 static void caifd_put(struct caif_device_entry *e)
71 71 {
72   - irqsafe_cpu_dec(*e->pcpu_refcnt);
  72 + this_cpu_dec(*e->pcpu_refcnt);
73 73 }
74 74  
75 75 static void caifd_hold(struct caif_device_entry *e)
76 76 {
77   - irqsafe_cpu_inc(*e->pcpu_refcnt);
  77 + this_cpu_inc(*e->pcpu_refcnt);
78 78 }
79 79  
80 80 static int caifd_refcnt_read(struct caif_device_entry *e)
net/caif/cffrml.c
... ... @@ -177,14 +177,14 @@
177 177 {
178 178 struct cffrml *this = container_obj(layr);
179 179 if (layr != NULL && this->pcpu_refcnt != NULL)
180   - irqsafe_cpu_dec(*this->pcpu_refcnt);
  180 + this_cpu_dec(*this->pcpu_refcnt);
181 181 }
182 182  
183 183 void cffrml_hold(struct cflayer *layr)
184 184 {
185 185 struct cffrml *this = container_obj(layr);
186 186 if (layr != NULL && this->pcpu_refcnt != NULL)
187   - irqsafe_cpu_inc(*this->pcpu_refcnt);
  187 + this_cpu_inc(*this->pcpu_refcnt);
188 188 }
189 189  
190 190 int cffrml_refcnt_read(struct cflayer *layr)