diff -purN a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h --- a/arch/sparc/include/asm/atomic_64.h 2012-02-14 08:38:07.611358372 -0800 +++ b/arch/sparc/include/asm/atomic_64.h 2012-02-14 10:50:15.655243374 -0800 @@ -14,18 +14,40 @@ #define ATOMIC64_INIT(i) { (i) } #define atomic_read(v) ((v)->counter) +static inline int atomic_read_unchecked(const atomic_unchecked_t *v) +{ + return v->counter; +} #define atomic64_read(v) ((v)->counter) +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v) +{ + return v->counter; +} #define atomic_set(v, i) (((v)->counter) = i) +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i) +{ + v->counter = i; +} #define atomic64_set(v, i) (((v)->counter) = i) +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i) +{ + v->counter = i; +} extern void atomic_add(int, atomic_t *); +extern void atomic_add_unchecked(int, atomic_unchecked_t *); extern void atomic64_add(long, atomic64_t *); +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *); extern void atomic_sub(int, atomic_t *); +extern void atomic_sub_unchecked(int, atomic_unchecked_t *); extern void atomic64_sub(long, atomic64_t *); +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *); extern int atomic_add_ret(int, atomic_t *); +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *); extern long atomic64_add_ret(long, atomic64_t *); +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *); extern int atomic_sub_ret(int, atomic_t *); extern long atomic64_sub_ret(long, atomic64_t *); @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi #define atomic64_dec_return(v) atomic64_sub_ret(1, v) #define atomic_inc_return(v) atomic_add_ret(1, v) +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v) +{ + return atomic_add_ret_unchecked(1, v); +} #define atomic64_inc_return(v) atomic64_add_ret(1, v) +static inline long 
atomic64_inc_return_unchecked(atomic64_unchecked_t *v) +{ + return atomic64_add_ret_unchecked(1, v); +} #define atomic_sub_return(i, v) atomic_sub_ret(i, v) #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v) #define atomic_add_return(i, v) atomic_add_ret(i, v) +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v) +{ + return atomic_add_ret_unchecked(i, v); +} #define atomic64_add_return(i, v) atomic64_add_ret(i, v) +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v) +{ + return atomic64_add_ret_unchecked(i, v); +} /* * atomic_inc_and_test - increment and test @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomi * other cases. */ #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v) +{ + return atomic_inc_return_unchecked(v) == 0; +} #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0) @@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomi #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0) #define atomic_inc(v) atomic_add(1, v) +static inline void atomic_inc_unchecked(atomic_unchecked_t *v) +{ + atomic_add_unchecked(1, v); +} #define atomic64_inc(v) atomic64_add(1, v) +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v) +{ + atomic64_add_unchecked(1, v); +} #define atomic_dec(v) atomic_sub(1, v) +static inline void atomic_dec_unchecked(atomic_unchecked_t *v) +{ + atomic_sub_unchecked(1, v); +} #define atomic64_dec(v) atomic64_sub(1, v) +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v) +{ + atomic64_sub_unchecked(1, v); +} #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0) #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0) #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new) +{ + 
return cmpxchg(&v->counter, old, new); +} #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new) +{ + return xchg(&v->counter, new); +} static inline int atomic_add_unless(atomic_t *v, int a, int u) { - int c, old; + int c, old, new; c = atomic_read(v); for (;;) { - if (unlikely(c == (u))) + if (unlikely(c == u)) break; - old = atomic_cmpxchg((v), c, c + (a)); + + asm volatile("addcc %2, %0, %0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "tvs %%icc, 6\n" +#endif + + : "=r" (new) + : "0" (c), "ir" (a) + : "cc"); + + old = atomic_cmpxchg(v, c, new); if (likely(old == c)) break; c = old; } - return c != (u); + return c != u; } #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) @@ -90,20 +167,35 @@ static inline int atomic_add_unless(atom #define atomic64_cmpxchg(v, o, n) \ ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n))) #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new) +{ + return xchg(&v->counter, new); +} static inline long atomic64_add_unless(atomic64_t *v, long a, long u) { - long c, old; + long c, old, new; c = atomic64_read(v); for (;;) { - if (unlikely(c == (u))) + if (unlikely(c == u)) break; - old = atomic64_cmpxchg((v), c, c + (a)); + + asm volatile("addcc %2, %0, %0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "tvs %%xcc, 6\n" +#endif + + : "=r" (new) + : "0" (c), "ir" (a) + : "cc"); + + old = atomic64_cmpxchg(v, c, new); if (likely(old == c)) break; c = old; } - return c != (u); + return c != u; } #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) diff -purN a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h --- a/arch/sparc/include/asm/spinlock_64.h 2012-02-14 08:38:07.587358372 -0800 +++ b/arch/sparc/include/asm/spinlock_64.h 2012-02-14 10:50:15.659243374 -0800 @@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags /* Multi-reader locks, these 
are much saner than the 32-bit Sparc ones... */ -static void inline arch_read_lock(raw_rwlock_t *lock) +static inline void arch_read_lock(raw_rwlock_t *lock) { unsigned long tmp1, tmp2; __asm__ __volatile__ ( "1: ldsw [%2], %0\n" " brlz,pn %0, 2f\n" -"4: add %0, 1, %1\n" +"4: addcc %0, 1, %1\n" + +#ifdef CONFIG_PAX_REFCOUNT +" tvs %%icc, 6\n" +#endif + " cas [%2], %0, %1\n" " cmp %0, %1\n" " bne,pn %%icc, 1b\n" @@ -112,10 +117,10 @@ static void inline arch_read_lock(raw_rw " .previous" : "=&r" (tmp1), "=&r" (tmp2) : "r" (lock) - : "memory"); + : "memory", "cc"); } -static int inline arch_read_trylock(raw_rwlock_t *lock) +static inline int arch_read_trylock(raw_rwlock_t *lock) { int tmp1, tmp2; @@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_ "1: ldsw [%2], %0\n" " brlz,a,pn %0, 2f\n" " mov 0, %0\n" -" add %0, 1, %1\n" +" addcc %0, 1, %1\n" + +#ifdef CONFIG_PAX_REFCOUNT +" tvs %%icc, 6\n" +#endif + " cas [%2], %0, %1\n" " cmp %0, %1\n" " bne,pn %%icc, 1b\n" @@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_ return tmp1; } -static void inline arch_read_unlock(raw_rwlock_t *lock) +static inline void arch_read_unlock(raw_rwlock_t *lock) { unsigned long tmp1, tmp2; __asm__ __volatile__( "1: lduw [%2], %0\n" -" sub %0, 1, %1\n" +" subcc %0, 1, %1\n" + +#ifdef CONFIG_PAX_REFCOUNT +" tvs %%icc, 6\n" +#endif + " cas [%2], %0, %1\n" " cmp %0, %1\n" " bne,pn %%xcc, 1b\n" @@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_ : "memory"); } -static void inline arch_write_lock(raw_rwlock_t *lock) +static inline void arch_write_lock(raw_rwlock_t *lock) { unsigned long mask, tmp1, tmp2; @@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_r : "memory"); } -static void inline arch_write_unlock(raw_rwlock_t *lock) +static inline void arch_write_unlock(raw_rwlock_t *lock) { __asm__ __volatile__( " stw %%g0, [%0]" @@ -186,7 +201,7 @@ static void inline arch_write_unlock(raw : "memory"); } -static int inline arch_write_trylock(raw_rwlock_t *lock) 
+static inline int arch_write_trylock(raw_rwlock_t *lock) { unsigned long mask, tmp1, tmp2, result; diff -purN a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c --- a/arch/sparc/kernel/traps_64.c 2012-02-14 08:38:07.655358371 -0800 +++ b/arch/sparc/kernel/traps_64.c 2012-02-14 10:50:15.659243374 -0800 @@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long lvl -= 0x100; if (regs->tstate & TSTATE_PRIV) { + +#ifdef CONFIG_PAX_REFCOUNT + if (lvl == 6) + pax_report_refcount_overflow(regs); +#endif + sprintf(buffer, "Kernel bad sw trap %lx", lvl); die_if_kernel(buffer, regs); } @@ -116,6 +122,11 @@ void bad_trap_tl1(struct pt_regs *regs, 0, lvl, SIGTRAP) == NOTIFY_STOP) return; +#ifdef CONFIG_PAX_REFCOUNT + if (lvl == 6) + pax_report_refcount_overflow(regs); +#endif + dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); sprintf (buffer, "Bad trap %lx at tl>0", lvl); diff -purN a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S --- a/arch/sparc/lib/atomic_64.S 2012-02-14 08:38:07.619358372 -0800 +++ b/arch/sparc/lib/atomic_64.S 2012-02-14 10:50:15.659243374 -0800 @@ -18,7 +18,12 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */ BACKOFF_SETUP(%o2) 1: lduw [%o1], %g1 - add %g1, %o0, %g7 + addcc %g1, %o0, %g7 + +#ifdef CONFIG_PAX_REFCOUNT + tvs %icc, 6 +#endif + cas [%o1], %g1, %g7 cmp %g1, %g7 bne,pn %icc, 2f @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at 2: BACKOFF_SPIN(%o2, %o3, 1b) .size atomic_add, .-atomic_add + .globl atomic_add_unchecked + .type atomic_add_unchecked,#function +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) +1: lduw [%o1], %g1 + add %g1, %o0, %g7 + cas [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %icc, 2f + nop + retl + nop +2: BACKOFF_SPIN(%o2, %o3, 1b) + .size atomic_add_unchecked, .-atomic_add_unchecked + .globl atomic_sub .type atomic_sub,#function atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */ BACKOFF_SETUP(%o2) 1: lduw [%o1], %g1 - sub %g1, %o0, %g7 + subcc %g1, %o0, 
%g7 + +#ifdef CONFIG_PAX_REFCOUNT + tvs %icc, 6 +#endif + cas [%o1], %g1, %g7 cmp %g1, %g7 bne,pn %icc, 2f @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at 2: BACKOFF_SPIN(%o2, %o3, 1b) .size atomic_sub, .-atomic_sub + .globl atomic_sub_unchecked + .type atomic_sub_unchecked,#function +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) +1: lduw [%o1], %g1 + sub %g1, %o0, %g7 + cas [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %icc, 2f + nop + retl + nop +2: BACKOFF_SPIN(%o2, %o3, 1b) + .size atomic_sub_unchecked, .-atomic_sub_unchecked + .globl atomic_add_ret .type atomic_add_ret,#function atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */ BACKOFF_SETUP(%o2) 1: lduw [%o1], %g1 - add %g1, %o0, %g7 + addcc %g1, %o0, %g7 + +#ifdef CONFIG_PAX_REFCOUNT + tvs %icc, 6 +#endif + cas [%o1], %g1, %g7 cmp %g1, %g7 bne,pn %icc, 2f @@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1 2: BACKOFF_SPIN(%o2, %o3, 1b) .size atomic_add_ret, .-atomic_add_ret + .globl atomic_add_ret_unchecked + .type atomic_add_ret_unchecked,#function +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) +1: lduw [%o1], %g1 + addcc %g1, %o0, %g7 + cas [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %icc, 2f + add %g7, %o0, %g7 + sra %g7, 0, %o0 + retl + nop +2: BACKOFF_SPIN(%o2, %o3, 1b) + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked + .globl atomic_sub_ret .type atomic_sub_ret,#function atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */ BACKOFF_SETUP(%o2) 1: lduw [%o1], %g1 - sub %g1, %o0, %g7 + subcc %g1, %o0, %g7 + +#ifdef CONFIG_PAX_REFCOUNT + tvs %icc, 6 +#endif + cas [%o1], %g1, %g7 cmp %g1, %g7 bne,pn %icc, 2f @@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */ BACKOFF_SETUP(%o2) 1: ldx [%o1], %g1 - add %g1, %o0, %g7 + addcc %g1, %o0, %g7 + +#ifdef CONFIG_PAX_REFCOUNT + tvs %xcc, 6 +#endif + casx [%o1], %g1, %g7 cmp %g1, %g7 bne,pn %xcc, 2f 
@@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 = 2: BACKOFF_SPIN(%o2, %o3, 1b) .size atomic64_add, .-atomic64_add + .globl atomic64_add_unchecked + .type atomic64_add_unchecked,#function +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) +1: ldx [%o1], %g1 + addcc %g1, %o0, %g7 + casx [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %xcc, 2f + nop + retl + nop +2: BACKOFF_SPIN(%o2, %o3, 1b) + .size atomic64_add_unchecked, .-atomic64_add_unchecked + .globl atomic64_sub .type atomic64_sub,#function atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */ BACKOFF_SETUP(%o2) 1: ldx [%o1], %g1 - sub %g1, %o0, %g7 + subcc %g1, %o0, %g7 + +#ifdef CONFIG_PAX_REFCOUNT + tvs %xcc, 6 +#endif + casx [%o1], %g1, %g7 cmp %g1, %g7 bne,pn %xcc, 2f @@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = 2: BACKOFF_SPIN(%o2, %o3, 1b) .size atomic64_sub, .-atomic64_sub + .globl atomic64_sub_unchecked + .type atomic64_sub_unchecked,#function +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) +1: ldx [%o1], %g1 + subcc %g1, %o0, %g7 + casx [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %xcc, 2f + nop + retl + nop +2: BACKOFF_SPIN(%o2, %o3, 1b) + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked + .globl atomic64_add_ret .type atomic64_add_ret,#function atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */ BACKOFF_SETUP(%o2) 1: ldx [%o1], %g1 - add %g1, %o0, %g7 + addcc %g1, %o0, %g7 + +#ifdef CONFIG_PAX_REFCOUNT + tvs %xcc, 6 +#endif + casx [%o1], %g1, %g7 cmp %g1, %g7 bne,pn %xcc, 2f @@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o 2: BACKOFF_SPIN(%o2, %o3, 1b) .size atomic64_add_ret, .-atomic64_add_ret + .globl atomic64_add_ret_unchecked + .type atomic64_add_ret_unchecked,#function +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) +1: ldx [%o1], %g1 + addcc %g1, %o0, %g7 + casx [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %xcc, 2f + add %g7, %o0, %g7 + mov 
%g7, %o0 + retl + nop +2: BACKOFF_SPIN(%o2, %o3, 1b) + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked + .globl atomic64_sub_ret .type atomic64_sub_ret,#function atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */ BACKOFF_SETUP(%o2) 1: ldx [%o1], %g1 - sub %g1, %o0, %g7 + subcc %g1, %o0, %g7 + +#ifdef CONFIG_PAX_REFCOUNT + tvs %xcc, 6 +#endif + casx [%o1], %g1, %g7 cmp %g1, %g7 bne,pn %xcc, 2f diff -purN a/arch/sparc/lib/rwsem_64.S b/arch/sparc/lib/rwsem_64.S --- a/arch/sparc/lib/rwsem_64.S 2012-02-14 08:38:07.619358372 -0800 +++ b/arch/sparc/lib/rwsem_64.S 2012-02-14 10:50:15.663243374 -0800 @@ -11,7 +11,12 @@ .globl __down_read __down_read: 1: lduw [%o0], %g1 - add %g1, 1, %g7 + addcc %g1, 1, %g7 + +#ifdef CONFIG_PAX_REFCOUNT + tvs %icc, 6 +#endif + cas [%o0], %g1, %g7 cmp %g1, %g7 bne,pn %icc, 1b @@ -33,7 +38,12 @@ __down_read: .globl __down_read_trylock __down_read_trylock: 1: lduw [%o0], %g1 - add %g1, 1, %g7 + addcc %g1, 1, %g7 + +#ifdef CONFIG_PAX_REFCOUNT + tvs %icc, 6 +#endif + cmp %g7, 0 bl,pn %icc, 2f mov 0, %o1 @@ -51,7 +61,12 @@ __down_write: or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1 1: lduw [%o0], %g3 - add %g3, %g1, %g7 + addcc %g3, %g1, %g7 + +#ifdef CONFIG_PAX_REFCOUNT + tvs %icc, 6 +#endif + cas [%o0], %g3, %g7 cmp %g3, %g7 bne,pn %icc, 1b @@ -77,7 +92,12 @@ __down_write_trylock: cmp %g3, 0 bne,pn %icc, 2f mov 0, %o1 - add %g3, %g1, %g7 + addcc %g3, %g1, %g7 + +#ifdef CONFIG_PAX_REFCOUNT + tvs %icc, 6 +#endif + cas [%o0], %g3, %g7 cmp %g3, %g7 bne,pn %icc, 1b @@ -90,7 +110,12 @@ __down_write_trylock: __up_read: 1: lduw [%o0], %g1 - sub %g1, 1, %g7 + subcc %g1, 1, %g7 + +#ifdef CONFIG_PAX_REFCOUNT + tvs %icc, 6 +#endif + cas [%o0], %g1, %g7 cmp %g1, %g7 bne,pn %icc, 1b @@ -118,7 +143,12 @@ __up_write: or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1 1: lduw [%o0], %g3 - sub %g3, %g1, %g7 + subcc %g3, %g1, %g7 + +#ifdef CONFIG_PAX_REFCOUNT + tvs %icc, 6 +#endif + cas [%o0], %g3, %g7 cmp %g3, %g7 bne,pn %icc, 1b @@ -143,7 +173,12 @@ 
__downgrade_write: or %g1, %lo(RWSEM_WAITING_BIAS), %g1 1: lduw [%o0], %g3 - sub %g3, %g1, %g7 + subcc %g3, %g1, %g7 + +#ifdef CONFIG_PAX_REFCOUNT + tvs %icc, 6 +#endif + cas [%o0], %g3, %g7 cmp %g3, %g7 bne,pn %icc, 1b diff -purN a/arch/x86/include/asm/atomic_32.h b/arch/x86/include/asm/atomic_32.h --- a/arch/x86/include/asm/atomic_32.h 2012-02-14 08:38:05.847358452 -0800 +++ b/arch/x86/include/asm/atomic_32.h 2012-02-14 10:50:15.663243374 -0800 @@ -25,6 +25,17 @@ static inline int atomic_read(const atom } /** + * atomic_read_unchecked - read atomic variable + * @v: pointer of type atomic_unchecked_t + * + * Atomically reads the value of @v. + */ +static inline int atomic_read_unchecked(const atomic_unchecked_t *v) +{ + return v->counter; +} + +/** * atomic_set - set atomic variable * @v: pointer of type atomic_t * @i: required value @@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t * } /** + * atomic_set_unchecked - set atomic variable + * @v: pointer of type atomic_unchecked_t + * @i: required value + * + * Atomically sets the value of @v to @i. + */ +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i) +{ + v->counter = i; +} + +/** * atomic_add - add integer to atomic variable * @i: integer value to add * @v: pointer of type atomic_t @@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t * */ static inline void atomic_add(int i, atomic_t *v) { - asm volatile(LOCK_PREFIX "addl %1,%0" + asm volatile(LOCK_PREFIX "addl %1,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "subl %1,%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + : "+m" (v->counter) + : "ir" (i)); +} + +/** + * atomic_add_unchecked - add integer to atomic variable + * @i: integer value to add + * @v: pointer of type atomic_unchecked_t + * + * Atomically adds @i to @v. 
+ */ +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v) +{ + asm volatile(LOCK_PREFIX "addl %1,%0\n" : "+m" (v->counter) : "ir" (i)); } @@ -59,7 +104,29 @@ static inline void atomic_add(int i, ato */ static inline void atomic_sub(int i, atomic_t *v) { - asm volatile(LOCK_PREFIX "subl %1,%0" + asm volatile(LOCK_PREFIX "subl %1,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "addl %1,%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + : "+m" (v->counter) + : "ir" (i)); +} + +/** + * atomic_sub_unchecked - subtract integer from atomic variable + * @i: integer value to subtract + * @v: pointer of type atomic_unchecked_t + * + * Atomically subtracts @i from @v. + */ +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v) +{ + asm volatile(LOCK_PREFIX "subl %1,%0\n" : "+m" (v->counter) : "ir" (i)); } @@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(in { unsigned char c; - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1" + asm volatile(LOCK_PREFIX "subl %2,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "addl %2,%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + "sete %1\n" : "+m" (v->counter), "=qm" (c) : "ir" (i) : "memory"); return c; @@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(in */ static inline void atomic_inc(atomic_t *v) { - asm volatile(LOCK_PREFIX "incl %0" + asm volatile(LOCK_PREFIX "incl %0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "decl %0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + : "+m" (v->counter)); +} + +/** + * atomic_inc_unchecked - increment atomic variable + * @v: pointer of type atomic_unchecked_t + * + * Atomically increments @v by 1. 
+ */ +static inline void atomic_inc_unchecked(atomic_unchecked_t *v) +{ + asm volatile(LOCK_PREFIX "incl %0\n" : "+m" (v->counter)); } @@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t * */ static inline void atomic_dec(atomic_t *v) { - asm volatile(LOCK_PREFIX "decl %0" + asm volatile(LOCK_PREFIX "decl %0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "incl %0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + : "+m" (v->counter)); +} + +/** + * atomic_dec_unchecked - decrement atomic variable + * @v: pointer of type atomic_unchecked_t + * + * Atomically decrements @v by 1. + */ +static inline void atomic_dec_unchecked(atomic_unchecked_t *v) +{ + asm volatile(LOCK_PREFIX "decl %0\n" : "+m" (v->counter)); } @@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(at { unsigned char c; - asm volatile(LOCK_PREFIX "decl %0; sete %1" + asm volatile(LOCK_PREFIX "decl %0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "incl %0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + "sete %1\n" : "+m" (v->counter), "=qm" (c) : : "memory"); return c != 0; @@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(at { unsigned char c; - asm volatile(LOCK_PREFIX "incl %0; sete %1" + asm volatile(LOCK_PREFIX "incl %0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "decl %0\n" + "into\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + "sete %1\n" + : "+m" (v->counter), "=qm" (c) + : : "memory"); + return c != 0; +} + +/** + * atomic_inc_and_test_unchecked - increment and test + * @v: pointer of type atomic_unchecked_t + * + * Atomically increments @v by 1 + * and returns true if the result is zero, or false for all + * other cases. 
+ */ +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v) +{ + unsigned char c; + + asm volatile(LOCK_PREFIX "incl %0\n" + "sete %1\n" : "+m" (v->counter), "=qm" (c) : : "memory"); return c != 0; @@ -156,7 +309,16 @@ static inline int atomic_add_negative(in { unsigned char c; - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1" + asm volatile(LOCK_PREFIX "addl %2,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "subl %2,%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + "sets %1\n" : "+m" (v->counter), "=qm" (c) : "ir" (i) : "memory"); return c; @@ -179,7 +341,15 @@ static inline int atomic_add_return(int #endif /* Modern 486+ processor */ __i = i; - asm volatile(LOCK_PREFIX "xaddl %0, %1" + asm volatile(LOCK_PREFIX "xaddl %0, %1\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + "movl %0, %1\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + : "+r" (i), "+m" (v->counter) : : "memory"); return i + __i; @@ -195,6 +365,38 @@ no_xadd: /* Legacy 386 processor */ } /** + * atomic_add_return_unchecked - add integer and return + * @v: pointer of type atomic_unchecked_t + * @i: integer value to add + * + * Atomically adds @i to @v and returns @i + @v + */ +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v) +{ + int __i; +#ifdef CONFIG_M386 + unsigned long flags; + if (unlikely(boot_cpu_data.x86 <= 3)) + goto no_xadd; +#endif + /* Modern 486+ processor */ + __i = i; + asm volatile(LOCK_PREFIX "xaddl %0, %1" + : "+r" (i), "+m" (v->counter) + : : "memory"); + return i + __i; + +#ifdef CONFIG_M386 +no_xadd: /* Legacy 386 processor */ + local_irq_save(flags); + __i = atomic_read_unchecked(v); + atomic_set_unchecked(v, i + __i); + local_irq_restore(flags); + return i + __i; +#endif +} + +/** * atomic_sub_return - subtract integer and return * @v: pointer of type atomic_t * @i: integer value to subtract @@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_ return cmpxchg(&v->counter, old, new); } +static inline 
int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new) +{ + return cmpxchg(&v->counter, old, new); +} + static inline int atomic_xchg(atomic_t *v, int new) { return xchg(&v->counter, new); } +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new) +{ + return xchg(&v->counter, new); +} + /** * atomic_add_unless - add unless the number is already a given value * @v: pointer of type atomic_t @@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t * */ static inline int atomic_add_unless(atomic_t *v, int a, int u) { - int c, old; + int c, old, new; c = atomic_read(v); for (;;) { - if (unlikely(c == (u))) + if (unlikely(c == u)) break; - old = atomic_cmpxchg((v), c, c + (a)); + + asm volatile("addl %2,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + "subl %2,%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + : "=r" (new) + : "0" (c), "ir" (a)); + + old = atomic_cmpxchg(v, c, new); if (likely(old == c)) break; c = old; } - return c != (u); + return c != u; } #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) #define atomic_inc_return(v) (atomic_add_return(1, v)) +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v) +{ + return atomic_add_return_unchecked(1, v); +} #define atomic_dec_return(v) (atomic_sub_return(1, v)) /* These are x86-specific, used by some header files */ @@ -266,9 +495,18 @@ typedef struct { u64 __aligned(8) counter; } atomic64_t; +#ifdef CONFIG_PAX_REFCOUNT +typedef struct { + u64 __aligned(8) counter; +} atomic64_unchecked_t; +#else +typedef atomic64_t atomic64_unchecked_t; +#endif + #define ATOMIC64_INIT(val) { (val) } extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val); +extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val); /** * atomic64_xchg - xchg atomic64 variable @@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t * * the old value. 
*/ extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val); +extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val); /** * atomic64_set - set atomic64 variable @@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr extern void atomic64_set(atomic64_t *ptr, u64 new_val); /** + * atomic64_unchecked_set - set atomic64 variable + * @ptr: pointer to type atomic64_unchecked_t + * @new_val: value to assign + * + * Atomically sets the value of @ptr to @new_val. + */ +extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val); + +/** * atomic64_read - read atomic64 variable * @ptr: pointer to type atomic64_t * @@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64 return res; } -extern u64 atomic64_read(atomic64_t *ptr); +/** + * atomic64_read_unchecked - read atomic64 variable + * @ptr: pointer to type atomic64_unchecked_t + * + * Atomically reads the value of @ptr and returns it. + */ +static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr) +{ + u64 res; + + /* + * Note, we inline this atomic64_unchecked_t primitive because + * it only clobbers EAX/EDX and leaves the others + * untouched. 
We also (somewhat subtly) rely on the + * fact that cmpxchg8b returns the current 64-bit value + * of the memory location we are touching: + */ + asm volatile( + "mov %%ebx, %%eax\n\t" + "mov %%ecx, %%edx\n\t" + LOCK_PREFIX "cmpxchg8b %1\n" + : "=&A" (res) + : "m" (*ptr) + ); + + return res; +} /** * atomic64_add_return - add and return @@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta * Other variants with different arithmetic operators: */ extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr); +extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr); extern u64 atomic64_inc_return(atomic64_t *ptr); +extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr); extern u64 atomic64_dec_return(atomic64_t *ptr); +extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr); /** * atomic64_add - add integer to atomic64 variable @@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_ extern void atomic64_add(u64 delta, atomic64_t *ptr); /** + * atomic64_add_unchecked - add integer to atomic64 variable + * @delta: integer value to add + * @ptr: pointer to type atomic64_unchecked_t + * + * Atomically adds @delta to @ptr. + */ +extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr); + +/** * atomic64_sub - subtract the atomic64 variable * @delta: integer value to subtract * @ptr: pointer to type atomic64_t @@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atom extern void atomic64_sub(u64 delta, atomic64_t *ptr); /** + * atomic64_sub_unchecked - subtract the atomic64 variable + * @delta: integer value to subtract + * @ptr: pointer to type atomic64_unchecked_t + * + * Atomically subtracts @delta from @ptr. 
+ */ +extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr); + +/** * atomic64_sub_and_test - subtract value from variable and test result * @delta: integer value to subtract * @ptr: pointer to type atomic64_t @@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 del extern void atomic64_inc(atomic64_t *ptr); /** + * atomic64_inc_unchecked - increment atomic64 variable + * @ptr: pointer to type atomic64_unchecked_t + * + * Atomically increments @ptr by 1. + */ +extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr); + +/** * atomic64_dec - decrement atomic64 variable * @ptr: pointer to type atomic64_t * @@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr extern void atomic64_dec(atomic64_t *ptr); /** + * atomic64_dec_unchecked - decrement atomic64 variable + * @ptr: pointer to type atomic64_unchecked_t + * + * Atomically decrements @ptr by 1. + */ +extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr); + +/** * atomic64_dec_and_test - decrement and test * @ptr: pointer to type atomic64_t * diff -purN a/arch/x86/include/asm/atomic_64.h b/arch/x86/include/asm/atomic_64.h --- a/arch/x86/include/asm/atomic_64.h 2012-02-14 08:38:05.919358448 -0800 +++ b/arch/x86/include/asm/atomic_64.h 2012-02-14 10:50:15.671243373 -0800 @@ -24,6 +24,17 @@ static inline int atomic_read(const atom } /** + * atomic_read_unchecked - read atomic variable + * @v: pointer of type atomic_unchecked_t + * + * Atomically reads the value of @v. + */ +static inline int atomic_read_unchecked(const atomic_unchecked_t *v) +{ + return v->counter; +} + +/** * atomic_set - set atomic variable * @v: pointer of type atomic_t * @i: required value @@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t * } /** + * atomic_set_unchecked - set atomic variable + * @v: pointer of type atomic_unchecked_t + * @i: required value + * + * Atomically sets the value of @v to @i. 
+ */ +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i) +{ + v->counter = i; +} + +/** * atomic_add - add integer to atomic variable * @i: integer value to add * @v: pointer of type atomic_t @@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t * */ static inline void atomic_add(int i, atomic_t *v) { - asm volatile(LOCK_PREFIX "addl %1,%0" + asm volatile(LOCK_PREFIX "addl %1,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "subl %1,%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + : "=m" (v->counter) + : "ir" (i), "m" (v->counter)); +} + +/** + * atomic_add_unchecked - add integer to atomic variable + * @i: integer value to add + * @v: pointer of type atomic_unchecked_t + * + * Atomically adds @i to @v. + */ +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v) +{ + asm volatile(LOCK_PREFIX "addl %1,%0\n" : "=m" (v->counter) : "ir" (i), "m" (v->counter)); } @@ -58,7 +103,29 @@ static inline void atomic_add(int i, ato */ static inline void atomic_sub(int i, atomic_t *v) { - asm volatile(LOCK_PREFIX "subl %1,%0" + asm volatile(LOCK_PREFIX "subl %1,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "addl %1,%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + : "=m" (v->counter) + : "ir" (i), "m" (v->counter)); +} + +/** + * atomic_sub_unchecked - subtract the atomic variable + * @i: integer value to subtract + * @v: pointer of type atomic_unchecked_t + * + * Atomically subtracts @i from @v. 
+ */ +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v) +{ + asm volatile(LOCK_PREFIX "subl %1,%0\n" : "=m" (v->counter) : "ir" (i), "m" (v->counter)); } @@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(in { unsigned char c; - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1" + asm volatile(LOCK_PREFIX "subl %2,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "addl %2,%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + "sete %1\n" : "=m" (v->counter), "=qm" (c) : "ir" (i), "m" (v->counter) : "memory"); return c; @@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(in */ static inline void atomic_inc(atomic_t *v) { - asm volatile(LOCK_PREFIX "incl %0" + asm volatile(LOCK_PREFIX "incl %0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "decl %0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + : "=m" (v->counter) + : "m" (v->counter)); +} + +/** + * atomic_inc_unchecked - increment atomic variable + * @v: pointer of type atomic_unchecked_t + * + * Atomically increments @v by 1. + */ +static inline void atomic_inc_unchecked(atomic_unchecked_t *v) +{ + asm volatile(LOCK_PREFIX "incl %0\n" : "=m" (v->counter) : "m" (v->counter)); } @@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t * */ static inline void atomic_dec(atomic_t *v) { - asm volatile(LOCK_PREFIX "decl %0" + asm volatile(LOCK_PREFIX "decl %0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "incl %0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + : "=m" (v->counter) + : "m" (v->counter)); +} + +/** + * atomic_dec_unchecked - decrement atomic variable + * @v: pointer of type atomic_unchecked_t + * + * Atomically decrements @v by 1. 
+ */ +static inline void atomic_dec_unchecked(atomic_unchecked_t *v) +{ + asm volatile(LOCK_PREFIX "decl %0\n" : "=m" (v->counter) : "m" (v->counter)); } @@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(at { unsigned char c; - asm volatile(LOCK_PREFIX "decl %0; sete %1" + asm volatile(LOCK_PREFIX "decl %0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "incl %0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + "sete %1\n" : "=m" (v->counter), "=qm" (c) : "m" (v->counter) : "memory"); return c != 0; @@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(at { unsigned char c; - asm volatile(LOCK_PREFIX "incl %0; sete %1" + asm volatile(LOCK_PREFIX "incl %0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "decl %0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + "sete %1\n" + : "=m" (v->counter), "=qm" (c) + : "m" (v->counter) : "memory"); + return c != 0; +} + +/** + * atomic_inc_and_test_unchecked - increment and test + * @v: pointer of type atomic_unchecked_t + * + * Atomically increments @v by 1 + * and returns true if the result is zero, or false for all + * other cases. 
+ */ +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v) +{ + unsigned char c; + + asm volatile(LOCK_PREFIX "incl %0\n" + "sete %1\n" : "=m" (v->counter), "=qm" (c) : "m" (v->counter) : "memory"); return c != 0; @@ -157,7 +312,16 @@ static inline int atomic_add_negative(in { unsigned char c; - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1" + asm volatile(LOCK_PREFIX "addl %2,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "subl %2,%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + "sets %1\n" : "=m" (v->counter), "=qm" (c) : "ir" (i), "m" (v->counter) : "memory"); return c; @@ -173,7 +337,31 @@ static inline int atomic_add_negative(in static inline int atomic_add_return(int i, atomic_t *v) { int __i = i; - asm volatile(LOCK_PREFIX "xaddl %0, %1" + asm volatile(LOCK_PREFIX "xaddl %0, %1\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + "movl %0, %1\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + : "+r" (i), "+m" (v->counter) + : : "memory"); + return i + __i; +} + +/** + * atomic_add_return_unchecked - add and return + * @i: integer value to add + * @v: pointer of type atomic_unchecked_t + * + * Atomically adds @i to @v and returns @i + @v + */ +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v) +{ + int __i = i; + asm volatile(LOCK_PREFIX "xaddl %0, %1\n" : "+r" (i), "+m" (v->counter) : : "memory"); return i + __i; @@ -185,6 +373,10 @@ static inline int atomic_sub_return(int } #define atomic_inc_return(v) (atomic_add_return(1, v)) +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v) +{ + return atomic_add_return_unchecked(1, v); +} #define atomic_dec_return(v) (atomic_sub_return(1, v)) /* The 64-bit atomic type */ @@ -204,6 +396,18 @@ static inline long atomic64_read(const a } /** + * atomic64_read_unchecked - read atomic64 variable + * @v: pointer of type atomic64_unchecked_t + * + * Atomically reads the value of @v. + * Doesn't imply a read memory barrier. 
+ */ +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v) +{ + return v->counter; +} + +/** * atomic64_set - set atomic64 variable * @v: pointer to type atomic64_t * @i: required value @@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64 } /** + * atomic64_set_unchecked - set atomic64 variable + * @v: pointer to type atomic64_unchecked_t + * @i: required value + * + * Atomically sets the value of @v to @i. + */ +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i) +{ + v->counter = i; +} + +/** * atomic64_add - add integer to atomic64 variable * @i: integer value to add * @v: pointer to type atomic64_t @@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64 */ static inline void atomic64_add(long i, atomic64_t *v) { + asm volatile(LOCK_PREFIX "addq %1,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "subq %1,%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + : "=m" (v->counter) + : "er" (i), "m" (v->counter)); +} + +/** + * atomic64_add_unchecked - add integer to atomic64 variable + * @i: integer value to add + * @v: pointer to type atomic64_unchecked_t + * + * Atomically adds @i to @v. 
+ */ +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v) +{ asm volatile(LOCK_PREFIX "addq %1,%0" : "=m" (v->counter) : "er" (i), "m" (v->counter)); @@ -238,7 +476,15 @@ static inline void atomic64_add(long i, */ static inline void atomic64_sub(long i, atomic64_t *v) { - asm volatile(LOCK_PREFIX "subq %1,%0" + asm volatile(LOCK_PREFIX "subq %1,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "addq %1,%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + : "=m" (v->counter) : "er" (i), "m" (v->counter)); } @@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test( { unsigned char c; - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1" + asm volatile(LOCK_PREFIX "subq %2,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "addq %2,%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + "sete %1\n" : "=m" (v->counter), "=qm" (c) : "er" (i), "m" (v->counter) : "memory"); return c; @@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test( */ static inline void atomic64_inc(atomic64_t *v) { + asm volatile(LOCK_PREFIX "incq %0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "decq %0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + : "=m" (v->counter) + : "m" (v->counter)); +} + +/** + * atomic64_inc_unchecked - increment atomic64 variable + * @v: pointer to type atomic64_unchecked_t + * + * Atomically increments @v by 1. 
+ */ +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v) +{ asm volatile(LOCK_PREFIX "incq %0" : "=m" (v->counter) : "m" (v->counter)); @@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64 */ static inline void atomic64_dec(atomic64_t *v) { - asm volatile(LOCK_PREFIX "decq %0" + asm volatile(LOCK_PREFIX "decq %0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "incq %0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + : "=m" (v->counter) + : "m" (v->counter)); +} + +/** + * atomic64_dec_unchecked - decrement atomic64 variable + * @v: pointer to type atomic64_unchecked_t + * + * Atomically decrements @v by 1. + */ +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v) +{ + asm volatile(LOCK_PREFIX "decq %0\n" : "=m" (v->counter) : "m" (v->counter)); } @@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test( { unsigned char c; - asm volatile(LOCK_PREFIX "decq %0; sete %1" + asm volatile(LOCK_PREFIX "decq %0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "incq %0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + "sete %1\n" : "=m" (v->counter), "=qm" (c) : "m" (v->counter) : "memory"); return c != 0; @@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test( { unsigned char c; - asm volatile(LOCK_PREFIX "incq %0; sete %1" + asm volatile(LOCK_PREFIX "incq %0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "decq %0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + "sete %1\n" : "=m" (v->counter), "=qm" (c) : "m" (v->counter) : "memory"); return c != 0; @@ -337,7 +652,16 @@ static inline int atomic64_add_negative( { unsigned char c; - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1" + asm volatile(LOCK_PREFIX "addq %2,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "subq %2,%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + "sets %1\n" : "=m" (v->counter), "=qm" (c) : "er" (i), "m" (v->counter) : "memory"); return c; @@ -353,7 +677,31 @@ static inline int 
atomic64_add_negative( static inline long atomic64_add_return(long i, atomic64_t *v) { long __i = i; - asm volatile(LOCK_PREFIX "xaddq %0, %1;" + asm volatile(LOCK_PREFIX "xaddq %0, %1\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + "movq %0, %1\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + : "+r" (i), "+m" (v->counter) + : : "memory"); + return i + __i; +} + +/** + * atomic64_add_return_unchecked - add and return + * @i: integer value to add + * @v: pointer to type atomic64_unchecked_t + * + * Atomically adds @i to @v and returns @i + @v + */ +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v) +{ + long __i = i; + asm volatile(LOCK_PREFIX "xaddq %0, %1" : "+r" (i), "+m" (v->counter) : : "memory"); return i + __i; @@ -365,6 +713,10 @@ static inline long atomic64_sub_return(l } #define atomic64_inc_return(v) (atomic64_add_return(1, (v))) +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v) +{ + return atomic64_add_return_unchecked(1, v); +} #define atomic64_dec_return(v) (atomic64_sub_return(1, (v))) static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new) @@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atom return cmpxchg(&v->counter, old, new); } +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new) +{ + return cmpxchg(&v->counter, old, new); +} + static inline long atomic64_xchg(atomic64_t *v, long new) { return xchg(&v->counter, new); } +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new) +{ + return xchg(&v->counter, new); +} + static inline long atomic_cmpxchg(atomic_t *v, int old, int new) { return cmpxchg(&v->counter, old, new); } +static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new) +{ + return cmpxchg(&v->counter, old, new); +} + static inline long atomic_xchg(atomic_t *v, int new) { return xchg(&v->counter, new); } +static inline long atomic_xchg_unchecked(atomic_unchecked_t 
*v, int new) +{ + return xchg(&v->counter, new); +} + /** * atomic_add_unless - add unless the number is a given value * @v: pointer of type atomic_t @@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t */ static inline int atomic_add_unless(atomic_t *v, int a, int u) { - int c, old; + int c, old, new; c = atomic_read(v); for (;;) { - if (unlikely(c == (u))) + if (unlikely(c == u)) break; - old = atomic_cmpxchg((v), c, c + (a)); + + asm volatile("addl %2,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + "subl %2,%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + : "=r" (new) + : "0" (c), "ir" (a)); + + old = atomic_cmpxchg(v, c, new); if (likely(old == c)) break; c = old; } - return c != (u); + return c != u; } #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) @@ -424,17 +809,30 @@ static inline int atomic_add_unless(atom */ static inline int atomic64_add_unless(atomic64_t *v, long a, long u) { - long c, old; + long c, old, new; c = atomic64_read(v); for (;;) { - if (unlikely(c == (u))) + if (unlikely(c == u)) break; - old = atomic64_cmpxchg((v), c, c + (a)); + + asm volatile("addq %2,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + "subq %2,%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + : "=r" (new) + : "0" (c), "er" (a)); + + old = atomic64_cmpxchg(v, c, new); if (likely(old == c)) break; c = old; } - return c != (u); + return c != u; } /** diff -purN a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h --- a/arch/x86/include/asm/hw_irq.h 2012-02-14 08:38:05.887358450 -0800 +++ b/arch/x86/include/asm/hw_irq.h 2012-02-14 10:50:15.671243373 -0800 @@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void); extern void enable_IO_APIC(void); /* Statistics */ -extern atomic_t irq_err_count; -extern atomic_t irq_mis_count; +extern atomic_unchecked_t irq_err_count; +extern atomic_unchecked_t irq_mis_count; /* EISA */ extern void eisa_set_level_irq(unsigned int irq); diff -purN a/arch/x86/include/asm/local.h 
b/arch/x86/include/asm/local.h --- a/arch/x86/include/asm/local.h 2012-02-14 08:38:05.919358448 -0800 +++ b/arch/x86/include/asm/local.h 2012-02-14 10:50:15.671243373 -0800 @@ -18,26 +18,58 @@ typedef struct { static inline void local_inc(local_t *l) { - asm volatile(_ASM_INC "%0" + asm volatile(_ASM_INC "%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + _ASM_DEC "%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + : "+m" (l->a.counter)); } static inline void local_dec(local_t *l) { - asm volatile(_ASM_DEC "%0" + asm volatile(_ASM_DEC "%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + _ASM_INC "%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + : "+m" (l->a.counter)); } static inline void local_add(long i, local_t *l) { - asm volatile(_ASM_ADD "%1,%0" + asm volatile(_ASM_ADD "%1,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + _ASM_SUB "%1,%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + : "+m" (l->a.counter) : "ir" (i)); } static inline void local_sub(long i, local_t *l) { - asm volatile(_ASM_SUB "%1,%0" + asm volatile(_ASM_SUB "%1,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + _ASM_ADD "%1,%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + : "+m" (l->a.counter) : "ir" (i)); } @@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon { unsigned char c; - asm volatile(_ASM_SUB "%2,%0; sete %1" + asm volatile(_ASM_SUB "%2,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + _ASM_ADD "%2,%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + "sete %1\n" : "+m" (l->a.counter), "=qm" (c) : "ir" (i) : "memory"); return c; @@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc { unsigned char c; - asm volatile(_ASM_DEC "%0; sete %1" + asm volatile(_ASM_DEC "%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + _ASM_INC "%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + "sete %1\n" : "+m" (l->a.counter), "=qm" (c) : : "memory"); return c != 0; @@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc { unsigned char c; - asm 
volatile(_ASM_INC "%0; sete %1" + asm volatile(_ASM_INC "%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + _ASM_DEC "%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + "sete %1\n" : "+m" (l->a.counter), "=qm" (c) : : "memory"); return c != 0; @@ -110,7 +169,16 @@ static inline int local_add_negative(lon { unsigned char c; - asm volatile(_ASM_ADD "%2,%0; sets %1" + asm volatile(_ASM_ADD "%2,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + _ASM_SUB "%2,%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + "sets %1\n" : "+m" (l->a.counter), "=qm" (c) : "ir" (i) : "memory"); return c; @@ -133,7 +201,15 @@ static inline long local_add_return(long #endif /* Modern 486+ processor */ __i = i; - asm volatile(_ASM_XADD "%0, %1;" + asm volatile(_ASM_XADD "%0, %1\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + _ASM_MOV "%0,%1\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + : "+r" (i), "+m" (l->a.counter) : : "memory"); return i + __i; diff -purN a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h --- a/arch/x86/include/asm/rwsem.h 2012-02-14 08:38:05.915358448 -0800 +++ b/arch/x86/include/asm/rwsem.h 2012-02-14 10:50:15.675243373 -0800 @@ -118,6 +118,14 @@ static inline void __down_read(struct rw { asm volatile("# beginning down_read\n\t" LOCK_PREFIX _ASM_INC "(%1)\n\t" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX _ASM_DEC "(%1)\n\t" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + /* adds 0x00000001, returns the old value */ " jns 1f\n" " call call_rwsem_down_read_failed\n" @@ -139,6 +147,14 @@ static inline int __down_read_trylock(st "1:\n\t" " mov %1,%2\n\t" " add %3,%2\n\t" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + "sub %3,%2\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + " jle 2f\n\t" LOCK_PREFIX " cmpxchg %2,%0\n\t" " jnz 1b\n\t" @@ -160,6 +176,14 @@ static inline void __down_write_nested(s tmp = RWSEM_ACTIVE_WRITE_BIAS; asm volatile("# beginning down_write\n\t" LOCK_PREFIX " xadd %1,(%2)\n\t" + +#ifdef 
CONFIG_PAX_REFCOUNT + "jno 0f\n" + "mov %1,(%2)\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + /* subtract 0x0000ffff, returns the old value */ " test %1,%1\n\t" /* was the count 0 before? */ @@ -198,6 +222,14 @@ static inline void __up_read(struct rw_s rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS; asm volatile("# beginning __up_read\n\t" LOCK_PREFIX " xadd %1,(%2)\n\t" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + "mov %1,(%2)\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + /* subtracts 1, returns the old value */ " jns 1f\n\t" " call call_rwsem_wake\n" @@ -216,6 +248,14 @@ static inline void __up_write(struct rw_ rwsem_count_t tmp; asm volatile("# beginning __up_write\n\t" LOCK_PREFIX " xadd %1,(%2)\n\t" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + "mov %1,(%2)\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + /* tries to transition 0xffff0001 -> 0x00000000 */ " jz 1f\n" @@ -234,6 +274,14 @@ static inline void __downgrade_write(str { asm volatile("# beginning __downgrade_write\n\t" LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX _ASM_SUB "%2,(%1)\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + /* * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386) * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64) @@ -253,7 +301,15 @@ static inline void __downgrade_write(str static inline void rwsem_atomic_add(rwsem_count_t delta, struct rw_semaphore *sem) { - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0" + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX _ASM_SUB "%1,%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + : "+m" (sem->count) : "er" (delta)); } @@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic { rwsem_count_t tmp = delta; - asm volatile(LOCK_PREFIX "xadd %0,%1" + asm volatile(LOCK_PREFIX "xadd %0,%1\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + "mov %0,%1\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + : "+r" (tmp), "+m" (sem->count) : : 
"memory"); diff -purN a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h --- a/arch/x86/include/asm/spinlock.h 2012-02-14 08:38:05.919358448 -0800 +++ b/arch/x86/include/asm/spinlock.h 2012-02-14 10:50:15.675243373 -0800 @@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(r static inline void __raw_read_lock(raw_rwlock_t *rw) { asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX " addl $1,(%0)\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + "jns 1f\n" "call __read_lock_failed\n\t" "1:\n" @@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_r static inline void __raw_write_lock(raw_rwlock_t *rw) { asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX " addl %1,(%0)\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + "jz 1f\n" "call __write_lock_failed\n\t" "1:\n" @@ -286,12 +302,29 @@ static inline int __raw_write_trylock(ra static inline void __raw_read_unlock(raw_rwlock_t *rw) { - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory"); + asm volatile(LOCK_PREFIX "incl %0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "decl %0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + :"+m" (rw->lock) : : "memory"); } static inline void __raw_write_unlock(raw_rwlock_t *rw) { - asm volatile(LOCK_PREFIX "addl %1, %0" + asm volatile(LOCK_PREFIX "addl %1, %0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "subl %1, %0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory"); } diff -purN a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c --- a/arch/x86/kernel/apic/apic.c 2012-02-14 08:38:06.023358444 -0800 +++ b/arch/x86/kernel/apic/apic.c 2012-02-14 10:50:15.675243373 -0800 @@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs apic_write(APIC_ESR, 0); v1 = apic_read(APIC_ESR); ack_APIC_irq(); - atomic_inc(&irq_err_count); + 
atomic_inc_unchecked(&irq_err_count); /* * Here is what the APIC error bits mean: diff -purN a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c --- a/arch/x86/kernel/apic/io_apic.c 2012-02-14 08:38:06.023358444 -0800 +++ b/arch/x86/kernel/apic/io_apic.c 2012-02-14 10:50:15.679243373 -0800 @@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int i ack_APIC_irq(); } -atomic_t irq_mis_count; +atomic_unchecked_t irq_mis_count; static void ack_apic_level(unsigned int irq) { @@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int /* Tail end of version 0x11 I/O APIC bug workaround */ if (!(v & (1 << (i & 0x1f)))) { - atomic_inc(&irq_mis_count); + atomic_inc_unchecked(&irq_mis_count); spin_lock(&ioapic_lock); __mask_and_edge_IO_APIC_irq(cfg); __unmask_and_level_IO_APIC_irq(cfg); diff -purN a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c --- a/arch/x86/kernel/cpu/mcheck/mce.c 2012-02-14 08:38:06.019358444 -0800 +++ b/arch/x86/kernel/cpu/mcheck/mce.c 2012-02-14 10:50:15.687243373 -0800 @@ -224,10 +224,10 @@ static void print_mce_tail(void) #define PANIC_TIMEOUT 5 /* 5 seconds */ -static atomic_t mce_paniced; +static atomic_unchecked_t mce_paniced; static int fake_panic; -static atomic_t mce_fake_paniced; +static atomic_unchecked_t mce_fake_paniced; /* Panic in progress. 
Enable interrupts and wait for final IPI */ static void wait_for_panic(void) @@ -251,7 +251,7 @@ static void mce_panic(char *msg, struct /* * Make sure only one CPU runs in machine check panic */ - if (atomic_inc_return(&mce_paniced) > 1) + if (atomic_inc_return_unchecked(&mce_paniced) > 1) wait_for_panic(); barrier(); @@ -259,7 +259,7 @@ static void mce_panic(char *msg, struct console_verbose(); } else { /* Don't log too much for fake panic */ - if (atomic_inc_return(&mce_fake_paniced) > 1) + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1) return; } print_mce_head(); @@ -628,7 +628,7 @@ static int mce_timed_out(u64 *t) * might have been modified by someone else. */ rmb(); - if (atomic_read(&mce_paniced)) + if (atomic_read_unchecked(&mce_paniced)) wait_for_panic(); if (!monarch_timeout) goto out; @@ -2094,7 +2094,7 @@ struct dentry *mce_get_debugfs_dir(void) static void mce_reset(void) { cpu_missing = 0; - atomic_set(&mce_fake_paniced, 0); + atomic_set_unchecked(&mce_fake_paniced, 0); atomic_set(&mce_executing, 0); atomic_set(&mce_callin, 0); atomic_set(&global_nwo, 0); diff -purN a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c --- a/arch/x86/kernel/cpu/perf_event.c 2012-02-14 08:38:06.019358444 -0800 +++ b/arch/x86/kernel/cpu/perf_event.c 2012-02-14 10:50:15.687243373 -0800 @@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event * count to the generic event atomically: */ again: - prev_raw_count = atomic64_read(&hwc->prev_count); + prev_raw_count = atomic64_read_unchecked(&hwc->prev_count); rdmsrl(hwc->event_base + idx, new_raw_count); - if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count, + if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count, new_raw_count) != prev_raw_count) goto again; @@ -741,7 +741,7 @@ again: delta = (new_raw_count << shift) - (prev_raw_count << shift); delta >>= shift; - atomic64_add(delta, &event->count); + atomic64_add_unchecked(delta, &event->count); atomic64_sub(delta, 
&hwc->period_left); return new_raw_count; @@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_ev * The hw event starts counting from this event offset, * mark it to be able to extra future deltas: */ - atomic64_set(&hwc->prev_count, (u64)-left); + atomic64_set_unchecked(&hwc->prev_count, (u64)-left); err = checking_wrmsrl(hwc->event_base + idx, (u64)(-left) & x86_pmu.event_mask); diff -purN a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c --- a/arch/x86/kernel/ftrace.c 2012-02-14 08:38:05.987358445 -0800 +++ b/arch/x86/kernel/ftrace.c 2012-02-14 10:50:15.695243372 -0800 @@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the static void *mod_code_newcode; /* holds the text to write to the IP */ static unsigned nmi_wait_count; -static atomic_t nmi_update_count = ATOMIC_INIT(0); +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0); int ftrace_arch_read_dyn_info(char *buf, int size) { @@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf, r = snprintf(buf, size, "%u %u", nmi_wait_count, - atomic_read(&nmi_update_count)); + atomic_read_unchecked(&nmi_update_count)); return r; } @@ -150,7 +150,7 @@ void ftrace_nmi_enter(void) if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) { smp_rmb(); ftrace_mod_code(); - atomic_inc(&nmi_update_count); + atomic_inc_unchecked(&nmi_update_count); } /* Must have previous changes seen before executions */ smp_mb(); diff -purN a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c --- a/arch/x86/kernel/i8259.c 2012-02-14 08:38:05.987358445 -0800 +++ b/arch/x86/kernel/i8259.c 2012-02-14 10:50:15.695243372 -0800 @@ -208,7 +208,7 @@ spurious_8259A_irq: "spurious 8259A interrupt: IRQ%d.\n", irq); spurious_irq_mask |= irqmask; } - atomic_inc(&irq_err_count); + atomic_inc_unchecked(&irq_err_count); /* * Theoretically we do not have to handle this IRQ, * but in Linux this does not cause problems and is diff -purN a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c --- a/arch/x86/kernel/irq.c 2012-02-14 
08:38:06.003358445 -0800 +++ b/arch/x86/kernel/irq.c 2012-02-14 10:50:15.695243372 -0800 @@ -15,7 +15,7 @@ #include #include -atomic_t irq_err_count; +atomic_unchecked_t irq_err_count; /* Function pointer for generic interrupt vector handling */ void (*generic_interrupt_extension)(void) = NULL; @@ -114,9 +114,9 @@ static int show_other_interrupts(struct seq_printf(p, "%10u ", per_cpu(mce_poll_count, j)); seq_printf(p, " Machine check polls\n"); #endif - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count)); + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count)); #if defined(CONFIG_X86_IO_APIC) - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count)); + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count)); #endif return 0; } @@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu) u64 arch_irq_stat(void) { - u64 sum = atomic_read(&irq_err_count); + u64 sum = atomic_read_unchecked(&irq_err_count); #ifdef CONFIG_X86_IO_APIC - sum += atomic_read(&irq_mis_count); + sum += atomic_read_unchecked(&irq_mis_count); #endif return sum; } diff -purN a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c --- a/arch/x86/kernel/kgdb.c 2012-02-14 08:38:05.979358446 -0800 +++ b/arch/x86/kernel/kgdb.c 2012-02-14 10:50:15.695243372 -0800 @@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vec /* clear the trace bit */ linux_regs->flags &= ~X86_EFLAGS_TF; - atomic_set(&kgdb_cpu_doing_single_step, -1); + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1); /* set the trace bit if we're stepping */ if (remcomInBuffer[0] == 's') { linux_regs->flags |= X86_EFLAGS_TF; kgdb_single_step = 1; - atomic_set(&kgdb_cpu_doing_single_step, + atomic_set_unchecked(&kgdb_cpu_doing_single_step, raw_smp_processor_id()); } @@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args break; case DIE_DEBUG: - if (atomic_read(&kgdb_cpu_doing_single_step) == + if 
(atomic_read_unchecked(&kgdb_cpu_doing_single_step) == raw_smp_processor_id()) { if (user_mode(regs)) return single_step_cont(regs, args); diff -purN a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c --- a/arch/x86/kernel/tboot.c 2012-02-14 08:38:05.987358445 -0800 +++ b/arch/x86/kernel/tboot.c 2012-02-14 10:50:15.699243372 -0800 @@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1 tboot_shutdown(acpi_shutdown_map[sleep_state]); } -static atomic_t ap_wfs_count; +static atomic_unchecked_t ap_wfs_count; static int tboot_wait_for_aps(int num_aps) { @@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback( { switch (action) { case CPU_DYING: - atomic_inc(&ap_wfs_count); + atomic_inc_unchecked(&ap_wfs_count); if (num_online_cpus() == 1) - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count))) + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count))) return NOTIFY_BAD; break; } @@ -340,7 +340,7 @@ static __init int tboot_late_init(void) tboot_create_trampoline(); - atomic_set(&ap_wfs_count, 0); + atomic_set_unchecked(&ap_wfs_count, 0); register_hotcpu_notifier(&tboot_cpu_notifier); return 0; } diff -purN a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c --- a/arch/x86/kernel/traps.c 2012-02-14 08:38:05.979358446 -0800 +++ b/arch/x86/kernel/traps.c 2012-02-14 10:50:15.699243372 -0800 @@ -238,6 +238,12 @@ kernel_trap: tsk->thread.trap_no = trapnr; die(str, regs, error_code); } + +#ifdef CONFIG_PAX_REFCOUNT + if (trapnr == 4) + pax_report_refcount_overflow(regs); +#endif + return; #ifdef CONFIG_X86_32 diff -purN a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c --- a/arch/x86/lib/atomic64_32.c 2012-02-14 08:38:05.935358447 -0800 +++ b/arch/x86/lib/atomic64_32.c 2012-02-14 10:50:15.699243372 -0800 @@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u6 } EXPORT_SYMBOL(atomic64_cmpxchg); +u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val) +{ + return cmpxchg8b(&ptr->counter, old_val, new_val); +} 
+EXPORT_SYMBOL(atomic64_cmpxchg_unchecked); + /** * atomic64_xchg - xchg atomic64 variable * @ptr: pointer to type atomic64_t @@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 n EXPORT_SYMBOL(atomic64_xchg); /** + * atomic64_xchg_unchecked - xchg atomic64 variable + * @ptr: pointer to type atomic64_unchecked_t + * @new_val: value to assign + * + * Atomically xchgs the value of @ptr to @new_val and returns + * the old value. + */ +u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val) +{ + /* + * Try first with a (possibly incorrect) assumption about + * what we have there. We'll do two loops most likely, + * but we'll get an ownership MESI transaction straight away + * instead of a read transaction followed by a + * flush-for-ownership transaction: + */ + u64 old_val, real_val = 0; + + do { + old_val = real_val; + + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val); + + } while (real_val != old_val); + + return old_val; +} +EXPORT_SYMBOL(atomic64_xchg_unchecked); + +/** * atomic64_set - set atomic64 variable * @ptr: pointer to type atomic64_t * @new_val: value to assign @@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 n EXPORT_SYMBOL(atomic64_set); /** -EXPORT_SYMBOL(atomic64_read); + * atomic64_set_unchecked - set atomic64 variable + * @ptr: pointer to type atomic64_unchecked_t + * @new_val: value to assign + * + * Atomically sets the value of @ptr to @new_val. 
+ */ +void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val) +{ + atomic64_xchg_unchecked(ptr, new_val); +} +EXPORT_SYMBOL(atomic64_set_unchecked); + +/** * atomic64_add_return - add and return * @delta: integer value to add * @ptr: pointer to type atomic64_t @@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 del } EXPORT_SYMBOL(atomic64_add_return); +/** + * atomic64_add_return_unchecked - add and return + * @delta: integer value to add + * @ptr: pointer to type atomic64_unchecked_t + * + * Atomically adds @delta to @ptr and returns @delta + *@ptr + */ +noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr) +{ + /* + * Try first with a (possibly incorrect) assumption about + * what we have there. We'll do two loops most likely, + * but we'll get an ownership MESI transaction straight away + * instead of a read transaction followed by a + * flush-for-ownership transaction: + */ + u64 old_val, new_val, real_val = 0; + + do { + old_val = real_val; + new_val = old_val + delta; + + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val); + + } while (real_val != old_val); + + return new_val; +} +EXPORT_SYMBOL(atomic64_add_return_unchecked); + u64 atomic64_sub_return(u64 delta, atomic64_t *ptr) { return atomic64_add_return(-delta, ptr); } EXPORT_SYMBOL(atomic64_sub_return); +u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr) +{ + return atomic64_add_return_unchecked(-delta, ptr); +} +EXPORT_SYMBOL(atomic64_sub_return_unchecked); + u64 atomic64_inc_return(atomic64_t *ptr) { return atomic64_add_return(1, ptr); } EXPORT_SYMBOL(atomic64_inc_return); +u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr) +{ + return atomic64_add_return_unchecked(1, ptr); +} +EXPORT_SYMBOL(atomic64_inc_return_unchecked); + u64 atomic64_dec_return(atomic64_t *ptr) { return atomic64_sub_return(1, ptr); } EXPORT_SYMBOL(atomic64_dec_return); +u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr) +{ + return 
atomic64_sub_return_unchecked(1, ptr); +} +EXPORT_SYMBOL(atomic64_dec_return_unchecked); + /** * atomic64_add - add integer to atomic64 variable * @delta: integer value to add @@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t EXPORT_SYMBOL(atomic64_add); /** + * atomic64_add_unchecked - add integer to atomic64 variable + * @delta: integer value to add + * @ptr: pointer to type atomic64_unchecked_t + * + * Atomically adds @delta to @ptr. + */ +void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr) +{ + atomic64_add_return_unchecked(delta, ptr); +} +EXPORT_SYMBOL(atomic64_add_unchecked); + +/** * atomic64_sub - subtract the atomic64 variable * @delta: integer value to subtract * @ptr: pointer to type atomic64_t @@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t EXPORT_SYMBOL(atomic64_sub); /** + * atomic64_sub_unchecked - subtract the atomic64 variable + * @delta: integer value to subtract + * @ptr: pointer to type atomic64_unchecked_t + * + * Atomically subtracts @delta from @ptr. + */ +void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr) +{ + atomic64_add_unchecked(-delta, ptr); +} +EXPORT_SYMBOL(atomic64_sub_unchecked); + +/** * atomic64_sub_and_test - subtract value from variable and test result * @delta: integer value to subtract * @ptr: pointer to type atomic64_t @@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr) EXPORT_SYMBOL(atomic64_inc); /** + * atomic64_inc_unchecked - increment atomic64 variable + * @ptr: pointer to type atomic64_unchecked_t + * + * Atomically increments @ptr by 1. 
+ */ +void atomic64_inc_unchecked(atomic64_unchecked_t *ptr) +{ + atomic64_add_unchecked(1, ptr); +} +EXPORT_SYMBOL(atomic64_inc_unchecked); + +/** * atomic64_dec - decrement atomic64 variable * @ptr: pointer to type atomic64_t * @@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr) EXPORT_SYMBOL(atomic64_dec); /** + * atomic64_dec_unchecked - decrement atomic64 variable + * @ptr: pointer to type atomic64_unchecked_t + * + * Atomically decrements @ptr by 1. + */ +void atomic64_dec_unchecked(atomic64_unchecked_t *ptr) +{ + atomic64_sub_unchecked(1, ptr); +} +EXPORT_SYMBOL(atomic64_dec_unchecked); + +/** * atomic64_dec_and_test - decrement and test * @ptr: pointer to type atomic64_t * diff -purN a/arch/x86/lib/rwlock_64.S b/arch/x86/lib/rwlock_64.S --- a/arch/x86/lib/rwlock_64.S 2012-02-14 08:38:05.935358447 -0800 +++ b/arch/x86/lib/rwlock_64.S 2012-02-14 10:50:15.707243372 -0800 @@ -2,6 +2,7 @@ #include #include +#include #include #include @@ -10,13 +11,34 @@ ENTRY(__write_lock_failed) CFI_STARTPROC LOCK_PREFIX addl $RW_LOCK_BIAS,(%rdi) + +#ifdef CONFIG_PAX_REFCOUNT + jno 1234f + LOCK_PREFIX + subl $RW_LOCK_BIAS,(%rdi) + int $4 +1234: + _ASM_EXTABLE(1234b, 1234b) +#endif + 1: rep nop cmpl $RW_LOCK_BIAS,(%rdi) jne 1b LOCK_PREFIX subl $RW_LOCK_BIAS,(%rdi) + +#ifdef CONFIG_PAX_REFCOUNT + jno 1234f + LOCK_PREFIX + addl $RW_LOCK_BIAS,(%rdi) + int $4 +1234: + _ASM_EXTABLE(1234b, 1234b) +#endif + jnz __write_lock_failed + pax_force_retaddr ret CFI_ENDPROC END(__write_lock_failed) @@ -26,13 +48,34 @@ ENTRY(__read_lock_failed) CFI_STARTPROC LOCK_PREFIX incl (%rdi) + +#ifdef CONFIG_PAX_REFCOUNT + jno 1234f + LOCK_PREFIX + decl (%rdi) + int $4 +1234: + _ASM_EXTABLE(1234b, 1234b) +#endif + 1: rep nop cmpl $1,(%rdi) js 1b LOCK_PREFIX decl (%rdi) + +#ifdef CONFIG_PAX_REFCOUNT + jno 1234f + LOCK_PREFIX + incl (%rdi) + int $4 +1234: + _ASM_EXTABLE(1234b, 1234b) +#endif + js __read_lock_failed + pax_force_retaddr ret CFI_ENDPROC END(__read_lock_failed) diff -purN 
a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c --- a/arch/x86/mm/mmio-mod.c 2012-02-14 08:38:05.819358452 -0800 +++ b/arch/x86/mm/mmio-mod.c 2012-02-14 10:50:15.707243372 -0800 @@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p, static void ioremap_trace_core(resource_size_t offset, unsigned long size, void __iomem *addr) { - static atomic_t next_id; + static atomic_unchecked_t next_id; struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL); /* These are page-unaligned. */ struct mmiotrace_map map = { @@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_ .private = trace }, .phys = offset, - .id = atomic_inc_return(&next_id) + .id = atomic_inc_return_unchecked(&next_id) }; map.map_id = trace->id; diff -purN a/drivers/atm/adummy.c b/drivers/atm/adummy.c --- a/drivers/atm/adummy.c 2012-02-14 08:38:09.279358298 -0800 +++ b/drivers/atm/adummy.c 2012-02-14 10:50:15.707243372 -0800 @@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct vcc->pop(vcc, skb); else dev_kfree_skb_any(skb); - atomic_inc(&vcc->stats->tx); + atomic_inc_unchecked(&vcc->stats->tx); return 0; } diff -purN a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c --- a/drivers/atm/ambassador.c 2012-02-14 08:38:09.263358298 -0800 +++ b/drivers/atm/ambassador.c 2012-02-14 10:50:15.711243372 -0800 @@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev, PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx); // VC layer stats - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx); + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx); // free the descriptor kfree (tx_descr); @@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev, dump_skb ("<<<", vc, skb); // VC layer stats - atomic_inc(&atm_vcc->stats->rx); + atomic_inc_unchecked(&atm_vcc->stats->rx); __net_timestamp(skb); // end of our responsability atm_vcc->push (atm_vcc, skb); @@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev, } else { PRINTK (KERN_INFO, "dropped over-size frame"); // should we count this? 
- atomic_inc(&atm_vcc->stats->rx_drop); + atomic_inc_unchecked(&atm_vcc->stats->rx_drop); } } else { @@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * at } if (check_area (skb->data, skb->len)) { - atomic_inc(&atm_vcc->stats->tx_err); + atomic_inc_unchecked(&atm_vcc->stats->tx_err); return -ENOMEM; // ? } diff -purN a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c --- a/drivers/atm/atmtcp.c 2012-02-14 08:38:09.263358298 -0800 +++ b/drivers/atm/atmtcp.c 2012-02-14 10:50:15.711243372 -0800 @@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc if (vcc->pop) vcc->pop(vcc,skb); else dev_kfree_skb(skb); if (dev_data) return 0; - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); return -ENOLINK; } size = skb->len+sizeof(struct atmtcp_hdr); @@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc if (!new_skb) { if (vcc->pop) vcc->pop(vcc,skb); else dev_kfree_skb(skb); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); return -ENOBUFS; } hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr)); @@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc if (vcc->pop) vcc->pop(vcc,skb); else dev_kfree_skb(skb); out_vcc->push(out_vcc,new_skb); - atomic_inc(&vcc->stats->tx); - atomic_inc(&out_vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->tx); + atomic_inc_unchecked(&out_vcc->stats->rx); return 0; } @@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci)); read_unlock(&vcc_sklist_lock); if (!out_vcc) { - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); goto done; } skb_pull(skb,sizeof(struct atmtcp_hdr)); @@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc __net_timestamp(new_skb); skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len); out_vcc->push(out_vcc,new_skb); - atomic_inc(&vcc->stats->tx); - atomic_inc(&out_vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->tx); + 
atomic_inc_unchecked(&out_vcc->stats->rx); done: if (vcc->pop) vcc->pop(vcc,skb); else dev_kfree_skb(skb); diff -purN a/drivers/atm/eni.c b/drivers/atm/eni.c --- a/drivers/atm/eni.c 2012-02-14 08:38:09.263358298 -0800 +++ b/drivers/atm/eni.c 2012-02-14 10:50:15.711243372 -0800 @@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc) DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n", vcc->dev->number); length = 0; - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); } else { length = ATM_CELL_SIZE-1; /* no HEC */ @@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc) size); } eff = length = 0; - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); } else { size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2); @@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc) "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n", vcc->dev->number,vcc->vci,length,size << 2,descr); length = eff = 0; - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); } } skb = eff ? 
atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL; @@ -770,7 +770,7 @@ rx_dequeued++; vcc->push(vcc,skb); pushed++; } - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); } wake_up(&eni_dev->rx_wait); } @@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *d PCI_DMA_TODEVICE); if (vcc->pop) vcc->pop(vcc,skb); else dev_kfree_skb_irq(skb); - atomic_inc(&vcc->stats->tx); + atomic_inc_unchecked(&vcc->stats->tx); wake_up(&eni_dev->tx_wait); dma_complete++; } diff -purN a/drivers/atm/firestream.c b/drivers/atm/firestream.c --- a/drivers/atm/firestream.c 2012-02-14 08:38:09.263358298 -0800 +++ b/drivers/atm/firestream.c 2012-02-14 10:50:15.715243371 -0800 @@ -748,7 +748,7 @@ static void process_txdone_queue (struct } } - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx); + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx); fs_dprintk (FS_DEBUG_TXMEM, "i"); fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb); @@ -815,7 +815,7 @@ static void process_incoming (struct fs_ #endif skb_put (skb, qe->p1 & 0xffff); ATM_SKB(skb)->vcc = atm_vcc; - atomic_inc(&atm_vcc->stats->rx); + atomic_inc_unchecked(&atm_vcc->stats->rx); __net_timestamp(skb); fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb); atm_vcc->push (atm_vcc, skb); @@ -836,12 +836,12 @@ static void process_incoming (struct fs_ kfree (pe); } if (atm_vcc) - atomic_inc(&atm_vcc->stats->rx_drop); + atomic_inc_unchecked(&atm_vcc->stats->rx_drop); break; case 0x1f: /* Reassembly abort: no buffers. */ /* Silently increment error counter. */ if (atm_vcc) - atomic_inc(&atm_vcc->stats->rx_drop); + atomic_inc_unchecked(&atm_vcc->stats->rx_drop); break; default: /* Hmm. Haven't written the code to handle the others yet... 
-- REW */ printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n", diff -purN a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c --- a/drivers/atm/fore200e.c 2012-02-14 08:38:09.279358298 -0800 +++ b/drivers/atm/fore200e.c 2012-02-14 10:50:15.719243371 -0800 @@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200 #endif /* check error condition */ if (*entry->status & STATUS_ERROR) - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); else - atomic_inc(&vcc->stats->tx); + atomic_inc_unchecked(&vcc->stats->tx); } } @@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore2 if (skb == NULL) { DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); return -ENOMEM; } @@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore2 dev_kfree_skb_any(skb); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); return -ENOMEM; } ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0); vcc->push(vcc, skb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0); @@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200 DPRINTK(2, "damaged PDU on %d.%d.%d\n", fore200e->atm_dev->number, entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); } } @@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struc goto retry_here; } - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); fore200e->tx_sat++; DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n", diff -purN a/drivers/atm/he.c b/drivers/atm/he.c --- a/drivers/atm/he.c 2012-02-14 08:38:09.263358298 -0800 +++ b/drivers/atm/he.c 2012-02-14 10:50:15.727243371 -0800 @@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, i if 
(RBRQ_HBUF_ERR(he_dev->rbrq_head)) { hprintk("HBUF_ERR! (cid 0x%x)\n", cid); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); goto return_host_buffers; } @@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, i RBRQ_LEN_ERR(he_dev->rbrq_head) ? "LEN_ERR" : "", vcc->vpi, vcc->vci); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); goto return_host_buffers; } @@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, i vcc->push(vcc, skb); spin_lock(&he_dev->global_lock); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); return_host_buffers: ++pdus_assembled; @@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, str tpd->vcc->pop(tpd->vcc, tpd->skb); else dev_kfree_skb_any(tpd->skb); - atomic_inc(&tpd->vcc->stats->tx_err); + atomic_inc_unchecked(&tpd->vcc->stats->tx_err); } pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status)); return; @@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_b vcc->pop(vcc, skb); else dev_kfree_skb_any(skb); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); return -EINVAL; } @@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_b vcc->pop(vcc, skb); else dev_kfree_skb_any(skb); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); return -EINVAL; } #endif @@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_b vcc->pop(vcc, skb); else dev_kfree_skb_any(skb); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); spin_unlock_irqrestore(&he_dev->global_lock, flags); return -ENOMEM; } @@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_b vcc->pop(vcc, skb); else dev_kfree_skb_any(skb); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); spin_unlock_irqrestore(&he_dev->global_lock, flags); return -ENOMEM; } @@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_b __enqueue_tpd(he_dev, tpd, cid); 
spin_unlock_irqrestore(&he_dev->global_lock, flags); - atomic_inc(&vcc->stats->tx); + atomic_inc_unchecked(&vcc->stats->tx); return 0; } diff -purN a/drivers/atm/horizon.c b/drivers/atm/horizon.c --- a/drivers/atm/horizon.c 2012-02-14 08:38:09.263358298 -0800 +++ b/drivers/atm/horizon.c 2012-02-14 10:50:15.739243370 -0800 @@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev, { struct atm_vcc * vcc = ATM_SKB(skb)->vcc; // VC layer stats - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); __net_timestamp(skb); // end of our responsability vcc->push (vcc, skb); @@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const dev->tx_iovec = NULL; // VC layer stats - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx); + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx); // free the skb hrz_kfree_skb (skb); diff -purN a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c --- a/drivers/atm/idt77252.c 2012-02-14 08:38:09.263358298 -0800 +++ b/drivers/atm/idt77252.c 2012-02-14 10:50:15.743243370 -0800 @@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, str else dev_kfree_skb(skb); - atomic_inc(&vcc->stats->tx); + atomic_inc_unchecked(&vcc->stats->tx); } atomic_dec(&scq->used); @@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, st if ((sb = dev_alloc_skb(64)) == NULL) { printk("%s: Can't allocate buffers for aal0.\n", card->name); - atomic_add(i, &vcc->stats->rx_drop); + atomic_add_unchecked(i, &vcc->stats->rx_drop); break; } if (!atm_charge(vcc, sb->truesize)) { RXPRINTK("%s: atm_charge() dropped aal0 packets.\n", card->name); - atomic_add(i - 1, &vcc->stats->rx_drop); + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); dev_kfree_skb(sb); break; } @@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, st ATM_SKB(sb)->vcc = vcc; __net_timestamp(sb); vcc->push(vcc, sb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); cell += ATM_CELL_PAYLOAD; } @@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, st "(CDC: 
%08x)\n", card->name, len, rpp->len, readl(SAR_REG_CDC)); recycle_rx_pool_skb(card, rpp); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); return; } if (stat & SAR_RSQE_CRC) { RXPRINTK("%s: AAL5 CRC error.\n", card->name); recycle_rx_pool_skb(card, rpp); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); return; } if (skb_queue_len(&rpp->queue) > 1) { @@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, st RXPRINTK("%s: Can't alloc RX skb.\n", card->name); recycle_rx_pool_skb(card, rpp); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); return; } if (!atm_charge(vcc, skb->truesize)) { @@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, st __net_timestamp(skb); vcc->push(vcc, skb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); return; } @@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, st __net_timestamp(skb); vcc->push(vcc, skb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); if (skb->truesize > SAR_FB_SIZE_3) add_rx_skb(card, 3, SAR_FB_SIZE_3, 1); @@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car if (vcc->qos.aal != ATM_AAL0) { RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n", card->name, vpi, vci); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); goto drop; } if ((sb = dev_alloc_skb(64)) == NULL) { printk("%s: Can't allocate buffers for AAL0.\n", card->name); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); goto drop; } @@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car ATM_SKB(sb)->vcc = vcc; __net_timestamp(sb); vcc->push(vcc, sb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); drop: skb_pull(queue, 64); @@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s if (vc == NULL) { printk("%s: NULL connection in send().\n", card->name); - atomic_inc(&vcc->stats->tx_err); + 
atomic_inc_unchecked(&vcc->stats->tx_err); dev_kfree_skb(skb); return -EINVAL; } if (!test_bit(VCF_TX, &vc->flags)) { printk("%s: Trying to transmit on a non-tx VC.\n", card->name); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); dev_kfree_skb(skb); return -EINVAL; } @@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s break; default: printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); dev_kfree_skb(skb); return -EINVAL; } if (skb_shinfo(skb)->nr_frags != 0) { printk("%s: No scatter-gather yet.\n", card->name); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); dev_kfree_skb(skb); return -EINVAL; } @@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s err = queue_skb(card, vc, skb, oam); if (err) { - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); dev_kfree_skb(skb); return err; } @@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v skb = dev_alloc_skb(64); if (!skb) { printk("%s: Out of memory in send_oam().\n", card->name); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); return -ENOMEM; } atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc); diff -purN a/drivers/atm/iphase.c b/drivers/atm/iphase.c --- a/drivers/atm/iphase.c 2012-02-14 08:38:09.279358298 -0800 +++ b/drivers/atm/iphase.c 2012-02-14 10:50:15.747243370 -0800 @@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev) status = (u_short) (buf_desc_ptr->desc_mode); if (status & (RX_CER | RX_PTE | RX_OFL)) { - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); IF_ERR(printk("IA: bad packet, dropping it");) if (status & RX_CER) { IF_ERR(printk(" cause: packet CRC error\n");) @@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev) len = dma_addr - buf_addr; if (len > iadev->rx_buf_sz) { printk("Over %d bytes sdu received, 
dropped!!!\n", iadev->rx_buf_sz); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); goto out_free_desc; } @@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev * ia_vcc = INPH_IA_VCC(vcc); if (ia_vcc == NULL) { - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); dev_kfree_skb_any(skb); atm_return(vcc, atm_guess_pdu2truesize(len)); goto INCR_DLE; @@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev * if ((length > iadev->rx_buf_sz) || (length > (skb->len - sizeof(struct cpcs_trailer)))) { - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)", length, skb->len);) dev_kfree_skb_any(skb); @@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev * IF_RX(printk("rx_dle_intr: skb push");) vcc->push(vcc,skb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); iadev->rx_pkt_cnt++; } INCR_DLE: @@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev, { struct k_sonet_stats *stats; stats = &PRIV(_ia_dev[board])->sonet_stats; - printk("section_bip: %d\n", atomic_read(&stats->section_bip)); - printk("line_bip : %d\n", atomic_read(&stats->line_bip)); - printk("path_bip : %d\n", atomic_read(&stats->path_bip)); - printk("line_febe : %d\n", atomic_read(&stats->line_febe)); - printk("path_febe : %d\n", atomic_read(&stats->path_febe)); - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs)); - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs)); - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells)); - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells)); + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip)); + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip)); + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip)); + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe)); + printk("path_febe : %d\n", 
atomic_read_unchecked(&stats->path_febe)); + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs)); + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs)); + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells)); + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells)); } ia_cmds.status = 0; break; @@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vc if ((desc == 0) || (desc > iadev->num_tx_desc)) { IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);) - atomic_inc(&vcc->stats->tx); + atomic_inc_unchecked(&vcc->stats->tx); if (vcc->pop) vcc->pop(vcc, skb); else @@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vc ATM_DESC(skb) = vcc->vci; skb_queue_tail(&iadev->tx_dma_q, skb); - atomic_inc(&vcc->stats->tx); + atomic_inc_unchecked(&vcc->stats->tx); iadev->tx_pkt_cnt++; /* Increment transaction counter */ writel(2, iadev->dma+IPHASE5575_TX_COUNTER); #if 0 /* add flow control logic */ - if (atomic_read(&vcc->stats->tx) % 20 == 0) { + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) { if (iavcc->vc_desc_cnt > 10) { vcc->tx_quota = vcc->tx_quota * 3 / 4; printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota ); diff -purN a/drivers/atm/lanai.c b/drivers/atm/lanai.c --- a/drivers/atm/lanai.c 2012-02-14 08:38:09.275358298 -0800 +++ b/drivers/atm/lanai.c 2012-02-14 10:50:15.751243370 -0800 @@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct l vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0); lanai_endtx(lanai, lvcc); lanai_free_skb(lvcc->tx.atmvcc, skb); - atomic_inc(&lvcc->tx.atmvcc->stats->tx); + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx); } /* Try to fill the buffer - don't call unless there is backlog */ @@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc ATM_SKB(skb)->vcc = lvcc->rx.atmvcc; __net_timestamp(skb); lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb); - atomic_inc(&lvcc->rx.atmvcc->stats->rx); + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx); 
out: lvcc->rx.buf.ptr = end; cardvcc_write(lvcc, endptr, vcc_rxreadptr); @@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_d DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 " "vcc %d\n", lanai->number, (unsigned int) s, vci); lanai->stats.service_rxnotaal5++; - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err); + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err); return 0; } if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) { @@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_d int bytes; read_unlock(&vcc_sklist_lock); DPRINTK("got trashed rx pdu on vci %d\n", vci); - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err); + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err); lvcc->stats.x.aal5.service_trash++; bytes = (SERVICE_GET_END(s) * 16) - (((unsigned long) lvcc->rx.buf.ptr) - @@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_d } if (s & SERVICE_STREAM) { read_unlock(&vcc_sklist_lock); - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err); + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err); lvcc->stats.x.aal5.service_stream++; printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream " "PDU on VCI %d!\n", lanai->number, vci); @@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_d return 0; } DPRINTK("got rx crc error on vci %d\n", vci); - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err); + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err); lvcc->stats.x.aal5.service_rxcrc++; lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4]; cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr); diff -purN a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c --- a/drivers/atm/nicstar.c 2012-02-14 08:38:09.279358298 -0800 +++ b/drivers/atm/nicstar.c 2012-02-14 10:50:15.759243369 -0800 @@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc, if ((vc = (vc_map *) vcc->dev_data) == NULL) { printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index); - atomic_inc(&vcc->stats->tx_err); + 
atomic_inc_unchecked(&vcc->stats->tx_err); dev_kfree_skb_any(skb); return -EINVAL; } @@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc, if (!vc->tx) { printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); dev_kfree_skb_any(skb); return -EINVAL; } @@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc, if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) { printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); dev_kfree_skb_any(skb); return -EINVAL; } @@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc, if (skb_shinfo(skb)->nr_frags != 0) { printk("nicstar%d: No scatter-gather yet.\n", card->index); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); dev_kfree_skb_any(skb); return -EINVAL; } @@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc, if (push_scqe(card, vc, scq, &scqe, skb) != 0) { - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); dev_kfree_skb_any(skb); return -EIO; } - atomic_inc(&vcc->stats->tx); + atomic_inc_unchecked(&vcc->stats->tx); return 0; } @@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_ { printk("nicstar%d: Can't allocate buffers for aal0.\n", card->index); - atomic_add(i,&vcc->stats->rx_drop); + atomic_add_unchecked(i,&vcc->stats->rx_drop); break; } if (!atm_charge(vcc, sb->truesize)) { RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n", card->index); - atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */ + atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */ dev_kfree_skb_any(sb); break; } @@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_ ATM_SKB(sb)->vcc = vcc; __net_timestamp(sb); vcc->push(vcc, sb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); cell 
+= ATM_CELL_PAYLOAD; } @@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_ if (iovb == NULL) { printk("nicstar%d: Out of iovec buffers.\n", card->index); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); recycle_rx_buf(card, skb); return; } @@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_ else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS) { printk("nicstar%d: received too big AAL5 SDU.\n", card->index); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS); NS_SKB(iovb)->iovcnt = 0; iovb->len = 0; @@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_ printk("nicstar%d: Expected a small buffer, and this is not one.\n", card->index); which_list(card, skb); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); recycle_rx_buf(card, skb); vc->rx_iov = NULL; recycle_iov_buf(card, iovb); @@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_ printk("nicstar%d: Expected a large buffer, and this is not one.\n", card->index); which_list(card, skb); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_SKB(iovb)->iovcnt); vc->rx_iov = NULL; @@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_ printk(" - PDU size mismatch.\n"); else printk(".\n"); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_SKB(iovb)->iovcnt); vc->rx_iov = NULL; @@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_ if (!atm_charge(vcc, skb->truesize)) { push_rxbufs(card, skb); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); } else { @@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_ ATM_SKB(skb)->vcc = vcc; __net_timestamp(skb); vcc->push(vcc, skb); - 
atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); } } else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */ @@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_ if (!atm_charge(vcc, sb->truesize)) { push_rxbufs(card, sb); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); } else { @@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_ ATM_SKB(sb)->vcc = vcc; __net_timestamp(sb); vcc->push(vcc, sb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); } push_rxbufs(card, skb); @@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_ if (!atm_charge(vcc, skb->truesize)) { push_rxbufs(card, skb); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); } else { @@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_ ATM_SKB(skb)->vcc = vcc; __net_timestamp(skb); vcc->push(vcc, skb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); } push_rxbufs(card, sb); @@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_ if (hb == NULL) { printk("nicstar%d: Out of huge buffers.\n", card->index); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_SKB(iovb)->iovcnt); vc->rx_iov = NULL; @@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_ } else dev_kfree_skb_any(hb); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); } else { @@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_ #endif /* NS_USE_DESTRUCTORS */ __net_timestamp(hb); vcc->push(vcc, hb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); } } diff -purN a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c --- a/drivers/atm/solos-pci.c 2012-02-14 08:38:09.263358298 -0800 +++ b/drivers/atm/solos-pci.c 2012-02-14 10:50:15.759243369 -0800 @@ -708,7 +708,7 @@ void solos_bh(unsigned long 
card_arg) } atm_charge(vcc, skb->truesize); vcc->push(vcc, skb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); break; case PKT_STATUS: @@ -1023,7 +1023,7 @@ static uint32_t fpga_tx(struct solos_car vcc = SKB_CB(oldskb)->vcc; if (vcc) { - atomic_inc(&vcc->stats->tx); + atomic_inc_unchecked(&vcc->stats->tx); solos_pop(vcc, oldskb); } else dev_kfree_skb_irq(oldskb); diff -purN a/drivers/atm/suni.c b/drivers/atm/suni.c --- a/drivers/atm/suni.c 2012-02-14 08:38:09.275358298 -0800 +++ b/drivers/atm/suni.c 2012-02-14 10:50:15.763243369 -0800 @@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock); #define ADD_LIMITED(s,v) \ - atomic_add((v),&stats->s); \ - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX); + atomic_add_unchecked((v),&stats->s); \ + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX); static void suni_hz(unsigned long from_timer) diff -purN a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c --- a/drivers/atm/uPD98402.c 2012-02-14 08:38:09.263358298 -0800 +++ b/drivers/atm/uPD98402.c 2012-02-14 10:50:15.763243369 -0800 @@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *d struct sonet_stats tmp; int error = 0; - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs); + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs); sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp); if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp)); if (zero && !error) { @@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev #define ADD_LIMITED(s,v) \ - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \ - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \ - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); } + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \ + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \ + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); } static void stat_event(struct atm_dev *dev) @@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev if 
(reason & uPD98402_INT_PFM) stat_event(dev); if (reason & uPD98402_INT_PCO) { (void) GET(PCOCR); /* clear interrupt cause */ - atomic_add(GET(HECCT), + atomic_add_unchecked(GET(HECCT), &PRIV(dev)->sonet_stats.uncorr_hcs); } if ((reason & uPD98402_INT_RFO) && @@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO | uPD98402_INT_LOS),PIMR); /* enable them */ (void) fetch_stats(dev,NULL,1); /* clear kernel counters */ - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1); - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1); - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1); + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1); + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1); + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1); return 0; } diff -purN a/drivers/atm/zatm.c b/drivers/atm/zatm.c --- a/drivers/atm/zatm.c 2012-02-14 08:38:09.279358298 -0800 +++ b/drivers/atm/zatm.c 2012-02-14 10:50:15.763243369 -0800 @@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy } if (!size) { dev_kfree_skb_irq(skb); - if (vcc) atomic_inc(&vcc->stats->rx_err); + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err); continue; } if (!atm_charge(vcc,skb->truesize)) { @@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy skb->len = size; ATM_SKB(skb)->vcc = vcc; vcc->push(vcc,skb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); } zout(pos & 0xffff,MTA(mbx)); #if 0 /* probably a stupid idea */ @@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD skb_queue_head(&zatm_vcc->backlog,skb); break; } - atomic_inc(&vcc->stats->tx); + atomic_inc_unchecked(&vcc->stats->tx); wake_up(&zatm_vcc->tx_wait); } diff -purN a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c --- a/drivers/char/ipmi/ipmi_msghandler.c 2012-02-14 08:38:11.859358182 -0800 +++ b/drivers/char/ipmi/ipmi_msghandler.c 2012-02-14 10:50:15.771243370 -0800 @@ -414,7 +414,7 @@ 
struct ipmi_smi { struct proc_dir_entry *proc_dir; char proc_dir_name[10]; - atomic_t stats[IPMI_NUM_STATS]; + atomic_unchecked_t stats[IPMI_NUM_STATS]; /* * run_to_completion duplicate of smb_info, smi_info @@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex); #define ipmi_inc_stat(intf, stat) \ - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat]) + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]) #define ipmi_get_stat(intf, stat) \ - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat])) + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])) static int is_lan_addr(struct ipmi_addr *addr) { @@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_ha INIT_LIST_HEAD(&intf->cmd_rcvrs); init_waitqueue_head(&intf->waitq); for (i = 0; i < IPMI_NUM_STATS; i++) - atomic_set(&intf->stats[i], 0); + atomic_set_unchecked(&intf->stats[i], 0); intf->proc_dir = NULL; diff -purN a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c --- a/drivers/char/ipmi/ipmi_si_intf.c 2012-02-14 08:38:11.859358182 -0800 +++ b/drivers/char/ipmi/ipmi_si_intf.c 2012-02-14 10:50:15.771243370 -0800 @@ -277,7 +277,7 @@ struct smi_info { unsigned char slave_addr; /* Counters and things for the proc filesystem. 
*/ - atomic_t stats[SI_NUM_STATS]; + atomic_unchecked_t stats[SI_NUM_STATS]; struct task_struct *thread; @@ -285,9 +285,9 @@ struct smi_info { }; #define smi_inc_stat(smi, stat) \ - atomic_inc(&(smi)->stats[SI_STAT_ ## stat]) + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat]) #define smi_get_stat(smi, stat) \ - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat])) + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat])) #define SI_MAX_PARMS 4 @@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info atomic_set(&new_smi->req_events, 0); new_smi->run_to_completion = 0; for (i = 0; i < SI_NUM_STATS; i++) - atomic_set(&new_smi->stats[i], 0); + atomic_set_unchecked(&new_smi->stats[i], 0); new_smi->interrupt_disabled = 0; atomic_set(&new_smi->stop_operation, 0); diff -purN a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c --- a/drivers/edac/edac_pci_sysfs.c 2012-02-14 08:38:11.967358176 -0800 +++ b/drivers/edac/edac_pci_sysfs.c 2012-02-14 10:50:15.775243369 -0800 @@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */ static int edac_pci_poll_msec = 1000; /* one second workq period */ -static atomic_t pci_parity_count = ATOMIC_INIT(0); -static atomic_t pci_nonparity_count = ATOMIC_INIT(0); +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0); +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0); static struct kobject *edac_pci_top_main_kobj; static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0); @@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(str edac_printk(KERN_CRIT, EDAC_PCI, "Signaled System Error on %s\n", pci_name(dev)); - atomic_inc(&pci_nonparity_count); + atomic_inc_unchecked(&pci_nonparity_count); } if (status & (PCI_STATUS_PARITY)) { @@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(str "Master Data Parity Error on %s\n", pci_name(dev)); - atomic_inc(&pci_parity_count); + 
atomic_inc_unchecked(&pci_parity_count); } if (status & (PCI_STATUS_DETECTED_PARITY)) { @@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(str "Detected Parity Error on %s\n", pci_name(dev)); - atomic_inc(&pci_parity_count); + atomic_inc_unchecked(&pci_parity_count); } } @@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(str edac_printk(KERN_CRIT, EDAC_PCI, "Bridge " "Signaled System Error on %s\n", pci_name(dev)); - atomic_inc(&pci_nonparity_count); + atomic_inc_unchecked(&pci_nonparity_count); } if (status & (PCI_STATUS_PARITY)) { @@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(str "Master Data Parity Error on " "%s\n", pci_name(dev)); - atomic_inc(&pci_parity_count); + atomic_inc_unchecked(&pci_parity_count); } if (status & (PCI_STATUS_DETECTED_PARITY)) { @@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(str "Detected Parity Error on %s\n", pci_name(dev)); - atomic_inc(&pci_parity_count); + atomic_inc_unchecked(&pci_parity_count); } } } @@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void) if (!check_pci_errors) return; - before_count = atomic_read(&pci_parity_count); + before_count = atomic_read_unchecked(&pci_parity_count); /* scan all PCI devices looking for a Parity Error on devices and * bridges. 
@@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void) /* Only if operator has selected panic on PCI Error */ if (edac_pci_get_panic_on_pe()) { /* If the count is different 'after' from 'before' */ - if (before_count != atomic_read(&pci_parity_count)) + if (before_count != atomic_read_unchecked(&pci_parity_count)) panic("EDAC: PCI Parity Error"); } } diff -purN a/drivers/gpio/vr41xx_giu.c b/drivers/gpio/vr41xx_giu.c --- a/drivers/gpio/vr41xx_giu.c 2012-02-14 08:38:10.319358251 -0800 +++ b/drivers/gpio/vr41xx_giu.c 2012-02-14 10:50:15.775243369 -0800 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq) printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n", maskl, pendl, maskh, pendh); - atomic_inc(&irq_err_count); + atomic_inc_unchecked(&irq_err_count); return -EINVAL; } diff -purN a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c --- a/drivers/gpu/drm/drm_drv.c 2012-02-14 08:38:11.799358184 -0800 +++ b/drivers/gpu/drm/drm_drv.c 2012-02-14 10:50:15.783243368 -0800 @@ -448,7 +448,7 @@ long drm_ioctl(struct file *filp, dev = file_priv->minor->dev; atomic_inc(&dev->ioctl_count); - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]); + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]); ++file_priv->ioctl_count; DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n", diff -purN a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c --- a/drivers/gpu/drm/drm_fops.c 2012-02-14 08:38:11.727358187 -0800 +++ b/drivers/gpu/drm/drm_fops.c 2012-02-14 10:50:15.783243368 -0800 @@ -66,7 +66,7 @@ static int drm_setup(struct drm_device * } for (i = 0; i < ARRAY_SIZE(dev->counts); i++) - atomic_set(&dev->counts[i], 0); + atomic_set_unchecked(&dev->counts[i], 0); dev->sigdata.lock = NULL; @@ -130,7 +130,7 @@ int drm_open(struct inode *inode, struct retcode = drm_open_helper(inode, filp, dev); if (!retcode) { - atomic_inc(&dev->counts[_DRM_STAT_OPENS]); + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]); spin_lock(&dev->count_lock); if 
(!dev->open_count++) { spin_unlock(&dev->count_lock); @@ -567,7 +567,7 @@ int drm_release(struct inode *inode, str * End inline drm_release */ - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]); + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]); spin_lock(&dev->count_lock); if (!--dev->open_count) { if (atomic_read(&dev->ioctl_count)) { diff -purN a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c --- a/drivers/gpu/drm/drm_gem.c 2012-02-14 08:38:11.727358187 -0800 +++ b/drivers/gpu/drm/drm_gem.c 2012-02-14 10:50:15.783243368 -0800 @@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev) spin_lock_init(&dev->object_name_lock); idr_init(&dev->object_name_idr); atomic_set(&dev->object_count, 0); - atomic_set(&dev->object_memory, 0); + atomic_set_unchecked(&dev->object_memory, 0); atomic_set(&dev->pin_count, 0); - atomic_set(&dev->pin_memory, 0); + atomic_set_unchecked(&dev->pin_memory, 0); atomic_set(&dev->gtt_count, 0); - atomic_set(&dev->gtt_memory, 0); + atomic_set_unchecked(&dev->gtt_memory, 0); mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL); if (!mm) { @@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device * goto fput; } atomic_inc(&dev->object_count); - atomic_add(obj->size, &dev->object_memory); + atomic_add_unchecked(obj->size, &dev->object_memory); return obj; fput: fput(obj->filp); @@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref) fput(obj->filp); atomic_dec(&dev->object_count); - atomic_sub(obj->size, &dev->object_memory); + atomic_sub_unchecked(obj->size, &dev->object_memory); kfree(obj); } EXPORT_SYMBOL(drm_gem_object_free); diff -purN a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c --- a/drivers/gpu/drm/drm_ioctl.c 2012-02-14 08:38:11.787358184 -0800 +++ b/drivers/gpu/drm/drm_ioctl.c 2012-02-14 10:50:15.783243368 -0800 @@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev, stats->data[i].value = (file_priv->master->lock.hw_lock ? 
file_priv->master->lock.hw_lock->lock : 0); else - stats->data[i].value = atomic_read(&dev->counts[i]); + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]); stats->data[i].type = dev->types[i]; } diff -purN a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c --- a/drivers/gpu/drm/drm_lock.c 2012-02-14 08:38:11.695358188 -0800 +++ b/drivers/gpu/drm/drm_lock.c 2012-02-14 10:50:15.791243368 -0800 @@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, voi if (drm_lock_take(&master->lock, lock->context)) { master->lock.file_priv = file_priv; master->lock.lock_time = jiffies; - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]); break; /* Got lock */ } @@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, v return -EINVAL; } - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]); + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]); /* kernel_context_switch isn't used by any of the x86 drm * modules but is required by the Sparc driver. 
diff -purN a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c --- a/drivers/gpu/drm/i810/i810_dma.c 2012-02-14 08:38:11.727358187 -0800 +++ b/drivers/gpu/drm/i810/i810_dma.c 2012-02-14 10:50:15.791243368 -0800 @@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_de dma->buflist[vertex->idx], vertex->discard, vertex->used); - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]); - atomic_inc(&dev->counts[_DRM_STAT_DMA]); + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]); + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]); sarea_priv->last_enqueue = dev_priv->counter - 1; sarea_priv->last_dispatch = (int)hw_status[5]; @@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used, mc->last_render); - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]); - atomic_inc(&dev->counts[_DRM_STAT_DMA]); + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]); + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]); sarea_priv->last_enqueue = dev_priv->counter - 1; sarea_priv->last_dispatch = (int)hw_status[5]; diff -purN a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h --- a/drivers/gpu/drm/i810/i810_drv.h 2012-02-14 08:38:11.727358187 -0800 +++ b/drivers/gpu/drm/i810/i810_drv.h 2012-02-14 10:50:15.791243368 -0800 @@ -108,8 +108,8 @@ typedef struct drm_i810_private { int page_flipping; wait_queue_head_t irq_queue; - atomic_t irq_received; - atomic_t irq_emitted; + atomic_unchecked_t irq_received; + atomic_unchecked_t irq_emitted; int front_offset; } drm_i810_private_t; diff -purN a/drivers/gpu/drm/i830/i830_drv.h b/drivers/gpu/drm/i830/i830_drv.h --- a/drivers/gpu/drm/i830/i830_drv.h 2012-02-14 08:38:11.727358187 -0800 +++ b/drivers/gpu/drm/i830/i830_drv.h 2012-02-14 10:50:15.795243368 -0800 @@ -115,8 +115,8 @@ typedef struct drm_i830_private { int page_flipping; wait_queue_head_t irq_queue; - atomic_t irq_received; - atomic_t 
irq_emitted; + atomic_unchecked_t irq_received; + atomic_unchecked_t irq_emitted; int use_mi_batchbuffer_start; diff -purN a/drivers/gpu/drm/i830/i830_irq.c b/drivers/gpu/drm/i830/i830_irq.c --- a/drivers/gpu/drm/i830/i830_irq.c 2012-02-14 08:38:11.727358187 -0800 +++ b/drivers/gpu/drm/i830/i830_irq.c 2012-02-14 10:50:15.795243368 -0800 @@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_ I830_WRITE16(I830REG_INT_IDENTITY_R, temp); - atomic_inc(&dev_priv->irq_received); + atomic_inc_unchecked(&dev_priv->irq_received); wake_up_interruptible(&dev_priv->irq_queue); return IRQ_HANDLED; @@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_devi DRM_DEBUG("%s\n", __func__); - atomic_inc(&dev_priv->irq_emitted); + atomic_inc_unchecked(&dev_priv->irq_emitted); BEGIN_LP_RING(2); OUT_RING(0); OUT_RING(GFX_OP_USER_INTERRUPT); ADVANCE_LP_RING(); - return atomic_read(&dev_priv->irq_emitted); + return atomic_read_unchecked(&dev_priv->irq_emitted); } static int i830_wait_irq(struct drm_device * dev, int irq_nr) @@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_devi DRM_DEBUG("%s\n", __func__); - if (atomic_read(&dev_priv->irq_received) >= irq_nr) + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr) return 0; dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT; @@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_devi for (;;) { __set_current_state(TASK_INTERRUPTIBLE); - if (atomic_read(&dev_priv->irq_received) >= irq_nr) + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr) break; if ((signed)(end - jiffies) <= 0) { DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n", @@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct d I830_WRITE16(I830REG_HWSTAM, 0xffff); I830_WRITE16(I830REG_INT_MASK_R, 0x0); I830_WRITE16(I830REG_INT_ENABLE_R, 0x0); - atomic_set(&dev_priv->irq_received, 0); - atomic_set(&dev_priv->irq_emitted, 0); + atomic_set_unchecked(&dev_priv->irq_received, 0); + atomic_set_unchecked(&dev_priv->irq_emitted, 0); 
init_waitqueue_head(&dev_priv->irq_queue); } diff -purN a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c --- a/drivers/gpu/drm/i915/i915_debugfs.c 2012-02-14 08:38:11.799358184 -0800 +++ b/drivers/gpu/drm/i915/i915_debugfs.c 2012-02-14 10:50:15.803243367 -0800 @@ -194,7 +194,7 @@ static int i915_interrupt_info(struct se I915_READ(GTIMR)); } seq_printf(m, "Interrupts received: %d\n", - atomic_read(&dev_priv->irq_received)); + atomic_read_unchecked(&dev_priv->irq_received)); if (dev_priv->hw_status_page != NULL) { seq_printf(m, "Current sequence: %d\n", i915_get_gem_seqno(dev)); diff -purN a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h --- a/drivers/gpu/drm/i915/i915_drv.h 2012-02-14 08:38:11.791358184 -0800 +++ b/drivers/gpu/drm/i915/i915_drv.h 2012-02-14 10:50:15.803243367 -0800 @@ -228,7 +228,7 @@ typedef struct drm_i915_private { int page_flipping; wait_queue_head_t irq_queue; - atomic_t irq_received; + atomic_unchecked_t irq_received; /** Protects user_irq_refcount and irq_mask_reg */ spinlock_t user_irq_lock; /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). 
*/ diff -purN a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c --- a/drivers/gpu/drm/i915/i915_gem.c 2012-02-14 08:38:11.799358184 -0800 +++ b/drivers/gpu/drm/i915/i915_gem.c 2012-02-14 10:50:15.807243367 -0800 @@ -107,7 +107,7 @@ i915_gem_get_aperture_ioctl(struct drm_d args->aper_size = dev->gtt_total; args->aper_available_size = (args->aper_size - - atomic_read(&dev->pin_memory)); + atomic_read_unchecked(&dev->pin_memory)); return 0; } @@ -2105,7 +2105,7 @@ i915_gem_object_unbind(struct drm_gem_ob if (obj_priv->gtt_space) { atomic_dec(&dev->gtt_count); - atomic_sub(obj->size, &dev->gtt_memory); + atomic_sub_unchecked(obj->size, &dev->gtt_memory); drm_mm_put_block(obj_priv->gtt_space); obj_priv->gtt_space = NULL; @@ -2587,7 +2587,7 @@ i915_gem_object_bind_to_gtt(struct drm_g goto search_free; } atomic_inc(&dev->gtt_count); - atomic_add(obj->size, &dev->gtt_memory); + atomic_add_unchecked(obj->size, &dev->gtt_memory); /* Assert that the object is not currently in any GPU domain. 
As it * wasn't in the GTT, there shouldn't be any way it could have been in @@ -3721,9 +3721,9 @@ i915_gem_do_execbuffer(struct drm_device "%d/%d gtt bytes\n", atomic_read(&dev->object_count), atomic_read(&dev->pin_count), - atomic_read(&dev->object_memory), - atomic_read(&dev->pin_memory), - atomic_read(&dev->gtt_memory), + atomic_read_unchecked(&dev->object_memory), + atomic_read_unchecked(&dev->pin_memory), + atomic_read_unchecked(&dev->gtt_memory), dev->gtt_total); } goto err; @@ -4073,7 +4073,7 @@ i915_gem_object_pin(struct drm_gem_objec */ if (obj_priv->pin_count == 1) { atomic_inc(&dev->pin_count); - atomic_add(obj->size, &dev->pin_memory); + atomic_add_unchecked(obj->size, &dev->pin_memory); if (!obj_priv->active && (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 && !list_empty(&obj_priv->list)) @@ -4106,7 +4106,7 @@ i915_gem_object_unpin(struct drm_gem_obj list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); atomic_dec(&dev->pin_count); - atomic_sub(obj->size, &dev->pin_memory); + atomic_sub_unchecked(obj->size, &dev->pin_memory); } i915_verify_inactive(dev, __FILE__, __LINE__); } diff -purN a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c --- a/drivers/gpu/drm/i915/i915_irq.c 2012-02-14 08:38:11.791358184 -0800 +++ b/drivers/gpu/drm/i915/i915_irq.c 2012-02-14 10:50:15.811243367 -0800 @@ -574,7 +574,7 @@ irqreturn_t i915_driver_irq_handler(DRM_ int irq_received; int ret = IRQ_NONE; - atomic_inc(&dev_priv->irq_received); + atomic_inc_unchecked(&dev_priv->irq_received); if (HAS_PCH_SPLIT(dev)) return ironlake_irq_handler(dev); @@ -1079,7 +1079,7 @@ void i915_driver_irq_preinstall(struct d { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - atomic_set(&dev_priv->irq_received, 0); + atomic_set_unchecked(&dev_priv->irq_received, 0); INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); INIT_WORK(&dev_priv->error_work, i915_error_work_func); diff -purN a/drivers/gpu/drm/mga/mga_drv.h 
b/drivers/gpu/drm/mga/mga_drv.h --- a/drivers/gpu/drm/mga/mga_drv.h 2012-02-14 08:38:11.695358188 -0800 +++ b/drivers/gpu/drm/mga/mga_drv.h 2012-02-14 10:50:15.811243367 -0800 @@ -120,9 +120,9 @@ typedef struct drm_mga_private { u32 clear_cmd; u32 maccess; - atomic_t vbl_received; /**< Number of vblanks received. */ + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */ wait_queue_head_t fence_queue; - atomic_t last_fence_retired; + atomic_unchecked_t last_fence_retired; u32 next_fence_to_post; unsigned int fb_cpp; diff -purN a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c --- a/drivers/gpu/drm/mga/mga_irq.c 2012-02-14 08:38:11.699358189 -0800 +++ b/drivers/gpu/drm/mga/mga_irq.c 2012-02-14 10:50:15.815243367 -0800 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de if (crtc != 0) return 0; - return atomic_read(&dev_priv->vbl_received); + return atomic_read_unchecked(&dev_priv->vbl_received); } @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I /* VBLANK interrupt */ if (status & MGA_VLINEPEN) { MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR); - atomic_inc(&dev_priv->vbl_received); + atomic_inc_unchecked(&dev_priv->vbl_received); drm_handle_vblank(dev, 0); handled = 1; } @@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I MGA_WRITE(MGA_PRIMEND, prim_end); } - atomic_inc(&dev_priv->last_fence_retired); + atomic_inc_unchecked(&dev_priv->last_fence_retired); DRM_WAKEUP(&dev_priv->fence_queue); handled = 1; } @@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_dev * using fences. 
*/ DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ, - (((cur_fence = atomic_read(&dev_priv->last_fence_retired)) + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired)) - *sequence) <= (1 << 23))); *sequence = cur_fence; diff -purN a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c --- a/drivers/gpu/drm/r128/r128_cce.c 2012-02-14 08:38:11.751358185 -0800 +++ b/drivers/gpu/drm/r128/r128_cce.c 2012-02-14 10:50:15.815243367 -0800 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d /* GH: Simple idle check. */ - atomic_set(&dev_priv->idle_count, 0); + atomic_set_unchecked(&dev_priv->idle_count, 0); /* We don't support anything other than bus-mastering ring mode, * but the ring can be in either AGP or PCI space for the ring diff -purN a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h --- a/drivers/gpu/drm/r128/r128_drv.h 2012-02-14 08:38:11.751358185 -0800 +++ b/drivers/gpu/drm/r128/r128_drv.h 2012-02-14 10:50:15.819243367 -0800 @@ -90,14 +90,14 @@ typedef struct drm_r128_private { int is_pci; unsigned long cce_buffers_offset; - atomic_t idle_count; + atomic_unchecked_t idle_count; int page_flipping; int current_page; u32 crtc_offset; u32 crtc_offset_cntl; - atomic_t vbl_received; + atomic_unchecked_t vbl_received; u32 color_fmt; unsigned int front_offset; diff -purN a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c --- a/drivers/gpu/drm/r128/r128_irq.c 2012-02-14 08:38:11.751358185 -0800 +++ b/drivers/gpu/drm/r128/r128_irq.c 2012-02-14 10:50:15.823243367 -0800 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d if (crtc != 0) return 0; - return atomic_read(&dev_priv->vbl_received); + return atomic_read_unchecked(&dev_priv->vbl_received); } irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS) @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_ /* VBLANK interrupt */ if (status & R128_CRTC_VBLANK_INT) { R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK); - 
atomic_inc(&dev_priv->vbl_received); + atomic_inc_unchecked(&dev_priv->vbl_received); drm_handle_vblank(dev, 0); return IRQ_HANDLED; } diff -purN a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c --- a/drivers/gpu/drm/r128/r128_state.c 2012-02-14 08:38:11.751358185 -0800 +++ b/drivers/gpu/drm/r128/r128_state.c 2012-02-14 10:50:15.823243367 -0800 @@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_priv static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv) { - if (atomic_read(&dev_priv->idle_count) == 0) { + if (atomic_read_unchecked(&dev_priv->idle_count) == 0) { r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0); } else { - atomic_set(&dev_priv->idle_count, 0); + atomic_set_unchecked(&dev_priv->idle_count, 0); } } diff -purN a/drivers/gpu/drm/radeon/r100_reg_safe.h b/drivers/gpu/drm/radeon/r100_reg_safe.h --- a/drivers/gpu/drm/radeon/r100_reg_safe.h 1969-12-31 16:00:00.000000000 -0800 +++ b/drivers/gpu/drm/radeon/r100_reg_safe.h 2012-02-14 12:24:53.414045670 -0800 @@ -0,0 +1,28 @@ +static const unsigned r100_reg_safe_bm[102] = { + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x17FF1FFF, 0xFFFFFFFC, 0xFFFFFFFF, 0xFF30FFBF, + 0xFFFFFFF8, 0xC3E6FFFF, 0xFFFFF6DF, 0xFFFFFFFF, + 0xFFFFFFCF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFF9F, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x38E7FE1F, 0xFFC3FF8E, 0x7FF8FFFF, 0xFFFF803C, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFEFFFF, 0xFFFFFFFF, + 0x00000000, 0xFFFFFFFD, 0xFFFFFFFF, 0xFFFFFFFF, + 
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFCFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFEF, +}; diff -purN a/drivers/gpu/drm/radeon/r200_reg_safe.h b/drivers/gpu/drm/radeon/r200_reg_safe.h --- a/drivers/gpu/drm/radeon/r200_reg_safe.h 1969-12-31 16:00:00.000000000 -0800 +++ b/drivers/gpu/drm/radeon/r200_reg_safe.h 2012-02-14 12:24:33.418045821 -0800 @@ -0,0 +1,28 @@ +static const unsigned r200_reg_safe_bm[102] = { + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x17FF1FFF, 0xFFFFFFFC, 0xFFFFFFFF, 0xFF30FFBF, + 0xFFFFFFF8, 0xC3E6FFFF, 0xFFFFF6DF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFE7FE1F, 0xF003FFFF, 0x7EFFFFFF, 0xFFFF803C, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFEFCE, 0xFFFEFFFF, 0xFFFFFFFE, + 0x020E0FF0, 0xFFCC83FD, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFBFFFF, 0xEFFCFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xDFDFDFDF, 0x3FFDDFDF, 0xFFFFFFFF, 0xFFFFFF7F, + 0xFFFFFFFF, 0x00FFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFE3F, 0xFFFFFFEF, +}; diff -purN 
a/drivers/gpu/drm/radeon/r300_reg_safe.h b/drivers/gpu/drm/radeon/r300_reg_safe.h --- a/drivers/gpu/drm/radeon/r300_reg_safe.h 1969-12-31 16:00:00.000000000 -0800 +++ b/drivers/gpu/drm/radeon/r300_reg_safe.h 2012-02-14 12:24:53.526045670 -0800 @@ -0,0 +1,42 @@ +static const unsigned r300_reg_safe_bm[159] = { + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x17FF1FFF, 0xFFFFFFFC, 0xFFFFFFFF, 0xFF30FFBF, + 0xFFFFFFF8, 0xC3E6FFFF, 0xFFFFF6DF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF03F, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFEFCE, 0xF00EBFFF, 0x007C0000, + 0xF0000078, 0xFF000009, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFF7FF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFC78, 0xFFFFFFFF, 0xFFFFFFFE, 0xFFFFFFFF, + 0x38FF8F50, 0xFFF88082, 0xF000000C, 
0xFAE009FF, + 0x0000FFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, + 0x00000000, 0x0000C100, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFF80FFFF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x0003FC01, 0xFFFFFCFF, 0xFF800B19, +}; diff -purN a/drivers/gpu/drm/radeon/r420_reg_safe.h b/drivers/gpu/drm/radeon/r420_reg_safe.h --- a/drivers/gpu/drm/radeon/r420_reg_safe.h 1969-12-31 16:00:00.000000000 -0800 +++ b/drivers/gpu/drm/radeon/r420_reg_safe.h 2012-02-14 12:24:53.590045670 -0800 @@ -0,0 +1,42 @@ +static const unsigned r420_reg_safe_bm[159] = { + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x17FF1FFF, 0xFFFFFFFC, 0xFFFFFFFF, 0xFF30FFBF, + 0xFFFFFFF8, 0xC3E6FFFF, 0xFFFFF6DF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF03F, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFEFCE, 0xF00EBFFF, 0x007C0000, + 0xF0000078, 0xFF000009, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFF7FF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFC78, 0xFFFFFFFF, 0xFFFFFFFE, 0xFFFFFFFF, + 0x38FF8F50, 0xFFF88082, 0xF000000C, 0xFAE009FF, + 0x0000FFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, + 0x00000000, 0x00000100, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0xFF800000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x0003FC01, 0xFFFFFCFF, 0xFF800B19, +}; diff -purN a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h --- a/drivers/gpu/drm/radeon/radeon_drv.h 2012-02-14 08:38:11.787358184 -0800 +++ b/drivers/gpu/drm/radeon/radeon_drv.h 2012-02-14 10:50:15.827243366 -0800 @@ -254,7 +254,7 @@ typedef struct drm_radeon_private { /* SW interrupt */ wait_queue_head_t swi_queue; - atomic_t swi_emitted; + atomic_unchecked_t swi_emitted; int vblank_crtc; uint32_t irq_enable_reg; uint32_t r500_disp_irq_reg; diff -purN a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c --- a/drivers/gpu/drm/radeon/radeon_fence.c 2012-02-14 08:38:11.779358185 -0800 +++ b/drivers/gpu/drm/radeon/radeon_fence.c 2012-02-14 10:50:15.831243366 -0800 @@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_devi write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); return 0; } - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq); + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq); if (!rdev->cp.ready) { /* FIXME: cp is not running assume everythings is done right * away @@ -328,7 +328,7 @@ int radeon_fence_driver_init(struct rade return r; } WREG32(rdev->fence_drv.scratch_reg, 0); - atomic_set(&rdev->fence_drv.seq, 0); + atomic_set_unchecked(&rdev->fence_drv.seq, 0); INIT_LIST_HEAD(&rdev->fence_drv.created); INIT_LIST_HEAD(&rdev->fence_drv.emited); 
INIT_LIST_HEAD(&rdev->fence_drv.signaled); diff -purN a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h --- a/drivers/gpu/drm/radeon/radeon.h 2012-02-14 08:38:11.779358185 -0800 +++ b/drivers/gpu/drm/radeon/radeon.h 2012-02-14 10:50:15.831243366 -0800 @@ -155,7 +155,7 @@ int radeon_pm_init(struct radeon_device */ struct radeon_fence_driver { uint32_t scratch_reg; - atomic_t seq; + atomic_unchecked_t seq; uint32_t last_seq; unsigned long count_timeout; wait_queue_head_t queue; diff -purN a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c --- a/drivers/gpu/drm/radeon/radeon_irq.c 2012-02-14 08:38:11.775358185 -0800 +++ b/drivers/gpu/drm/radeon/radeon_irq.c 2012-02-14 10:50:15.831243366 -0800 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de unsigned int ret; RING_LOCALS; - atomic_inc(&dev_priv->swi_emitted); - ret = atomic_read(&dev_priv->swi_emitted); + atomic_inc_unchecked(&dev_priv->swi_emitted); + ret = atomic_read_unchecked(&dev_priv->swi_emitted); BEGIN_RING(4); OUT_RING_REG(RADEON_LAST_SWI_REG, ret); @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; - atomic_set(&dev_priv->swi_emitted, 0); + atomic_set_unchecked(&dev_priv->swi_emitted, 0); DRM_INIT_WAITQUEUE(&dev_priv->swi_queue); dev->max_vblank_count = 0x001fffff; diff -purN a/drivers/gpu/drm/radeon/rn50_reg_safe.h b/drivers/gpu/drm/radeon/rn50_reg_safe.h --- a/drivers/gpu/drm/radeon/rn50_reg_safe.h 1969-12-31 16:00:00.000000000 -0800 +++ b/drivers/gpu/drm/radeon/rn50_reg_safe.h 2012-02-14 12:24:53.490045671 -0800 @@ -0,0 +1,28 @@ +static const unsigned rn50_reg_safe_bm[102] = { + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 
0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x17FF1FFF, 0xFFFFFFFC, 0xFFFFFFFF, 0xFF30FFBF, + 0xFFFFFFF8, 0xC3E6FFFF, 0xFFFFF6DF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, +}; diff -purN a/drivers/gpu/drm/radeon/rs600_reg_safe.h b/drivers/gpu/drm/radeon/rs600_reg_safe.h --- a/drivers/gpu/drm/radeon/rs600_reg_safe.h 1969-12-31 16:00:00.000000000 -0800 +++ b/drivers/gpu/drm/radeon/rs600_reg_safe.h 2012-02-14 12:24:18.714045930 -0800 @@ -0,0 +1,57 @@ +static const unsigned rs600_reg_safe_bm[219] = { + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x17FF1FFF, 0xFFFFFFFC, 0xFFFFFFFF, 0xFF30FFBF, + 0xFFFFFFF8, 0xC3E6FFFF, 0xFFFFF6DF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 
0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF03F, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFEFCE, 0xF00EBFFF, 0x007C0000, + 0xF0000078, 0xFF000009, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFF7FF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFC78, 0xFFFFFFFF, 0xFFFFFFFE, 0xFFFFFFFF, + 0x38FF8F50, 0xFFF88082, 0xF000000C, 0xFAE009FF, + 0x0000FFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, + 0x00000000, 0x00000100, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0xFF800000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x0003FC01, 0xFFFFFCFF, 0xFF800B19, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 
0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, +}; diff -purN a/drivers/gpu/drm/radeon/rv515_reg_safe.h b/drivers/gpu/drm/radeon/rv515_reg_safe.h --- a/drivers/gpu/drm/radeon/rv515_reg_safe.h 1969-12-31 16:00:00.000000000 -0800 +++ b/drivers/gpu/drm/radeon/rv515_reg_safe.h 2012-02-14 12:24:21.218045911 -0800 @@ -0,0 +1,57 @@ +static const unsigned rv515_reg_safe_bm[219] = { + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x17FF1FFF, 0xFFFFFFFC, 0xFFFFFFFF, 0xFF30FFBF, + 0xFFFFFFF8, 0xC3E6FFFF, 0xFFFFF6DF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF03F, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFEFCE, 0xF00EBFFF, 0x007C0000, + 0xF0000038, 0xFF000009, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFF7FF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 
0x1FFFF878, 0xFFFFE000, 0xFFFFFE1E, 0xFFFFFFFF, + 0x388F8F50, 0xFFF88082, 0xFF0000FC, 0xFAE009FF, + 0x0000FFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, + 0xFFFF8CFC, 0xFFFFC1FF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF80FFFF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x0003FC01, 0x3FFFFCFF, 0xFF800B19, 0xFFDFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, +}; diff -purN a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h --- a/drivers/gpu/drm/via/via_drv.h 2012-02-14 08:38:11.727358187 -0800 +++ b/drivers/gpu/drm/via/via_drv.h 2012-02-14 10:50:15.835243367 -0800 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer { typedef uint32_t maskarray_t[5]; typedef struct drm_via_irq { - atomic_t irq_received; + atomic_unchecked_t irq_received; uint32_t pending_mask; uint32_t enable_mask; wait_queue_head_t irq_queue; @@ -75,7 +75,7 @@ typedef struct drm_via_private { struct timeval last_vblank; int last_vblank_valid; unsigned usec_per_vblank; - atomic_t vbl_received; + atomic_unchecked_t vbl_received; drm_via_state_t hc_state; char pci_buf[VIA_PCI_BUF_SIZE]; const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE]; diff -purN a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c --- 
a/drivers/gpu/drm/via/via_irq.c 2012-02-14 08:38:11.727358187 -0800 +++ b/drivers/gpu/drm/via/via_irq.c 2012-02-14 10:50:15.835243367 -0800 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de if (crtc != 0) return 0; - return atomic_read(&dev_priv->vbl_received); + return atomic_read_unchecked(&dev_priv->vbl_received); } irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS) @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I status = VIA_READ(VIA_REG_INTERRUPT); if (status & VIA_IRQ_VBLANK_PENDING) { - atomic_inc(&dev_priv->vbl_received); - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) { + atomic_inc_unchecked(&dev_priv->vbl_received); + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) { do_gettimeofday(&cur_vblank); if (dev_priv->last_vblank_valid) { dev_priv->usec_per_vblank = @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I dev_priv->last_vblank = cur_vblank; dev_priv->last_vblank_valid = 1; } - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) { + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) { DRM_DEBUG("US per vblank is: %u\n", dev_priv->usec_per_vblank); } @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I for (i = 0; i < dev_priv->num_irqs; ++i) { if (status & cur_irq->pending_mask) { - atomic_inc(&cur_irq->irq_received); + atomic_inc_unchecked(&cur_irq->irq_received); DRM_WAKEUP(&cur_irq->irq_queue); handled = 1; if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) { @@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device * DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ, ((VIA_READ(masks[irq][2]) & masks[irq][3]) == masks[irq][4])); - cur_irq_sequence = atomic_read(&cur_irq->irq_received); + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received); } else { DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ, (((cur_irq_sequence = - atomic_read(&cur_irq->irq_received)) - + atomic_read_unchecked(&cur_irq->irq_received)) - *sequence) <= (1 << 23))); } *sequence = cur_irq_sequence; 
@@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct dr } for (i = 0; i < dev_priv->num_irqs; ++i) { - atomic_set(&cur_irq->irq_received, 0); + atomic_set_unchecked(&cur_irq->irq_received, 0); cur_irq->enable_mask = dev_priv->irq_masks[i][0]; cur_irq->pending_mask = dev_priv->irq_masks[i][1]; DRM_INIT_WAITQUEUE(&cur_irq->irq_queue); @@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev, switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) { case VIA_IRQ_RELATIVE: irqwait->request.sequence += - atomic_read(&cur_irq->irq_received); + atomic_read_unchecked(&cur_irq->irq_received); irqwait->request.type &= ~_DRM_VBLANK_RELATIVE; case VIA_IRQ_ABSOLUTE: break; diff -purN a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c --- a/drivers/hid/hid-core.c 2012-02-14 08:38:12.015358175 -0800 +++ b/drivers/hid/hid-core.c 2012-02-14 10:50:15.847243365 -0800 @@ -1758,7 +1758,7 @@ static bool hid_ignore(struct hid_device int hid_add_device(struct hid_device *hdev) { - static atomic_t id = ATOMIC_INIT(0); + static atomic_unchecked_t id = ATOMIC_INIT(0); int ret; if (WARN_ON(hdev->status & HID_STAT_ADDED)) @@ -1772,7 +1772,7 @@ int hid_add_device(struct hid_device *hd /* XXX hack, any other cleaner solution after the driver core * is converted to allow more than 20 bytes as the device name? */ dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus, - hdev->vendor, hdev->product, atomic_inc_return(&id)); + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id)); ret = device_add(&hdev->dev); if (!ret) diff -purN a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c --- a/drivers/hwmon/lis3lv02d.c 2012-02-14 08:38:10.371358247 -0800 +++ b/drivers/hwmon/lis3lv02d.c 2012-02-14 10:50:15.847243365 -0800 @@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(in * the lid is closed. This leads to interrupts as soon as a little move * is done. 
*/ - atomic_inc(&lis3_dev.count); + atomic_inc_unchecked(&lis3_dev.count); wake_up_interruptible(&lis3_dev.misc_wait); kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN); @@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct in if (test_and_set_bit(0, &lis3_dev.misc_opened)) return -EBUSY; /* already open */ - atomic_set(&lis3_dev.count, 0); + atomic_set_unchecked(&lis3_dev.count, 0); /* * The sensor can generate interrupts for free-fall and direction @@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struc add_wait_queue(&lis3_dev.misc_wait, &wait); while (true) { set_current_state(TASK_INTERRUPTIBLE); - data = atomic_xchg(&lis3_dev.count, 0); + data = atomic_xchg_unchecked(&lis3_dev.count, 0); if (data) break; @@ -244,7 +244,7 @@ out: static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait) { poll_wait(file, &lis3_dev.misc_wait, wait); - if (atomic_read(&lis3_dev.count)) + if (atomic_read_unchecked(&lis3_dev.count)) return POLLIN | POLLRDNORM; return 0; } diff -purN a/drivers/hwmon/lis3lv02d.h b/drivers/hwmon/lis3lv02d.h --- a/drivers/hwmon/lis3lv02d.h 2012-02-14 08:38:10.339358250 -0800 +++ b/drivers/hwmon/lis3lv02d.h 2012-02-14 10:50:15.847243365 -0800 @@ -201,7 +201,7 @@ struct lis3lv02d { struct input_polled_dev *idev; /* input device */ struct platform_device *pdev; /* platform device */ - atomic_t count; /* interrupt count after last read */ + atomic_unchecked_t count; /* interrupt count after last read */ int xcalib; /* calibrated null value for x */ int ycalib; /* calibrated null value for y */ int zcalib; /* calibrated null value for z */ diff -purN a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c --- a/drivers/hwmon/sht15.c 2012-02-14 08:38:10.371358247 -0800 +++ b/drivers/hwmon/sht15.c 2012-02-14 10:50:15.847243365 -0800 @@ -112,7 +112,7 @@ struct sht15_data { int supply_uV; int supply_uV_valid; struct work_struct update_supply_work; - atomic_t interrupt_handled; + atomic_unchecked_t interrupt_handled; }; /** @@ -245,13 
+245,13 @@ static inline int sht15_update_single_va return ret; gpio_direction_input(data->pdata->gpio_data); - atomic_set(&data->interrupt_handled, 0); + atomic_set_unchecked(&data->interrupt_handled, 0); enable_irq(gpio_to_irq(data->pdata->gpio_data)); if (gpio_get_value(data->pdata->gpio_data) == 0) { disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data)); /* Only relevant if the interrupt hasn't occured. */ - if (!atomic_read(&data->interrupt_handled)) + if (!atomic_read_unchecked(&data->interrupt_handled)) schedule_work(&data->read_work); } ret = wait_event_timeout(data->wait_queue, @@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired struct sht15_data *data = d; /* First disable the interrupt */ disable_irq_nosync(irq); - atomic_inc(&data->interrupt_handled); + atomic_inc_unchecked(&data->interrupt_handled); /* Then schedule a reading work struct */ if (data->flag != SHT15_READING_NOTHING) schedule_work(&data->read_work); @@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct wo here as could have gone low in meantime so verify it hasn't! 
*/ - atomic_set(&data->interrupt_handled, 0); + atomic_set_unchecked(&data->interrupt_handled, 0); enable_irq(gpio_to_irq(data->pdata->gpio_data)); /* If still not occured or another handler has been scheduled */ if (gpio_get_value(data->pdata->gpio_data) - || atomic_read(&data->interrupt_handled)) + || atomic_read_unchecked(&data->interrupt_handled)) return; } /* Read the data back from the device */ diff -purN a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c --- a/drivers/infiniband/core/cm.c 2012-02-14 08:38:12.047358172 -0800 +++ b/drivers/infiniband/core/cm.c 2012-02-14 10:50:15.851243365 -0800 @@ -112,7 +112,7 @@ static char const counter_group_names[CM struct cm_counter_group { struct kobject obj; - atomic_long_t counter[CM_ATTR_COUNT]; + atomic_long_unchecked_t counter[CM_ATTR_COUNT]; }; struct cm_counter_attribute { @@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm struct ib_mad_send_buf *msg = NULL; int ret; - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_REQ_COUNTER]); /* Quick state check to discard duplicate REQs. */ @@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm if (!cm_id_priv) return; - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_REP_COUNTER]); ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg); if (ret) @@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work if (cm_id_priv->id.state != IB_CM_REP_SENT && cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) { spin_unlock_irq(&cm_id_priv->lock); - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. 
counter[CM_RTU_COUNTER]); goto out; } @@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_wor cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id, dreq_msg->local_comm_id); if (!cm_id_priv) { - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_DREQ_COUNTER]); cm_issue_drep(work->port, work->mad_recv_wc); return -EINVAL; @@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_wor case IB_CM_MRA_REP_RCVD: break; case IB_CM_TIMEWAIT: - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_DREQ_COUNTER]); if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg)) goto unlock; @@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_wor cm_free_msg(msg); goto deref; case IB_CM_DREQ_RCVD: - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_DREQ_COUNTER]); goto unlock; default: @@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work ib_modify_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg, timeout)) { if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD) - atomic_long_inc(&work->port-> + atomic_long_inc_unchecked(&work->port-> counter_group[CM_RECV_DUPLICATES]. counter[CM_MRA_COUNTER]); goto out; @@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work break; case IB_CM_MRA_REQ_RCVD: case IB_CM_MRA_REP_RCVD: - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_MRA_COUNTER]); /* fall through */ default: @@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work case IB_CM_LAP_IDLE: break; case IB_CM_MRA_LAP_SENT: - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. 
+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_LAP_COUNTER]); if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg)) goto unlock; @@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work cm_free_msg(msg); goto deref; case IB_CM_LAP_RCVD: - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_LAP_COUNTER]); goto unlock; default: @@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv); if (cur_cm_id_priv) { spin_unlock_irq(&cm.lock); - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_SIDR_REQ_COUNTER]); goto out; /* Duplicate message. */ } @@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_ma if (!msg->context[0] && (attr_index != CM_REJ_COUNTER)) msg->retries = 1; - atomic_long_add(1 + msg->retries, + atomic_long_add_unchecked(1 + msg->retries, &port->counter_group[CM_XMIT].counter[attr_index]); if (msg->retries) - atomic_long_add(msg->retries, + atomic_long_add_unchecked(msg->retries, &port->counter_group[CM_XMIT_RETRIES]. counter[attr_index]); @@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_ma } attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id); - atomic_long_inc(&port->counter_group[CM_RECV]. + atomic_long_inc_unchecked(&port->counter_group[CM_RECV]. 
counter[attr_id - CM_ATTR_ID_OFFSET]); work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths, @@ -3595,7 +3595,7 @@ static ssize_t cm_show_counter(struct ko cm_attr = container_of(attr, struct cm_counter_attribute, attr); return sprintf(buf, "%ld\n", - atomic_long_read(&group->counter[cm_attr->index])); + atomic_long_read_unchecked(&group->counter[cm_attr->index])); } static struct sysfs_ops cm_counter_ops = { diff -purN a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c --- a/drivers/infiniband/core/fmr_pool.c 2012-02-14 08:38:12.051358173 -0800 +++ b/drivers/infiniband/core/fmr_pool.c 2012-02-14 10:50:15.859243366 -0800 @@ -97,8 +97,8 @@ struct ib_fmr_pool { struct task_struct *thread; - atomic_t req_ser; - atomic_t flush_ser; + atomic_unchecked_t req_ser; + atomic_unchecked_t flush_ser; wait_queue_head_t force_wait; }; @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p struct ib_fmr_pool *pool = pool_ptr; do { - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) { + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) { ib_fmr_batch_release(pool); - atomic_inc(&pool->flush_ser); + atomic_inc_unchecked(&pool->flush_ser); wake_up_interruptible(&pool->force_wait); if (pool->flush_function) @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p } set_current_state(TASK_INTERRUPTIBLE); - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 && + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 && !kthread_should_stop()) schedule(); __set_current_state(TASK_RUNNING); @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s pool->dirty_watermark = params->dirty_watermark; pool->dirty_len = 0; spin_lock_init(&pool->pool_lock); - atomic_set(&pool->req_ser, 0); - atomic_set(&pool->flush_ser, 0); + atomic_set_unchecked(&pool->req_ser, 0); + atomic_set_unchecked(&pool->flush_ser, 0); 
init_waitqueue_head(&pool->force_wait); pool->thread = kthread_run(ib_fmr_cleanup_thread, @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool } spin_unlock_irq(&pool->pool_lock); - serial = atomic_inc_return(&pool->req_ser); + serial = atomic_inc_return_unchecked(&pool->req_ser); wake_up_process(pool->thread); if (wait_event_interruptible(pool->force_wait, - atomic_read(&pool->flush_ser) - serial >= 0)) + atomic_read_unchecked(&pool->flush_ser) - serial >= 0)) return -EINTR; return 0; @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr } else { list_add_tail(&fmr->list, &pool->dirty_list); if (++pool->dirty_len >= pool->dirty_watermark) { - atomic_inc(&pool->req_ser); + atomic_inc_unchecked(&pool->req_ser); wake_up_process(pool->thread); } } diff -purN a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c --- a/drivers/infiniband/hw/nes/nes.c 2012-02-14 08:38:12.051358173 -0800 +++ b/drivers/infiniband/hw/nes/nes.c 2012-02-14 10:50:15.859243366 -0800 @@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi LIST_HEAD(nes_adapter_list); static LIST_HEAD(nes_dev_list); -atomic_t qps_destroyed; +atomic_unchecked_t qps_destroyed; static unsigned int ee_flsh_adapter; static unsigned int sysfs_nonidx_addr; @@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(str struct nes_adapter *nesadapter = nesdev->nesadapter; u32 qp_id; - atomic_inc(&qps_destroyed); + atomic_inc_unchecked(&qps_destroyed); /* Free the control structures */ diff -purN a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c --- a/drivers/infiniband/hw/nes/nes_cm.c 2012-02-14 08:38:12.055358173 -0800 +++ b/drivers/infiniband/hw/nes/nes_cm.c 2012-02-14 10:50:15.863243365 -0800 @@ -69,11 +69,11 @@ u32 cm_packets_received; u32 cm_listens_created; u32 cm_listens_destroyed; u32 cm_backlog_drops; -atomic_t cm_loopbacks; -atomic_t cm_nodes_created; -atomic_t cm_nodes_destroyed; -atomic_t cm_accel_dropped_pkts; -atomic_t cm_resets_recvd; 
+atomic_unchecked_t cm_loopbacks; +atomic_unchecked_t cm_nodes_created; +atomic_unchecked_t cm_nodes_destroyed; +atomic_unchecked_t cm_accel_dropped_pkts; +atomic_unchecked_t cm_resets_recvd; static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *); @@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = { static struct nes_cm_core *g_cm_core; -atomic_t cm_connects; -atomic_t cm_accepts; -atomic_t cm_disconnects; -atomic_t cm_closes; -atomic_t cm_connecteds; -atomic_t cm_connect_reqs; -atomic_t cm_rejects; +atomic_unchecked_t cm_connects; +atomic_unchecked_t cm_accepts; +atomic_unchecked_t cm_disconnects; +atomic_unchecked_t cm_closes; +atomic_unchecked_t cm_connecteds; +atomic_unchecked_t cm_connect_reqs; +atomic_unchecked_t cm_rejects; /** @@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node( cm_node->rem_mac); add_hte_node(cm_core, cm_node); - atomic_inc(&cm_nodes_created); + atomic_inc_unchecked(&cm_nodes_created); return cm_node; } @@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm } atomic_dec(&cm_core->node_cnt); - atomic_inc(&cm_nodes_destroyed); + atomic_inc_unchecked(&cm_nodes_destroyed); nesqp = cm_node->nesqp; if (nesqp) { nesqp->cm_node = NULL; @@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm static void drop_packet(struct sk_buff *skb) { - atomic_inc(&cm_accel_dropped_pkts); + atomic_inc_unchecked(&cm_accel_dropped_pkts); dev_kfree_skb_any(skb); } @@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm int reset = 0; /* whether to send reset in case of err.. */ int passive_state; - atomic_inc(&cm_resets_recvd); + atomic_inc_unchecked(&cm_resets_recvd); nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u." 
" refcnt=%d\n", cm_node, cm_node->state, atomic_read(&cm_node->ref_count)); @@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_conne rem_ref_cm_node(cm_node->cm_core, cm_node); return NULL; } - atomic_inc(&cm_loopbacks); + atomic_inc_unchecked(&cm_loopbacks); loopbackremotenode->loopbackpartner = cm_node; loopbackremotenode->tcp_cntxt.rcv_wscale = NES_CM_DEFAULT_RCV_WND_SCALE; @@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_c add_ref_cm_node(cm_node); } else if (cm_node->state == NES_CM_STATE_TSA) { rem_ref_cm_node(cm_core, cm_node); - atomic_inc(&cm_accel_dropped_pkts); + atomic_inc_unchecked(&cm_accel_dropped_pkts); dev_kfree_skb_any(skb); break; } @@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct ne if ((cm_id) && (cm_id->event_handler)) { if (issue_disconn) { - atomic_inc(&cm_disconnects); + atomic_inc_unchecked(&cm_disconnects); cm_event.event = IW_CM_EVENT_DISCONNECT; cm_event.status = disconn_status; cm_event.local_addr = cm_id->local_addr; @@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct ne } if (issue_close) { - atomic_inc(&cm_closes); + atomic_inc_unchecked(&cm_closes); nes_disconnect(nesqp, 1); cm_id->provider_data = nesqp; @@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, s nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n", nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener); - atomic_inc(&cm_accepts); + atomic_inc_unchecked(&cm_accepts); nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n", atomic_read(&nesvnic->netdev->refcnt)); @@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, c struct nes_cm_core *cm_core; - atomic_inc(&cm_rejects); + atomic_inc_unchecked(&cm_rejects); cm_node = (struct nes_cm_node *) cm_id->provider_data; loopback = cm_node->loopbackpartner; cm_core = cm_node->cm_core; @@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id, ntohl(cm_id->local_addr.sin_addr.s_addr), ntohs(cm_id->local_addr.sin_port)); - atomic_inc(&cm_connects); + 
atomic_inc_unchecked(&cm_connects); nesqp->active_conn = 1; /* cache the cm_id in the qp */ @@ -3195,7 +3195,7 @@ static void cm_event_connected(struct ne if (nesqp->destroyed) { return; } - atomic_inc(&cm_connecteds); + atomic_inc_unchecked(&cm_connecteds); nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on" " local port 0x%04X. jiffies = %lu.\n", nesqp->hwqp.qp_id, @@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm ret = cm_id->event_handler(cm_id, &cm_event); cm_id->add_ref(cm_id); - atomic_inc(&cm_closes); + atomic_inc_unchecked(&cm_closes); cm_event.event = IW_CM_EVENT_CLOSE; cm_event.status = IW_CM_EVENT_STATUS_OK; cm_event.provider_data = cm_id->provider_data; @@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_ return; cm_id = cm_node->cm_id; - atomic_inc(&cm_connect_reqs); + atomic_inc_unchecked(&cm_connect_reqs); nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n", cm_node, cm_id, jiffies); @@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct n return; cm_id = cm_node->cm_id; - atomic_inc(&cm_connect_reqs); + atomic_inc_unchecked(&cm_connect_reqs); nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n", cm_node, cm_id, jiffies); diff -purN a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h --- a/drivers/infiniband/hw/nes/nes.h 2012-02-14 08:38:12.055358173 -0800 +++ b/drivers/infiniband/hw/nes/nes.h 2012-02-14 10:50:15.863243365 -0800 @@ -174,17 +174,17 @@ extern unsigned int nes_debug_level; extern unsigned int wqm_quanta; extern struct list_head nes_adapter_list; -extern atomic_t cm_connects; -extern atomic_t cm_accepts; -extern atomic_t cm_disconnects; -extern atomic_t cm_closes; -extern atomic_t cm_connecteds; -extern atomic_t cm_connect_reqs; -extern atomic_t cm_rejects; -extern atomic_t mod_qp_timouts; -extern atomic_t qps_created; -extern atomic_t qps_destroyed; -extern atomic_t sw_qps_destroyed; +extern atomic_unchecked_t cm_connects; +extern 
atomic_unchecked_t cm_accepts; +extern atomic_unchecked_t cm_disconnects; +extern atomic_unchecked_t cm_closes; +extern atomic_unchecked_t cm_connecteds; +extern atomic_unchecked_t cm_connect_reqs; +extern atomic_unchecked_t cm_rejects; +extern atomic_unchecked_t mod_qp_timouts; +extern atomic_unchecked_t qps_created; +extern atomic_unchecked_t qps_destroyed; +extern atomic_unchecked_t sw_qps_destroyed; extern u32 mh_detected; extern u32 mh_pauses_sent; extern u32 cm_packets_sent; @@ -196,11 +196,11 @@ extern u32 cm_packets_retrans; extern u32 cm_listens_created; extern u32 cm_listens_destroyed; extern u32 cm_backlog_drops; -extern atomic_t cm_loopbacks; -extern atomic_t cm_nodes_created; -extern atomic_t cm_nodes_destroyed; -extern atomic_t cm_accel_dropped_pkts; -extern atomic_t cm_resets_recvd; +extern atomic_unchecked_t cm_loopbacks; +extern atomic_unchecked_t cm_nodes_created; +extern atomic_unchecked_t cm_nodes_destroyed; +extern atomic_unchecked_t cm_accel_dropped_pkts; +extern atomic_unchecked_t cm_resets_recvd; extern u32 int_mod_timer_init; extern u32 int_mod_cq_depth_256; diff -purN a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c --- a/drivers/infiniband/hw/nes/nes_nic.c 2012-02-14 08:38:12.055358173 -0800 +++ b/drivers/infiniband/hw/nes/nes_nic.c 2012-02-14 10:50:15.871243364 -0800 @@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats target_stat_values[++index] = mh_detected; target_stat_values[++index] = mh_pauses_sent; target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits; - target_stat_values[++index] = atomic_read(&cm_connects); - target_stat_values[++index] = atomic_read(&cm_accepts); - target_stat_values[++index] = atomic_read(&cm_disconnects); - target_stat_values[++index] = atomic_read(&cm_connecteds); - target_stat_values[++index] = atomic_read(&cm_connect_reqs); - target_stat_values[++index] = atomic_read(&cm_rejects); - target_stat_values[++index] = atomic_read(&mod_qp_timouts); - 
target_stat_values[++index] = atomic_read(&qps_created); - target_stat_values[++index] = atomic_read(&sw_qps_destroyed); - target_stat_values[++index] = atomic_read(&qps_destroyed); - target_stat_values[++index] = atomic_read(&cm_closes); + target_stat_values[++index] = atomic_read_unchecked(&cm_connects); + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts); + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects); + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds); + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs); + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects); + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts); + target_stat_values[++index] = atomic_read_unchecked(&qps_created); + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed); + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed); + target_stat_values[++index] = atomic_read_unchecked(&cm_closes); target_stat_values[++index] = cm_packets_sent; target_stat_values[++index] = cm_packets_bounced; target_stat_values[++index] = cm_packets_created; @@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats target_stat_values[++index] = cm_listens_created; target_stat_values[++index] = cm_listens_destroyed; target_stat_values[++index] = cm_backlog_drops; - target_stat_values[++index] = atomic_read(&cm_loopbacks); - target_stat_values[++index] = atomic_read(&cm_nodes_created); - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed); - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts); - target_stat_values[++index] = atomic_read(&cm_resets_recvd); + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks); + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created); + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed); + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts); + 
target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd); target_stat_values[++index] = int_mod_timer_init; target_stat_values[++index] = int_mod_cq_depth_1; target_stat_values[++index] = int_mod_cq_depth_4; diff -purN a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c --- a/drivers/infiniband/hw/nes/nes_verbs.c 2012-02-14 08:38:12.055358173 -0800 +++ b/drivers/infiniband/hw/nes/nes_verbs.c 2012-02-14 10:50:15.875243364 -0800 @@ -45,9 +45,9 @@ #include -atomic_t mod_qp_timouts; -atomic_t qps_created; -atomic_t sw_qps_destroyed; +atomic_unchecked_t mod_qp_timouts; +atomic_unchecked_t qps_created; +atomic_unchecked_t sw_qps_destroyed; static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev); @@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struc if (init_attr->create_flags) return ERR_PTR(-EINVAL); - atomic_inc(&qps_created); + atomic_inc_unchecked(&qps_created); switch (init_attr->qp_type) { case IB_QPT_RC: if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) { @@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp * struct iw_cm_event cm_event; int ret; - atomic_inc(&sw_qps_destroyed); + atomic_inc_unchecked(&sw_qps_destroyed); nesqp->destroyed = 1; /* Blow away the connection if it exists. 
*/ diff -purN a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c --- a/drivers/input/gameport/gameport.c 2012-02-14 08:38:09.419358290 -0800 +++ b/drivers/input/gameport/gameport.c 2012-02-14 10:50:15.875243364 -0800 @@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys); */ static void gameport_init_port(struct gameport *gameport) { - static atomic_t gameport_no = ATOMIC_INIT(0); + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0); __module_get(THIS_MODULE); mutex_init(&gameport->drv_mutex); device_initialize(&gameport->dev); - dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1); + dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1); gameport->dev.bus = &gameport_bus; gameport->dev.release = gameport_release_port; if (gameport->parent) diff -purN a/drivers/input/input.c b/drivers/input/input.c --- a/drivers/input/input.c 2012-02-14 08:38:09.439358289 -0800 +++ b/drivers/input/input.c 2012-02-14 10:50:15.879243364 -0800 @@ -1624,7 +1624,7 @@ EXPORT_SYMBOL(input_set_capability); */ int input_register_device(struct input_dev *dev) { - static atomic_t input_no = ATOMIC_INIT(0); + static atomic_unchecked_t input_no = ATOMIC_INIT(0); struct input_handler *handler; const char *path; int error; @@ -1651,7 +1651,7 @@ int input_register_device(struct input_d dev->setkeycode = input_default_setkeycode; dev_set_name(&dev->dev, "input%ld", - (unsigned long) atomic_inc_return(&input_no) - 1); + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1); error = device_add(&dev->dev); if (error) diff -purN a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c --- a/drivers/input/joystick/xpad.c 2012-02-14 08:38:09.439358289 -0800 +++ b/drivers/input/joystick/xpad.c 2012-02-14 10:50:15.879243364 -0800 @@ -621,7 +621,7 @@ static void xpad_led_set(struct led_clas static int xpad_led_probe(struct usb_xpad *xpad) { - static atomic_t led_seq = 
ATOMIC_INIT(0); + static atomic_unchecked_t led_seq = ATOMIC_INIT(0); long led_no; struct xpad_led *led; struct led_classdev *led_cdev; @@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpa if (!led) return -ENOMEM; - led_no = (long)atomic_inc_return(&led_seq) - 1; + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1; snprintf(led->name, sizeof(led->name), "xpad%ld", led_no); led->xpad = xpad; diff -purN a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c --- a/drivers/input/serio/serio.c 2012-02-14 08:38:09.419358290 -0800 +++ b/drivers/input/serio/serio.c 2012-02-14 10:50:15.883243364 -0800 @@ -527,7 +527,7 @@ static void serio_release_port(struct de */ static void serio_init_port(struct serio *serio) { - static atomic_t serio_no = ATOMIC_INIT(0); + static atomic_unchecked_t serio_no = ATOMIC_INIT(0); __module_get(THIS_MODULE); @@ -536,7 +536,7 @@ static void serio_init_port(struct serio mutex_init(&serio->drv_mutex); device_initialize(&serio->dev); dev_set_name(&serio->dev, "serio%ld", - (long)atomic_inc_return(&serio_no) - 1); + (long)atomic_inc_return_unchecked(&serio_no) - 1); serio->dev.bus = &serio_bus; serio->dev.release = serio_release_port; if (serio->parent) { diff -purN a/drivers/md/dm.c b/drivers/md/dm.c --- a/drivers/md/dm.c 2012-02-14 08:38:11.843358182 -0800 +++ b/drivers/md/dm.c 2012-02-14 10:50:15.883243364 -0800 @@ -165,9 +165,9 @@ struct mapped_device { /* * Event handling. 
*/ - atomic_t event_nr; + atomic_unchecked_t event_nr; wait_queue_head_t eventq; - atomic_t uevent_seq; + atomic_unchecked_t uevent_seq; struct list_head uevent_list; spinlock_t uevent_lock; /* Protect access to uevent_list */ @@ -1776,8 +1776,8 @@ static struct mapped_device *alloc_dev(i rwlock_init(&md->map_lock); atomic_set(&md->holders, 1); atomic_set(&md->open_count, 0); - atomic_set(&md->event_nr, 0); - atomic_set(&md->uevent_seq, 0); + atomic_set_unchecked(&md->event_nr, 0); + atomic_set_unchecked(&md->uevent_seq, 0); INIT_LIST_HEAD(&md->uevent_list); spin_lock_init(&md->uevent_lock); @@ -1927,7 +1927,7 @@ static void event_callback(void *context dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); - atomic_inc(&md->event_nr); + atomic_inc_unchecked(&md->event_nr); wake_up(&md->eventq); } @@ -2562,18 +2562,18 @@ void dm_kobject_uevent(struct mapped_dev uint32_t dm_next_uevent_seq(struct mapped_device *md) { - return atomic_add_return(1, &md->uevent_seq); + return atomic_add_return_unchecked(1, &md->uevent_seq); } uint32_t dm_get_event_nr(struct mapped_device *md) { - return atomic_read(&md->event_nr); + return atomic_read_unchecked(&md->event_nr); } int dm_wait_event(struct mapped_device *md, int event_nr) { return wait_event_interruptible(md->eventq, - (event_nr != atomic_read(&md->event_nr))); + (event_nr != atomic_read_unchecked(&md->event_nr))); } void dm_uevent_add(struct mapped_device *md, struct list_head *elist) diff -purN a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c --- a/drivers/md/dm-raid1.c 2012-02-14 08:38:11.839358183 -0800 +++ b/drivers/md/dm-raid1.c 2012-02-14 10:50:15.887243364 -0800 @@ -41,7 +41,7 @@ enum dm_raid1_error { struct mirror { struct mirror_set *ms; - atomic_t error_count; + atomic_unchecked_t error_count; unsigned long error_type; struct dm_dev *dev; sector_t offset; @@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m * simple way to tell if a device has encountered * errors. 
*/ - atomic_inc(&m->error_count); + atomic_inc_unchecked(&m->error_count); if (test_and_set_bit(error_type, &m->error_type)) return; @@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m } for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++) - if (!atomic_read(&new->error_count)) { + if (!atomic_read_unchecked(&new->error_count)) { set_default_mirror(new); break; } @@ -363,7 +363,7 @@ static struct mirror *choose_mirror(stru struct mirror *m = get_default_mirror(ms); do { - if (likely(!atomic_read(&m->error_count))) + if (likely(!atomic_read_unchecked(&m->error_count))) return m; if (m-- == ms->mirror) @@ -377,7 +377,7 @@ static int default_ok(struct mirror *m) { struct mirror *default_mirror = get_default_mirror(m->ms); - return !atomic_read(&default_mirror->error_count); + return !atomic_read_unchecked(&default_mirror->error_count); } static int mirror_available(struct mirror_set *ms, struct bio *bio) @@ -484,7 +484,7 @@ static void do_reads(struct mirror_set * */ if (likely(region_in_sync(ms, region, 1))) m = choose_mirror(ms, bio->bi_sector); - else if (m && atomic_read(&m->error_count)) + else if (m && atomic_read_unchecked(&m->error_count)) m = NULL; if (likely(m)) @@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set } ms->mirror[mirror].ms = ms; - atomic_set(&(ms->mirror[mirror].error_count), 0); + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0); ms->mirror[mirror].error_type = 0; ms->mirror[mirror].offset = offset; @@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_targ */ static char device_status_char(struct mirror *m) { - if (!atomic_read(&(m->error_count))) + if (!atomic_read_unchecked(&(m->error_count))) return 'A'; return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 
'D' : diff -purN a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c --- a/drivers/md/dm-stripe.c 2012-02-14 08:38:11.823358183 -0800 +++ b/drivers/md/dm-stripe.c 2012-02-14 10:50:15.895243363 -0800 @@ -20,7 +20,7 @@ struct stripe { struct dm_dev *dev; sector_t physical_start; - atomic_t error_count; + atomic_unchecked_t error_count; }; struct stripe_c { @@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target * kfree(sc); return r; } - atomic_set(&(sc->stripe[i].error_count), 0); + atomic_set_unchecked(&(sc->stripe[i].error_count), 0); } ti->private = sc; @@ -257,7 +257,7 @@ static int stripe_status(struct dm_targe DMEMIT("%d ", sc->stripes); for (i = 0; i < sc->stripes; i++) { DMEMIT("%s ", sc->stripe[i].dev->name); - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ? + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ? 'D' : 'A'; } buffer[i] = '\0'; @@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_targe */ for (i = 0; i < sc->stripes; i++) if (!strcmp(sc->stripe[i].dev->name, major_minor)) { - atomic_inc(&(sc->stripe[i].error_count)); - if (atomic_read(&(sc->stripe[i].error_count)) < + atomic_inc_unchecked(&(sc->stripe[i].error_count)); + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) < DM_IO_ERROR_THRESHOLD) queue_work(kstriped, &sc->kstriped_ws); } diff -purN a/drivers/md/md.c b/drivers/md/md.c --- a/drivers/md/md.c 2012-02-14 08:38:11.831358182 -0800 +++ b/drivers/md/md.c 2012-02-14 10:50:15.895243363 -0800 @@ -153,10 +153,10 @@ static int start_readonly; * start build, activate spare */ static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters); -static atomic_t md_event_count; +static atomic_unchecked_t md_event_count; void md_new_event(mddev_t *mddev) { - atomic_inc(&md_event_count); + atomic_inc_unchecked(&md_event_count); wake_up(&md_event_waiters); } EXPORT_SYMBOL_GPL(md_new_event); @@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event); */ static void md_new_event_inintr(mddev_t *mddev) { - atomic_inc(&md_event_count); + 
atomic_inc_unchecked(&md_event_count); wake_up(&md_event_waiters); } @@ -1226,7 +1226,7 @@ static int super_1_load(mdk_rdev_t *rdev rdev->preferred_minor = 0xffff; rdev->data_offset = le64_to_cpu(sb->data_offset); - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read)); + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read)); rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256; bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1; @@ -1400,7 +1400,7 @@ static void super_1_sync(mddev_t *mddev, else sb->resync_offset = cpu_to_le64(0); - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors)); + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors)); sb->raid_disks = cpu_to_le32(mddev->raid_disks); sb->size = cpu_to_le64(mddev->dev_sectors); @@ -2222,7 +2222,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho static ssize_t errors_show(mdk_rdev_t *rdev, char *page) { - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors)); + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors)); } static ssize_t @@ -2231,7 +2231,7 @@ errors_store(mdk_rdev_t *rdev, const cha char *e; unsigned long n = simple_strtoul(buf, &e, 10); if (*buf && (*e == 0 || *e == '\n')) { - atomic_set(&rdev->corrected_errors, n); + atomic_set_unchecked(&rdev->corrected_errors, n); return len; } return -EINVAL; @@ -2574,8 +2574,8 @@ static mdk_rdev_t *md_import_device(dev_ rdev->data_offset = 0; rdev->sb_events = 0; atomic_set(&rdev->nr_pending, 0); - atomic_set(&rdev->read_errors, 0); - atomic_set(&rdev->corrected_errors, 0); + atomic_set_unchecked(&rdev->read_errors, 0); + atomic_set_unchecked(&rdev->corrected_errors, 0); size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; if (!size) { @@ -5962,7 +5962,7 @@ static int md_seq_show(struct seq_file * spin_unlock(&pers_lock); seq_printf(seq, "\n"); - mi->event = atomic_read(&md_event_count); + mi->event = 
atomic_read_unchecked(&md_event_count); return 0; } if (v == (void*)2) { @@ -6085,7 +6085,7 @@ static int md_seq_open(struct inode *ino else { struct seq_file *p = file->private_data; p->private = mi; - mi->event = atomic_read(&md_event_count); + mi->event = atomic_read_unchecked(&md_event_count); } return error; } @@ -6101,7 +6101,7 @@ static unsigned int mdstat_poll(struct f /* always allow read */ mask = POLLIN | POLLRDNORM; - if (mi->event != atomic_read(&md_event_count)) + if (mi->event != atomic_read_unchecked(&md_event_count)) mask |= POLLERR | POLLPRI; return mask; } @@ -6145,7 +6145,7 @@ static int is_mddev_idle(mddev_t *mddev, struct gendisk *disk = rdev->bdev->bd_contains->bd_disk; curr_events = (int)part_stat_read(&disk->part0, sectors[0]) + (int)part_stat_read(&disk->part0, sectors[1]) - - atomic_read(&disk->sync_io); + atomic_read_unchecked(&disk->sync_io); /* sync IO will cause sync_io to increase before the disk_stats * as sync_io is counted when a request starts, and * disk_stats is counted when it completes. diff -purN a/drivers/md/md.h b/drivers/md/md.h --- a/drivers/md/md.h 2012-02-14 08:38:11.827358182 -0800 +++ b/drivers/md/md.h 2012-02-14 10:50:15.899243363 -0800 @@ -94,10 +94,10 @@ struct mdk_rdev_s * only maintained for arrays that * support hot removal */ - atomic_t read_errors; /* number of consecutive read errors that + atomic_unchecked_t read_errors; /* number of consecutive read errors that * we have tried to ignore. */ - atomic_t corrected_errors; /* number of corrected read errors, + atomic_unchecked_t corrected_errors; /* number of corrected read errors, * for reporting to userspace and storing * in superblock. 
*/ @@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_ static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors) { - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io); + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io); } struct mdk_personality diff -purN a/drivers/md/raid10.c b/drivers/md/raid10.c --- a/drivers/md/raid10.c 2012-02-14 08:38:11.827358182 -0800 +++ b/drivers/md/raid10.c 2012-02-14 10:50:15.899243363 -0800 @@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bi if (test_bit(BIO_UPTODATE, &bio->bi_flags)) set_bit(R10BIO_Uptodate, &r10_bio->state); else { - atomic_add(r10_bio->sectors, + atomic_add_unchecked(r10_bio->sectors, &conf->mirrors[d].rdev->corrected_errors); if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery)) md_error(r10_bio->mddev, @@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf, test_bit(In_sync, &rdev->flags)) { atomic_inc(&rdev->nr_pending); rcu_read_unlock(); - atomic_add(s, &rdev->corrected_errors); + atomic_add_unchecked(s, &rdev->corrected_errors); if (sync_page_io(rdev->bdev, r10_bio->devs[sl].addr + sect + rdev->data_offset, diff -purN a/drivers/md/raid1.c b/drivers/md/raid1.c --- a/drivers/md/raid1.c 2012-02-14 08:38:11.835358183 -0800 +++ b/drivers/md/raid1.c 2012-02-14 10:50:15.899243363 -0800 @@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t * if (r1_bio->bios[d]->bi_end_io != end_sync_read) continue; rdev = conf->mirrors[d].rdev; - atomic_add(s, &rdev->corrected_errors); + atomic_add_unchecked(s, &rdev->corrected_errors); if (sync_page_io(rdev->bdev, sect + rdev->data_offset, s<<9, @@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf, /* Well, this device is dead */ md_error(mddev, rdev); else { - atomic_add(s, &rdev->corrected_errors); + atomic_add_unchecked(s, &rdev->corrected_errors); printk(KERN_INFO "raid1:%s: read error corrected " "(%d sectors at %llu on %s)\n", diff -purN a/drivers/md/raid5.c 
b/drivers/md/raid5.c --- a/drivers/md/raid5.c 2012-02-14 08:38:11.835358183 -0800 +++ b/drivers/md/raid5.c 2012-02-14 10:50:15.907243363 -0800 @@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_hea bi->bi_next = NULL; if ((rw & WRITE) && test_bit(R5_ReWrite, &sh->dev[i].flags)) - atomic_add(STRIPE_SECTORS, + atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors); generic_make_request(bi); } else { @@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struc clear_bit(R5_ReadError, &sh->dev[i].flags); clear_bit(R5_ReWrite, &sh->dev[i].flags); } - if (atomic_read(&conf->disks[i].rdev->read_errors)) - atomic_set(&conf->disks[i].rdev->read_errors, 0); + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors)) + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0); } else { const char *bdn = bdevname(conf->disks[i].rdev->bdev, b); int retry = 0; rdev = conf->disks[i].rdev; clear_bit(R5_UPTODATE, &sh->dev[i].flags); - atomic_inc(&rdev->read_errors); + atomic_inc_unchecked(&rdev->read_errors); if (conf->mddev->degraded >= conf->max_degraded) printk_rl(KERN_WARNING "raid5:%s: read error not correctable " @@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struc (unsigned long long)(sh->sector + rdev->data_offset), bdn); - else if (atomic_read(&rdev->read_errors) + else if (atomic_read_unchecked(&rdev->read_errors) > conf->max_nr_stripes) printk(KERN_WARNING "raid5:%s: Too many read errors, failing device %s.\n", diff -purN a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c --- a/drivers/media/video/cx18/cx18-driver.c 2012-02-14 08:38:10.131358259 -0800 +++ b/drivers/media/video/cx18/cx18-driver.c 2012-02-14 10:50:15.907243363 -0800 @@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl MODULE_DEVICE_TABLE(pci, cx18_pci_tbl); -static atomic_t cx18_instance = ATOMIC_INIT(0); +static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0); /* Parameter declarations */ static int cardtype[CX18_MAX_CARDS]; @@ -800,7 
+800,7 @@ static int __devinit cx18_probe(struct p struct cx18 *cx; /* FIXME - module parameter arrays constrain max instances */ - i = atomic_inc_return(&cx18_instance) - 1; + i = atomic_inc_return_unchecked(&cx18_instance) - 1; if (i >= CX18_MAX_CARDS) { printk(KERN_ERR "cx18: cannot manage card %d, driver has a " "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1); diff -purN a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c --- a/drivers/media/video/ivtv/ivtv-driver.c 2012-02-14 08:38:10.055358262 -0800 +++ b/drivers/media/video/ivtv/ivtv-driver.c 2012-02-14 10:50:15.915243362 -0800 @@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl); /* ivtv instance counter */ -static atomic_t ivtv_instance = ATOMIC_INIT(0); +static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0); /* Parameter declarations */ static int cardtype[IVTV_MAX_CARDS]; diff -purN a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c --- a/drivers/media/video/omap24xxcam.c 2012-02-14 08:38:10.087358261 -0800 +++ b/drivers/media/video/omap24xxcam.c 2012-02-14 10:50:15.915243362 -0800 @@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(str spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags); do_gettimeofday(&vb->ts); - vb->field_count = atomic_add_return(2, &fh->field_count); + vb->field_count = atomic_add_return_unchecked(2, &fh->field_count); if (csr & csr_error) { vb->state = VIDEOBUF_ERROR; if (!atomic_read(&fh->cam->in_reset)) { diff -purN a/drivers/media/video/omap24xxcam.h b/drivers/media/video/omap24xxcam.h --- a/drivers/media/video/omap24xxcam.h 2012-02-14 08:38:10.023358263 -0800 +++ b/drivers/media/video/omap24xxcam.h 2012-02-14 10:50:15.915243362 -0800 @@ -533,7 +533,7 @@ struct omap24xxcam_fh { spinlock_t vbq_lock; /* spinlock for the videobuf queue */ struct videobuf_queue vbq; struct v4l2_pix_format pix; /* serialise pix by vbq->lock */ - atomic_t field_count; /* field counter for 
videobuf_buffer */ + atomic_unchecked_t field_count; /* field counter for videobuf_buffer */ /* accessing cam here doesn't need serialisation: it's constant */ struct omap24xxcam_device *cam; }; diff -purN a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c --- a/drivers/media/video/v4l2-device.c 2012-02-14 08:38:10.115358259 -0800 +++ b/drivers/media/video/v4l2-device.c 2012-02-14 10:50:15.919243362 -0800 @@ -50,9 +50,9 @@ int v4l2_device_register(struct device * EXPORT_SYMBOL_GPL(v4l2_device_register); int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename, - atomic_t *instance) + atomic_unchecked_t *instance) { - int num = atomic_inc_return(instance) - 1; + int num = atomic_inc_return_unchecked(instance) - 1; int len = strlen(basename); if (basename[len - 1] >= '0' && basename[len - 1] <= '9') diff -purN a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c --- a/drivers/message/i2o/iop.c 2012-02-14 08:38:09.919358268 -0800 +++ b/drivers/message/i2o/iop.c 2012-02-14 10:50:15.919243362 -0800 @@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro spin_lock_irqsave(&c->context_list_lock, flags); - if (unlikely(atomic_inc_and_test(&c->context_list_counter))) - atomic_inc(&c->context_list_counter); + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter))) + atomic_inc_unchecked(&c->context_list_counter); - entry->context = atomic_read(&c->context_list_counter); + entry->context = atomic_read_unchecked(&c->context_list_counter); list_add(&entry->list, &c->context_list); @@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(voi #if BITS_PER_LONG == 64 spin_lock_init(&c->context_list_lock); - atomic_set(&c->context_list_counter, 0); + atomic_set_unchecked(&c->context_list_counter, 0); INIT_LIST_HEAD(&c->context_list); #endif diff -purN a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c --- a/drivers/misc/sgi-gru/gruhandles.c 2012-02-14 08:38:08.811358319 -0800 +++ 
b/drivers/misc/sgi-gru/gruhandles.c 2012-02-14 10:50:15.919243362 -0800 @@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistic static void update_mcs_stats(enum mcs_op op, unsigned long clks) { - atomic_long_inc(&mcs_op_statistics[op].count); - atomic_long_add(clks, &mcs_op_statistics[op].total); + atomic_long_inc_unchecked(&mcs_op_statistics[op].count); + atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total); if (mcs_op_statistics[op].max < clks) mcs_op_statistics[op].max = clks; } diff -purN a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c --- a/drivers/misc/sgi-gru/gruprocfs.c 2012-02-14 08:38:08.803358318 -0800 +++ b/drivers/misc/sgi-gru/gruprocfs.c 2012-02-14 10:50:15.923243363 -0800 @@ -32,9 +32,9 @@ #define printstat(s, f) printstat_val(s, &gru_stats.f, #f) -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id) +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id) { - unsigned long val = atomic_long_read(v); + unsigned long val = atomic_long_read_unchecked(v); if (val) seq_printf(s, "%16lu %s\n", val, id); @@ -136,8 +136,8 @@ static int mcs_statistics_show(struct se "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"}; for (op = 0; op < mcsop_last; op++) { - count = atomic_long_read(&mcs_op_statistics[op].count); - total = atomic_long_read(&mcs_op_statistics[op].total); + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count); + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total); max = mcs_op_statistics[op].max; seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count, count ? total / count : 0, max); diff -purN a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h --- a/drivers/misc/sgi-gru/grutables.h 2012-02-14 08:38:08.803358318 -0800 +++ b/drivers/misc/sgi-gru/grutables.h 2012-02-14 10:50:15.923243363 -0800 @@ -167,84 +167,84 @@ extern unsigned int gru_max_gids; * GRU statistics. 
*/ struct gru_stats_s { - atomic_long_t vdata_alloc; - atomic_long_t vdata_free; - atomic_long_t gts_alloc; - atomic_long_t gts_free; - atomic_long_t vdata_double_alloc; - atomic_long_t gts_double_allocate; - atomic_long_t assign_context; - atomic_long_t assign_context_failed; - atomic_long_t free_context; - atomic_long_t load_user_context; - atomic_long_t load_kernel_context; - atomic_long_t lock_kernel_context; - atomic_long_t unlock_kernel_context; - atomic_long_t steal_user_context; - atomic_long_t steal_kernel_context; - atomic_long_t steal_context_failed; - atomic_long_t nopfn; - atomic_long_t break_cow; - atomic_long_t asid_new; - atomic_long_t asid_next; - atomic_long_t asid_wrap; - atomic_long_t asid_reuse; - atomic_long_t intr; - atomic_long_t intr_mm_lock_failed; - atomic_long_t call_os; - atomic_long_t call_os_offnode_reference; - atomic_long_t call_os_check_for_bug; - atomic_long_t call_os_wait_queue; - atomic_long_t user_flush_tlb; - atomic_long_t user_unload_context; - atomic_long_t user_exception; - atomic_long_t set_context_option; - atomic_long_t migrate_check; - atomic_long_t migrated_retarget; - atomic_long_t migrated_unload; - atomic_long_t migrated_unload_delay; - atomic_long_t migrated_nopfn_retarget; - atomic_long_t migrated_nopfn_unload; - atomic_long_t tlb_dropin; - atomic_long_t tlb_dropin_fail_no_asid; - atomic_long_t tlb_dropin_fail_upm; - atomic_long_t tlb_dropin_fail_invalid; - atomic_long_t tlb_dropin_fail_range_active; - atomic_long_t tlb_dropin_fail_idle; - atomic_long_t tlb_dropin_fail_fmm; - atomic_long_t tlb_dropin_fail_no_exception; - atomic_long_t tlb_dropin_fail_no_exception_war; - atomic_long_t tfh_stale_on_fault; - atomic_long_t mmu_invalidate_range; - atomic_long_t mmu_invalidate_page; - atomic_long_t mmu_clear_flush_young; - atomic_long_t flush_tlb; - atomic_long_t flush_tlb_gru; - atomic_long_t flush_tlb_gru_tgh; - atomic_long_t flush_tlb_gru_zero_asid; - - atomic_long_t copy_gpa; - - atomic_long_t mesq_receive; - 
atomic_long_t mesq_receive_none; - atomic_long_t mesq_send; - atomic_long_t mesq_send_failed; - atomic_long_t mesq_noop; - atomic_long_t mesq_send_unexpected_error; - atomic_long_t mesq_send_lb_overflow; - atomic_long_t mesq_send_qlimit_reached; - atomic_long_t mesq_send_amo_nacked; - atomic_long_t mesq_send_put_nacked; - atomic_long_t mesq_qf_not_full; - atomic_long_t mesq_qf_locked; - atomic_long_t mesq_qf_noop_not_full; - atomic_long_t mesq_qf_switch_head_failed; - atomic_long_t mesq_qf_unexpected_error; - atomic_long_t mesq_noop_unexpected_error; - atomic_long_t mesq_noop_lb_overflow; - atomic_long_t mesq_noop_qlimit_reached; - atomic_long_t mesq_noop_amo_nacked; - atomic_long_t mesq_noop_put_nacked; + atomic_long_unchecked_t vdata_alloc; + atomic_long_unchecked_t vdata_free; + atomic_long_unchecked_t gts_alloc; + atomic_long_unchecked_t gts_free; + atomic_long_unchecked_t vdata_double_alloc; + atomic_long_unchecked_t gts_double_allocate; + atomic_long_unchecked_t assign_context; + atomic_long_unchecked_t assign_context_failed; + atomic_long_unchecked_t free_context; + atomic_long_unchecked_t load_user_context; + atomic_long_unchecked_t load_kernel_context; + atomic_long_unchecked_t lock_kernel_context; + atomic_long_unchecked_t unlock_kernel_context; + atomic_long_unchecked_t steal_user_context; + atomic_long_unchecked_t steal_kernel_context; + atomic_long_unchecked_t steal_context_failed; + atomic_long_unchecked_t nopfn; + atomic_long_unchecked_t break_cow; + atomic_long_unchecked_t asid_new; + atomic_long_unchecked_t asid_next; + atomic_long_unchecked_t asid_wrap; + atomic_long_unchecked_t asid_reuse; + atomic_long_unchecked_t intr; + atomic_long_unchecked_t intr_mm_lock_failed; + atomic_long_unchecked_t call_os; + atomic_long_unchecked_t call_os_offnode_reference; + atomic_long_unchecked_t call_os_check_for_bug; + atomic_long_unchecked_t call_os_wait_queue; + atomic_long_unchecked_t user_flush_tlb; + atomic_long_unchecked_t user_unload_context; + 
atomic_long_unchecked_t user_exception; + atomic_long_unchecked_t set_context_option; + atomic_long_unchecked_t migrate_check; + atomic_long_unchecked_t migrated_retarget; + atomic_long_unchecked_t migrated_unload; + atomic_long_unchecked_t migrated_unload_delay; + atomic_long_unchecked_t migrated_nopfn_retarget; + atomic_long_unchecked_t migrated_nopfn_unload; + atomic_long_unchecked_t tlb_dropin; + atomic_long_unchecked_t tlb_dropin_fail_no_asid; + atomic_long_unchecked_t tlb_dropin_fail_upm; + atomic_long_unchecked_t tlb_dropin_fail_invalid; + atomic_long_unchecked_t tlb_dropin_fail_range_active; + atomic_long_unchecked_t tlb_dropin_fail_idle; + atomic_long_unchecked_t tlb_dropin_fail_fmm; + atomic_long_unchecked_t tlb_dropin_fail_no_exception; + atomic_long_unchecked_t tlb_dropin_fail_no_exception_war; + atomic_long_unchecked_t tfh_stale_on_fault; + atomic_long_unchecked_t mmu_invalidate_range; + atomic_long_unchecked_t mmu_invalidate_page; + atomic_long_unchecked_t mmu_clear_flush_young; + atomic_long_unchecked_t flush_tlb; + atomic_long_unchecked_t flush_tlb_gru; + atomic_long_unchecked_t flush_tlb_gru_tgh; + atomic_long_unchecked_t flush_tlb_gru_zero_asid; + + atomic_long_unchecked_t copy_gpa; + + atomic_long_unchecked_t mesq_receive; + atomic_long_unchecked_t mesq_receive_none; + atomic_long_unchecked_t mesq_send; + atomic_long_unchecked_t mesq_send_failed; + atomic_long_unchecked_t mesq_noop; + atomic_long_unchecked_t mesq_send_unexpected_error; + atomic_long_unchecked_t mesq_send_lb_overflow; + atomic_long_unchecked_t mesq_send_qlimit_reached; + atomic_long_unchecked_t mesq_send_amo_nacked; + atomic_long_unchecked_t mesq_send_put_nacked; + atomic_long_unchecked_t mesq_qf_not_full; + atomic_long_unchecked_t mesq_qf_locked; + atomic_long_unchecked_t mesq_qf_noop_not_full; + atomic_long_unchecked_t mesq_qf_switch_head_failed; + atomic_long_unchecked_t mesq_qf_unexpected_error; + atomic_long_unchecked_t mesq_noop_unexpected_error; + atomic_long_unchecked_t 
mesq_noop_lb_overflow; + atomic_long_unchecked_t mesq_noop_qlimit_reached; + atomic_long_unchecked_t mesq_noop_amo_nacked; + atomic_long_unchecked_t mesq_noop_put_nacked; }; @@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start cchop_deallocate, tghop_invalidate, mcsop_last}; struct mcs_op_statistic { - atomic_long_t count; - atomic_long_t total; + atomic_long_unchecked_t count; + atomic_long_unchecked_t total; unsigned long max; }; @@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_st #define STAT(id) do { \ if (gru_options & OPT_STATS) \ - atomic_long_inc(&gru_stats.id); \ + atomic_long_inc_unchecked(&gru_stats.id); \ } while (0) #ifdef CONFIG_SGI_GRU_DEBUG diff -purN a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c --- a/drivers/oprofile/buffer_sync.c 2012-02-14 08:38:09.863358270 -0800 +++ b/drivers/oprofile/buffer_sync.c 2012-02-14 10:50:15.927243363 -0800 @@ -342,7 +342,7 @@ static void add_data(struct op_entry *en if (cookie == NO_COOKIE) offset = pc; if (cookie == INVALID_COOKIE) { - atomic_inc(&oprofile_stats.sample_lost_no_mapping); + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping); offset = pc; } if (cookie != last_cookie) { @@ -386,14 +386,14 @@ add_sample(struct mm_struct *mm, struct /* add userspace sample */ if (!mm) { - atomic_inc(&oprofile_stats.sample_lost_no_mm); + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm); return 0; } cookie = lookup_dcookie(mm, s->eip, &offset); if (cookie == INVALID_COOKIE) { - atomic_inc(&oprofile_stats.sample_lost_no_mapping); + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping); return 0; } @@ -562,7 +562,7 @@ void sync_buffer(int cpu) /* ignore backtraces if failed to add a sample */ if (state == sb_bt_start) { state = sb_bt_ignore; - atomic_inc(&oprofile_stats.bt_lost_no_mapping); + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping); } } release_mm(mm); diff -purN a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c --- 
a/drivers/oprofile/event_buffer.c 2012-02-14 08:38:09.863358270 -0800 +++ b/drivers/oprofile/event_buffer.c 2012-02-14 10:50:15.927243363 -0800 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value } if (buffer_pos == buffer_size) { - atomic_inc(&oprofile_stats.event_lost_overflow); + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow); return; } diff -purN a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c --- a/drivers/oprofile/oprof.c 2012-02-14 08:38:09.863358270 -0800 +++ b/drivers/oprofile/oprof.c 2012-02-14 10:50:15.927243363 -0800 @@ -110,7 +110,7 @@ static void switch_worker(struct work_st if (oprofile_ops.switch_events()) return; - atomic_inc(&oprofile_stats.multiplex_counter); + atomic_inc_unchecked(&oprofile_stats.multiplex_counter); start_switch_worker(); } diff -purN a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c --- a/drivers/oprofile/oprofilefs.c 2012-02-14 08:38:09.863358270 -0800 +++ b/drivers/oprofile/oprofilefs.c 2012-02-14 10:50:15.935243361 -0800 @@ -187,7 +187,7 @@ static const struct file_operations atom int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root, - char const *name, atomic_t *val) + char const *name, atomic_unchecked_t *val) { struct dentry *d = __oprofilefs_create_file(sb, root, name, &atomic_ro_fops, 0444); diff -purN a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c --- a/drivers/oprofile/oprofile_stats.c 2012-02-14 08:38:09.863358270 -0800 +++ b/drivers/oprofile/oprofile_stats.c 2012-02-14 10:50:15.939243361 -0800 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void) cpu_buf->sample_invalid_eip = 0; } - atomic_set(&oprofile_stats.sample_lost_no_mm, 0); - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0); - atomic_set(&oprofile_stats.event_lost_overflow, 0); - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0); - atomic_set(&oprofile_stats.multiplex_counter, 0); + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0); + 
atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0); + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0); + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0); + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0); } diff -purN a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h --- a/drivers/oprofile/oprofile_stats.h 2012-02-14 08:38:09.863358270 -0800 +++ b/drivers/oprofile/oprofile_stats.h 2012-02-14 10:50:15.939243361 -0800 @@ -13,11 +13,11 @@ #include struct oprofile_stat_struct { - atomic_t sample_lost_no_mm; - atomic_t sample_lost_no_mapping; - atomic_t bt_lost_no_mapping; - atomic_t event_lost_overflow; - atomic_t multiplex_counter; + atomic_unchecked_t sample_lost_no_mm; + atomic_unchecked_t sample_lost_no_mapping; + atomic_unchecked_t bt_lost_no_mapping; + atomic_unchecked_t event_lost_overflow; + atomic_unchecked_t multiplex_counter; }; extern struct oprofile_stat_struct oprofile_stats; diff -purN a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c --- a/drivers/s390/cio/qdio_perf.c 2012-02-14 08:38:09.811358274 -0800 +++ b/drivers/s390/cio/qdio_perf.c 2012-02-14 10:50:15.939243361 -0800 @@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_ static int qdio_perf_proc_show(struct seq_file *m, void *v) { seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n", - (long)atomic_long_read(&perf_stats.qdio_int)); + (long)atomic_long_read_unchecked(&perf_stats.qdio_int)); seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n", - (long)atomic_long_read(&perf_stats.pci_int)); + (long)atomic_long_read_unchecked(&perf_stats.pci_int)); seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n", - (long)atomic_long_read(&perf_stats.thin_int)); + (long)atomic_long_read_unchecked(&perf_stats.thin_int)); seq_printf(m, "\n"); seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n", - (long)atomic_long_read(&perf_stats.tasklet_inbound)); + (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound)); 
seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n", - (long)atomic_long_read(&perf_stats.tasklet_outbound)); + (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound)); seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n", - (long)atomic_long_read(&perf_stats.tasklet_thinint), - (long)atomic_long_read(&perf_stats.tasklet_thinint_loop)); + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint), + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop)); seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n", - (long)atomic_long_read(&perf_stats.thinint_inbound), - (long)atomic_long_read(&perf_stats.thinint_inbound_loop)); + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound), + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop)); seq_printf(m, "\n"); seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n", - (long)atomic_long_read(&perf_stats.siga_in)); + (long)atomic_long_read_unchecked(&perf_stats.siga_in)); seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n", - (long)atomic_long_read(&perf_stats.siga_out)); + (long)atomic_long_read_unchecked(&perf_stats.siga_out)); seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n", - (long)atomic_long_read(&perf_stats.siga_sync)); + (long)atomic_long_read_unchecked(&perf_stats.siga_sync)); seq_printf(m, "\n"); seq_printf(m, "Number of inbound transfers\t\t\t: %li\n", - (long)atomic_long_read(&perf_stats.inbound_handler)); + (long)atomic_long_read_unchecked(&perf_stats.inbound_handler)); seq_printf(m, "Number of outbound transfers\t\t\t: %li\n", - (long)atomic_long_read(&perf_stats.outbound_handler)); + (long)atomic_long_read_unchecked(&perf_stats.outbound_handler)); seq_printf(m, "\n"); seq_printf(m, "Number of fast requeues (outg. 
SBAL w/o SIGA)\t: %li\n", - (long)atomic_long_read(&perf_stats.fast_requeue)); + (long)atomic_long_read_unchecked(&perf_stats.fast_requeue)); seq_printf(m, "Number of outbound target full condition\t: %li\n", - (long)atomic_long_read(&perf_stats.outbound_target_full)); + (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full)); seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n", - (long)atomic_long_read(&perf_stats.debug_tl_out_timer)); + (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer)); seq_printf(m, "Number of stop polling calls\t\t\t: %li\n", - (long)atomic_long_read(&perf_stats.debug_stop_polling)); + (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling)); seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n", - (long)atomic_long_read(&perf_stats.thinint_inbound_loop2)); + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2)); seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n", - (long)atomic_long_read(&perf_stats.debug_eqbs_all), - (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete)); + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all), + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete)); seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n", - (long)atomic_long_read(&perf_stats.debug_sqbs_all), - (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete)); + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all), + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete)); seq_printf(m, "\n"); return 0; } diff -purN a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h --- a/drivers/s390/cio/qdio_perf.h 2012-02-14 08:38:09.811358274 -0800 +++ b/drivers/s390/cio/qdio_perf.h 2012-02-14 10:50:15.939243361 -0800 @@ -13,46 +13,46 @@ struct qdio_perf_stats { /* interrupt handler calls */ - atomic_long_t qdio_int; - atomic_long_t pci_int; - atomic_long_t thin_int; + atomic_long_unchecked_t qdio_int; + 
atomic_long_unchecked_t pci_int; + atomic_long_unchecked_t thin_int; /* tasklet runs */ - atomic_long_t tasklet_inbound; - atomic_long_t tasklet_outbound; - atomic_long_t tasklet_thinint; - atomic_long_t tasklet_thinint_loop; - atomic_long_t thinint_inbound; - atomic_long_t thinint_inbound_loop; - atomic_long_t thinint_inbound_loop2; + atomic_long_unchecked_t tasklet_inbound; + atomic_long_unchecked_t tasklet_outbound; + atomic_long_unchecked_t tasklet_thinint; + atomic_long_unchecked_t tasklet_thinint_loop; + atomic_long_unchecked_t thinint_inbound; + atomic_long_unchecked_t thinint_inbound_loop; + atomic_long_unchecked_t thinint_inbound_loop2; /* signal adapter calls */ - atomic_long_t siga_out; - atomic_long_t siga_in; - atomic_long_t siga_sync; + atomic_long_unchecked_t siga_out; + atomic_long_unchecked_t siga_in; + atomic_long_unchecked_t siga_sync; /* misc */ - atomic_long_t inbound_handler; - atomic_long_t outbound_handler; - atomic_long_t fast_requeue; - atomic_long_t outbound_target_full; + atomic_long_unchecked_t inbound_handler; + atomic_long_unchecked_t outbound_handler; + atomic_long_unchecked_t fast_requeue; + atomic_long_unchecked_t outbound_target_full; /* for debugging */ - atomic_long_t debug_tl_out_timer; - atomic_long_t debug_stop_polling; - atomic_long_t debug_eqbs_all; - atomic_long_t debug_eqbs_incomplete; - atomic_long_t debug_sqbs_all; - atomic_long_t debug_sqbs_incomplete; + atomic_long_unchecked_t debug_tl_out_timer; + atomic_long_unchecked_t debug_stop_polling; + atomic_long_unchecked_t debug_eqbs_all; + atomic_long_unchecked_t debug_eqbs_incomplete; + atomic_long_unchecked_t debug_sqbs_all; + atomic_long_unchecked_t debug_sqbs_incomplete; }; extern struct qdio_perf_stats perf_stats; extern int qdio_performance_stats; -static inline void qdio_perf_stat_inc(atomic_long_t *count) +static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count) { if (qdio_performance_stats) - atomic_long_inc(count); + atomic_long_inc_unchecked(count); 
} int qdio_setup_perf_stats(void); diff -purN a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c --- a/drivers/scsi/hosts.c 2012-02-14 08:38:09.263358298 -0800 +++ b/drivers/scsi/hosts.c 2012-02-14 10:50:15.939243361 -0800 @@ -40,7 +40,7 @@ #include "scsi_logging.h" -static atomic_t scsi_host_next_hn; /* host_no for next new host */ +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */ static void scsi_host_cls_release(struct device *dev) @@ -347,7 +347,7 @@ struct Scsi_Host *scsi_host_alloc(struct * subtract one because we increment first then return, but we need to * know what the next host number was before increment */ - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1; + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1; shost->dma_channel = 0xff; /* These three are default values which can be overridden */ diff -purN a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c --- a/drivers/scsi/libfc/fc_exch.c 2012-02-14 08:38:09.183358302 -0800 +++ b/drivers/scsi/libfc/fc_exch.c 2012-02-14 10:50:15.943243361 -0800 @@ -86,12 +86,12 @@ struct fc_exch_mgr { * all together if not used XXX */ struct { - atomic_t no_free_exch; - atomic_t no_free_exch_xid; - atomic_t xid_not_found; - atomic_t xid_busy; - atomic_t seq_not_found; - atomic_t non_bls_resp; + atomic_unchecked_t no_free_exch; + atomic_unchecked_t no_free_exch_xid; + atomic_unchecked_t xid_not_found; + atomic_unchecked_t xid_busy; + atomic_unchecked_t seq_not_found; + atomic_unchecked_t non_bls_resp; } stats; }; #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq) @@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc( /* allocate memory for exchange */ ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC); if (!ep) { - atomic_inc(&mp->stats.no_free_exch); + atomic_inc_unchecked(&mp->stats.no_free_exch); goto out; } memset(ep, 0, sizeof(*ep)); @@ -557,7 +557,7 @@ out: return ep; err: spin_unlock_bh(&pool->lock); - 
atomic_inc(&mp->stats.no_free_exch_xid); + atomic_inc_unchecked(&mp->stats.no_free_exch_xid); mempool_free(ep, mp->ep_pool); return NULL; } @@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_look xid = ntohs(fh->fh_ox_id); /* we originated exch */ ep = fc_exch_find(mp, xid); if (!ep) { - atomic_inc(&mp->stats.xid_not_found); + atomic_inc_unchecked(&mp->stats.xid_not_found); reject = FC_RJT_OX_ID; goto out; } @@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_look ep = fc_exch_find(mp, xid); if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) { if (ep) { - atomic_inc(&mp->stats.xid_busy); + atomic_inc_unchecked(&mp->stats.xid_busy); reject = FC_RJT_RX_ID; goto rel; } @@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_look } xid = ep->xid; /* get our XID */ } else if (!ep) { - atomic_inc(&mp->stats.xid_not_found); + atomic_inc_unchecked(&mp->stats.xid_not_found); reject = FC_RJT_RX_ID; /* XID not found */ goto out; } @@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_look } else { sp = &ep->seq; if (sp->id != fh->fh_seq_id) { - atomic_inc(&mp->stats.seq_not_found); + atomic_inc_unchecked(&mp->stats.seq_not_found); reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */ goto rel; } @@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct ep = fc_exch_find(mp, ntohs(fh->fh_ox_id)); if (!ep) { - atomic_inc(&mp->stats.xid_not_found); + atomic_inc_unchecked(&mp->stats.xid_not_found); goto out; } if (ep->esb_stat & ESB_ST_COMPLETE) { - atomic_inc(&mp->stats.xid_not_found); + atomic_inc_unchecked(&mp->stats.xid_not_found); goto out; } if (ep->rxid == FC_XID_UNKNOWN) ep->rxid = ntohs(fh->fh_rx_id); if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) { - atomic_inc(&mp->stats.xid_not_found); + atomic_inc_unchecked(&mp->stats.xid_not_found); goto rel; } if (ep->did != ntoh24(fh->fh_s_id) && ep->did != FC_FID_FLOGI) { - atomic_inc(&mp->stats.xid_not_found); + atomic_inc_unchecked(&mp->stats.xid_not_found); goto rel; } sof = fr_sof(fp); @@ 
-1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct } else { sp = &ep->seq; if (sp->id != fh->fh_seq_id) { - atomic_inc(&mp->stats.seq_not_found); + atomic_inc_unchecked(&mp->stats.seq_not_found); goto rel; } } @@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_ sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */ if (!sp) - atomic_inc(&mp->stats.xid_not_found); + atomic_inc_unchecked(&mp->stats.xid_not_found); else - atomic_inc(&mp->stats.non_bls_resp); + atomic_inc_unchecked(&mp->stats.non_bls_resp); fc_frame_free(fp); } diff -purN a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c --- a/drivers/scsi/lpfc/lpfc_debugfs.c 2012-02-14 08:38:09.139358304 -0800 +++ b/drivers/scsi/lpfc/lpfc_debugfs.c 2012-02-14 10:50:15.943243361 -0800 @@ -124,7 +124,7 @@ struct lpfc_debug { int len; }; -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0); +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0); static unsigned long lpfc_debugfs_start_time = 0L; /** @@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v lpfc_debugfs_enable = 0; len = 0; - index = (atomic_read(&vport->disc_trc_cnt) + 1) & + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) & (lpfc_debugfs_max_disc_trc - 1); for (i = index; i < lpfc_debugfs_max_disc_trc; i++) { dtp = vport->disc_trc + i; @@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l lpfc_debugfs_enable = 0; len = 0; - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) & + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) & (lpfc_debugfs_max_slow_ring_trc - 1); for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) { dtp = phba->slow_ring_trc + i; @@ -634,14 +634,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport !vport || !vport->disc_trc) return; - index = atomic_inc_return(&vport->disc_trc_cnt) & + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) & (lpfc_debugfs_max_disc_trc - 1); dtp = vport->disc_trc + index; dtp->fmt = fmt; dtp->data1 = 
data1; dtp->data2 = data2; dtp->data3 = data3; - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt); + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt); dtp->jif = jiffies; #endif return; @@ -672,14 +672,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h !phba || !phba->slow_ring_trc) return; - index = atomic_inc_return(&phba->slow_ring_trc_cnt) & + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) & (lpfc_debugfs_max_slow_ring_trc - 1); dtp = phba->slow_ring_trc + index; dtp->fmt = fmt; dtp->data1 = data1; dtp->data2 = data2; dtp->data3 = data3; - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt); + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt); dtp->jif = jiffies; #endif return; @@ -1364,7 +1364,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor "slow_ring buffer\n"); goto debug_failed; } - atomic_set(&phba->slow_ring_trc_cnt, 0); + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0); memset(phba->slow_ring_trc, 0, (sizeof(struct lpfc_debugfs_trc) * lpfc_debugfs_max_slow_ring_trc)); @@ -1410,7 +1410,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor "buffer\n"); goto debug_failed; } - atomic_set(&vport->disc_trc_cnt, 0); + atomic_set_unchecked(&vport->disc_trc_cnt, 0); snprintf(name, sizeof(name), "discovery_trace"); vport->debug_disc_trc = diff -purN a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h --- a/drivers/scsi/lpfc/lpfc.h 2012-02-14 08:38:09.147358303 -0800 +++ b/drivers/scsi/lpfc/lpfc.h 2012-02-14 10:50:15.943243361 -0800 @@ -400,7 +400,7 @@ struct lpfc_vport { struct dentry *debug_nodelist; struct dentry *vport_debugfs_root; struct lpfc_debugfs_trc *disc_trc; - atomic_t disc_trc_cnt; + atomic_unchecked_t disc_trc_cnt; #endif uint8_t stat_data_enabled; uint8_t stat_data_blocked; @@ -725,8 +725,8 @@ struct lpfc_hba { struct timer_list fabric_block_timer; unsigned long bit_flags; #define FABRIC_COMANDS_BLOCKED 0 - atomic_t num_rsrc_err; - atomic_t num_cmd_success; + atomic_unchecked_t 
num_rsrc_err; + atomic_unchecked_t num_cmd_success; unsigned long last_rsrc_error_time; unsigned long last_ramp_down_time; unsigned long last_ramp_up_time; @@ -740,7 +740,7 @@ struct lpfc_hba { struct dentry *debug_dumpDif; /* BlockGuard BPL*/ struct dentry *debug_slow_ring_trc; struct lpfc_debugfs_trc *slow_ring_trc; - atomic_t slow_ring_trc_cnt; + atomic_unchecked_t slow_ring_trc_cnt; #endif /* Used for deferred freeing of ELS data buffers */ diff -purN a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c --- a/drivers/scsi/lpfc/lpfc_scsi.c 2012-02-14 08:38:09.147358303 -0800 +++ b/drivers/scsi/lpfc/lpfc_scsi.c 2012-02-14 10:50:15.951243361 -0800 @@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb uint32_t evt_posted; spin_lock_irqsave(&phba->hbalock, flags); - atomic_inc(&phba->num_rsrc_err); + atomic_inc_unchecked(&phba->num_rsrc_err); phba->last_rsrc_error_time = jiffies; if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) { @@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor unsigned long flags; struct lpfc_hba *phba = vport->phba; uint32_t evt_posted; - atomic_inc(&phba->num_cmd_success); + atomic_inc_unchecked(&phba->num_cmd_success); if (vport->cfg_lun_queue_depth <= queue_depth) return; @@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc int i; struct lpfc_rport_data *rdata; - num_rsrc_err = atomic_read(&phba->num_rsrc_err); - num_cmd_success = atomic_read(&phba->num_cmd_success); + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err); + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success); vports = lpfc_create_vport_work_array(phba); if (vports != NULL) @@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc } } lpfc_destroy_vport_work_array(phba, vports); - atomic_set(&phba->num_rsrc_err, 0); - atomic_set(&phba->num_cmd_success, 0); + atomic_set_unchecked(&phba->num_rsrc_err, 0); + atomic_set_unchecked(&phba->num_cmd_success, 0); } /** @@ -427,8 +427,8 @@ 
lpfc_ramp_up_queue_handler(struct lpfc_h } } lpfc_destroy_vport_work_array(phba, vports); - atomic_set(&phba->num_rsrc_err, 0); - atomic_set(&phba->num_cmd_success, 0); + atomic_set_unchecked(&phba->num_rsrc_err, 0); + atomic_set_unchecked(&phba->num_cmd_success, 0); } /** diff -purN a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c --- a/drivers/scsi/pmcraid.c 2012-02-14 08:38:09.115358304 -0800 +++ b/drivers/scsi/pmcraid.c 2012-02-14 10:50:15.951243361 -0800 @@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct sc res->scsi_dev = scsi_dev; scsi_dev->hostdata = res; res->change_detected = 0; - atomic_set(&res->read_failures, 0); - atomic_set(&res->write_failures, 0); + atomic_set_unchecked(&res->read_failures, 0); + atomic_set_unchecked(&res->write_failures, 0); rc = 0; } spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags); @@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct /* If this was a SCSI read/write command keep count of errors */ if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD) - atomic_inc(&res->read_failures); + atomic_inc_unchecked(&res->read_failures); else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD) - atomic_inc(&res->write_failures); + atomic_inc_unchecked(&res->write_failures); if (!RES_IS_GSCSI(res->cfg_entry) && masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) { @@ -4116,7 +4116,7 @@ static void pmcraid_worker_function(stru pinstance = container_of(workp, struct pmcraid_instance, worker_q); /* add resources only after host is added into system */ - if (!atomic_read(&pinstance->expose_resources)) + if (!atomic_read_unchecked(&pinstance->expose_resources)) return; spin_lock_irqsave(&pinstance->resource_lock, lock_flags); @@ -4850,7 +4850,7 @@ static int __devinit pmcraid_init_instan init_waitqueue_head(&pinstance->reset_wait_q); atomic_set(&pinstance->outstanding_cmds, 0); - atomic_set(&pinstance->expose_resources, 0); + atomic_set_unchecked(&pinstance->expose_resources, 0); 
INIT_LIST_HEAD(&pinstance->free_res_q); INIT_LIST_HEAD(&pinstance->used_res_q); @@ -5502,7 +5502,7 @@ static int __devinit pmcraid_probe( /* Schedule worker thread to handle CCN and take care of adding and * removing devices to OS */ - atomic_set(&pinstance->expose_resources, 1); + atomic_set_unchecked(&pinstance->expose_resources, 1); schedule_work(&pinstance->worker_q); return rc; diff -purN a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h --- a/drivers/scsi/pmcraid.h 2012-02-14 08:38:09.263358298 -0800 +++ b/drivers/scsi/pmcraid.h 2012-02-14 10:50:15.955243361 -0800 @@ -690,7 +690,7 @@ struct pmcraid_instance { atomic_t outstanding_cmds; /* should add/delete resources to mid-layer now ?*/ - atomic_t expose_resources; + atomic_unchecked_t expose_resources; /* Tasklet to handle deferred processing */ struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS]; @@ -727,8 +727,8 @@ struct pmcraid_resource_entry { struct list_head queue; /* link to "to be exposed" resources */ struct pmcraid_config_table_entry cfg_entry; struct scsi_device *scsi_dev; /* Link scsi_device structure */ - atomic_t read_failures; /* count of failed READ commands */ - atomic_t write_failures; /* count of failed WRITE commands */ + atomic_unchecked_t read_failures; /* count of failed READ commands */ + atomic_unchecked_t write_failures; /* count of failed WRITE commands */ /* To indicate add/delete/modify during CCN */ u8 change_detected; diff -purN a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h --- a/drivers/scsi/qla4xxx/ql4_def.h 2012-02-14 08:38:09.227358299 -0800 +++ b/drivers/scsi/qla4xxx/ql4_def.h 2012-02-14 10:50:15.963243360 -0800 @@ -240,7 +240,7 @@ struct ddb_entry { atomic_t retry_relogin_timer; /* Min Time between relogins * (4000 only) */ atomic_t relogin_timer; /* Max Time to wait for relogin to complete */ - atomic_t relogin_retry_count; /* Num of times relogin has been + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been * retried */ 
uint16_t port; diff -purN a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c --- a/drivers/scsi/qla4xxx/ql4_init.c 2012-02-14 08:38:09.227358299 -0800 +++ b/drivers/scsi/qla4xxx/ql4_init.c 2012-02-14 10:50:15.963243360 -0800 @@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_ atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count); atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY); atomic_set(&ddb_entry->relogin_timer, 0); - atomic_set(&ddb_entry->relogin_retry_count, 0); + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0); atomic_set(&ddb_entry->state, DDB_STATE_ONLINE); list_add_tail(&ddb_entry->list, &ha->ddb_list); ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry; @@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct s atomic_set(&ddb_entry->state, DDB_STATE_ONLINE); atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count); - atomic_set(&ddb_entry->relogin_retry_count, 0); + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0); atomic_set(&ddb_entry->relogin_timer, 0); clear_bit(DF_RELOGIN, &ddb_entry->flags); clear_bit(DF_NO_RELOGIN, &ddb_entry->flags); diff -purN a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c --- a/drivers/scsi/qla4xxx/ql4_os.c 2012-02-14 08:38:09.227358299 -0800 +++ b/drivers/scsi/qla4xxx/ql4_os.c 2012-02-14 10:50:15.967243360 -0800 @@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_ql ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_FAILED) { /* Reset retry relogin timer */ - atomic_inc(&ddb_entry->relogin_retry_count); + atomic_inc_unchecked(&ddb_entry->relogin_retry_count); DEBUG2(printk("scsi%ld: index[%d] relogin" " timed out-retrying" " relogin (%d)\n", ha->host_no, ddb_entry->fw_ddb_index, - atomic_read(&ddb_entry-> + atomic_read_unchecked(&ddb_entry-> relogin_retry_count)) ); start_dpc++; diff -purN a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c --- a/drivers/scsi/scsi.c 2012-02-14 08:38:09.259358299 -0800 +++ b/drivers/scsi/scsi.c 
2012-02-14 10:50:15.967243360 -0800 @@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd * unsigned long timeout; int rtn = 0; - atomic_inc(&cmd->device->iorequest_cnt); + atomic_inc_unchecked(&cmd->device->iorequest_cnt); /* check if the device is still usable */ if (unlikely(cmd->device->sdev_state == SDEV_DEL)) { diff -purN a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c --- a/drivers/scsi/scsi_lib.c 2012-02-14 08:38:09.111358304 -0800 +++ b/drivers/scsi/scsi_lib.c 2012-02-14 10:50:15.971243360 -0800 @@ -1384,7 +1384,7 @@ static void scsi_kill_request(struct req shost = sdev->host; scsi_init_cmd_errh(cmd); cmd->result = DID_NO_CONNECT << 16; - atomic_inc(&cmd->device->iorequest_cnt); + atomic_inc_unchecked(&cmd->device->iorequest_cnt); /* * SCSI request completion path will do scsi_device_unbusy(), @@ -1415,9 +1415,9 @@ static void scsi_softirq_done(struct req */ cmd->serial_number = 0; - atomic_inc(&cmd->device->iodone_cnt); + atomic_inc_unchecked(&cmd->device->iodone_cnt); if (cmd->result) - atomic_inc(&cmd->device->ioerr_cnt); + atomic_inc_unchecked(&cmd->device->ioerr_cnt); disposition = scsi_decide_disposition(cmd); if (disposition != SUCCESS && diff -purN a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c --- a/drivers/scsi/scsi_sysfs.c 2012-02-14 08:38:09.087358306 -0800 +++ b/drivers/scsi/scsi_sysfs.c 2012-02-14 10:50:15.971243360 -0800 @@ -667,7 +667,7 @@ show_iostat_##field(struct device *dev, char *buf) \ { \ struct scsi_device *sdev = to_scsi_device(dev); \ - unsigned long long count = atomic_read(&sdev->field); \ + unsigned long long count = atomic_read_unchecked(&sdev->field); \ return snprintf(buf, 20, "0x%llx\n", count); \ } \ static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL) diff -purN a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c --- a/drivers/scsi/scsi_transport_fc.c 2012-02-14 08:38:09.027358309 -0800 +++ b/drivers/scsi/scsi_transport_fc.c 2012-02-14 10:50:15.971243360 -0800 @@ -480,7 +480,7 
@@ MODULE_PARM_DESC(dev_loss_tmo, * Netlink Infrastructure */ -static atomic_t fc_event_seq; +static atomic_unchecked_t fc_event_seq; /** * fc_get_event_number - Obtain the next sequential FC event number @@ -493,7 +493,7 @@ static atomic_t fc_event_seq; u32 fc_get_event_number(void) { - return atomic_add_return(1, &fc_event_seq); + return atomic_add_return_unchecked(1, &fc_event_seq); } EXPORT_SYMBOL(fc_get_event_number); @@ -641,7 +641,7 @@ static __init int fc_transport_init(void { int error; - atomic_set(&fc_event_seq, 0); + atomic_set_unchecked(&fc_event_seq, 0); error = transport_class_register(&fc_host_class); if (error) diff -purN a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c --- a/drivers/scsi/scsi_transport_iscsi.c 2012-02-14 08:38:09.003358311 -0800 +++ b/drivers/scsi/scsi_transport_iscsi.c 2012-02-14 10:50:15.979243360 -0800 @@ -81,7 +81,7 @@ struct iscsi_internal { struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1]; }; -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */ +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */ static struct workqueue_struct *iscsi_eh_timer_workq; /* @@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_s int err; ihost = shost->shost_data; - session->sid = atomic_add_return(1, &iscsi_session_nr); + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr); if (id == ISCSI_MAX_TARGET) { for (id = 0; id < ISCSI_MAX_TARGET; id++) { @@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(v printk(KERN_INFO "Loading iSCSI transport class v%s.\n", ISCSI_TRANSPORT_VERSION); - atomic_set(&iscsi_session_nr, 0); + atomic_set_unchecked(&iscsi_session_nr, 0); err = class_register(&iscsi_transport_class); if (err) diff -purN a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c --- a/drivers/scsi/scsi_transport_srp.c 2012-02-14 08:38:09.211358300 -0800 +++ b/drivers/scsi/scsi_transport_srp.c 
2012-02-14 10:50:15.979243360 -0800 @@ -33,7 +33,7 @@ #include "scsi_transport_srp_internal.h" struct srp_host_attrs { - atomic_t next_port_id; + atomic_unchecked_t next_port_id; }; #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data) @@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo struct Scsi_Host *shost = dev_to_shost(dev); struct srp_host_attrs *srp_host = to_srp_host_attrs(shost); - atomic_set(&srp_host->next_port_id, 0); + atomic_set_unchecked(&srp_host->next_port_id, 0); return 0; } @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id)); rport->roles = ids->roles; - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id); + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id); dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id); transport_setup_device(&rport->dev); diff -purN a/drivers/staging/dst/dcore.c b/drivers/staging/dst/dcore.c --- a/drivers/staging/dst/dcore.c 2012-02-14 08:38:10.707358233 -0800 +++ b/drivers/staging/dst/dcore.c 2012-02-14 10:50:15.979243360 -0800 @@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(s n->size = ctl->size; atomic_set(&n->refcnt, 1); - atomic_long_set(&n->gen, 0); + atomic_long_set_unchecked(&n->gen, 0); snprintf(n->name, sizeof(n->name), "%s", ctl->name); err = dst_node_sysfs_init(n); diff -purN a/drivers/staging/dst/trans.c b/drivers/staging/dst/trans.c --- a/drivers/staging/dst/trans.c 2012-02-14 08:38:10.707358233 -0800 +++ b/drivers/staging/dst/trans.c 2012-02-14 10:50:15.979243360 -0800 @@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n, t->error = 0; t->retries = 0; atomic_set(&t->refcnt, 1); - t->gen = atomic_long_inc_return(&n->gen); + t->gen = atomic_long_inc_return_unchecked(&n->gen); t->enc = bio_data_dir(bio); dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen); diff -purN a/drivers/staging/et131x/et1310_tx.c b/drivers/staging/et131x/et1310_tx.c --- 
a/drivers/staging/et131x/et1310_tx.c 2012-02-14 08:38:10.887358224 -0800 +++ b/drivers/staging/et131x/et1310_tx.c 2012-02-14 10:50:15.983243359 -0800 @@ -710,11 +710,11 @@ inline void et131x_free_send_packet(stru struct net_device_stats *stats = &etdev->net_stats; if (pMpTcb->Flags & fMP_DEST_BROAD) - atomic_inc(&etdev->Stats.brdcstxmt); + atomic_inc_unchecked(&etdev->Stats.brdcstxmt); else if (pMpTcb->Flags & fMP_DEST_MULTI) - atomic_inc(&etdev->Stats.multixmt); + atomic_inc_unchecked(&etdev->Stats.multixmt); else - atomic_inc(&etdev->Stats.unixmt); + atomic_inc_unchecked(&etdev->Stats.unixmt); if (pMpTcb->Packet) { stats->tx_bytes += pMpTcb->Packet->len; diff -purN a/drivers/staging/et131x/et131x_adapter.h b/drivers/staging/et131x/et131x_adapter.h --- a/drivers/staging/et131x/et131x_adapter.h 2012-02-14 08:38:10.887358224 -0800 +++ b/drivers/staging/et131x/et131x_adapter.h 2012-02-14 10:50:15.987243359 -0800 @@ -145,11 +145,11 @@ typedef struct _ce_stats_t { * operations */ u32 unircv; /* # multicast packets received */ - atomic_t unixmt; /* # multicast packets for Tx */ + atomic_unchecked_t unixmt; /* # multicast packets for Tx */ u32 multircv; /* # multicast packets received */ - atomic_t multixmt; /* # multicast packets for Tx */ + atomic_unchecked_t multixmt; /* # multicast packets for Tx */ u32 brdcstrcv; /* # broadcast packets received */ - atomic_t brdcstxmt; /* # broadcast packets for Tx */ + atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */ u32 norcvbuf; /* # Rx packets discarded */ u32 noxmtbuf; /* # Tx packets discarded */ diff -purN a/drivers/staging/hv/Channel.c b/drivers/staging/hv/Channel.c --- a/drivers/staging/hv/Channel.c 2012-02-14 08:38:10.715358232 -0800 +++ b/drivers/staging/hv/Channel.c 2012-02-14 10:50:15.987243359 -0800 @@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vm DPRINT_ENTER(VMBUS); - nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle); - atomic_inc(&gVmbusConnection.NextGpadlHandle); + 
nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle); + atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle); VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount); ASSERT(msgInfo != NULL); diff -purN a/drivers/staging/hv/vmbus_drv.c b/drivers/staging/hv/vmbus_drv.c --- a/drivers/staging/hv/vmbus_drv.c 2012-02-14 08:38:10.719358232 -0800 +++ b/drivers/staging/hv/vmbus_drv.c 2012-02-14 10:50:15.987243359 -0800 @@ -532,7 +532,7 @@ static int vmbus_child_device_register(s to_device_context(root_device_obj); struct device_context *child_device_ctx = to_device_context(child_device_obj); - static atomic_t device_num = ATOMIC_INIT(0); + static atomic_unchecked_t device_num = ATOMIC_INIT(0); DPRINT_ENTER(VMBUS_DRV); @@ -541,7 +541,7 @@ static int vmbus_child_device_register(s /* Set the device name. Otherwise, device_register() will fail. */ dev_set_name(&child_device_ctx->device, "vmbus_0_%d", - atomic_inc_return(&device_num)); + atomic_inc_return_unchecked(&device_num)); /* The new device belongs to this bus */ child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */ diff -purN a/drivers/staging/hv/VmbusPrivate.h b/drivers/staging/hv/VmbusPrivate.h --- a/drivers/staging/hv/VmbusPrivate.h 2012-02-14 08:38:10.715358232 -0800 +++ b/drivers/staging/hv/VmbusPrivate.h 2012-02-14 10:50:15.995243359 -0800 @@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE { struct VMBUS_CONNECTION { enum VMBUS_CONNECT_STATE ConnectState; - atomic_t NextGpadlHandle; + atomic_unchecked_t NextGpadlHandle; /* * Represents channel interrupts. Each bit position represents a diff -purN a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c --- a/drivers/staging/octeon/ethernet.c 2012-02-14 08:38:10.667358234 -0800 +++ b/drivers/staging/octeon/ethernet.c 2012-02-14 10:50:15.995243359 -0800 @@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_ * since the RX tasklet also increments it. 
*/ #ifdef CONFIG_64BIT - atomic64_add(rx_status.dropped_packets, - (atomic64_t *)&priv->stats.rx_dropped); + atomic64_add_unchecked(rx_status.dropped_packets, + (atomic64_unchecked_t *)&priv->stats.rx_dropped); #else - atomic_add(rx_status.dropped_packets, - (atomic_t *)&priv->stats.rx_dropped); + atomic_add_unchecked(rx_status.dropped_packets, + (atomic_unchecked_t *)&priv->stats.rx_dropped); #endif } diff -purN a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c --- a/drivers/staging/octeon/ethernet-rx.c 2012-02-14 08:38:10.671358235 -0800 +++ b/drivers/staging/octeon/ethernet-rx.c 2012-02-14 10:50:15.999243359 -0800 @@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long un /* Increment RX stats for virtual ports */ if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) { #ifdef CONFIG_64BIT - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets); - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes); + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets); + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes); #else - atomic_add(1, (atomic_t *)&priv->stats.rx_packets); - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes); + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets); + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes); #endif } netif_receive_skb(skb); @@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long un dev->name); */ #ifdef CONFIG_64BIT - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped); + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped); #else - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped); + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped); #endif dev_kfree_skb_irq(skb); } diff -purN a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c --- a/drivers/staging/pohmelfs/inode.c 2012-02-14 08:38:10.927358222 -0800 +++ b/drivers/staging/pohmelfs/inode.c 2012-02-14
10:50:15.999243359 -0800 @@ -1840,7 +1840,7 @@ static int pohmelfs_fill_super(struct su mutex_init(&psb->mcache_lock); psb->mcache_root = RB_ROOT; psb->mcache_timeout = msecs_to_jiffies(5000); - atomic_long_set(&psb->mcache_gen, 0); + atomic_long_set_unchecked(&psb->mcache_gen, 0); psb->trans_max_pages = 100; @@ -1855,7 +1855,7 @@ static int pohmelfs_fill_super(struct su INIT_LIST_HEAD(&psb->crypto_ready_list); INIT_LIST_HEAD(&psb->crypto_active_list); - atomic_set(&psb->trans_gen, 1); + atomic_set_unchecked(&psb->trans_gen, 1); atomic_long_set(&psb->total_inodes, 0); mutex_init(&psb->state_lock); diff -purN a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c --- a/drivers/staging/pohmelfs/mcache.c 2012-02-14 08:38:10.923358223 -0800 +++ b/drivers/staging/pohmelfs/mcache.c 2012-02-14 10:50:15.999243359 -0800 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_ m->data = data; m->start = start; m->size = size; - m->gen = atomic_long_inc_return(&psb->mcache_gen); + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen); mutex_lock(&psb->mcache_lock); err = pohmelfs_mcache_insert(psb, m); diff -purN a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h --- a/drivers/staging/pohmelfs/netfs.h 2012-02-14 08:38:10.927358222 -0800 +++ b/drivers/staging/pohmelfs/netfs.h 2012-02-14 10:50:15.999243359 -0800 @@ -570,14 +570,14 @@ struct pohmelfs_config; struct pohmelfs_sb { struct rb_root mcache_root; struct mutex mcache_lock; - atomic_long_t mcache_gen; + atomic_long_unchecked_t mcache_gen; unsigned long mcache_timeout; unsigned int idx; unsigned int trans_retries; - atomic_t trans_gen; + atomic_unchecked_t trans_gen; unsigned int crypto_attached_size; unsigned int crypto_align_size; diff -purN a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c --- a/drivers/staging/pohmelfs/trans.c 2012-02-14 08:38:10.923358223 -0800 +++ b/drivers/staging/pohmelfs/trans.c 2012-02-14 10:50:16.007243358 -0800 @@ -492,7 +492,7 
@@ int netfs_trans_finish(struct netfs_tran int err; struct netfs_cmd *cmd = t->iovec.iov_base; - t->gen = atomic_inc_return(&psb->trans_gen); + t->gen = atomic_inc_return_unchecked(&psb->trans_gen); cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) + t->attached_size + t->attached_pages * sizeof(struct netfs_cmd); diff -purN a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h --- a/drivers/staging/usbip/vhci.h 2012-02-14 08:38:10.723358233 -0800 +++ b/drivers/staging/usbip/vhci.h 2012-02-14 10:50:16.007243358 -0800 @@ -92,7 +92,7 @@ struct vhci_hcd { unsigned resuming:1; unsigned long re_timeout; - atomic_t seqnum; + atomic_unchecked_t seqnum; /* * NOTE: diff -purN a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c --- a/drivers/staging/usbip/vhci_hcd.c 2012-02-14 08:38:10.723358233 -0800 +++ b/drivers/staging/usbip/vhci_hcd.c 2012-02-14 10:50:16.007243358 -0800 @@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb) return; } - priv->seqnum = atomic_inc_return(&the_controller->seqnum); + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum); if (priv->seqnum == 0xffff) usbip_uinfo("seqnum max\n"); @@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_h return -ENOMEM; } - unlink->seqnum = atomic_inc_return(&the_controller->seqnum); + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum); if (unlink->seqnum == 0xffff) usbip_uinfo("seqnum max\n"); @@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hc vdev->rhport = rhport; } - atomic_set(&vhci->seqnum, 0); + atomic_set_unchecked(&vhci->seqnum, 0); spin_lock_init(&vhci->lock); diff -purN a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c --- a/drivers/staging/usbip/vhci_rx.c 2012-02-14 08:38:10.723358233 -0800 +++ b/drivers/staging/usbip/vhci_rx.c 2012-02-14 10:50:16.007243358 -0800 @@ -79,7 +79,7 @@ static void vhci_recv_ret_submit(struct usbip_uerr("cannot find a urb of seqnum %u\n", pdu->base.seqnum); 
usbip_uinfo("max seqnum %d\n", - atomic_read(&the_controller->seqnum)); + atomic_read_unchecked(&the_controller->seqnum)); usbip_event_add(ud, VDEV_EVENT_ERROR_TCP); return; } diff -purN a/drivers/uio/uio.c b/drivers/uio/uio.c --- a/drivers/uio/uio.c 2012-02-14 08:38:09.611358282 -0800 +++ b/drivers/uio/uio.c 2012-02-14 10:50:16.011243358 -0800 @@ -30,7 +30,7 @@ struct uio_device { struct module *owner; struct device *dev; int minor; - atomic_t event; + atomic_unchecked_t event; struct fasync_struct *async_queue; wait_queue_head_t wait; int vma_count; @@ -255,7 +255,7 @@ static ssize_t show_event(struct device struct uio_device *idev = dev_get_drvdata(dev); if (idev) return sprintf(buf, "%u\n", - (unsigned int)atomic_read(&idev->event)); + (unsigned int)atomic_read_unchecked(&idev->event)); else return -ENODEV; } @@ -424,7 +424,7 @@ void uio_event_notify(struct uio_info *i { struct uio_device *idev = info->uio_dev; - atomic_inc(&idev->event); + atomic_inc_unchecked(&idev->event); wake_up_interruptible(&idev->wait); kill_fasync(&idev->async_queue, SIGIO, POLL_IN); } @@ -477,7 +477,7 @@ static int uio_open(struct inode *inode, } listener->dev = idev; - listener->event_count = atomic_read(&idev->event); + listener->event_count = atomic_read_unchecked(&idev->event); filep->private_data = listener; if (idev->info->open) { @@ -528,7 +528,7 @@ static unsigned int uio_poll(struct file return -EIO; poll_wait(filep, &idev->wait, wait); - if (listener->event_count != atomic_read(&idev->event)) + if (listener->event_count != atomic_read_unchecked(&idev->event)) return POLLIN | POLLRDNORM; return 0; } @@ -553,7 +553,7 @@ static ssize_t uio_read(struct file *fil do { set_current_state(TASK_INTERRUPTIBLE); - event_count = atomic_read(&idev->event); + event_count = atomic_read_unchecked(&idev->event); if (event_count != listener->event_count) { if (copy_to_user(buf, &event_count, count)) retval = -EFAULT; @@ -840,7 +840,7 @@ int __uio_register_device(struct module idev->owner = 
owner; idev->info = info; init_waitqueue_head(&idev->wait); - atomic_set(&idev->event, 0); + atomic_set_unchecked(&idev->event, 0); ret = uio_get_minor(idev); if (ret) diff -purN a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c --- a/drivers/usb/atm/usbatm.c 2012-02-14 08:38:09.475358288 -0800 +++ b/drivers/usb/atm/usbatm.c 2012-02-14 10:50:16.011243358 -0800 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(stru if (printk_ratelimit()) atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n", __func__, vpi, vci); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); return; } @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(stru if (length > ATM_MAX_AAL5_PDU) { atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n", __func__, length, vcc); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); goto out; } @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(stru if (sarb->len < pdu_length) { atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n", __func__, pdu_length, sarb->len, vcc); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); goto out; } if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) { atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n", __func__, vcc); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); goto out; } @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(stru if (printk_ratelimit()) atm_err(instance, "%s: no memory for skb (length: %u)!\n", __func__, length); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); goto out; } @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(stru vcc->push(vcc, skb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); out: skb_trim(sarb, 0); } @@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned l struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc; 
usbatm_pop(vcc, skb); - atomic_inc(&vcc->stats->tx); + atomic_inc_unchecked(&vcc->stats->tx); skb = skb_dequeue(&instance->sndqueue); } @@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct a if (!left--) return sprintf(page, "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n", - atomic_read(&atm_dev->stats.aal5.tx), - atomic_read(&atm_dev->stats.aal5.tx_err), - atomic_read(&atm_dev->stats.aal5.rx), - atomic_read(&atm_dev->stats.aal5.rx_err), - atomic_read(&atm_dev->stats.aal5.rx_drop)); + atomic_read_unchecked(&atm_dev->stats.aal5.tx), + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err), + atomic_read_unchecked(&atm_dev->stats.aal5.rx), + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err), + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop)); if (!left--) { if (instance->disconnected) diff -purN a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h --- a/drivers/usb/wusbcore/wa-hc.h 2012-02-14 08:38:09.507358287 -0800 +++ b/drivers/usb/wusbcore/wa-hc.h 2012-02-14 10:50:16.019243358 -0800 @@ -192,7 +192,7 @@ struct wahc { struct list_head xfer_delayed_list; spinlock_t xfer_list_lock; struct work_struct xfer_work; - atomic_t xfer_id_count; + atomic_unchecked_t xfer_id_count; }; @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc * INIT_LIST_HEAD(&wa->xfer_delayed_list); spin_lock_init(&wa->xfer_list_lock); INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run); - atomic_set(&wa->xfer_id_count, 1); + atomic_set_unchecked(&wa->xfer_id_count, 1); } /** diff -purN a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c --- a/drivers/usb/wusbcore/wa-xfer.c 2012-02-14 08:38:09.507358287 -0800 +++ b/drivers/usb/wusbcore/wa-xfer.c 2012-02-14 10:50:16.019243358 -0800 @@ -293,7 +293,7 @@ out: */ static void wa_xfer_id_init(struct wa_xfer *xfer) { - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count); + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count); } /* diff -purN a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h --- 
a/fs/cachefiles/internal.h 2012-02-14 08:38:12.851358136 -0800 +++ b/fs/cachefiles/internal.h 2012-02-14 10:50:16.019243358 -0800 @@ -56,7 +56,7 @@ struct cachefiles_cache { wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */ struct rb_root active_nodes; /* active nodes (can't be culled) */ rwlock_t active_lock; /* lock for active_nodes */ - atomic_t gravecounter; /* graveyard uniquifier */ + atomic_unchecked_t gravecounter; /* graveyard uniquifier */ unsigned frun_percent; /* when to stop culling (% files) */ unsigned fcull_percent; /* when to start culling (% files) */ unsigned fstop_percent; /* when to stop allocating (% files) */ @@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struc * proc.c */ #ifdef CONFIG_CACHEFILES_HISTOGRAM -extern atomic_t cachefiles_lookup_histogram[HZ]; -extern atomic_t cachefiles_mkdir_histogram[HZ]; -extern atomic_t cachefiles_create_histogram[HZ]; +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ]; +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ]; +extern atomic_unchecked_t cachefiles_create_histogram[HZ]; extern int __init cachefiles_proc_init(void); extern void cachefiles_proc_cleanup(void); static inline -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif) +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif) { unsigned long jif = jiffies - start_jif; if (jif >= HZ) jif = HZ - 1; - atomic_inc(&histogram[jif]); + atomic_inc_unchecked(&histogram[jif]); } #else diff -purN a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c --- a/fs/cachefiles/namei.c 2012-02-14 08:38:12.851358136 -0800 +++ b/fs/cachefiles/namei.c 2012-02-14 10:50:16.023243358 -0800 @@ -250,7 +250,7 @@ try_again: /* first step is to make up a grave dentry in the graveyard */ sprintf(nbuffer, "%08x%08x", (uint32_t) get_seconds(), - (uint32_t) atomic_inc_return(&cache->gravecounter)); + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter)); /* do the multiway lock magic */ trap = 
lock_rename(cache->graveyard, dir); diff -purN a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c --- a/fs/cachefiles/proc.c 2012-02-14 08:38:12.847358136 -0800 +++ b/fs/cachefiles/proc.c 2012-02-14 10:50:16.023243358 -0800 @@ -14,9 +14,9 @@ #include #include "internal.h" -atomic_t cachefiles_lookup_histogram[HZ]; -atomic_t cachefiles_mkdir_histogram[HZ]; -atomic_t cachefiles_create_histogram[HZ]; +atomic_unchecked_t cachefiles_lookup_histogram[HZ]; +atomic_unchecked_t cachefiles_mkdir_histogram[HZ]; +atomic_unchecked_t cachefiles_create_histogram[HZ]; /* * display the latency histogram @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str return 0; default: index = (unsigned long) v - 3; - x = atomic_read(&cachefiles_lookup_histogram[index]); - y = atomic_read(&cachefiles_mkdir_histogram[index]); - z = atomic_read(&cachefiles_create_histogram[index]); + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]); + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]); + z = atomic_read_unchecked(&cachefiles_create_histogram[index]); if (x == 0 && y == 0 && z == 0) return 0; diff -purN a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c --- a/fs/cifs/cifs_debug.c 2012-02-14 08:38:12.951358133 -0800 +++ b/fs/cifs/cifs_debug.c 2012-02-14 10:50:16.027243357 -0800 @@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(str tcon = list_entry(tmp3, struct cifsTconInfo, tcon_list); - atomic_set(&tcon->num_smbs_sent, 0); - atomic_set(&tcon->num_writes, 0); - atomic_set(&tcon->num_reads, 0); - atomic_set(&tcon->num_oplock_brks, 0); - atomic_set(&tcon->num_opens, 0); - atomic_set(&tcon->num_posixopens, 0); - atomic_set(&tcon->num_posixmkdirs, 0); - atomic_set(&tcon->num_closes, 0); - atomic_set(&tcon->num_deletes, 0); - atomic_set(&tcon->num_mkdirs, 0); - atomic_set(&tcon->num_rmdirs, 0); - atomic_set(&tcon->num_renames, 0); - atomic_set(&tcon->num_t2renames, 0); - atomic_set(&tcon->num_ffirst, 0); - atomic_set(&tcon->num_fnext, 0); - atomic_set(&tcon->num_fclose, 
0); - atomic_set(&tcon->num_hardlinks, 0); - atomic_set(&tcon->num_symlinks, 0); - atomic_set(&tcon->num_locks, 0); + atomic_set_unchecked(&tcon->num_smbs_sent, 0); + atomic_set_unchecked(&tcon->num_writes, 0); + atomic_set_unchecked(&tcon->num_reads, 0); + atomic_set_unchecked(&tcon->num_oplock_brks, 0); + atomic_set_unchecked(&tcon->num_opens, 0); + atomic_set_unchecked(&tcon->num_posixopens, 0); + atomic_set_unchecked(&tcon->num_posixmkdirs, 0); + atomic_set_unchecked(&tcon->num_closes, 0); + atomic_set_unchecked(&tcon->num_deletes, 0); + atomic_set_unchecked(&tcon->num_mkdirs, 0); + atomic_set_unchecked(&tcon->num_rmdirs, 0); + atomic_set_unchecked(&tcon->num_renames, 0); + atomic_set_unchecked(&tcon->num_t2renames, 0); + atomic_set_unchecked(&tcon->num_ffirst, 0); + atomic_set_unchecked(&tcon->num_fnext, 0); + atomic_set_unchecked(&tcon->num_fclose, 0); + atomic_set_unchecked(&tcon->num_hardlinks, 0); + atomic_set_unchecked(&tcon->num_symlinks, 0); + atomic_set_unchecked(&tcon->num_locks, 0); } } } @@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct s if (tcon->need_reconnect) seq_puts(m, "\tDISCONNECTED "); seq_printf(m, "\nSMBs: %d Oplock Breaks: %d", - atomic_read(&tcon->num_smbs_sent), - atomic_read(&tcon->num_oplock_brks)); + atomic_read_unchecked(&tcon->num_smbs_sent), + atomic_read_unchecked(&tcon->num_oplock_brks)); seq_printf(m, "\nReads: %d Bytes: %lld", - atomic_read(&tcon->num_reads), + atomic_read_unchecked(&tcon->num_reads), (long long)(tcon->bytes_read)); seq_printf(m, "\nWrites: %d Bytes: %lld", - atomic_read(&tcon->num_writes), + atomic_read_unchecked(&tcon->num_writes), (long long)(tcon->bytes_written)); seq_printf(m, "\nFlushes: %d", - atomic_read(&tcon->num_flushes)); + atomic_read_unchecked(&tcon->num_flushes)); seq_printf(m, "\nLocks: %d HardLinks: %d " "Symlinks: %d", - atomic_read(&tcon->num_locks), - atomic_read(&tcon->num_hardlinks), - atomic_read(&tcon->num_symlinks)); + atomic_read_unchecked(&tcon->num_locks), + 
atomic_read_unchecked(&tcon->num_hardlinks), + atomic_read_unchecked(&tcon->num_symlinks)); seq_printf(m, "\nOpens: %d Closes: %d " "Deletes: %d", - atomic_read(&tcon->num_opens), - atomic_read(&tcon->num_closes), - atomic_read(&tcon->num_deletes)); + atomic_read_unchecked(&tcon->num_opens), + atomic_read_unchecked(&tcon->num_closes), + atomic_read_unchecked(&tcon->num_deletes)); seq_printf(m, "\nPosix Opens: %d " "Posix Mkdirs: %d", - atomic_read(&tcon->num_posixopens), - atomic_read(&tcon->num_posixmkdirs)); + atomic_read_unchecked(&tcon->num_posixopens), + atomic_read_unchecked(&tcon->num_posixmkdirs)); seq_printf(m, "\nMkdirs: %d Rmdirs: %d", - atomic_read(&tcon->num_mkdirs), - atomic_read(&tcon->num_rmdirs)); + atomic_read_unchecked(&tcon->num_mkdirs), + atomic_read_unchecked(&tcon->num_rmdirs)); seq_printf(m, "\nRenames: %d T2 Renames %d", - atomic_read(&tcon->num_renames), - atomic_read(&tcon->num_t2renames)); + atomic_read_unchecked(&tcon->num_renames), + atomic_read_unchecked(&tcon->num_t2renames)); seq_printf(m, "\nFindFirst: %d FNext %d " "FClose %d", - atomic_read(&tcon->num_ffirst), - atomic_read(&tcon->num_fnext), - atomic_read(&tcon->num_fclose)); + atomic_read_unchecked(&tcon->num_ffirst), + atomic_read_unchecked(&tcon->num_fnext), + atomic_read_unchecked(&tcon->num_fclose)); } } } diff -purN a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c --- a/fs/cifs/cifsfs.c 2012-02-14 08:38:12.943358132 -0800 +++ b/fs/cifs/cifsfs.c 2012-02-14 10:50:16.031243357 -0800 @@ -991,8 +991,8 @@ init_cifs(void) atomic_set(&bufAllocCount, 0); atomic_set(&smBufAllocCount, 0); #ifdef CONFIG_CIFS_STATS2 - atomic_set(&totBufAllocCount, 0); - atomic_set(&totSmBufAllocCount, 0); + atomic_set_unchecked(&totBufAllocCount, 0); + atomic_set_unchecked(&totSmBufAllocCount, 0); #endif /* CONFIG_CIFS_STATS2 */ atomic_set(&midCount, 0); diff -purN a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h --- a/fs/cifs/cifsglob.h 2012-02-14 08:38:12.955358133 -0800 +++ b/fs/cifs/cifsglob.h 2012-02-14 
10:50:16.035243357 -0800 @@ -252,28 +252,28 @@ struct cifsTconInfo { __u16 Flags; /* optional support bits */ enum statusEnum tidStatus; #ifdef CONFIG_CIFS_STATS - atomic_t num_smbs_sent; - atomic_t num_writes; - atomic_t num_reads; - atomic_t num_flushes; - atomic_t num_oplock_brks; - atomic_t num_opens; - atomic_t num_closes; - atomic_t num_deletes; - atomic_t num_mkdirs; - atomic_t num_posixopens; - atomic_t num_posixmkdirs; - atomic_t num_rmdirs; - atomic_t num_renames; - atomic_t num_t2renames; - atomic_t num_ffirst; - atomic_t num_fnext; - atomic_t num_fclose; - atomic_t num_hardlinks; - atomic_t num_symlinks; - atomic_t num_locks; - atomic_t num_acl_get; - atomic_t num_acl_set; + atomic_unchecked_t num_smbs_sent; + atomic_unchecked_t num_writes; + atomic_unchecked_t num_reads; + atomic_unchecked_t num_flushes; + atomic_unchecked_t num_oplock_brks; + atomic_unchecked_t num_opens; + atomic_unchecked_t num_closes; + atomic_unchecked_t num_deletes; + atomic_unchecked_t num_mkdirs; + atomic_unchecked_t num_posixopens; + atomic_unchecked_t num_posixmkdirs; + atomic_unchecked_t num_rmdirs; + atomic_unchecked_t num_renames; + atomic_unchecked_t num_t2renames; + atomic_unchecked_t num_ffirst; + atomic_unchecked_t num_fnext; + atomic_unchecked_t num_fclose; + atomic_unchecked_t num_hardlinks; + atomic_unchecked_t num_symlinks; + atomic_unchecked_t num_locks; + atomic_unchecked_t num_acl_get; + atomic_unchecked_t num_acl_set; #ifdef CONFIG_CIFS_STATS2 unsigned long long time_writes; unsigned long long time_reads; @@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const st } #ifdef CONFIG_CIFS_STATS -#define cifs_stats_inc atomic_inc +#define cifs_stats_inc atomic_inc_unchecked static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon, unsigned int bytes) @@ -701,8 +701,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnect /* Various Debug counters */ GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */ #ifdef CONFIG_CIFS_STATS2 -GLOBAL_EXTERN 
atomic_t totBufAllocCount; /* total allocated over all time */ -GLOBAL_EXTERN atomic_t totSmBufAllocCount; +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */ +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount; #endif GLOBAL_EXTERN atomic_t smBufAllocCount; GLOBAL_EXTERN atomic_t midCount; diff -purN a/fs/cifs/misc.c b/fs/cifs/misc.c --- a/fs/cifs/misc.c 2012-02-14 08:38:12.955358133 -0800 +++ b/fs/cifs/misc.c 2012-02-14 10:50:16.039243357 -0800 @@ -155,7 +155,7 @@ cifs_buf_get(void) memset(ret_buf, 0, sizeof(struct smb_hdr) + 3); atomic_inc(&bufAllocCount); #ifdef CONFIG_CIFS_STATS2 - atomic_inc(&totBufAllocCount); + atomic_inc_unchecked(&totBufAllocCount); #endif /* CONFIG_CIFS_STATS2 */ } @@ -190,7 +190,7 @@ cifs_small_buf_get(void) /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/ atomic_inc(&smBufAllocCount); #ifdef CONFIG_CIFS_STATS2 - atomic_inc(&totSmBufAllocCount); + atomic_inc_unchecked(&totSmBufAllocCount); #endif /* CONFIG_CIFS_STATS2 */ } diff -purN a/fs/coda/cache.c b/fs/coda/cache.c --- a/fs/coda/cache.c 2012-02-14 08:38:13.387358113 -0800 +++ b/fs/coda/cache.c 2012-02-14 10:50:16.039243357 -0800 @@ -24,14 +24,14 @@ #include #include -static atomic_t permission_epoch = ATOMIC_INIT(0); +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0); /* replace or extend an acl cache hit */ void coda_cache_enter(struct inode *inode, int mask) { struct coda_inode_info *cii = ITOC(inode); - cii->c_cached_epoch = atomic_read(&permission_epoch); + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch); if (cii->c_uid != current_fsuid()) { cii->c_uid = current_fsuid(); cii->c_cached_perm = mask; @@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inod void coda_cache_clear_inode(struct inode *inode) { struct coda_inode_info *cii = ITOC(inode); - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1; + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1; } /* remove all acl caches */ void 
coda_cache_clear_all(struct super_block *sb) { - atomic_inc(&permission_epoch); + atomic_inc_unchecked(&permission_epoch); } @@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode hit = (mask & cii->c_cached_perm) == mask && cii->c_uid == current_fsuid() && - cii->c_cached_epoch == atomic_read(&permission_epoch); + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch); return hit; } diff -purN a/fs/exec.c b/fs/exec.c --- a/fs/exec.c 2012-02-14 08:38:12.991358131 -0800 +++ b/fs/exec.c 2012-02-14 10:50:16.047243357 -0800 @@ -59,6 +59,11 @@ #include +#ifdef CONFIG_PAX_REFCOUNT +#include +#include +#endif + #include #include #include @@ -1156,7 +1161,7 @@ int check_unsafe_exec(struct linux_binpr } rcu_read_unlock(); - if (p->fs->users > n_fs) { + if (atomic_read(&p->fs->users) > n_fs) { bprm->unsafe |= LSM_UNSAFE_SHARE; } else { res = -EAGAIN; } @@ -1595,6 +1600,21 @@ out: return ispipe; } +#ifdef CONFIG_PAX_REFCOUNT +void pax_report_refcount_overflow(struct pt_regs *regs) +{ + if (current->signal->curr_ip) + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid()); + else + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", + current->comm, task_pid_nr(current), current_uid(), current_euid()); + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs)); + show_regs(regs); + force_sig_specific(SIGKILL, current); +} +#endif + static int zap_process(struct task_struct *start) { struct task_struct *t; @@ -1797,17 +1817,17 @@ static void wait_for_dump_helpers(struct pipe = file->f_path.dentry->d_inode->i_pipe; pipe_lock(pipe); - pipe->readers++; - pipe->writers--; + atomic_inc(&pipe->readers); + atomic_dec(&pipe->writers); - while ((pipe->readers > 1) && (!signal_pending(current))) {
wake_up_interruptible_sync(&pipe->wait); kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); pipe_wait(pipe); } - pipe->readers--; - pipe->writers++; + atomic_dec(&pipe->readers); + atomic_inc(&pipe->writers); pipe_unlock(pipe); } @@ -1830,7 +1850,7 @@ void do_coredump(long signr, int exit_co char **helper_argv = NULL; int helper_argc = 0; int dump_count = 0; - static atomic_t core_dump_count = ATOMIC_INIT(0); + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0); audit_core_dumps(signr); @@ -1912,7 +1932,7 @@ void do_coredump(long signr, int exit_co goto fail_unlock; } - dump_count = atomic_inc_return(&core_dump_count); + dump_count = atomic_inc_return_unchecked(&core_dump_count); if (core_pipe_limit && (core_pipe_limit < dump_count)) { printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n", task_tgid_vnr(current), current->comm); @@ -1976,7 +1996,7 @@ close_fail: filp_close(file, NULL); fail_dropcount: if (dump_count) - atomic_dec(&core_dump_count); + atomic_dec_unchecked(&core_dump_count); fail_unlock: if (helper_argv) argv_free(helper_argv); diff -purN a/fs/ext4/ext4.h b/fs/ext4/ext4.h --- a/fs/ext4/ext4.h 2012-02-14 08:38:13.003358129 -0800 +++ b/fs/ext4/ext4.h 2012-02-14 10:50:16.047243357 -0800 @@ -1077,19 +1077,19 @@ struct ext4_sb_info { /* stats for buddy allocator */ spinlock_t s_mb_pa_lock; - atomic_t s_bal_reqs; /* number of reqs with len > 1 */ - atomic_t s_bal_success; /* we found long enough chunks */ - atomic_t s_bal_allocated; /* in blocks */ - atomic_t s_bal_ex_scanned; /* total extents scanned */ - atomic_t s_bal_goals; /* goal hits */ - atomic_t s_bal_breaks; /* too long searches */ - atomic_t s_bal_2orders; /* 2^order hits */ + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */ + atomic_unchecked_t s_bal_success; /* we found long enough chunks */ + atomic_unchecked_t s_bal_allocated; /* in blocks */ + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */ + atomic_unchecked_t s_bal_goals; /* goal hits */ + 
atomic_unchecked_t s_bal_breaks; /* too long searches */ + atomic_unchecked_t s_bal_2orders; /* 2^order hits */ spinlock_t s_bal_lock; unsigned long s_mb_buddies_generated; unsigned long long s_mb_generation_time; - atomic_t s_mb_lost_chunks; - atomic_t s_mb_preallocated; - atomic_t s_mb_discarded; + atomic_unchecked_t s_mb_lost_chunks; + atomic_unchecked_t s_mb_preallocated; + atomic_unchecked_t s_mb_discarded; atomic_t s_lock_busy; /* locality groups */ diff -purN a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c --- a/fs/ext4/mballoc.c 2012-02-14 08:38:13.011358130 -0800 +++ b/fs/ext4/mballoc.c 2012-02-14 10:50:16.051243356 -0800 @@ -1755,7 +1755,7 @@ void ext4_mb_simple_scan_group(struct ex BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len); if (EXT4_SB(sb)->s_mb_stats) - atomic_inc(&EXT4_SB(sb)->s_bal_2orders); + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders); break; } @@ -2131,7 +2131,7 @@ repeat: ac->ac_status = AC_STATUS_CONTINUE; ac->ac_flags |= EXT4_MB_HINT_FIRST; cr = 3; - atomic_inc(&sbi->s_mb_lost_chunks); + atomic_inc_unchecked(&sbi->s_mb_lost_chunks); goto repeat; } } @@ -2534,25 +2534,25 @@ int ext4_mb_release(struct super_block * if (sbi->s_mb_stats) { printk(KERN_INFO "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n", - atomic_read(&sbi->s_bal_allocated), - atomic_read(&sbi->s_bal_reqs), - atomic_read(&sbi->s_bal_success)); + atomic_read_unchecked(&sbi->s_bal_allocated), + atomic_read_unchecked(&sbi->s_bal_reqs), + atomic_read_unchecked(&sbi->s_bal_success)); printk(KERN_INFO "EXT4-fs: mballoc: %u extents scanned, %u goal hits, " "%u 2^N hits, %u breaks, %u lost\n", - atomic_read(&sbi->s_bal_ex_scanned), - atomic_read(&sbi->s_bal_goals), - atomic_read(&sbi->s_bal_2orders), - atomic_read(&sbi->s_bal_breaks), - atomic_read(&sbi->s_mb_lost_chunks)); + atomic_read_unchecked(&sbi->s_bal_ex_scanned), + atomic_read_unchecked(&sbi->s_bal_goals), + atomic_read_unchecked(&sbi->s_bal_2orders), + atomic_read_unchecked(&sbi->s_bal_breaks), + 
atomic_read_unchecked(&sbi->s_mb_lost_chunks)); printk(KERN_INFO "EXT4-fs: mballoc: %lu generated and it took %Lu\n", sbi->s_mb_buddies_generated++, sbi->s_mb_generation_time); printk(KERN_INFO "EXT4-fs: mballoc: %u preallocated, %u discarded\n", - atomic_read(&sbi->s_mb_preallocated), - atomic_read(&sbi->s_mb_discarded)); + atomic_read_unchecked(&sbi->s_mb_preallocated), + atomic_read_unchecked(&sbi->s_mb_discarded)); } free_percpu(sbi->s_locality_groups); @@ -3034,16 +3034,16 @@ static void ext4_mb_collect_stats(struct struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) { - atomic_inc(&sbi->s_bal_reqs); - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated); + atomic_inc_unchecked(&sbi->s_bal_reqs); + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated); if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len) - atomic_inc(&sbi->s_bal_success); - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned); + atomic_inc_unchecked(&sbi->s_bal_success); + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned); if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start && ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group) - atomic_inc(&sbi->s_bal_goals); + atomic_inc_unchecked(&sbi->s_bal_goals); if (ac->ac_found > sbi->s_mb_max_to_scan) - atomic_inc(&sbi->s_bal_breaks); + atomic_inc_unchecked(&sbi->s_bal_breaks); } if (ac->ac_op == EXT4_MB_HISTORY_ALLOC) @@ -3443,7 +3443,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat trace_ext4_mb_new_inode_pa(ac, pa); ext4_mb_use_inode_pa(ac, pa); - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); ei = EXT4_I(ac->ac_inode); grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); @@ -3503,7 +3503,7 @@ ext4_mb_new_group_pa(struct ext4_allocat trace_ext4_mb_new_group_pa(ac, pa); ext4_mb_use_group_pa(ac, pa); - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); grp 
= ext4_get_group_info(sb, ac->ac_b_ex.fe_group); lg = ac->ac_lg; @@ -3607,7 +3607,7 @@ ext4_mb_release_inode_pa(struct ext4_bud * from the bitmap and continue. */ } - atomic_add(free, &sbi->s_mb_discarded); + atomic_add_unchecked(free, &sbi->s_mb_discarded); return err; } @@ -3626,7 +3626,7 @@ ext4_mb_release_group_pa(struct ext4_bud ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); BUG_ON(group != e4b->bd_group && pa->pa_len != 0); mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len); - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded); + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded); if (ac) { ac->ac_sb = sb; diff -purN a/fs/fscache/cookie.c b/fs/fscache/cookie.c --- a/fs/fscache/cookie.c 2012-02-14 08:38:13.031358129 -0800 +++ b/fs/fscache/cookie.c 2012-02-14 10:50:16.055243356 -0800 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire parent ? (char *) parent->def->name : "", def->name, netfs_data); - fscache_stat(&fscache_n_acquires); + fscache_stat_unchecked(&fscache_n_acquires); /* if there's no parent cookie, then we don't create one here either */ if (!parent) { - fscache_stat(&fscache_n_acquires_null); + fscache_stat_unchecked(&fscache_n_acquires_null); _leave(" [no parent]"); return NULL; } @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire /* allocate and initialise a cookie */ cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL); if (!cookie) { - fscache_stat(&fscache_n_acquires_oom); + fscache_stat_unchecked(&fscache_n_acquires_oom); _leave(" [ENOMEM]"); return NULL; } @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire switch (cookie->def->type) { case FSCACHE_COOKIE_TYPE_INDEX: - fscache_stat(&fscache_n_cookie_index); + fscache_stat_unchecked(&fscache_n_cookie_index); break; case FSCACHE_COOKIE_TYPE_DATAFILE: - fscache_stat(&fscache_n_cookie_data); + fscache_stat_unchecked(&fscache_n_cookie_data); break; default: - fscache_stat(&fscache_n_cookie_special); + 
fscache_stat_unchecked(&fscache_n_cookie_special); break; } @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire if (fscache_acquire_non_index_cookie(cookie) < 0) { atomic_dec(&parent->n_children); __fscache_cookie_put(cookie); - fscache_stat(&fscache_n_acquires_nobufs); + fscache_stat_unchecked(&fscache_n_acquires_nobufs); _leave(" = NULL"); return NULL; } } - fscache_stat(&fscache_n_acquires_ok); + fscache_stat_unchecked(&fscache_n_acquires_ok); _leave(" = %p", cookie); return cookie; } @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo cache = fscache_select_cache_for_object(cookie->parent); if (!cache) { up_read(&fscache_addremove_sem); - fscache_stat(&fscache_n_acquires_no_cache); + fscache_stat_unchecked(&fscache_n_acquires_no_cache); _leave(" = -ENOMEDIUM [no cache]"); return -ENOMEDIUM; } @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f object = cache->ops->alloc_object(cache, cookie); fscache_stat_d(&fscache_n_cop_alloc_object); if (IS_ERR(object)) { - fscache_stat(&fscache_n_object_no_alloc); + fscache_stat_unchecked(&fscache_n_object_no_alloc); ret = PTR_ERR(object); goto error; } - fscache_stat(&fscache_n_object_alloc); + fscache_stat_unchecked(&fscache_n_object_alloc); object->debug_id = atomic_inc_return(&fscache_object_debug_id); @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca struct fscache_object *object; struct hlist_node *_p; - fscache_stat(&fscache_n_updates); + fscache_stat_unchecked(&fscache_n_updates); if (!cookie) { - fscache_stat(&fscache_n_updates_null); + fscache_stat_unchecked(&fscache_n_updates_null); _leave(" [no cookie]"); return; } @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct struct fscache_object *object; unsigned long event; - fscache_stat(&fscache_n_relinquishes); + fscache_stat_unchecked(&fscache_n_relinquishes); if (retire) - fscache_stat(&fscache_n_relinquishes_retire); + fscache_stat_unchecked(&fscache_n_relinquishes_retire); if (!cookie) { - 
fscache_stat(&fscache_n_relinquishes_null); + fscache_stat_unchecked(&fscache_n_relinquishes_null); _leave(" [no cookie]"); return; } @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct /* wait for the cookie to finish being instantiated (or to fail) */ if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) { - fscache_stat(&fscache_n_relinquishes_waitcrt); + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt); wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING, fscache_wait_bit, TASK_UNINTERRUPTIBLE); } diff -purN a/fs/fscache/internal.h b/fs/fscache/internal.h --- a/fs/fscache/internal.h 2012-02-14 08:38:13.031358129 -0800 +++ b/fs/fscache/internal.h 2012-02-14 10:50:16.059243356 -0800 @@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void); extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS]; extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS]; -extern atomic_t fscache_n_op_pend; -extern atomic_t fscache_n_op_run; -extern atomic_t fscache_n_op_enqueue; -extern atomic_t fscache_n_op_deferred_release; -extern atomic_t fscache_n_op_release; -extern atomic_t fscache_n_op_gc; -extern atomic_t fscache_n_op_cancelled; -extern atomic_t fscache_n_op_rejected; - -extern atomic_t fscache_n_attr_changed; -extern atomic_t fscache_n_attr_changed_ok; -extern atomic_t fscache_n_attr_changed_nobufs; -extern atomic_t fscache_n_attr_changed_nomem; -extern atomic_t fscache_n_attr_changed_calls; - -extern atomic_t fscache_n_allocs; -extern atomic_t fscache_n_allocs_ok; -extern atomic_t fscache_n_allocs_wait; -extern atomic_t fscache_n_allocs_nobufs; -extern atomic_t fscache_n_allocs_intr; -extern atomic_t fscache_n_allocs_object_dead; -extern atomic_t fscache_n_alloc_ops; -extern atomic_t fscache_n_alloc_op_waits; - -extern atomic_t fscache_n_retrievals; -extern atomic_t fscache_n_retrievals_ok; -extern atomic_t fscache_n_retrievals_wait; -extern atomic_t fscache_n_retrievals_nodata; -extern atomic_t fscache_n_retrievals_nobufs; -extern atomic_t 
fscache_n_retrievals_intr; -extern atomic_t fscache_n_retrievals_nomem; -extern atomic_t fscache_n_retrievals_object_dead; -extern atomic_t fscache_n_retrieval_ops; -extern atomic_t fscache_n_retrieval_op_waits; - -extern atomic_t fscache_n_stores; -extern atomic_t fscache_n_stores_ok; -extern atomic_t fscache_n_stores_again; -extern atomic_t fscache_n_stores_nobufs; -extern atomic_t fscache_n_stores_oom; -extern atomic_t fscache_n_store_ops; -extern atomic_t fscache_n_store_calls; -extern atomic_t fscache_n_store_pages; -extern atomic_t fscache_n_store_radix_deletes; -extern atomic_t fscache_n_store_pages_over_limit; - -extern atomic_t fscache_n_store_vmscan_not_storing; -extern atomic_t fscache_n_store_vmscan_gone; -extern atomic_t fscache_n_store_vmscan_busy; -extern atomic_t fscache_n_store_vmscan_cancelled; - -extern atomic_t fscache_n_marks; -extern atomic_t fscache_n_uncaches; - -extern atomic_t fscache_n_acquires; -extern atomic_t fscache_n_acquires_null; -extern atomic_t fscache_n_acquires_no_cache; -extern atomic_t fscache_n_acquires_ok; -extern atomic_t fscache_n_acquires_nobufs; -extern atomic_t fscache_n_acquires_oom; - -extern atomic_t fscache_n_updates; -extern atomic_t fscache_n_updates_null; -extern atomic_t fscache_n_updates_run; - -extern atomic_t fscache_n_relinquishes; -extern atomic_t fscache_n_relinquishes_null; -extern atomic_t fscache_n_relinquishes_waitcrt; -extern atomic_t fscache_n_relinquishes_retire; - -extern atomic_t fscache_n_cookie_index; -extern atomic_t fscache_n_cookie_data; -extern atomic_t fscache_n_cookie_special; - -extern atomic_t fscache_n_object_alloc; -extern atomic_t fscache_n_object_no_alloc; -extern atomic_t fscache_n_object_lookups; -extern atomic_t fscache_n_object_lookups_negative; -extern atomic_t fscache_n_object_lookups_positive; -extern atomic_t fscache_n_object_lookups_timed_out; -extern atomic_t fscache_n_object_created; -extern atomic_t fscache_n_object_avail; -extern atomic_t fscache_n_object_dead; - 
-extern atomic_t fscache_n_checkaux_none; -extern atomic_t fscache_n_checkaux_okay; -extern atomic_t fscache_n_checkaux_update; -extern atomic_t fscache_n_checkaux_obsolete; +extern atomic_unchecked_t fscache_n_op_pend; +extern atomic_unchecked_t fscache_n_op_run; +extern atomic_unchecked_t fscache_n_op_enqueue; +extern atomic_unchecked_t fscache_n_op_deferred_release; +extern atomic_unchecked_t fscache_n_op_release; +extern atomic_unchecked_t fscache_n_op_gc; +extern atomic_unchecked_t fscache_n_op_cancelled; +extern atomic_unchecked_t fscache_n_op_rejected; + +extern atomic_unchecked_t fscache_n_attr_changed; +extern atomic_unchecked_t fscache_n_attr_changed_ok; +extern atomic_unchecked_t fscache_n_attr_changed_nobufs; +extern atomic_unchecked_t fscache_n_attr_changed_nomem; +extern atomic_unchecked_t fscache_n_attr_changed_calls; + +extern atomic_unchecked_t fscache_n_allocs; +extern atomic_unchecked_t fscache_n_allocs_ok; +extern atomic_unchecked_t fscache_n_allocs_wait; +extern atomic_unchecked_t fscache_n_allocs_nobufs; +extern atomic_unchecked_t fscache_n_allocs_intr; +extern atomic_unchecked_t fscache_n_allocs_object_dead; +extern atomic_unchecked_t fscache_n_alloc_ops; +extern atomic_unchecked_t fscache_n_alloc_op_waits; + +extern atomic_unchecked_t fscache_n_retrievals; +extern atomic_unchecked_t fscache_n_retrievals_ok; +extern atomic_unchecked_t fscache_n_retrievals_wait; +extern atomic_unchecked_t fscache_n_retrievals_nodata; +extern atomic_unchecked_t fscache_n_retrievals_nobufs; +extern atomic_unchecked_t fscache_n_retrievals_intr; +extern atomic_unchecked_t fscache_n_retrievals_nomem; +extern atomic_unchecked_t fscache_n_retrievals_object_dead; +extern atomic_unchecked_t fscache_n_retrieval_ops; +extern atomic_unchecked_t fscache_n_retrieval_op_waits; + +extern atomic_unchecked_t fscache_n_stores; +extern atomic_unchecked_t fscache_n_stores_ok; +extern atomic_unchecked_t fscache_n_stores_again; +extern atomic_unchecked_t fscache_n_stores_nobufs; 
+extern atomic_unchecked_t fscache_n_stores_oom; +extern atomic_unchecked_t fscache_n_store_ops; +extern atomic_unchecked_t fscache_n_store_calls; +extern atomic_unchecked_t fscache_n_store_pages; +extern atomic_unchecked_t fscache_n_store_radix_deletes; +extern atomic_unchecked_t fscache_n_store_pages_over_limit; + +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing; +extern atomic_unchecked_t fscache_n_store_vmscan_gone; +extern atomic_unchecked_t fscache_n_store_vmscan_busy; +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled; + +extern atomic_unchecked_t fscache_n_marks; +extern atomic_unchecked_t fscache_n_uncaches; + +extern atomic_unchecked_t fscache_n_acquires; +extern atomic_unchecked_t fscache_n_acquires_null; +extern atomic_unchecked_t fscache_n_acquires_no_cache; +extern atomic_unchecked_t fscache_n_acquires_ok; +extern atomic_unchecked_t fscache_n_acquires_nobufs; +extern atomic_unchecked_t fscache_n_acquires_oom; + +extern atomic_unchecked_t fscache_n_updates; +extern atomic_unchecked_t fscache_n_updates_null; +extern atomic_unchecked_t fscache_n_updates_run; + +extern atomic_unchecked_t fscache_n_relinquishes; +extern atomic_unchecked_t fscache_n_relinquishes_null; +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt; +extern atomic_unchecked_t fscache_n_relinquishes_retire; + +extern atomic_unchecked_t fscache_n_cookie_index; +extern atomic_unchecked_t fscache_n_cookie_data; +extern atomic_unchecked_t fscache_n_cookie_special; + +extern atomic_unchecked_t fscache_n_object_alloc; +extern atomic_unchecked_t fscache_n_object_no_alloc; +extern atomic_unchecked_t fscache_n_object_lookups; +extern atomic_unchecked_t fscache_n_object_lookups_negative; +extern atomic_unchecked_t fscache_n_object_lookups_positive; +extern atomic_unchecked_t fscache_n_object_lookups_timed_out; +extern atomic_unchecked_t fscache_n_object_created; +extern atomic_unchecked_t fscache_n_object_avail; +extern atomic_unchecked_t fscache_n_object_dead; + 
+extern atomic_unchecked_t fscache_n_checkaux_none; +extern atomic_unchecked_t fscache_n_checkaux_okay; +extern atomic_unchecked_t fscache_n_checkaux_update; +extern atomic_unchecked_t fscache_n_checkaux_obsolete; extern atomic_t fscache_n_cop_alloc_object; extern atomic_t fscache_n_cop_lookup_object; @@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t atomic_inc(stat); } +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat) +{ + atomic_inc_unchecked(stat); +} + static inline void fscache_stat_d(atomic_t *stat) { atomic_dec(stat); @@ -259,6 +264,7 @@ extern const struct file_operations fsca #define __fscache_stat(stat) (NULL) #define fscache_stat(stat) do {} while (0) +#define fscache_stat_unchecked(stat) do {} while (0) #define fscache_stat_d(stat) do {} while (0) #endif diff -purN a/fs/fscache/object.c b/fs/fscache/object.c --- a/fs/fscache/object.c 2012-02-14 08:38:13.031358129 -0800 +++ b/fs/fscache/object.c 2012-02-14 10:50:16.059243356 -0800 @@ -144,7 +144,7 @@ static void fscache_object_state_machine /* update the object metadata on disk */ case FSCACHE_OBJECT_UPDATING: clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events); - fscache_stat(&fscache_n_updates_run); + fscache_stat_unchecked(&fscache_n_updates_run); fscache_stat(&fscache_n_cop_update_object); object->cache->ops->update_object(object); fscache_stat_d(&fscache_n_cop_update_object); @@ -233,7 +233,7 @@ static void fscache_object_state_machine spin_lock(&object->lock); object->state = FSCACHE_OBJECT_DEAD; spin_unlock(&object->lock); - fscache_stat(&fscache_n_object_dead); + fscache_stat_unchecked(&fscache_n_object_dead); goto terminal_transit; /* handle the parent cache of this object being withdrawn from @@ -248,7 +248,7 @@ static void fscache_object_state_machine spin_lock(&object->lock); object->state = FSCACHE_OBJECT_DEAD; spin_unlock(&object->lock); - fscache_stat(&fscache_n_object_dead); + fscache_stat_unchecked(&fscache_n_object_dead); goto terminal_transit; /* 
complain about the object being woken up once it is @@ -492,7 +492,7 @@ static void fscache_lookup_object(struct parent->cookie->def->name, cookie->def->name, object->cache->tag->name); - fscache_stat(&fscache_n_object_lookups); + fscache_stat_unchecked(&fscache_n_object_lookups); fscache_stat(&fscache_n_cop_lookup_object); ret = object->cache->ops->lookup_object(object); fscache_stat_d(&fscache_n_cop_lookup_object); @@ -503,7 +503,7 @@ static void fscache_lookup_object(struct if (ret == -ETIMEDOUT) { /* probably stuck behind another object, so move this one to * the back of the queue */ - fscache_stat(&fscache_n_object_lookups_timed_out); + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out); set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events); } @@ -526,7 +526,7 @@ void fscache_object_lookup_negative(stru spin_lock(&object->lock); if (object->state == FSCACHE_OBJECT_LOOKING_UP) { - fscache_stat(&fscache_n_object_lookups_negative); + fscache_stat_unchecked(&fscache_n_object_lookups_negative); /* transit here to allow write requests to begin stacking up * and read requests to begin returning ENODATA */ @@ -572,7 +572,7 @@ void fscache_obtained_object(struct fsca * result, in which case there may be data available */ spin_lock(&object->lock); if (object->state == FSCACHE_OBJECT_LOOKING_UP) { - fscache_stat(&fscache_n_object_lookups_positive); + fscache_stat_unchecked(&fscache_n_object_lookups_positive); clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags); @@ -586,7 +586,7 @@ void fscache_obtained_object(struct fsca set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events); } else { ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING); - fscache_stat(&fscache_n_object_created); + fscache_stat_unchecked(&fscache_n_object_created); object->state = FSCACHE_OBJECT_AVAILABLE; spin_unlock(&object->lock); @@ -633,7 +633,7 @@ static void fscache_object_available(str fscache_enqueue_dependents(object); fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif); 
- fscache_stat(&fscache_n_object_avail); + fscache_stat_unchecked(&fscache_n_object_avail); _leave(""); } @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux( enum fscache_checkaux result; if (!object->cookie->def->check_aux) { - fscache_stat(&fscache_n_checkaux_none); + fscache_stat_unchecked(&fscache_n_checkaux_none); return FSCACHE_CHECKAUX_OKAY; } @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux( switch (result) { /* entry okay as is */ case FSCACHE_CHECKAUX_OKAY: - fscache_stat(&fscache_n_checkaux_okay); + fscache_stat_unchecked(&fscache_n_checkaux_okay); break; /* entry requires update */ case FSCACHE_CHECKAUX_NEEDS_UPDATE: - fscache_stat(&fscache_n_checkaux_update); + fscache_stat_unchecked(&fscache_n_checkaux_update); break; /* entry requires deletion */ case FSCACHE_CHECKAUX_OBSOLETE: - fscache_stat(&fscache_n_checkaux_obsolete); + fscache_stat_unchecked(&fscache_n_checkaux_obsolete); break; default: diff -purN a/fs/fscache/operation.c b/fs/fscache/operation.c --- a/fs/fscache/operation.c 2012-02-14 08:38:13.023358128 -0800 +++ b/fs/fscache/operation.c 2012-02-14 10:50:16.059243356 -0800 @@ -16,7 +16,7 @@ #include #include "internal.h" -atomic_t fscache_op_debug_id; +atomic_unchecked_t fscache_op_debug_id; EXPORT_SYMBOL(fscache_op_debug_id); /** @@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fs ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE); ASSERTCMP(atomic_read(&op->usage), >, 0); - fscache_stat(&fscache_n_op_enqueue); + fscache_stat_unchecked(&fscache_n_op_enqueue); switch (op->flags & FSCACHE_OP_TYPE) { case FSCACHE_OP_FAST: _debug("queue fast"); @@ -76,7 +76,7 @@ static void fscache_run_op(struct fscach wake_up_bit(&op->flags, FSCACHE_OP_WAITING); if (op->processor) fscache_enqueue_operation(op); - fscache_stat(&fscache_n_op_run); + fscache_stat_unchecked(&fscache_n_op_run); } /* @@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct f if (object->n_ops > 0) { atomic_inc(&op->usage); 
list_add_tail(&op->pend_link, &object->pending_ops); - fscache_stat(&fscache_n_op_pend); + fscache_stat_unchecked(&fscache_n_op_pend); } else if (!list_empty(&object->pending_ops)) { atomic_inc(&op->usage); list_add_tail(&op->pend_link, &object->pending_ops); - fscache_stat(&fscache_n_op_pend); + fscache_stat_unchecked(&fscache_n_op_pend); fscache_start_operations(object); } else { ASSERTCMP(object->n_in_progress, ==, 0); @@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct f object->n_exclusive++; /* reads and writes must wait */ atomic_inc(&op->usage); list_add_tail(&op->pend_link, &object->pending_ops); - fscache_stat(&fscache_n_op_pend); + fscache_stat_unchecked(&fscache_n_op_pend); ret = 0; } else { /* not allowed to submit ops in any other state */ @@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_obj if (object->n_exclusive > 0) { atomic_inc(&op->usage); list_add_tail(&op->pend_link, &object->pending_ops); - fscache_stat(&fscache_n_op_pend); + fscache_stat_unchecked(&fscache_n_op_pend); } else if (!list_empty(&object->pending_ops)) { atomic_inc(&op->usage); list_add_tail(&op->pend_link, &object->pending_ops); - fscache_stat(&fscache_n_op_pend); + fscache_stat_unchecked(&fscache_n_op_pend); fscache_start_operations(object); } else { ASSERTCMP(object->n_exclusive, ==, 0); @@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_obj object->n_ops++; atomic_inc(&op->usage); list_add_tail(&op->pend_link, &object->pending_ops); - fscache_stat(&fscache_n_op_pend); + fscache_stat_unchecked(&fscache_n_op_pend); ret = 0; } else if (object->state == FSCACHE_OBJECT_DYING || object->state == FSCACHE_OBJECT_LC_DYING || object->state == FSCACHE_OBJECT_WITHDRAWING) { - fscache_stat(&fscache_n_op_rejected); + fscache_stat_unchecked(&fscache_n_op_rejected); ret = -ENOBUFS; } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) { fscache_report_unexpected_submission(object, op, ostate); @@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_ope ret = 
-EBUSY; if (!list_empty(&op->pend_link)) { - fscache_stat(&fscache_n_op_cancelled); + fscache_stat_unchecked(&fscache_n_op_cancelled); list_del_init(&op->pend_link); object->n_ops--; if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) @@ -344,7 +344,7 @@ void fscache_put_operation(struct fscach if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags)) BUG(); - fscache_stat(&fscache_n_op_release); + fscache_stat_unchecked(&fscache_n_op_release); if (op->release) { op->release(op); @@ -361,7 +361,7 @@ void fscache_put_operation(struct fscach * lock, and defer it otherwise */ if (!spin_trylock(&object->lock)) { _debug("defer put"); - fscache_stat(&fscache_n_op_deferred_release); + fscache_stat_unchecked(&fscache_n_op_deferred_release); cache = object->cache; spin_lock(&cache->op_gc_list_lock); @@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_st _debug("GC DEFERRED REL OBJ%x OP%x", object->debug_id, op->debug_id); - fscache_stat(&fscache_n_op_gc); + fscache_stat_unchecked(&fscache_n_op_gc); ASSERTCMP(atomic_read(&op->usage), ==, 0); diff -purN a/fs/fscache/page.c b/fs/fscache/page.c --- a/fs/fscache/page.c 2012-02-14 08:38:13.023358128 -0800 +++ b/fs/fscache/page.c 2012-02-14 10:50:16.063243356 -0800 @@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct val = radix_tree_lookup(&cookie->stores, page->index); if (!val) { rcu_read_unlock(); - fscache_stat(&fscache_n_store_vmscan_not_storing); + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing); __fscache_uncache_page(cookie, page); return true; } @@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct spin_unlock(&cookie->stores_lock); if (xpage) { - fscache_stat(&fscache_n_store_vmscan_cancelled); - fscache_stat(&fscache_n_store_radix_deletes); + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled); + fscache_stat_unchecked(&fscache_n_store_radix_deletes); ASSERTCMP(xpage, ==, page); } else { - fscache_stat(&fscache_n_store_vmscan_gone); + fscache_stat_unchecked(&fscache_n_store_vmscan_gone); } 
wake_up_bit(&cookie->flags, 0); @@ -106,7 +106,7 @@ page_busy: /* we might want to wait here, but that could deadlock the allocator as * the slow-work threads writing to the cache may all end up sleeping * on memory allocation */ - fscache_stat(&fscache_n_store_vmscan_busy); + fscache_stat_unchecked(&fscache_n_store_vmscan_busy); return false; } EXPORT_SYMBOL(__fscache_maybe_release_page); @@ -130,7 +130,7 @@ static void fscache_end_page_write(struc FSCACHE_COOKIE_STORING_TAG); if (!radix_tree_tag_get(&cookie->stores, page->index, FSCACHE_COOKIE_PENDING_TAG)) { - fscache_stat(&fscache_n_store_radix_deletes); + fscache_stat_unchecked(&fscache_n_store_radix_deletes); xpage = radix_tree_delete(&cookie->stores, page->index); } spin_unlock(&cookie->stores_lock); @@ -151,7 +151,7 @@ static void fscache_attr_changed_op(stru _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id); - fscache_stat(&fscache_n_attr_changed_calls); + fscache_stat_unchecked(&fscache_n_attr_changed_calls); if (fscache_object_is_active(object)) { fscache_set_op_state(op, "CallFS"); @@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscach ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX); - fscache_stat(&fscache_n_attr_changed); + fscache_stat_unchecked(&fscache_n_attr_changed); op = kzalloc(sizeof(*op), GFP_KERNEL); if (!op) { - fscache_stat(&fscache_n_attr_changed_nomem); + fscache_stat_unchecked(&fscache_n_attr_changed_nomem); _leave(" = -ENOMEM"); return -ENOMEM; } @@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscach if (fscache_submit_exclusive_op(object, op) < 0) goto nobufs; spin_unlock(&cookie->lock); - fscache_stat(&fscache_n_attr_changed_ok); + fscache_stat_unchecked(&fscache_n_attr_changed_ok); fscache_put_operation(op); _leave(" = 0"); return 0; @@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscach nobufs: spin_unlock(&cookie->lock); kfree(op); - fscache_stat(&fscache_n_attr_changed_nobufs); + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs); _leave(" 
= %d", -ENOBUFS); return -ENOBUFS; } @@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache /* allocate a retrieval operation and attempt to submit it */ op = kzalloc(sizeof(*op), GFP_NOIO); if (!op) { - fscache_stat(&fscache_n_retrievals_nomem); + fscache_stat_unchecked(&fscache_n_retrievals_nomem); return NULL; } @@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_loo return 0; } - fscache_stat(&fscache_n_retrievals_wait); + fscache_stat_unchecked(&fscache_n_retrievals_wait); jif = jiffies; if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP, fscache_wait_bit_interruptible, TASK_INTERRUPTIBLE) != 0) { - fscache_stat(&fscache_n_retrievals_intr); + fscache_stat_unchecked(&fscache_n_retrievals_intr); _leave(" = -ERESTARTSYS"); return -ERESTARTSYS; } @@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_loo */ static int fscache_wait_for_retrieval_activation(struct fscache_object *object, struct fscache_retrieval *op, - atomic_t *stat_op_waits, - atomic_t *stat_object_dead) + atomic_unchecked_t *stat_op_waits, + atomic_unchecked_t *stat_object_dead) { int ret; @@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_ac goto check_if_dead; _debug(">>> WT"); - fscache_stat(stat_op_waits); + fscache_stat_unchecked(stat_op_waits); if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING, fscache_wait_bit_interruptible, TASK_INTERRUPTIBLE) < 0) { @@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_ac check_if_dead: if (unlikely(fscache_object_is_dead(object))) { - fscache_stat(stat_object_dead); + fscache_stat_unchecked(stat_object_dead); return -ENOBUFS; } return 0; @@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct _enter("%p,%p,,,", cookie, page); - fscache_stat(&fscache_n_retrievals); + fscache_stat_unchecked(&fscache_n_retrievals); if (hlist_empty(&cookie->backing_objects)) goto nobufs; @@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct goto nobufs_unlock; spin_unlock(&cookie->lock); - fscache_stat(&fscache_n_retrieval_ops); + 
fscache_stat_unchecked(&fscache_n_retrieval_ops); /* pin the netfs read context in case we need to do the actual netfs * read because we've encountered a cache read failure */ @@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct error: if (ret == -ENOMEM) - fscache_stat(&fscache_n_retrievals_nomem); + fscache_stat_unchecked(&fscache_n_retrievals_nomem); else if (ret == -ERESTARTSYS) - fscache_stat(&fscache_n_retrievals_intr); + fscache_stat_unchecked(&fscache_n_retrievals_intr); else if (ret == -ENODATA) - fscache_stat(&fscache_n_retrievals_nodata); + fscache_stat_unchecked(&fscache_n_retrievals_nodata); else if (ret < 0) - fscache_stat(&fscache_n_retrievals_nobufs); + fscache_stat_unchecked(&fscache_n_retrievals_nobufs); else - fscache_stat(&fscache_n_retrievals_ok); + fscache_stat_unchecked(&fscache_n_retrievals_ok); fscache_put_retrieval(op); _leave(" = %d", ret); @@ -453,7 +453,7 @@ nobufs_unlock: spin_unlock(&cookie->lock); kfree(op); nobufs: - fscache_stat(&fscache_n_retrievals_nobufs); + fscache_stat_unchecked(&fscache_n_retrievals_nobufs); _leave(" = -ENOBUFS"); return -ENOBUFS; } @@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct _enter("%p,,%d,,,", cookie, *nr_pages); - fscache_stat(&fscache_n_retrievals); + fscache_stat_unchecked(&fscache_n_retrievals); if (hlist_empty(&cookie->backing_objects)) goto nobufs; @@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct goto nobufs_unlock; spin_unlock(&cookie->lock); - fscache_stat(&fscache_n_retrieval_ops); + fscache_stat_unchecked(&fscache_n_retrieval_ops); /* pin the netfs read context in case we need to do the actual netfs * read because we've encountered a cache read failure */ @@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct error: if (ret == -ENOMEM) - fscache_stat(&fscache_n_retrievals_nomem); + fscache_stat_unchecked(&fscache_n_retrievals_nomem); else if (ret == -ERESTARTSYS) - fscache_stat(&fscache_n_retrievals_intr); + 
fscache_stat_unchecked(&fscache_n_retrievals_intr); else if (ret == -ENODATA) - fscache_stat(&fscache_n_retrievals_nodata); + fscache_stat_unchecked(&fscache_n_retrievals_nodata); else if (ret < 0) - fscache_stat(&fscache_n_retrievals_nobufs); + fscache_stat_unchecked(&fscache_n_retrievals_nobufs); else - fscache_stat(&fscache_n_retrievals_ok); + fscache_stat_unchecked(&fscache_n_retrievals_ok); fscache_put_retrieval(op); _leave(" = %d", ret); @@ -570,7 +570,7 @@ nobufs_unlock: spin_unlock(&cookie->lock); kfree(op); nobufs: - fscache_stat(&fscache_n_retrievals_nobufs); + fscache_stat_unchecked(&fscache_n_retrievals_nobufs); _leave(" = -ENOBUFS"); return -ENOBUFS; } @@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_ _enter("%p,%p,,,", cookie, page); - fscache_stat(&fscache_n_allocs); + fscache_stat_unchecked(&fscache_n_allocs); if (hlist_empty(&cookie->backing_objects)) goto nobufs; @@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_ goto nobufs_unlock; spin_unlock(&cookie->lock); - fscache_stat(&fscache_n_alloc_ops); + fscache_stat_unchecked(&fscache_n_alloc_ops); ret = fscache_wait_for_retrieval_activation( object, op, @@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_ error: if (ret == -ERESTARTSYS) - fscache_stat(&fscache_n_allocs_intr); + fscache_stat_unchecked(&fscache_n_allocs_intr); else if (ret < 0) - fscache_stat(&fscache_n_allocs_nobufs); + fscache_stat_unchecked(&fscache_n_allocs_nobufs); else - fscache_stat(&fscache_n_allocs_ok); + fscache_stat_unchecked(&fscache_n_allocs_ok); fscache_put_retrieval(op); _leave(" = %d", ret); @@ -651,7 +651,7 @@ nobufs_unlock: spin_unlock(&cookie->lock); kfree(op); nobufs: - fscache_stat(&fscache_n_allocs_nobufs); + fscache_stat_unchecked(&fscache_n_allocs_nobufs); _leave(" = -ENOBUFS"); return -ENOBUFS; } @@ -694,7 +694,7 @@ static void fscache_write_op(struct fsca spin_lock(&cookie->stores_lock); - fscache_stat(&fscache_n_store_calls); + fscache_stat_unchecked(&fscache_n_store_calls); /* 
find a page to store */ page = NULL; @@ -705,7 +705,7 @@ static void fscache_write_op(struct fsca page = results[0]; _debug("gang %d [%lx]", n, page->index); if (page->index > op->store_limit) { - fscache_stat(&fscache_n_store_pages_over_limit); + fscache_stat_unchecked(&fscache_n_store_pages_over_limit); goto superseded; } @@ -721,7 +721,7 @@ static void fscache_write_op(struct fsca if (page) { fscache_set_op_state(&op->op, "Store"); - fscache_stat(&fscache_n_store_pages); + fscache_stat_unchecked(&fscache_n_store_pages); fscache_stat(&fscache_n_cop_write_page); ret = object->cache->ops->write_page(op, page); fscache_stat_d(&fscache_n_cop_write_page); @@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_ ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX); ASSERT(PageFsCache(page)); - fscache_stat(&fscache_n_stores); + fscache_stat_unchecked(&fscache_n_stores); op = kzalloc(sizeof(*op), GFP_NOIO); if (!op) @@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_ spin_unlock(&cookie->stores_lock); spin_unlock(&object->lock); - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id); + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id); op->store_limit = object->store_limit; if (fscache_submit_op(object, &op->op) < 0) @@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_ spin_unlock(&cookie->lock); radix_tree_preload_end(); - fscache_stat(&fscache_n_store_ops); - fscache_stat(&fscache_n_stores_ok); + fscache_stat_unchecked(&fscache_n_store_ops); + fscache_stat_unchecked(&fscache_n_stores_ok); /* the slow work queue now carries its own ref on the object */ fscache_put_operation(&op->op); @@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_ return 0; already_queued: - fscache_stat(&fscache_n_stores_again); + fscache_stat_unchecked(&fscache_n_stores_again); already_pending: spin_unlock(&cookie->stores_lock); spin_unlock(&object->lock); spin_unlock(&cookie->lock); radix_tree_preload_end(); kfree(op); - 
fscache_stat(&fscache_n_stores_ok); + fscache_stat_unchecked(&fscache_n_stores_ok); _leave(" = 0"); return 0; @@ -886,14 +886,14 @@ nobufs: spin_unlock(&cookie->lock); radix_tree_preload_end(); kfree(op); - fscache_stat(&fscache_n_stores_nobufs); + fscache_stat_unchecked(&fscache_n_stores_nobufs); _leave(" = -ENOBUFS"); return -ENOBUFS; nomem_free: kfree(op); nomem: - fscache_stat(&fscache_n_stores_oom); + fscache_stat_unchecked(&fscache_n_stores_oom); _leave(" = -ENOMEM"); return -ENOMEM; } @@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscac ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX); ASSERTCMP(page, !=, NULL); - fscache_stat(&fscache_n_uncaches); + fscache_stat_unchecked(&fscache_n_uncaches); /* cache withdrawal may beat us to it */ if (!PageFsCache(page)) @@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fs unsigned long loop; #ifdef CONFIG_FSCACHE_STATS - atomic_add(pagevec->nr, &fscache_n_marks); + atomic_add_unchecked(pagevec->nr, &fscache_n_marks); #endif for (loop = 0; loop < pagevec->nr; loop++) { diff -purN a/fs/fscache/stats.c b/fs/fscache/stats.c --- a/fs/fscache/stats.c 2012-02-14 08:38:13.031358129 -0800 +++ b/fs/fscache/stats.c 2012-02-14 10:50:16.067243356 -0800 @@ -18,95 +18,95 @@ /* * operation counters */ -atomic_t fscache_n_op_pend; -atomic_t fscache_n_op_run; -atomic_t fscache_n_op_enqueue; -atomic_t fscache_n_op_requeue; -atomic_t fscache_n_op_deferred_release; -atomic_t fscache_n_op_release; -atomic_t fscache_n_op_gc; -atomic_t fscache_n_op_cancelled; -atomic_t fscache_n_op_rejected; - -atomic_t fscache_n_attr_changed; -atomic_t fscache_n_attr_changed_ok; -atomic_t fscache_n_attr_changed_nobufs; -atomic_t fscache_n_attr_changed_nomem; -atomic_t fscache_n_attr_changed_calls; - -atomic_t fscache_n_allocs; -atomic_t fscache_n_allocs_ok; -atomic_t fscache_n_allocs_wait; -atomic_t fscache_n_allocs_nobufs; -atomic_t fscache_n_allocs_intr; -atomic_t fscache_n_allocs_object_dead; -atomic_t fscache_n_alloc_ops; 
-atomic_t fscache_n_alloc_op_waits; - -atomic_t fscache_n_retrievals; -atomic_t fscache_n_retrievals_ok; -atomic_t fscache_n_retrievals_wait; -atomic_t fscache_n_retrievals_nodata; -atomic_t fscache_n_retrievals_nobufs; -atomic_t fscache_n_retrievals_intr; -atomic_t fscache_n_retrievals_nomem; -atomic_t fscache_n_retrievals_object_dead; -atomic_t fscache_n_retrieval_ops; -atomic_t fscache_n_retrieval_op_waits; - -atomic_t fscache_n_stores; -atomic_t fscache_n_stores_ok; -atomic_t fscache_n_stores_again; -atomic_t fscache_n_stores_nobufs; -atomic_t fscache_n_stores_oom; -atomic_t fscache_n_store_ops; -atomic_t fscache_n_store_calls; -atomic_t fscache_n_store_pages; -atomic_t fscache_n_store_radix_deletes; -atomic_t fscache_n_store_pages_over_limit; - -atomic_t fscache_n_store_vmscan_not_storing; -atomic_t fscache_n_store_vmscan_gone; -atomic_t fscache_n_store_vmscan_busy; -atomic_t fscache_n_store_vmscan_cancelled; - -atomic_t fscache_n_marks; -atomic_t fscache_n_uncaches; - -atomic_t fscache_n_acquires; -atomic_t fscache_n_acquires_null; -atomic_t fscache_n_acquires_no_cache; -atomic_t fscache_n_acquires_ok; -atomic_t fscache_n_acquires_nobufs; -atomic_t fscache_n_acquires_oom; - -atomic_t fscache_n_updates; -atomic_t fscache_n_updates_null; -atomic_t fscache_n_updates_run; - -atomic_t fscache_n_relinquishes; -atomic_t fscache_n_relinquishes_null; -atomic_t fscache_n_relinquishes_waitcrt; -atomic_t fscache_n_relinquishes_retire; - -atomic_t fscache_n_cookie_index; -atomic_t fscache_n_cookie_data; -atomic_t fscache_n_cookie_special; - -atomic_t fscache_n_object_alloc; -atomic_t fscache_n_object_no_alloc; -atomic_t fscache_n_object_lookups; -atomic_t fscache_n_object_lookups_negative; -atomic_t fscache_n_object_lookups_positive; -atomic_t fscache_n_object_lookups_timed_out; -atomic_t fscache_n_object_created; -atomic_t fscache_n_object_avail; -atomic_t fscache_n_object_dead; - -atomic_t fscache_n_checkaux_none; -atomic_t fscache_n_checkaux_okay; -atomic_t 
fscache_n_checkaux_update; -atomic_t fscache_n_checkaux_obsolete; +atomic_unchecked_t fscache_n_op_pend; +atomic_unchecked_t fscache_n_op_run; +atomic_unchecked_t fscache_n_op_enqueue; +atomic_unchecked_t fscache_n_op_requeue; +atomic_unchecked_t fscache_n_op_deferred_release; +atomic_unchecked_t fscache_n_op_release; +atomic_unchecked_t fscache_n_op_gc; +atomic_unchecked_t fscache_n_op_cancelled; +atomic_unchecked_t fscache_n_op_rejected; + +atomic_unchecked_t fscache_n_attr_changed; +atomic_unchecked_t fscache_n_attr_changed_ok; +atomic_unchecked_t fscache_n_attr_changed_nobufs; +atomic_unchecked_t fscache_n_attr_changed_nomem; +atomic_unchecked_t fscache_n_attr_changed_calls; + +atomic_unchecked_t fscache_n_allocs; +atomic_unchecked_t fscache_n_allocs_ok; +atomic_unchecked_t fscache_n_allocs_wait; +atomic_unchecked_t fscache_n_allocs_nobufs; +atomic_unchecked_t fscache_n_allocs_intr; +atomic_unchecked_t fscache_n_allocs_object_dead; +atomic_unchecked_t fscache_n_alloc_ops; +atomic_unchecked_t fscache_n_alloc_op_waits; + +atomic_unchecked_t fscache_n_retrievals; +atomic_unchecked_t fscache_n_retrievals_ok; +atomic_unchecked_t fscache_n_retrievals_wait; +atomic_unchecked_t fscache_n_retrievals_nodata; +atomic_unchecked_t fscache_n_retrievals_nobufs; +atomic_unchecked_t fscache_n_retrievals_intr; +atomic_unchecked_t fscache_n_retrievals_nomem; +atomic_unchecked_t fscache_n_retrievals_object_dead; +atomic_unchecked_t fscache_n_retrieval_ops; +atomic_unchecked_t fscache_n_retrieval_op_waits; + +atomic_unchecked_t fscache_n_stores; +atomic_unchecked_t fscache_n_stores_ok; +atomic_unchecked_t fscache_n_stores_again; +atomic_unchecked_t fscache_n_stores_nobufs; +atomic_unchecked_t fscache_n_stores_oom; +atomic_unchecked_t fscache_n_store_ops; +atomic_unchecked_t fscache_n_store_calls; +atomic_unchecked_t fscache_n_store_pages; +atomic_unchecked_t fscache_n_store_radix_deletes; +atomic_unchecked_t fscache_n_store_pages_over_limit; + +atomic_unchecked_t 
fscache_n_store_vmscan_not_storing; +atomic_unchecked_t fscache_n_store_vmscan_gone; +atomic_unchecked_t fscache_n_store_vmscan_busy; +atomic_unchecked_t fscache_n_store_vmscan_cancelled; + +atomic_unchecked_t fscache_n_marks; +atomic_unchecked_t fscache_n_uncaches; + +atomic_unchecked_t fscache_n_acquires; +atomic_unchecked_t fscache_n_acquires_null; +atomic_unchecked_t fscache_n_acquires_no_cache; +atomic_unchecked_t fscache_n_acquires_ok; +atomic_unchecked_t fscache_n_acquires_nobufs; +atomic_unchecked_t fscache_n_acquires_oom; + +atomic_unchecked_t fscache_n_updates; +atomic_unchecked_t fscache_n_updates_null; +atomic_unchecked_t fscache_n_updates_run; + +atomic_unchecked_t fscache_n_relinquishes; +atomic_unchecked_t fscache_n_relinquishes_null; +atomic_unchecked_t fscache_n_relinquishes_waitcrt; +atomic_unchecked_t fscache_n_relinquishes_retire; + +atomic_unchecked_t fscache_n_cookie_index; +atomic_unchecked_t fscache_n_cookie_data; +atomic_unchecked_t fscache_n_cookie_special; + +atomic_unchecked_t fscache_n_object_alloc; +atomic_unchecked_t fscache_n_object_no_alloc; +atomic_unchecked_t fscache_n_object_lookups; +atomic_unchecked_t fscache_n_object_lookups_negative; +atomic_unchecked_t fscache_n_object_lookups_positive; +atomic_unchecked_t fscache_n_object_lookups_timed_out; +atomic_unchecked_t fscache_n_object_created; +atomic_unchecked_t fscache_n_object_avail; +atomic_unchecked_t fscache_n_object_dead; + +atomic_unchecked_t fscache_n_checkaux_none; +atomic_unchecked_t fscache_n_checkaux_okay; +atomic_unchecked_t fscache_n_checkaux_update; +atomic_unchecked_t fscache_n_checkaux_obsolete; atomic_t fscache_n_cop_alloc_object; atomic_t fscache_n_cop_lookup_object; @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq seq_puts(m, "FS-Cache statistics\n"); seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n", - atomic_read(&fscache_n_cookie_index), - atomic_read(&fscache_n_cookie_data), - atomic_read(&fscache_n_cookie_special)); + 
atomic_read_unchecked(&fscache_n_cookie_index), + atomic_read_unchecked(&fscache_n_cookie_data), + atomic_read_unchecked(&fscache_n_cookie_special)); seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n", - atomic_read(&fscache_n_object_alloc), - atomic_read(&fscache_n_object_no_alloc), - atomic_read(&fscache_n_object_avail), - atomic_read(&fscache_n_object_dead)); + atomic_read_unchecked(&fscache_n_object_alloc), + atomic_read_unchecked(&fscache_n_object_no_alloc), + atomic_read_unchecked(&fscache_n_object_avail), + atomic_read_unchecked(&fscache_n_object_dead)); seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n", - atomic_read(&fscache_n_checkaux_none), - atomic_read(&fscache_n_checkaux_okay), - atomic_read(&fscache_n_checkaux_update), - atomic_read(&fscache_n_checkaux_obsolete)); + atomic_read_unchecked(&fscache_n_checkaux_none), + atomic_read_unchecked(&fscache_n_checkaux_okay), + atomic_read_unchecked(&fscache_n_checkaux_update), + atomic_read_unchecked(&fscache_n_checkaux_obsolete)); seq_printf(m, "Pages : mrk=%u unc=%u\n", - atomic_read(&fscache_n_marks), - atomic_read(&fscache_n_uncaches)); + atomic_read_unchecked(&fscache_n_marks), + atomic_read_unchecked(&fscache_n_uncaches)); seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u" " oom=%u\n", - atomic_read(&fscache_n_acquires), - atomic_read(&fscache_n_acquires_null), - atomic_read(&fscache_n_acquires_no_cache), - atomic_read(&fscache_n_acquires_ok), - atomic_read(&fscache_n_acquires_nobufs), - atomic_read(&fscache_n_acquires_oom)); + atomic_read_unchecked(&fscache_n_acquires), + atomic_read_unchecked(&fscache_n_acquires_null), + atomic_read_unchecked(&fscache_n_acquires_no_cache), + atomic_read_unchecked(&fscache_n_acquires_ok), + atomic_read_unchecked(&fscache_n_acquires_nobufs), + atomic_read_unchecked(&fscache_n_acquires_oom)); seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n", - atomic_read(&fscache_n_object_lookups), - atomic_read(&fscache_n_object_lookups_negative), - 
atomic_read(&fscache_n_object_lookups_positive), - atomic_read(&fscache_n_object_lookups_timed_out), - atomic_read(&fscache_n_object_created)); + atomic_read_unchecked(&fscache_n_object_lookups), + atomic_read_unchecked(&fscache_n_object_lookups_negative), + atomic_read_unchecked(&fscache_n_object_lookups_positive), + atomic_read_unchecked(&fscache_n_object_lookups_timed_out), + atomic_read_unchecked(&fscache_n_object_created)); seq_printf(m, "Updates: n=%u nul=%u run=%u\n", - atomic_read(&fscache_n_updates), - atomic_read(&fscache_n_updates_null), - atomic_read(&fscache_n_updates_run)); + atomic_read_unchecked(&fscache_n_updates), + atomic_read_unchecked(&fscache_n_updates_null), + atomic_read_unchecked(&fscache_n_updates_run)); seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n", - atomic_read(&fscache_n_relinquishes), - atomic_read(&fscache_n_relinquishes_null), - atomic_read(&fscache_n_relinquishes_waitcrt), - atomic_read(&fscache_n_relinquishes_retire)); + atomic_read_unchecked(&fscache_n_relinquishes), + atomic_read_unchecked(&fscache_n_relinquishes_null), + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt), + atomic_read_unchecked(&fscache_n_relinquishes_retire)); seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n", - atomic_read(&fscache_n_attr_changed), - atomic_read(&fscache_n_attr_changed_ok), - atomic_read(&fscache_n_attr_changed_nobufs), - atomic_read(&fscache_n_attr_changed_nomem), - atomic_read(&fscache_n_attr_changed_calls)); + atomic_read_unchecked(&fscache_n_attr_changed), + atomic_read_unchecked(&fscache_n_attr_changed_ok), + atomic_read_unchecked(&fscache_n_attr_changed_nobufs), + atomic_read_unchecked(&fscache_n_attr_changed_nomem), + atomic_read_unchecked(&fscache_n_attr_changed_calls)); seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n", - atomic_read(&fscache_n_allocs), - atomic_read(&fscache_n_allocs_ok), - atomic_read(&fscache_n_allocs_wait), - atomic_read(&fscache_n_allocs_nobufs), - 
atomic_read(&fscache_n_allocs_intr)); + atomic_read_unchecked(&fscache_n_allocs), + atomic_read_unchecked(&fscache_n_allocs_ok), + atomic_read_unchecked(&fscache_n_allocs_wait), + atomic_read_unchecked(&fscache_n_allocs_nobufs), + atomic_read_unchecked(&fscache_n_allocs_intr)); seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n", - atomic_read(&fscache_n_alloc_ops), - atomic_read(&fscache_n_alloc_op_waits), - atomic_read(&fscache_n_allocs_object_dead)); + atomic_read_unchecked(&fscache_n_alloc_ops), + atomic_read_unchecked(&fscache_n_alloc_op_waits), + atomic_read_unchecked(&fscache_n_allocs_object_dead)); seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u" " int=%u oom=%u\n", - atomic_read(&fscache_n_retrievals), - atomic_read(&fscache_n_retrievals_ok), - atomic_read(&fscache_n_retrievals_wait), - atomic_read(&fscache_n_retrievals_nodata), - atomic_read(&fscache_n_retrievals_nobufs), - atomic_read(&fscache_n_retrievals_intr), - atomic_read(&fscache_n_retrievals_nomem)); + atomic_read_unchecked(&fscache_n_retrievals), + atomic_read_unchecked(&fscache_n_retrievals_ok), + atomic_read_unchecked(&fscache_n_retrievals_wait), + atomic_read_unchecked(&fscache_n_retrievals_nodata), + atomic_read_unchecked(&fscache_n_retrievals_nobufs), + atomic_read_unchecked(&fscache_n_retrievals_intr), + atomic_read_unchecked(&fscache_n_retrievals_nomem)); seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n", - atomic_read(&fscache_n_retrieval_ops), - atomic_read(&fscache_n_retrieval_op_waits), - atomic_read(&fscache_n_retrievals_object_dead)); + atomic_read_unchecked(&fscache_n_retrieval_ops), + atomic_read_unchecked(&fscache_n_retrieval_op_waits), + atomic_read_unchecked(&fscache_n_retrievals_object_dead)); seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n", - atomic_read(&fscache_n_stores), - atomic_read(&fscache_n_stores_ok), - atomic_read(&fscache_n_stores_again), - atomic_read(&fscache_n_stores_nobufs), - atomic_read(&fscache_n_stores_oom)); + 
atomic_read_unchecked(&fscache_n_stores), + atomic_read_unchecked(&fscache_n_stores_ok), + atomic_read_unchecked(&fscache_n_stores_again), + atomic_read_unchecked(&fscache_n_stores_nobufs), + atomic_read_unchecked(&fscache_n_stores_oom)); seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n", - atomic_read(&fscache_n_store_ops), - atomic_read(&fscache_n_store_calls), - atomic_read(&fscache_n_store_pages), - atomic_read(&fscache_n_store_radix_deletes), - atomic_read(&fscache_n_store_pages_over_limit)); + atomic_read_unchecked(&fscache_n_store_ops), + atomic_read_unchecked(&fscache_n_store_calls), + atomic_read_unchecked(&fscache_n_store_pages), + atomic_read_unchecked(&fscache_n_store_radix_deletes), + atomic_read_unchecked(&fscache_n_store_pages_over_limit)); seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n", - atomic_read(&fscache_n_store_vmscan_not_storing), - atomic_read(&fscache_n_store_vmscan_gone), - atomic_read(&fscache_n_store_vmscan_busy), - atomic_read(&fscache_n_store_vmscan_cancelled)); + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing), + atomic_read_unchecked(&fscache_n_store_vmscan_gone), + atomic_read_unchecked(&fscache_n_store_vmscan_busy), + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled)); seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n", - atomic_read(&fscache_n_op_pend), - atomic_read(&fscache_n_op_run), - atomic_read(&fscache_n_op_enqueue), - atomic_read(&fscache_n_op_cancelled), - atomic_read(&fscache_n_op_rejected)); + atomic_read_unchecked(&fscache_n_op_pend), + atomic_read_unchecked(&fscache_n_op_run), + atomic_read_unchecked(&fscache_n_op_enqueue), + atomic_read_unchecked(&fscache_n_op_cancelled), + atomic_read_unchecked(&fscache_n_op_rejected)); seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n", - atomic_read(&fscache_n_op_deferred_release), - atomic_read(&fscache_n_op_release), - atomic_read(&fscache_n_op_gc)); + atomic_read_unchecked(&fscache_n_op_deferred_release), + 
atomic_read_unchecked(&fscache_n_op_release), + atomic_read_unchecked(&fscache_n_op_gc)); seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n", atomic_read(&fscache_n_cop_alloc_object), diff -purN a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c --- a/fs/lockd/clntproc.c 2012-02-14 08:38:12.935358133 -0800 +++ b/fs/lockd/clntproc.c 2012-02-14 10:50:16.067243356 -0800 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt /* * Cookie counter for NLM requests */ -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234); +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234); void nlmclnt_next_cookie(struct nlm_cookie *c) { - u32 cookie = atomic_inc_return(&nlm_cookie); + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie); memcpy(c->data, &cookie, 4); c->len=4; diff -purN a/fs/notify/notification.c b/fs/notify/notification.c --- a/fs/notify/notification.c 2012-02-14 08:38:12.827358137 -0800 +++ b/fs/notify/notification.c 2012-02-14 10:50:16.071243355 -0800 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event * get set to 0 so it will never get 'freed' */ static struct fsnotify_event q_overflow_event; -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0); +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0); /** * fsnotify_get_cookie - return a unique cookie for use in synchronizing events. 
@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A */ u32 fsnotify_get_cookie(void) { - return atomic_inc_return(&fsnotify_sync_cookie); + return atomic_inc_return_unchecked(&fsnotify_sync_cookie); } EXPORT_SYMBOL_GPL(fsnotify_get_cookie); diff -purN a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c --- a/fs/ocfs2/localalloc.c 2012-02-14 08:38:12.707358143 -0800 +++ b/fs/ocfs2/localalloc.c 2012-02-14 10:50:16.071243355 -0800 @@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_windo goto bail; } - atomic_inc(&osb->alloc_stats.moves); + atomic_inc_unchecked(&osb->alloc_stats.moves); status = 0; bail: diff -purN a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h --- a/fs/ocfs2/ocfs2.h 2012-02-14 08:38:12.699358144 -0800 +++ b/fs/ocfs2/ocfs2.h 2012-02-14 10:50:16.075243355 -0800 @@ -217,11 +217,11 @@ enum ocfs2_vol_state struct ocfs2_alloc_stats { - atomic_t moves; - atomic_t local_data; - atomic_t bitmap_data; - atomic_t bg_allocs; - atomic_t bg_extends; + atomic_unchecked_t moves; + atomic_unchecked_t local_data; + atomic_unchecked_t bitmap_data; + atomic_unchecked_t bg_allocs; + atomic_unchecked_t bg_extends; }; enum ocfs2_local_alloc_state diff -purN a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c --- a/fs/ocfs2/suballoc.c 2012-02-14 08:38:12.699358144 -0800 +++ b/fs/ocfs2/suballoc.c 2012-02-14 10:50:16.079243356 -0800 @@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(s mlog_errno(status); goto bail; } - atomic_inc(&osb->alloc_stats.bg_extends); + atomic_inc_unchecked(&osb->alloc_stats.bg_extends); /* You should never ask for this much metadata */ BUG_ON(bits_wanted > @@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_su mlog_errno(status); goto bail; } - atomic_inc(&osb->alloc_stats.bg_allocs); + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs); *blkno_start = bg_blkno + (u64) *suballoc_bit_start; ac->ac_bits_given += (*num_bits); @@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_s mlog_errno(status); goto bail; } - 
atomic_inc(&osb->alloc_stats.bg_allocs); + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs); BUG_ON(num_bits != 1); @@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_ cluster_start, num_clusters); if (!status) - atomic_inc(&osb->alloc_stats.local_data); + atomic_inc_unchecked(&osb->alloc_stats.local_data); } else { if (min_clusters > (osb->bitmap_cpg - 1)) { /* The only paths asking for contiguousness @@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_ ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode, bg_blkno, bg_bit_off); - atomic_inc(&osb->alloc_stats.bitmap_data); + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data); } } if (status < 0) { diff -purN a/fs/ocfs2/super.c b/fs/ocfs2/super.c --- a/fs/ocfs2/super.c 2012-02-14 08:38:12.691358143 -0800 +++ b/fs/ocfs2/super.c 2012-02-14 10:50:16.079243356 -0800 @@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_s "%10s => GlobalAllocs: %d LocalAllocs: %d " "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n", "Stats", - atomic_read(&osb->alloc_stats.bitmap_data), - atomic_read(&osb->alloc_stats.local_data), - atomic_read(&osb->alloc_stats.bg_allocs), - atomic_read(&osb->alloc_stats.moves), - atomic_read(&osb->alloc_stats.bg_extends)); + atomic_read_unchecked(&osb->alloc_stats.bitmap_data), + atomic_read_unchecked(&osb->alloc_stats.local_data), + atomic_read_unchecked(&osb->alloc_stats.bg_allocs), + atomic_read_unchecked(&osb->alloc_stats.moves), + atomic_read_unchecked(&osb->alloc_stats.bg_extends)); out += snprintf(buf + out, len - out, "%10s => State: %u Descriptor: %llu Size: %u bits " @@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct spin_lock_init(&osb->osb_xattr_lock); ocfs2_init_inode_steal_slot(osb); - atomic_set(&osb->alloc_stats.moves, 0); - atomic_set(&osb->alloc_stats.local_data, 0); - atomic_set(&osb->alloc_stats.bitmap_data, 0); - atomic_set(&osb->alloc_stats.bg_allocs, 0); - atomic_set(&osb->alloc_stats.bg_extends, 0); + 
atomic_set_unchecked(&osb->alloc_stats.moves, 0); + atomic_set_unchecked(&osb->alloc_stats.local_data, 0); + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0); + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0); + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0); /* Copy the blockcheck stats from the superblock probe */ osb->osb_ecc_stats = *stats; diff -purN a/fs/proc/meminfo.c b/fs/proc/meminfo.c --- a/fs/proc/meminfo.c 2012-02-14 08:38:13.391358113 -0800 +++ b/fs/proc/meminfo.c 2012-02-14 10:50:16.079243356 -0800 @@ -149,7 +149,7 @@ static int meminfo_proc_show(struct seq_ vmi.used >> 10, vmi.largest_chunk >> 10 #ifdef CONFIG_MEMORY_FAILURE - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10) + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10) #endif ); diff -purN a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c --- a/fs/reiserfs/do_balan.c 2012-02-14 08:38:12.755358141 -0800 +++ b/fs/reiserfs/do_balan.c 2012-02-14 10:50:16.079243356 -0800 @@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb, return; } - atomic_inc(&(fs_generation(tb->tb_sb))); + atomic_inc_unchecked(&(fs_generation(tb->tb_sb))); do_balance_starts(tb); /* balance leaf returns 0 except if combining L R and S into diff -purN a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c --- a/fs/reiserfs/procfs.c 2012-02-14 08:38:12.755358141 -0800 +++ b/fs/reiserfs/procfs.c 2012-02-14 10:50:16.087243355 -0800 @@ -123,7 +123,7 @@ static int show_super(struct seq_file *m "SMALL_TAILS " : "NO_TAILS ", replay_only(sb) ? "REPLAY_ONLY " : "", convert_reiserfs(sb) ? 
"CONV " : "", - atomic_read(&r->s_generation_counter), + atomic_read_unchecked(&r->s_generation_counter), SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes), SF(s_do_balance), SF(s_unneeded_left_neighbor), SF(s_good_search_by_key_reada), SF(s_bmaps), diff -purN a/fs/sysfs/file.c b/fs/sysfs/file.c --- a/fs/sysfs/file.c 2012-02-14 08:38:13.279358118 -0800 +++ b/fs/sysfs/file.c 2012-02-14 10:50:16.091243354 -0800 @@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent struct sysfs_open_dirent { atomic_t refcnt; - atomic_t event; + atomic_unchecked_t event; wait_queue_head_t poll; struct list_head buffers; /* goes through sysfs_buffer.list */ }; @@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentr if (!sysfs_get_active_two(attr_sd)) return -ENODEV; - buffer->event = atomic_read(&attr_sd->s_attr.open->event); + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event); count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page); sysfs_put_active_two(attr_sd); @@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct return -ENOMEM; atomic_set(&new_od->refcnt, 0); - atomic_set(&new_od->event, 1); + atomic_set_unchecked(&new_od->event, 1); init_waitqueue_head(&new_od->poll); INIT_LIST_HEAD(&new_od->buffers); goto retry; @@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct fi sysfs_put_active_two(attr_sd); - if (buffer->event != atomic_read(&od->event)) + if (buffer->event != atomic_read_unchecked(&od->event)) goto trigger; return DEFAULT_POLLMASK; @@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_di od = sd->s_attr.open; if (od) { - atomic_inc(&od->event); + atomic_inc_unchecked(&od->event); wake_up_interruptible(&od->poll); } diff -purN a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h --- a/include/asm-generic/atomic64.h 2012-02-14 08:38:07.675358370 -0800 +++ b/include/asm-generic/atomic64.h 2012-02-14 10:50:16.091243354 -0800 @@ -16,6 +16,8 @@ typedef struct { long long counter; } atomic64_t; +typedef atomic64_t 
atomic64_unchecked_t; + #define ATOMIC64_INIT(i) { (i) } extern long long atomic64_read(const atomic64_t *v); @@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL) +#define atomic64_read_unchecked(v) atomic64_read(v) +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) +#define atomic64_inc_unchecked(v) atomic64_inc(v) +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v) +#define atomic64_dec_unchecked(v) atomic64_dec(v) +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) + #endif /* _ASM_GENERIC_ATOMIC64_H */ diff -purN a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h --- a/include/asm-generic/atomic-long.h 2012-02-14 08:38:07.671358370 -0800 +++ b/include/asm-generic/atomic-long.h 2012-02-14 10:50:16.091243354 -0800 @@ -22,6 +22,12 @@ typedef atomic64_t atomic_long_t; +#ifdef CONFIG_PAX_REFCOUNT +typedef atomic64_unchecked_t atomic_long_unchecked_t; +#else +typedef atomic64_t atomic_long_unchecked_t; +#endif + #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i) static inline long atomic_long_read(atomic_long_t *l) @@ -31,6 +37,15 @@ static inline long atomic_long_read(atom return (long)atomic64_read(v); } +#ifdef CONFIG_PAX_REFCOUNT +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l) +{ + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l; + + return (long)atomic64_read_unchecked(v); +} +#endif + static inline void atomic_long_set(atomic_long_t *l, long i) { atomic64_t *v = (atomic64_t *)l; @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi atomic64_set(v, i); } +#ifdef CONFIG_PAX_REFCOUNT +static inline void 
atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i) +{ + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l; + + atomic64_set_unchecked(v, i); +} +#endif + static inline void atomic_long_inc(atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi atomic64_inc(v); } +#ifdef CONFIG_PAX_REFCOUNT +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l) +{ + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l; + + atomic64_inc_unchecked(v); +} +#endif + static inline void atomic_long_dec(atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi atomic64_dec(v); } +#ifdef CONFIG_PAX_REFCOUNT +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l) +{ + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l; + + atomic64_dec_unchecked(v); +} +#endif + static inline void atomic_long_add(long i, atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; @@ -59,6 +101,15 @@ static inline void atomic_long_add(long atomic64_add(i, v); } +#ifdef CONFIG_PAX_REFCOUNT +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l) +{ + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l; + + atomic64_add_unchecked(i, v); +} +#endif + static inline void atomic_long_sub(long i, atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; @@ -115,6 +166,15 @@ static inline long atomic_long_inc_retur return (long)atomic64_inc_return(v); } +#ifdef CONFIG_PAX_REFCOUNT +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l) +{ + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l; + + return (long)atomic64_inc_return_unchecked(v); +} +#endif + static inline long atomic_long_dec_return(atomic_long_t *l) { atomic64_t *v = (atomic64_t *)l; @@ -140,6 +200,12 @@ static inline long atomic_long_add_unles typedef atomic_t atomic_long_t; +#ifdef CONFIG_PAX_REFCOUNT +typedef atomic_unchecked_t atomic_long_unchecked_t; 
+#else +typedef atomic_t atomic_long_unchecked_t; +#endif + #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i) static inline long atomic_long_read(atomic_long_t *l) { @@ -148,6 +214,15 @@ static inline long atomic_long_read(atom return (long)atomic_read(v); } +#ifdef CONFIG_PAX_REFCOUNT +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l) +{ + atomic_unchecked_t *v = (atomic_unchecked_t *)l; + + return (long)atomic_read_unchecked(v); +} +#endif + static inline void atomic_long_set(atomic_long_t *l, long i) { atomic_t *v = (atomic_t *)l; @@ -155,6 +230,15 @@ static inline void atomic_long_set(atomi atomic_set(v, i); } +#ifdef CONFIG_PAX_REFCOUNT +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i) +{ + atomic_unchecked_t *v = (atomic_unchecked_t *)l; + + atomic_set_unchecked(v, i); +} +#endif + static inline void atomic_long_inc(atomic_long_t *l) { atomic_t *v = (atomic_t *)l; @@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomi atomic_inc(v); } +#ifdef CONFIG_PAX_REFCOUNT +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l) +{ + atomic_unchecked_t *v = (atomic_unchecked_t *)l; + + atomic_inc_unchecked(v); +} +#endif + static inline void atomic_long_dec(atomic_long_t *l) { atomic_t *v = (atomic_t *)l; @@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomi atomic_dec(v); } +#ifdef CONFIG_PAX_REFCOUNT +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l) +{ + atomic_unchecked_t *v = (atomic_unchecked_t *)l; + + atomic_dec_unchecked(v); +} +#endif + static inline void atomic_long_add(long i, atomic_long_t *l) { atomic_t *v = (atomic_t *)l; @@ -176,6 +278,15 @@ static inline void atomic_long_add(long atomic_add(i, v); } +#ifdef CONFIG_PAX_REFCOUNT +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l) +{ + atomic_unchecked_t *v = (atomic_unchecked_t *)l; + + atomic_add_unchecked(i, v); +} +#endif + static inline void atomic_long_sub(long i, 
atomic_long_t *l) { atomic_t *v = (atomic_t *)l; @@ -232,6 +343,15 @@ static inline long atomic_long_inc_retur return (long)atomic_inc_return(v); } +#ifdef CONFIG_PAX_REFCOUNT +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l) +{ + atomic_unchecked_t *v = (atomic_unchecked_t *)l; + + return (long)atomic_inc_return_unchecked(v); +} +#endif + static inline long atomic_long_dec_return(atomic_long_t *l) { atomic_t *v = (atomic_t *)l; @@ -255,4 +375,47 @@ static inline long atomic_long_add_unles #endif /* BITS_PER_LONG == 64 */ +#ifdef CONFIG_PAX_REFCOUNT +static inline void pax_refcount_needs_these_functions(void) +{ + atomic_read_unchecked((atomic_unchecked_t *)NULL); + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0); + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL); + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL); + atomic_inc_unchecked((atomic_unchecked_t *)NULL); + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL); + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL); + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL); + atomic_dec_unchecked((atomic_unchecked_t *)NULL); + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0); + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0); + + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL); + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0); + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL); + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL); + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL); + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL); +} +#else +#define atomic_read_unchecked(v) atomic_read(v) +#define atomic_set_unchecked(v, i) atomic_set((v), (i)) +#define atomic_add_unchecked(i, v) atomic_add((i), (v)) +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v)) +#define atomic_inc_unchecked(v) atomic_inc(v) +#define atomic_inc_and_test_unchecked(v) 
atomic_inc_and_test(v) +#define atomic_inc_return_unchecked(v) atomic_inc_return(v) +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v)) +#define atomic_dec_unchecked(v) atomic_dec(v) +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n)) +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i)) + +#define atomic_long_read_unchecked(v) atomic_long_read(v) +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i)) +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v)) +#define atomic_long_inc_unchecked(v) atomic_long_inc(v) +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v) +#define atomic_long_dec_unchecked(v) atomic_long_dec(v) +#endif + #endif /* _ASM_GENERIC_ATOMIC_LONG_H */ diff -purN a/include/drm/drmP.h b/include/drm/drmP.h --- a/include/drm/drmP.h 2012-02-14 08:38:07.711358368 -0800 +++ b/include/drm/drmP.h 2012-02-14 10:50:16.091243354 -0800 @@ -926,7 +926,7 @@ struct drm_device { /*@{ */ unsigned long counters; enum drm_stat_type types[15]; - atomic_t counts[15]; + atomic_unchecked_t counts[15]; /*@} */ struct list_head filelist; @@ -1046,11 +1046,11 @@ struct drm_device { spinlock_t object_name_lock; struct idr object_name_idr; atomic_t object_count; - atomic_t object_memory; + atomic_unchecked_t object_memory; atomic_t pin_count; - atomic_t pin_memory; + atomic_unchecked_t pin_memory; atomic_t gtt_count; - atomic_t gtt_memory; + atomic_unchecked_t gtt_memory; uint32_t gtt_total; uint32_t invalidate_domains; /* domains pending invalidation */ uint32_t flush_domains; /* domains pending flush */ diff -purN a/include/linux/atmdev.h b/include/linux/atmdev.h --- a/include/linux/atmdev.h 2012-02-14 08:38:07.815358363 -0800 +++ b/include/linux/atmdev.h 2012-02-14 10:50:16.091243354 -0800 @@ -237,7 +237,7 @@ struct compat_atm_iobuf { #endif struct k_atm_aal_stats { -#define __HANDLE_ITEM(i) atomic_t i +#define __HANDLE_ITEM(i) atomic_unchecked_t i __AAL_STAT_ITEMS #undef __HANDLE_ITEM }; 
diff -purN a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h --- a/include/linux/blktrace_api.h 2012-02-14 08:38:08.039358352 -0800 +++ b/include/linux/blktrace_api.h 2012-02-14 10:50:16.095243354 -0800 @@ -160,7 +160,7 @@ struct blk_trace { struct dentry *dir; struct dentry *dropped_file; struct dentry *msg_file; - atomic_t dropped; + atomic_unchecked_t dropped; }; extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *); diff -purN a/include/linux/dst.h b/include/linux/dst.h --- a/include/linux/dst.h 2012-02-14 08:38:07.903358358 -0800 +++ b/include/linux/dst.h 2012-02-14 10:50:16.095243354 -0800 @@ -380,7 +380,7 @@ struct dst_node struct thread_pool *pool; /* Transaction IDs live here */ - atomic_long_t gen; + atomic_long_unchecked_t gen; /* * How frequently and how many times transaction diff -purN a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h --- a/include/linux/fscache-cache.h 2012-02-14 08:38:07.827358363 -0800 +++ b/include/linux/fscache-cache.h 2012-02-14 10:50:16.103243354 -0800 @@ -116,7 +116,7 @@ struct fscache_operation { #endif }; -extern atomic_t fscache_op_debug_id; +extern atomic_unchecked_t fscache_op_debug_id; extern const struct slow_work_ops fscache_op_slow_work_ops; extern void fscache_enqueue_operation(struct fscache_operation *); @@ -134,7 +134,7 @@ static inline void fscache_operation_ini fscache_operation_release_t release) { atomic_set(&op->usage, 1); - op->debug_id = atomic_inc_return(&fscache_op_debug_id); + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id); op->release = release; INIT_LIST_HEAD(&op->pend_link); fscache_set_op_state(op, "Init"); diff -purN a/include/linux/genhd.h b/include/linux/genhd.h --- a/include/linux/genhd.h 2012-02-14 08:38:07.915358358 -0800 +++ b/include/linux/genhd.h 2012-02-14 10:50:16.103243354 -0800 @@ -161,7 +161,7 @@ struct gendisk { struct timer_rand_state *random; - atomic_t sync_io; /* RAID */ + atomic_unchecked_t sync_io; /* RAID */ struct 
work_struct async_notify; #ifdef CONFIG_BLK_DEV_INTEGRITY struct blk_integrity *integrity; diff -purN a/include/linux/i2o.h b/include/linux/i2o.h --- a/include/linux/i2o.h 2012-02-14 08:38:07.911358359 -0800 +++ b/include/linux/i2o.h 2012-02-14 10:50:16.103243354 -0800 @@ -564,7 +564,7 @@ struct i2o_controller { struct i2o_device *exec; /* Executive */ #if BITS_PER_LONG == 64 spinlock_t context_list_lock; /* lock for context_list */ - atomic_t context_list_counter; /* needed for unique contexts */ + atomic_unchecked_t context_list_counter; /* needed for unique contexts */ struct list_head context_list; /* list of context id's and pointers */ #endif diff -purN a/include/linux/kgdb.h b/include/linux/kgdb.h --- a/include/linux/kgdb.h 2012-02-14 08:38:07.839358361 -0800 +++ b/include/linux/kgdb.h 2012-02-14 10:50:16.103243354 -0800 @@ -74,8 +74,8 @@ void kgdb_breakpoint(void); extern int kgdb_connected; -extern atomic_t kgdb_setting_breakpoint; -extern atomic_t kgdb_cpu_doing_single_step; +extern atomic_unchecked_t kgdb_setting_breakpoint; +extern atomic_unchecked_t kgdb_cpu_doing_single_step; extern struct task_struct *kgdb_usethread; extern struct task_struct *kgdb_contthread; diff -purN a/include/linux/mm.h b/include/linux/mm.h --- a/include/linux/mm.h 2012-02-14 08:38:07.911358359 -0800 +++ b/include/linux/mm.h 2012-02-14 10:50:16.107243354 -0800 @@ -1340,7 +1340,7 @@ extern void memory_failure(unsigned long extern int __memory_failure(unsigned long pfn, int trapno, int ref); extern int sysctl_memory_failure_early_kill; extern int sysctl_memory_failure_recovery; -extern atomic_long_t mce_bad_pages; +extern atomic_long_unchecked_t mce_bad_pages; #endif /* __KERNEL__ */ #endif /* _LINUX_MM_H */ diff -purN a/include/linux/mmzone.h b/include/linux/mmzone.h --- a/include/linux/mmzone.h 2012-02-14 08:38:07.959358356 -0800 +++ b/include/linux/mmzone.h 2012-02-14 10:50:16.111243354 -0800 @@ -350,7 +350,7 @@ struct zone { unsigned long flags; /* zone flags, see below */ /* 
Zone statistics */ - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; /* * prev_priority holds the scanning priority for this zone. It is diff -purN a/include/linux/oprofile.h b/include/linux/oprofile.h --- a/include/linux/oprofile.h 2012-02-14 08:38:07.931358358 -0800 +++ b/include/linux/oprofile.h 2012-02-14 10:50:16.115243353 -0800 @@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root, char const * name, ulong * val); -/** Create a file for read-only access to an atomic_t. */ +/** Create a file for read-only access to an atomic_unchecked_t. */ int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root, - char const * name, atomic_t * val); + char const * name, atomic_unchecked_t * val); /** create a directory */ struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root, diff -purN a/include/linux/perf_event.h b/include/linux/perf_event.h --- a/include/linux/perf_event.h 2012-02-14 08:38:08.003358355 -0800 +++ b/include/linux/perf_event.h 2012-02-14 10:50:16.115243353 -0800 @@ -476,7 +476,7 @@ struct hw_perf_event { struct hrtimer hrtimer; }; }; - atomic64_t prev_count; + atomic64_unchecked_t prev_count; u64 sample_period; u64 last_period; atomic64_t period_left; @@ -557,7 +557,7 @@ struct perf_event { const struct pmu *pmu; enum perf_event_active_state state; - atomic64_t count; + atomic64_unchecked_t count; /* * These are the total time in nanoseconds that the event @@ -595,8 +595,8 @@ struct perf_event { * These accumulate total time (in nanoseconds) that children * events have been enabled and running, respectively. 
*/ - atomic64_t child_total_time_enabled; - atomic64_t child_total_time_running; + atomic64_unchecked_t child_total_time_enabled; + atomic64_unchecked_t child_total_time_running; /* * Protect attach/detach and child_list: diff -purN a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h --- a/include/linux/reiserfs_fs.h 2012-02-14 08:38:07.903358358 -0800 +++ b/include/linux/reiserfs_fs.h 2012-02-14 10:50:16.119243353 -0800 @@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */ #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter) -#define get_generation(s) atomic_read (&fs_generation(s)) +#define get_generation(s) atomic_read_unchecked (&fs_generation(s)) #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen) #define __fs_changed(gen,s) (gen != get_generation (s)) #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);}) diff -purN a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h --- a/include/linux/reiserfs_fs_sb.h 2012-02-14 08:38:08.039358352 -0800 +++ b/include/linux/reiserfs_fs_sb.h 2012-02-14 10:50:16.127243353 -0800 @@ -377,7 +377,7 @@ struct reiserfs_sb_info { /* Comment? -Hans */ wait_queue_head_t s_wait; /* To be obsoleted soon by per buffer seals.. -Hans */ - atomic_t s_generation_counter; // increased by one every time the + atomic_unchecked_t s_generation_counter; // increased by one every time the // tree gets re-balanced unsigned long s_properties; /* File system properties. 
Currently holds on-disk FS format */ diff -purN a/include/linux/sched.h b/include/linux/sched.h --- a/include/linux/sched.h 2012-02-14 08:38:07.931358358 -0800 +++ b/include/linux/sched.h 2012-02-14 11:49:40.106061490 -0800 @@ -669,6 +669,11 @@ struct signal_struct { struct tty_audit_buf *tty_audit_buf; #endif +#ifdef CONFIG_PAX_REFCOUNT + u32 curr_ip; + u32 saved_ip; +#endif + int oom_adj; /* OOM kill score adjustment (bit shift) */ }; @@ -1545,6 +1550,8 @@ struct task_struct { #endif /* CONFIG_TRACING */ }; +extern void pax_report_refcount_overflow(struct pt_regs *regs); + /* Future-safe accessor for struct task_struct's cpus_allowed. */ #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed) diff -purN a/include/linux/slab_def.h b/include/linux/slab_def.h --- a/include/linux/slab_def.h 2012-02-14 08:38:07.903358358 -0800 +++ b/include/linux/slab_def.h 2012-02-14 10:50:16.127243353 -0800 @@ -69,10 +69,10 @@ struct kmem_cache { unsigned long node_allocs; unsigned long node_frees; unsigned long node_overflow; - atomic_t allochit; - atomic_t allocmiss; - atomic_t freehit; - atomic_t freemiss; + atomic_unchecked_t allochit; + atomic_unchecked_t allocmiss; + atomic_unchecked_t freehit; + atomic_unchecked_t freemiss; /* * If debugging is enabled, then the allocator can add additional diff -purN a/include/linux/sonet.h b/include/linux/sonet.h --- a/include/linux/sonet.h 2012-02-14 08:38:07.907358359 -0800 +++ b/include/linux/sonet.h 2012-02-14 10:50:16.127243353 -0800 @@ -61,7 +61,7 @@ struct sonet_stats { #include struct k_sonet_stats { -#define __HANDLE_ITEM(i) atomic_t i +#define __HANDLE_ITEM(i) atomic_unchecked_t i __SONET_ITEMS #undef __HANDLE_ITEM }; diff -purN a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h --- a/include/linux/sunrpc/svc_rdma.h 2012-02-14 08:38:07.835358362 -0800 +++ b/include/linux/sunrpc/svc_rdma.h 2012-02-14 10:50:16.127243353 -0800 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord; extern unsigned int svcrdma_max_requests; 
extern unsigned int svcrdma_max_req_size; -extern atomic_t rdma_stat_recv; -extern atomic_t rdma_stat_read; -extern atomic_t rdma_stat_write; -extern atomic_t rdma_stat_sq_starve; -extern atomic_t rdma_stat_rq_starve; -extern atomic_t rdma_stat_rq_poll; -extern atomic_t rdma_stat_rq_prod; -extern atomic_t rdma_stat_sq_poll; -extern atomic_t rdma_stat_sq_prod; +extern atomic_unchecked_t rdma_stat_recv; +extern atomic_unchecked_t rdma_stat_read; +extern atomic_unchecked_t rdma_stat_write; +extern atomic_unchecked_t rdma_stat_sq_starve; +extern atomic_unchecked_t rdma_stat_rq_starve; +extern atomic_unchecked_t rdma_stat_rq_poll; +extern atomic_unchecked_t rdma_stat_rq_prod; +extern atomic_unchecked_t rdma_stat_sq_poll; +extern atomic_unchecked_t rdma_stat_sq_prod; #define RPCRDMA_VERSION 1 diff -purN a/include/linux/types.h b/include/linux/types.h --- a/include/linux/types.h 2012-02-14 08:38:07.915358358 -0800 +++ b/include/linux/types.h 2012-02-14 10:50:16.131243353 -0800 @@ -191,10 +191,26 @@ typedef struct { volatile int counter; } atomic_t; +#ifdef CONFIG_PAX_REFCOUNT +typedef struct { + volatile int counter; +} atomic_unchecked_t; +#else +typedef atomic_t atomic_unchecked_t; +#endif + #ifdef CONFIG_64BIT typedef struct { volatile long counter; } atomic64_t; + +#ifdef CONFIG_PAX_REFCOUNT +typedef struct { + volatile long counter; +} atomic64_unchecked_t; +#else +typedef atomic64_t atomic64_unchecked_t; +#endif #endif struct ustat { diff -purN a/include/linux/vermagic.h b/include/linux/vermagic.h --- a/include/linux/vermagic.h 2012-02-14 08:38:07.995358354 -0800 +++ b/include/linux/vermagic.h 2012-02-14 11:26:16.706071995 -0800 @@ -26,9 +26,34 @@ #define MODULE_ARCH_VERMAGIC "" #endif +#ifdef CONFIG_PAX_REFCOUNT +#define MODULE_PAX_REFCOUNT "REFCOUNT " +#else +#define MODULE_PAX_REFCOUNT "" +#endif + +#ifdef CONSTIFY_PLUGIN +#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN " +#else +#define MODULE_CONSTIFY_PLUGIN "" +#endif + +#ifdef STACKLEAK_PLUGIN +#define 
MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN " +#else +#define MODULE_STACKLEAK_PLUGIN "" +#endif + +#ifdef CONFIG_GRKERNSEC +#define MODULE_GRSEC "GRSEC " +#else +#define MODULE_GRSEC "" +#endif + #define VERMAGIC_STRING \ UTS_RELEASE " " \ MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \ MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \ - MODULE_ARCH_VERMAGIC + MODULE_ARCH_VERMAGIC \ + MODULE_PAX_REFCOUNT diff -purN a/include/linux/vmstat.h b/include/linux/vmstat.h --- a/include/linux/vmstat.h 2012-02-14 08:38:07.939358357 -0800 +++ b/include/linux/vmstat.h 2012-02-14 10:50:16.139243352 -0800 @@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(in /* * Zone based page accounting with per cpu differentials. */ -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; static inline void zone_page_state_add(long x, struct zone *zone, enum zone_stat_item item) { - atomic_long_add(x, &zone->vm_stat[item]); - atomic_long_add(x, &vm_stat[item]); + atomic_long_add_unchecked(x, &zone->vm_stat[item]); + atomic_long_add_unchecked(x, &vm_stat[item]); } static inline unsigned long global_page_state(enum zone_stat_item item) { - long x = atomic_long_read(&vm_stat[item]); + long x = atomic_long_read_unchecked(&vm_stat[item]); #ifdef CONFIG_SMP if (x < 0) x = 0; @@ -158,7 +158,7 @@ static inline unsigned long global_page_ static inline unsigned long zone_page_state(struct zone *zone, enum zone_stat_item item) { - long x = atomic_long_read(&zone->vm_stat[item]); + long x = atomic_long_read_unchecked(&zone->vm_stat[item]); #ifdef CONFIG_SMP if (x < 0) x = 0; @@ -175,7 +175,7 @@ static inline unsigned long zone_page_st static inline unsigned long zone_page_state_snapshot(struct zone *zone, enum zone_stat_item item) { - long x = atomic_long_read(&zone->vm_stat[item]); + long x = atomic_long_read_unchecked(&zone->vm_stat[item]); #ifdef CONFIG_SMP int cpu; @@ -264,8 +264,8 @@ static inline void 
__mod_zone_page_state static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item) { - atomic_long_inc(&zone->vm_stat[item]); - atomic_long_inc(&vm_stat[item]); + atomic_long_inc_unchecked(&zone->vm_stat[item]); + atomic_long_inc_unchecked(&vm_stat[item]); } static inline void __inc_zone_page_state(struct page *page, @@ -276,8 +276,8 @@ static inline void __inc_zone_page_state static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item) { - atomic_long_dec(&zone->vm_stat[item]); - atomic_long_dec(&vm_stat[item]); + atomic_long_dec_unchecked(&zone->vm_stat[item]); + atomic_long_dec_unchecked(&vm_stat[item]); } static inline void __dec_zone_page_state(struct page *page, diff -purN a/include/media/v4l2-device.h b/include/media/v4l2-device.h --- a/include/media/v4l2-device.h 2012-02-14 08:38:07.723358366 -0800 +++ b/include/media/v4l2-device.h 2012-02-14 10:50:16.139243352 -0800 @@ -71,7 +71,7 @@ int __must_check v4l2_device_register(st this function returns 0. If the name ends with a digit (e.g. cx18), then the name will be set to cx18-0 since cx180 looks really odd. */ int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename, - atomic_t *instance); + atomic_unchecked_t *instance); /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects. 
Since the parent disappears this ensures that v4l2_dev doesn't have an diff -purN a/include/net/flow.h b/include/net/flow.h --- a/include/net/flow.h 2012-02-14 08:38:07.779358364 -0800 +++ b/include/net/flow.h 2012-02-14 10:50:16.139243352 -0800 @@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir, flow_resolve_t resolver); extern void flow_cache_flush(void); -extern atomic_t flow_cache_genid; +extern atomic_unchecked_t flow_cache_genid; static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2) { diff -purN a/include/net/inetpeer.h b/include/net/inetpeer.h --- a/include/net/inetpeer.h 2012-02-14 08:38:07.771358365 -0800 +++ b/include/net/inetpeer.h 2012-02-14 10:50:16.139243352 -0800 @@ -24,7 +24,7 @@ struct inet_peer __u32 dtime; /* the time of last use of not * referenced entries */ atomic_t refcnt; - atomic_t rid; /* Frag reception counter */ + atomic_unchecked_t rid; /* Frag reception counter */ __u32 tcp_ts; unsigned long tcp_ts_stamp; }; diff -purN a/include/net/ip_vs.h b/include/net/ip_vs.h --- a/include/net/ip_vs.h 2012-02-14 08:38:07.787358364 -0800 +++ b/include/net/ip_vs.h 2012-02-14 10:50:16.139243352 -0800 @@ -365,7 +365,7 @@ struct ip_vs_conn { struct ip_vs_conn *control; /* Master control connection */ atomic_t n_control; /* Number of controlled ones */ struct ip_vs_dest *dest; /* real server */ - atomic_t in_pkts; /* incoming packet counter */ + atomic_unchecked_t in_pkts; /* incoming packet counter */ /* packet transmitter for different forwarding methods. 
If it mangles the packet, it must return NF_DROP or better NF_STOLEN, @@ -466,7 +466,7 @@ struct ip_vs_dest { union nf_inet_addr addr; /* IP address of the server */ __be16 port; /* port number of the server */ volatile unsigned flags; /* dest status flags */ - atomic_t conn_flags; /* flags to copy to conn */ + atomic_unchecked_t conn_flags; /* flags to copy to conn */ atomic_t weight; /* server weight */ atomic_t refcnt; /* reference counter */ diff -purN a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h --- a/include/net/iucv/af_iucv.h 2012-02-14 08:38:07.775358365 -0800 +++ b/include/net/iucv/af_iucv.h 2012-02-14 10:50:16.143243352 -0800 @@ -87,7 +87,7 @@ struct iucv_sock { struct iucv_sock_list { struct hlist_head head; rwlock_t lock; - atomic_t autobind_name; + atomic_unchecked_t autobind_name; }; unsigned int iucv_sock_poll(struct file *file, struct socket *sock, diff -purN a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h --- a/include/net/netns/ipv4.h 2012-02-14 08:38:07.811358364 -0800 +++ b/include/net/netns/ipv4.h 2012-02-14 10:50:16.143243352 -0800 @@ -54,7 +54,7 @@ struct netns_ipv4 { int current_rt_cache_rebuild_count; struct timer_list rt_secret_timer; - atomic_t rt_genid; + atomic_unchecked_t rt_genid; #ifdef CONFIG_IP_MROUTE struct sock *mroute_sk; diff -purN a/include/net/sock.h b/include/net/sock.h --- a/include/net/sock.h 2012-02-14 08:38:07.771358365 -0800 +++ b/include/net/sock.h 2012-02-14 10:50:16.143243352 -0800 @@ -273,7 +273,7 @@ struct sock { rwlock_t sk_callback_lock; int sk_err, sk_err_soft; - atomic_t sk_drops; + atomic_unchecked_t sk_drops; unsigned short sk_ack_backlog; unsigned short sk_max_ack_backlog; __u32 sk_priority; diff -purN a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h --- a/include/scsi/scsi_device.h 2012-02-14 08:38:07.695358369 -0800 +++ b/include/scsi/scsi_device.h 2012-02-14 10:50:16.143243352 -0800 @@ -156,9 +156,9 @@ struct scsi_device { unsigned int max_device_blocked; /* what device_blocked 
counts down from */ #define SCSI_DEFAULT_DEVICE_BLOCKED 3 - atomic_t iorequest_cnt; - atomic_t iodone_cnt; - atomic_t ioerr_cnt; + atomic_unchecked_t iorequest_cnt; + atomic_unchecked_t iodone_cnt; + atomic_unchecked_t ioerr_cnt; struct device sdev_gendev, sdev_dev; diff -purN a/include/sound/ymfpci.h b/include/sound/ymfpci.h --- a/include/sound/ymfpci.h 2012-02-14 08:38:08.063358352 -0800 +++ b/include/sound/ymfpci.h 2012-02-14 10:50:16.143243352 -0800 @@ -358,7 +358,7 @@ struct snd_ymfpci { spinlock_t reg_lock; spinlock_t voice_lock; wait_queue_head_t interrupt_sleep; - atomic_t interrupt_sleep_count; + atomic_unchecked_t interrupt_sleep_count; struct snd_info_entry *proc_entry; const struct firmware *dsp_microcode; const struct firmware *controller_microcode; diff -purN a/kernel/audit.c b/kernel/audit.c --- a/kernel/audit.c 2012-02-14 08:38:08.759358320 -0800 +++ b/kernel/audit.c 2012-02-14 10:50:16.151243352 -0800 @@ -110,7 +110,7 @@ u32 audit_sig_sid = 0; 3) suppressed due to audit_rate_limit 4) suppressed due to audit_backlog_limit */ -static atomic_t audit_lost = ATOMIC_INIT(0); +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0); /* The netlink socket. 
*/ static struct sock *audit_sock; @@ -232,7 +232,7 @@ void audit_log_lost(const char *message) unsigned long now; int print; - atomic_inc(&audit_lost); + atomic_inc_unchecked(&audit_lost); print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit); @@ -251,7 +251,7 @@ void audit_log_lost(const char *message) printk(KERN_WARNING "audit: audit_lost=%d audit_rate_limit=%d " "audit_backlog_limit=%d\n", - atomic_read(&audit_lost), + atomic_read_unchecked(&audit_lost), audit_rate_limit, audit_backlog_limit); audit_panic(message); @@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_b status_set.pid = audit_pid; status_set.rate_limit = audit_rate_limit; status_set.backlog_limit = audit_backlog_limit; - status_set.lost = atomic_read(&audit_lost); + status_set.lost = atomic_read_unchecked(&audit_lost); status_set.backlog = skb_queue_len(&audit_skb_queue); audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0, &status_set, sizeof(status_set)); diff -purN a/kernel/auditsc.c b/kernel/auditsc.c --- a/kernel/auditsc.c 2012-02-14 08:38:08.739358321 -0800 +++ b/kernel/auditsc.c 2012-02-14 10:50:16.151243352 -0800 @@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_conte } /* global counter which is incremented every time something logs in */ -static atomic_t session_id = ATOMIC_INIT(0); +static atomic_unchecked_t session_id = ATOMIC_INIT(0); /** * audit_set_loginuid - set a task's audit_context loginuid @@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT */ int audit_set_loginuid(struct task_struct *task, uid_t loginuid) { - unsigned int sessionid = atomic_inc_return(&session_id); + unsigned int sessionid = atomic_inc_return_unchecked(&session_id); struct audit_context *context = task->audit_context; if (context && context->in_syscall) { diff -purN a/kernel/kgdb.c b/kernel/kgdb.c --- a/kernel/kgdb.c 2012-02-14 08:38:08.739358321 -0800 +++ b/kernel/kgdb.c 2012-02-14 10:50:16.151243352 -0800 @@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1) 
*/ static atomic_t passive_cpu_wait[NR_CPUS]; static atomic_t cpu_in_kgdb[NR_CPUS]; -atomic_t kgdb_setting_breakpoint; +atomic_unchecked_t kgdb_setting_breakpoint; struct task_struct *kgdb_usethread; struct task_struct *kgdb_contthread; @@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBY sizeof(unsigned long)]; /* to keep track of the CPU which is doing the single stepping*/ -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1); +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1); /* * If you are debugging a problem where roundup (the collection of @@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait) return 0; if (kgdb_connected) return 1; - if (atomic_read(&kgdb_setting_breakpoint)) + if (atomic_read_unchecked(&kgdb_setting_breakpoint)) return 1; if (print_wait) printk(KERN_CRIT "KGDB: Waiting for remote debugger\n"); @@ -1426,8 +1426,8 @@ acquirelock: * instance of the exception handler wanted to come into the * debugger on a different CPU via a single step */ - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 && - atomic_read(&kgdb_cpu_doing_single_step) != cpu) { + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 && + atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) { atomic_set(&kgdb_active, -1); touch_softlockup_watchdog(); @@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_mod */ void kgdb_breakpoint(void) { - atomic_set(&kgdb_setting_breakpoint, 1); + atomic_set_unchecked(&kgdb_setting_breakpoint, 1); wmb(); /* Sync point before breakpoint */ arch_kgdb_breakpoint(); wmb(); /* Sync point after breakpoint */ - atomic_set(&kgdb_setting_breakpoint, 0); + atomic_set_unchecked(&kgdb_setting_breakpoint, 0); } EXPORT_SYMBOL_GPL(kgdb_breakpoint); diff -purN a/kernel/lockdep.c b/kernel/lockdep.c --- a/kernel/lockdep.c 2012-02-14 08:38:08.751358321 -0800 +++ b/kernel/lockdep.c 2012-02-14 10:50:16.163243351 -0800 @@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_t /* * Various lockdep 
statistics: */ -atomic_t chain_lookup_hits; -atomic_t chain_lookup_misses; -atomic_t hardirqs_on_events; -atomic_t hardirqs_off_events; -atomic_t redundant_hardirqs_on; -atomic_t redundant_hardirqs_off; -atomic_t softirqs_on_events; -atomic_t softirqs_off_events; -atomic_t redundant_softirqs_on; -atomic_t redundant_softirqs_off; -atomic_t nr_unused_locks; -atomic_t nr_cyclic_checks; -atomic_t nr_find_usage_forwards_checks; -atomic_t nr_find_usage_backwards_checks; +atomic_unchecked_t chain_lookup_hits; +atomic_unchecked_t chain_lookup_misses; +atomic_unchecked_t hardirqs_on_events; +atomic_unchecked_t hardirqs_off_events; +atomic_unchecked_t redundant_hardirqs_on; +atomic_unchecked_t redundant_hardirqs_off; +atomic_unchecked_t softirqs_on_events; +atomic_unchecked_t softirqs_off_events; +atomic_unchecked_t redundant_softirqs_on; +atomic_unchecked_t redundant_softirqs_off; +atomic_unchecked_t nr_unused_locks; +atomic_unchecked_t nr_cyclic_checks; +atomic_unchecked_t nr_find_usage_forwards_checks; +atomic_unchecked_t nr_find_usage_backwards_checks; #endif /* @@ -2751,7 +2751,7 @@ static int __lock_acquire(struct lockdep if (!class) return 0; } - debug_atomic_inc((atomic_t *)&class->ops); + debug_atomic_inc((atomic_unchecked_t *)&class->ops); if (very_verbose(class)) { printk("\nacquire class [%p] %s", class->key, class->name); if (class->name_version > 1) diff -purN a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h --- a/kernel/lockdep_internals.h 2012-02-14 08:38:08.751358321 -0800 +++ b/kernel/lockdep_internals.h 2012-02-14 10:50:16.163243351 -0800 @@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_ /* * Various lockdep statistics: */ -extern atomic_t chain_lookup_hits; -extern atomic_t chain_lookup_misses; -extern atomic_t hardirqs_on_events; -extern atomic_t hardirqs_off_events; -extern atomic_t redundant_hardirqs_on; -extern atomic_t redundant_hardirqs_off; -extern atomic_t softirqs_on_events; -extern atomic_t softirqs_off_events; -extern 
atomic_t redundant_softirqs_on; -extern atomic_t redundant_softirqs_off; -extern atomic_t nr_unused_locks; -extern atomic_t nr_cyclic_checks; -extern atomic_t nr_cyclic_check_recursions; -extern atomic_t nr_find_usage_forwards_checks; -extern atomic_t nr_find_usage_forwards_recursions; -extern atomic_t nr_find_usage_backwards_checks; -extern atomic_t nr_find_usage_backwards_recursions; -# define debug_atomic_inc(ptr) atomic_inc(ptr) -# define debug_atomic_dec(ptr) atomic_dec(ptr) -# define debug_atomic_read(ptr) atomic_read(ptr) +extern atomic_unchecked_t chain_lookup_hits; +extern atomic_unchecked_t chain_lookup_misses; +extern atomic_unchecked_t hardirqs_on_events; +extern atomic_unchecked_t hardirqs_off_events; +extern atomic_unchecked_t redundant_hardirqs_on; +extern atomic_unchecked_t redundant_hardirqs_off; +extern atomic_unchecked_t softirqs_on_events; +extern atomic_unchecked_t softirqs_off_events; +extern atomic_unchecked_t redundant_softirqs_on; +extern atomic_unchecked_t redundant_softirqs_off; +extern atomic_unchecked_t nr_unused_locks; +extern atomic_unchecked_t nr_cyclic_checks; +extern atomic_unchecked_t nr_cyclic_check_recursions; +extern atomic_unchecked_t nr_find_usage_forwards_checks; +extern atomic_unchecked_t nr_find_usage_forwards_recursions; +extern atomic_unchecked_t nr_find_usage_backwards_checks; +extern atomic_unchecked_t nr_find_usage_backwards_recursions; +# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr) +# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr) +# define debug_atomic_read(ptr) atomic_read_unchecked(ptr) #else # define debug_atomic_inc(ptr) do { } while (0) # define debug_atomic_dec(ptr) do { } while (0) diff -purN a/kernel/perf_event.c b/kernel/perf_event.c --- a/kernel/perf_event.c 2012-02-14 08:38:08.751358321 -0800 +++ b/kernel/perf_event.c 2012-02-14 10:50:16.163243351 -0800 @@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostl */ int sysctl_perf_event_sample_rate __read_mostly = 100000; -static 
atomic64_t perf_event_id; +static atomic64_unchecked_t perf_event_id; /* * Lock for (sysadmin-configurable) event reservations: @@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struc * In order to keep per-task stats reliable we need to flip the event * values when we flip the contexts. */ - value = atomic64_read(&next_event->count); - value = atomic64_xchg(&event->count, value); - atomic64_set(&next_event->count, value); + value = atomic64_read_unchecked(&next_event->count); + value = atomic64_xchg_unchecked(&event->count, value); + atomic64_set_unchecked(&next_event->count, value); swap(event->total_time_enabled, next_event->total_time_enabled); swap(event->total_time_running, next_event->total_time_running); @@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_e update_event_times(event); } - return atomic64_read(&event->count); + return atomic64_read_unchecked(&event->count); } /* @@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct values[n++] = 1 + leader->nr_siblings; if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { values[n++] = leader->total_time_enabled + - atomic64_read(&leader->child_total_time_enabled); + atomic64_read_unchecked(&leader->child_total_time_enabled); } if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { values[n++] = leader->total_time_running + - atomic64_read(&leader->child_total_time_running); + atomic64_read_unchecked(&leader->child_total_time_running); } size = n * sizeof(u64); @@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct pe values[n++] = perf_event_read_value(event); if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { values[n++] = event->total_time_enabled + - atomic64_read(&event->child_total_time_enabled); + atomic64_read_unchecked(&event->child_total_time_enabled); } if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { values[n++] = event->total_time_running + - atomic64_read(&event->child_total_time_running); + atomic64_read_unchecked(&event->child_total_time_running); } if 
(read_format & PERF_FORMAT_ID) values[n++] = primary_event_id(event); @@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct fil static void perf_event_reset(struct perf_event *event) { (void)perf_event_read(event); - atomic64_set(&event->count, 0); + atomic64_set_unchecked(&event->count, 0); perf_event_update_userpage(event); } @@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct p ++userpg->lock; barrier(); userpg->index = perf_event_index(event); - userpg->offset = atomic64_read(&event->count); + userpg->offset = atomic64_read_unchecked(&event->count); if (event->state == PERF_EVENT_STATE_ACTIVE) - userpg->offset -= atomic64_read(&event->hw.prev_count); + userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count); userpg->time_enabled = event->total_time_enabled + - atomic64_read(&event->child_total_time_enabled); + atomic64_read_unchecked(&event->child_total_time_enabled); userpg->time_running = event->total_time_running + - atomic64_read(&event->child_total_time_running); + atomic64_read_unchecked(&event->child_total_time_running); barrier(); ++userpg->lock; @@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct u64 values[4]; int n = 0; - values[n++] = atomic64_read(&event->count); + values[n++] = atomic64_read_unchecked(&event->count); if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { values[n++] = event->total_time_enabled + - atomic64_read(&event->child_total_time_enabled); + atomic64_read_unchecked(&event->child_total_time_enabled); } if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { values[n++] = event->total_time_running + - atomic64_read(&event->child_total_time_running); + atomic64_read_unchecked(&event->child_total_time_running); } if (read_format & PERF_FORMAT_ID) values[n++] = primary_event_id(event); @@ -2940,7 +2940,7 @@ static void perf_output_read_group(struc if (leader != event) leader->pmu->read(leader); - values[n++] = atomic64_read(&leader->count); + values[n++] = atomic64_read_unchecked(&leader->count); if 
(read_format & PERF_FORMAT_ID) values[n++] = primary_event_id(leader); @@ -2952,7 +2952,7 @@ static void perf_output_read_group(struc if (sub != event) sub->pmu->read(sub); - values[n++] = atomic64_read(&sub->count); + values[n++] = atomic64_read_unchecked(&sub->count); if (read_format & PERF_FORMAT_ID) values[n++] = primary_event_id(sub); @@ -3783,7 +3783,7 @@ static void perf_swevent_add(struct perf { struct hw_perf_event *hwc = &event->hw; - atomic64_add(nr, &event->count); + atomic64_add_unchecked(nr, &event->count); if (!hwc->sample_period) return; @@ -4040,9 +4040,9 @@ static void cpu_clock_perf_event_update( u64 now; now = cpu_clock(cpu); - prev = atomic64_read(&event->hw.prev_count); - atomic64_set(&event->hw.prev_count, now); - atomic64_add(now - prev, &event->count); + prev = atomic64_read_unchecked(&event->hw.prev_count); + atomic64_set_unchecked(&event->hw.prev_count, now); + atomic64_add_unchecked(now - prev, &event->count); } static int cpu_clock_perf_event_enable(struct perf_event *event) @@ -4050,7 +4050,7 @@ static int cpu_clock_perf_event_enable(s struct hw_perf_event *hwc = &event->hw; int cpu = raw_smp_processor_id(); - atomic64_set(&hwc->prev_count, cpu_clock(cpu)); + atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu)); perf_swevent_start_hrtimer(event); return 0; @@ -4082,9 +4082,9 @@ static void task_clock_perf_event_update u64 prev; s64 delta; - prev = atomic64_xchg(&event->hw.prev_count, now); + prev = atomic64_xchg_unchecked(&event->hw.prev_count, now); delta = now - prev; - atomic64_add(delta, &event->count); + atomic64_add_unchecked(delta, &event->count); } static int task_clock_perf_event_enable(struct perf_event *event) @@ -4094,7 +4094,7 @@ static int task_clock_perf_event_enable( now = event->ctx->time; - atomic64_set(&hwc->prev_count, now); + atomic64_set_unchecked(&hwc->prev_count, now); perf_swevent_start_hrtimer(event); @@ -4289,7 +4289,7 @@ perf_event_alloc(struct perf_event_attr event->parent = parent_event; event->ns = 
get_pid_ns(current->nsproxy->pid_ns); - event->id = atomic64_inc_return(&perf_event_id); + event->id = atomic64_inc_return_unchecked(&perf_event_id); event->state = PERF_EVENT_STATE_INACTIVE; @@ -4720,15 +4720,15 @@ static void sync_child_event(struct perf if (child_event->attr.inherit_stat) perf_event_read_event(child_event, child); - child_val = atomic64_read(&child_event->count); + child_val = atomic64_read_unchecked(&child_event->count); /* * Add back the child's count to the parent's count: */ - atomic64_add(child_val, &parent_event->count); - atomic64_add(child_event->total_time_enabled, + atomic64_add_unchecked(child_val, &parent_event->count); + atomic64_add_unchecked(child_event->total_time_enabled, &parent_event->child_total_time_enabled); - atomic64_add(child_event->total_time_running, + atomic64_add_unchecked(child_event->total_time_running, &parent_event->child_total_time_running); /* diff -purN a/kernel/profile.c b/kernel/profile.c --- a/kernel/profile.c 2012-02-14 08:38:08.751358321 -0800 +++ b/kernel/profile.c 2012-02-14 10:50:16.167243352 -0800 @@ -39,7 +39,7 @@ struct profile_hit { /* Oprofile timer tick hook */ static int (*timer_hook)(struct pt_regs *) __read_mostly; -static atomic_t *prof_buffer; +static atomic_unchecked_t *prof_buffer; static unsigned long prof_len, prof_shift; int prof_on __read_mostly; @@ -283,7 +283,7 @@ static void profile_flip_buffers(void) hits[i].pc = 0; continue; } - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]); + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]); hits[i].hits = hits[i].pc = 0; } } @@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc, * Add the current hit(s) and flush the write-queue out * to the global buffer: */ - atomic_add(nr_hits, &prof_buffer[pc]); + atomic_add_unchecked(nr_hits, &prof_buffer[pc]); for (i = 0; i < NR_PROFILE_HIT; ++i) { - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]); + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]); hits[i].pc = 
hits[i].hits = 0; } out: @@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc, if (prof_on != type || !prof_buffer) return; pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift; - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]); + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]); } #endif /* !CONFIG_SMP */ EXPORT_SYMBOL_GPL(profile_hits); @@ -517,7 +517,7 @@ read_profile(struct file *file, char __u return -EFAULT; buf++; p++; count--; read++; } - pnt = (char *)prof_buffer + p - sizeof(atomic_t); + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t); if (copy_to_user(buf, (void *)pnt, count)) return -EFAULT; read += count; @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file } #endif profile_discard_flip_buffers(); - memset(prof_buffer, 0, prof_len * sizeof(atomic_t)); + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t)); return count; } diff -purN a/kernel/rcutorture.c b/kernel/rcutorture.c --- a/kernel/rcutorture.c 2012-02-14 08:38:08.739358321 -0800 +++ b/kernel/rcutorture.c 2012-02-14 10:50:16.171243352 -0800 @@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_ { 0 }; static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) = { 0 }; -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1]; -static atomic_t n_rcu_torture_alloc; -static atomic_t n_rcu_torture_alloc_fail; -static atomic_t n_rcu_torture_free; -static atomic_t n_rcu_torture_mberror; -static atomic_t n_rcu_torture_error; +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1]; +static atomic_unchecked_t n_rcu_torture_alloc; +static atomic_unchecked_t n_rcu_torture_alloc_fail; +static atomic_unchecked_t n_rcu_torture_free; +static atomic_unchecked_t n_rcu_torture_mberror; +static atomic_unchecked_t n_rcu_torture_error; static long n_rcu_torture_timers; static struct list_head rcu_torture_removed; static cpumask_var_t shuffle_tmp_mask; @@ -187,11 +187,11 @@ rcu_torture_alloc(void) 
spin_lock_bh(&rcu_torture_lock); if (list_empty(&rcu_torture_freelist)) { - atomic_inc(&n_rcu_torture_alloc_fail); + atomic_inc_unchecked(&n_rcu_torture_alloc_fail); spin_unlock_bh(&rcu_torture_lock); return NULL; } - atomic_inc(&n_rcu_torture_alloc); + atomic_inc_unchecked(&n_rcu_torture_alloc); p = rcu_torture_freelist.next; list_del_init(p); spin_unlock_bh(&rcu_torture_lock); @@ -204,7 +204,7 @@ rcu_torture_alloc(void) static void rcu_torture_free(struct rcu_torture *p) { - atomic_inc(&n_rcu_torture_free); + atomic_inc_unchecked(&n_rcu_torture_free); spin_lock_bh(&rcu_torture_lock); list_add_tail(&p->rtort_free, &rcu_torture_freelist); spin_unlock_bh(&rcu_torture_lock); @@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p) i = rp->rtort_pipe_count; if (i > RCU_TORTURE_PIPE_LEN) i = RCU_TORTURE_PIPE_LEN; - atomic_inc(&rcu_torture_wcount[i]); + atomic_inc_unchecked(&rcu_torture_wcount[i]); if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) { rp->rtort_mbtest = 0; rcu_torture_free(rp); @@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_fr i = rp->rtort_pipe_count; if (i > RCU_TORTURE_PIPE_LEN) i = RCU_TORTURE_PIPE_LEN; - atomic_inc(&rcu_torture_wcount[i]); + atomic_inc_unchecked(&rcu_torture_wcount[i]); if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) { rp->rtort_mbtest = 0; list_del(&rp->rtort_free); @@ -653,7 +653,7 @@ rcu_torture_writer(void *arg) i = old_rp->rtort_pipe_count; if (i > RCU_TORTURE_PIPE_LEN) i = RCU_TORTURE_PIPE_LEN; - atomic_inc(&rcu_torture_wcount[i]); + atomic_inc_unchecked(&rcu_torture_wcount[i]); old_rp->rtort_pipe_count++; cur_ops->deferred_free(old_rp); } @@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned l return; } if (p->rtort_mbtest == 0) - atomic_inc(&n_rcu_torture_mberror); + atomic_inc_unchecked(&n_rcu_torture_mberror); spin_lock(&rand_lock); cur_ops->read_delay(&rand); n_rcu_torture_timers++; @@ -776,7 +776,7 @@ rcu_torture_reader(void *arg) continue; } if (p->rtort_mbtest == 0) - 
atomic_inc(&n_rcu_torture_mberror); + atomic_inc_unchecked(&n_rcu_torture_mberror); cur_ops->read_delay(&rand); preempt_disable(); pipe_count = p->rtort_pipe_count; @@ -834,17 +834,17 @@ rcu_torture_printk(char *page) rcu_torture_current, rcu_torture_current_version, list_empty(&rcu_torture_freelist), - atomic_read(&n_rcu_torture_alloc), - atomic_read(&n_rcu_torture_alloc_fail), - atomic_read(&n_rcu_torture_free), - atomic_read(&n_rcu_torture_mberror), + atomic_read_unchecked(&n_rcu_torture_alloc), + atomic_read_unchecked(&n_rcu_torture_alloc_fail), + atomic_read_unchecked(&n_rcu_torture_free), + atomic_read_unchecked(&n_rcu_torture_mberror), n_rcu_torture_timers); - if (atomic_read(&n_rcu_torture_mberror) != 0) + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0) cnt += sprintf(&page[cnt], " !!!"); cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG); if (i > 1) { cnt += sprintf(&page[cnt], "!!! "); - atomic_inc(&n_rcu_torture_error); + atomic_inc_unchecked(&n_rcu_torture_error); WARN_ON_ONCE(1); } cnt += sprintf(&page[cnt], "Reader Pipe: "); @@ -858,7 +858,7 @@ rcu_torture_printk(char *page) cnt += sprintf(&page[cnt], "Free-Block Circulation: "); for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { cnt += sprintf(&page[cnt], " %d", - atomic_read(&rcu_torture_wcount[i])); + atomic_read_unchecked(&rcu_torture_wcount[i])); } cnt += sprintf(&page[cnt], "\n"); if (cur_ops->stats) @@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void) if (cur_ops->cleanup) cur_ops->cleanup(); - if (atomic_read(&n_rcu_torture_error)) + if (atomic_read_unchecked(&n_rcu_torture_error)) rcu_torture_print_module_parms("End of test: FAILURE"); else rcu_torture_print_module_parms("End of test: SUCCESS"); @@ -1138,13 +1138,13 @@ rcu_torture_init(void) rcu_torture_current = NULL; rcu_torture_current_version = 0; - atomic_set(&n_rcu_torture_alloc, 0); - atomic_set(&n_rcu_torture_alloc_fail, 0); - atomic_set(&n_rcu_torture_free, 0); - atomic_set(&n_rcu_torture_mberror, 0); - 
atomic_set(&n_rcu_torture_error, 0); + atomic_set_unchecked(&n_rcu_torture_alloc, 0); + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0); + atomic_set_unchecked(&n_rcu_torture_free, 0); + atomic_set_unchecked(&n_rcu_torture_mberror, 0); + atomic_set_unchecked(&n_rcu_torture_error, 0); for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) - atomic_set(&rcu_torture_wcount[i], 0); + atomic_set_unchecked(&rcu_torture_wcount[i], 0); for_each_possible_cpu(cpu) { for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { per_cpu(rcu_torture_count, cpu)[i] = 0; diff -purN a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c --- a/kernel/rtmutex-tester.c 2012-02-14 08:38:08.747358321 -0800 +++ b/kernel/rtmutex-tester.c 2012-02-14 10:50:16.175243351 -0800 @@ -21,7 +21,7 @@ #define MAX_RT_TEST_MUTEXES 8 static spinlock_t rttest_lock; -static atomic_t rttest_event; +static atomic_unchecked_t rttest_event; struct test_thread_data { int opcode; @@ -64,7 +64,7 @@ static int handle_op(struct test_thread_ case RTTEST_LOCKCONT: td->mutexes[td->opdata] = 1; - td->event = atomic_add_return(1, &rttest_event); + td->event = atomic_add_return_unchecked(1, &rttest_event); return 0; case RTTEST_RESET: @@ -82,7 +82,7 @@ static int handle_op(struct test_thread_ return 0; case RTTEST_RESETEVENT: - atomic_set(&rttest_event, 0); + atomic_set_unchecked(&rttest_event, 0); return 0; default: @@ -99,9 +99,9 @@ static int handle_op(struct test_thread_ return ret; td->mutexes[id] = 1; - td->event = atomic_add_return(1, &rttest_event); + td->event = atomic_add_return_unchecked(1, &rttest_event); rt_mutex_lock(&mutexes[id]); - td->event = atomic_add_return(1, &rttest_event); + td->event = atomic_add_return_unchecked(1, &rttest_event); td->mutexes[id] = 4; return 0; @@ -112,9 +112,9 @@ static int handle_op(struct test_thread_ return ret; td->mutexes[id] = 1; - td->event = atomic_add_return(1, &rttest_event); + td->event = atomic_add_return_unchecked(1, &rttest_event); ret = rt_mutex_lock_interruptible(&mutexes[id], 0); 
- td->event = atomic_add_return(1, &rttest_event); + td->event = atomic_add_return_unchecked(1, &rttest_event); td->mutexes[id] = ret ? 0 : 4; return ret ? -EINTR : 0; @@ -123,9 +123,9 @@ static int handle_op(struct test_thread_ if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4) return ret; - td->event = atomic_add_return(1, &rttest_event); + td->event = atomic_add_return_unchecked(1, &rttest_event); rt_mutex_unlock(&mutexes[id]); - td->event = atomic_add_return(1, &rttest_event); + td->event = atomic_add_return_unchecked(1, &rttest_event); td->mutexes[id] = 0; return 0; @@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mu break; td->mutexes[dat] = 2; - td->event = atomic_add_return(1, &rttest_event); + td->event = atomic_add_return_unchecked(1, &rttest_event); break; case RTTEST_LOCKBKL: @@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mu return; td->mutexes[dat] = 3; - td->event = atomic_add_return(1, &rttest_event); + td->event = atomic_add_return_unchecked(1, &rttest_event); break; case RTTEST_LOCKNOWAIT: @@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mu return; td->mutexes[dat] = 1; - td->event = atomic_add_return(1, &rttest_event); + td->event = atomic_add_return_unchecked(1, &rttest_event); return; case RTTEST_LOCKBKL: diff -purN a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c --- a/kernel/time/timer_stats.c 2012-02-14 08:38:08.779358319 -0800 +++ b/kernel/time/timer_stats.c 2012-02-14 10:50:16.179243350 -0800 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop; static unsigned long nr_entries; static struct entry entries[MAX_ENTRIES]; -static atomic_t overflow_count; +static atomic_unchecked_t overflow_count; /* * The entries are in a hash-table, for fast lookup: @@ -140,7 +140,7 @@ static void reset_entries(void) nr_entries = 0; memset(entries, 0, sizeof(entries)); memset(tstat_hash_table, 0, sizeof(tstat_hash_table)); - atomic_set(&overflow_count, 0); + atomic_set_unchecked(&overflow_count, 0); } 
static struct entry *alloc_entry(void) @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time if (likely(entry)) entry->count++; else - atomic_inc(&overflow_count); + atomic_inc_unchecked(&overflow_count); out_unlock: spin_unlock_irqrestore(lock, flags); @@ -300,9 +300,9 @@ static int tstats_show(struct seq_file * seq_puts(m, "Timer Stats Version: v0.2\n"); seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms); - if (atomic_read(&overflow_count)) + if (atomic_read_unchecked(&overflow_count)) seq_printf(m, "Overflow: %d entries\n", - atomic_read(&overflow_count)); + atomic_read_unchecked(&overflow_count)); for (i = 0; i < nr_entries; i++) { entry = entries + i; diff -purN a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c --- a/kernel/trace/blktrace.c 2012-02-14 08:38:08.791358320 -0800 +++ b/kernel/trace/blktrace.c 2012-02-14 10:50:16.179243350 -0800 @@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct f struct blk_trace *bt = filp->private_data; char buf[16]; - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped)); + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped)); return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); } @@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(str return 1; bt = buf->chan->private_data; - atomic_inc(&bt->dropped); + atomic_inc_unchecked(&bt->dropped); return 0; } @@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_qu bt->dir = dir; bt->dev = dev; - atomic_set(&bt->dropped, 0); + atomic_set_unchecked(&bt->dropped, 0); ret = -EIO; bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt, diff -purN a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c --- a/kernel/trace/trace_mmiotrace.c 2012-02-14 08:38:08.787358320 -0800 +++ b/kernel/trace/trace_mmiotrace.c 2012-02-14 10:50:16.183243350 -0800 @@ -23,7 +23,7 @@ struct header_iter { static struct trace_array *mmio_trace_array; static bool overrun_detected; static unsigned long 
prev_overruns; -static atomic_t dropped_count; +static atomic_unchecked_t dropped_count; static void mmio_reset_data(struct trace_array *tr) { @@ -126,7 +126,7 @@ static void mmio_close(struct trace_iter static unsigned long count_overruns(struct trace_iterator *iter) { - unsigned long cnt = atomic_xchg(&dropped_count, 0); + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0); unsigned long over = ring_buffer_overruns(iter->tr->buffer); if (over > prev_overruns) @@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW, sizeof(*entry), 0, pc); if (!event) { - atomic_inc(&dropped_count); + atomic_inc_unchecked(&dropped_count); return; } entry = ring_buffer_event_data(event); @@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP, sizeof(*entry), 0, pc); if (!event) { - atomic_inc(&dropped_count); + atomic_inc_unchecked(&dropped_count); return; } entry = ring_buffer_event_data(event); diff -purN a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c --- a/kernel/trace/trace_workqueue.c 2012-02-14 08:38:08.791358320 -0800 +++ b/kernel/trace/trace_workqueue.c 2012-02-14 10:50:16.183243350 -0800 @@ -21,7 +21,7 @@ struct cpu_workqueue_stats { int cpu; pid_t pid; /* Can be inserted from interrupt or user context, need to be atomic */ - atomic_t inserted; + atomic_unchecked_t inserted; /* * Don't need to be atomic, works are serialized in a single workqueue thread * on a single CPU. 
@@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_st spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags); list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) { if (node->pid == wq_thread->pid) { - atomic_inc(&node->inserted); + atomic_inc_unchecked(&node->inserted); goto found; } } @@ -205,7 +205,7 @@ static int workqueue_stat_show(struct se tsk = get_pid_task(pid, PIDTYPE_PID); if (tsk) { seq_printf(s, "%3d %6d %6u %s\n", cws->cpu, - atomic_read(&cws->inserted), cws->executed, + atomic_read_unchecked(&cws->inserted), cws->executed, tsk->comm); put_task_struct(tsk); } diff -purN a/mm/memory-failure.c b/mm/memory-failure.c --- a/mm/memory-failure.c 2012-02-14 08:38:03.927358538 -0800 +++ b/mm/memory-failure.c 2012-02-14 10:50:16.187243351 -0800 @@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __r int sysctl_memory_failure_recovery __read_mostly = 1; -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0); +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0); /* * Send all the processes who have the page mapped an ``action optional'' @@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn, return 0; } - atomic_long_add(1, &mce_bad_pages); + atomic_long_add_unchecked(1, &mce_bad_pages); /* * We need/can do nothing about count=0 pages. 
diff -purN a/mm/slab.c b/mm/slab.c --- a/mm/slab.c 2012-02-14 08:38:03.927358538 -0800 +++ b/mm/slab.c 2012-02-14 10:50:16.187243351 -0800 @@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_ if ((x)->max_freeable < i) \ (x)->max_freeable = i; \ } while (0) -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit) -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss) -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit) -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss) +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit) +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss) +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit) +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss) #else #define STATS_INC_ACTIVE(x) do { } while (0) #define STATS_DEC_ACTIVE(x) do { } while (0) @@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, vo } /* cpu stats */ { - unsigned long allochit = atomic_read(&cachep->allochit); - unsigned long allocmiss = atomic_read(&cachep->allocmiss); - unsigned long freehit = atomic_read(&cachep->freehit); - unsigned long freemiss = atomic_read(&cachep->freemiss); + unsigned long allochit = atomic_read_unchecked(&cachep->allochit); + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss); + unsigned long freehit = atomic_read_unchecked(&cachep->freehit); + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss); seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu", allochit, allocmiss, freehit, freemiss); diff -purN a/mm/vmstat.c b/mm/vmstat.c --- a/mm/vmstat.c 2012-02-14 08:38:03.935358537 -0800 +++ b/mm/vmstat.c 2012-02-14 10:50:16.191243351 -0800 @@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu) * * vm_stat contains the global counters */ -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; EXPORT_SYMBOL(vm_stat); #ifdef CONFIG_SMP @@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu) v 
= p->vm_stat_diff[i]; p->vm_stat_diff[i] = 0; local_irq_restore(flags); - atomic_long_add(v, &zone->vm_stat[i]); + atomic_long_add_unchecked(v, &zone->vm_stat[i]); global_diff[i] += v; #ifdef CONFIG_NUMA /* 3 seconds idle till flush */ @@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu) for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) if (global_diff[i]) - atomic_long_add(global_diff[i], &vm_stat[i]); + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]); } #endif diff -purN a/net/atm/atm_misc.c b/net/atm/atm_misc.c --- a/net/atm/atm_misc.c 2012-02-14 08:38:08.335358340 -0800 +++ b/net/atm/atm_misc.c 2012-02-14 10:50:16.195243350 -0800 @@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int t if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf) return 1; atm_return(vcc,truesize); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); return 0; } @@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct } } atm_return(vcc,guess); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); return NULL; } @@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafpr void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to) { -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i) +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i) __SONET_ITEMS #undef __HANDLE_ITEM } @@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_sta void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to) { -#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i) +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i) __SONET_ITEMS #undef __HANDLE_ITEM } diff -purN a/net/atm/proc.c b/net/atm/proc.c --- a/net/atm/proc.c 2012-02-14 08:38:08.335358340 -0800 +++ b/net/atm/proc.c 2012-02-14 10:50:16.195243350 -0800 @@ -43,9 +43,9 @@ static void add_stats(struct seq_file *s const struct k_atm_aal_stats *stats) { seq_printf(seq, "%s ( %d %d %d %d %d )", aal, - 
atomic_read(&stats->tx),atomic_read(&stats->tx_err), - atomic_read(&stats->rx),atomic_read(&stats->rx_err), - atomic_read(&stats->rx_drop)); + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err), + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err), + atomic_read_unchecked(&stats->rx_drop)); } static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev) diff -purN a/net/atm/resources.c b/net/atm/resources.c --- a/net/atm/resources.c 2012-02-14 08:38:08.335358340 -0800 +++ b/net/atm/resources.c 2012-02-14 10:50:16.195243350 -0800 @@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev * static void copy_aal_stats(struct k_atm_aal_stats *from, struct atm_aal_stats *to) { -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i) +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i) __AAL_STAT_ITEMS #undef __HANDLE_ITEM } @@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_ static void subtract_aal_stats(struct k_atm_aal_stats *from, struct atm_aal_stats *to) { -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i) +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i) __AAL_STAT_ITEMS #undef __HANDLE_ITEM } diff -purN a/net/core/flow.c b/net/core/flow.c --- a/net/core/flow.c 2012-02-14 08:38:08.467358334 -0800 +++ b/net/core/flow.c 2012-02-14 10:50:16.195243350 -0800 @@ -35,7 +35,7 @@ struct flow_cache_entry { atomic_t *object_ref; }; -atomic_t flow_cache_genid = ATOMIC_INIT(0); +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0); static u32 flow_hash_shift; #define flow_hash_size (1 << flow_hash_shift) @@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net, if (fle->family == family && fle->dir == dir && flow_key_compare(key, &fle->key) == 0) { - if (fle->genid == atomic_read(&flow_cache_genid)) { + if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) { void *ret = fle->object; if (ret) @@ -228,7 +228,7 @@ nocache: err = resolver(net, key, family, dir, &obj, &obj_ref); if 
(fle && !err) { - fle->genid = atomic_read(&flow_cache_genid); + fle->genid = atomic_read_unchecked(&flow_cache_genid); if (fle->object) atomic_dec(fle->object_ref); @@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(uns fle = flow_table(cpu)[i]; for (; fle; fle = fle->next) { - unsigned genid = atomic_read(&flow_cache_genid); + unsigned genid = atomic_read_unchecked(&flow_cache_genid); if (!fle->object || fle->genid == genid) continue; diff -purN a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c --- a/net/ieee802154/dgram.c 2012-02-14 08:38:08.695358324 -0800 +++ b/net/ieee802154/dgram.c 2012-02-14 10:50:16.195243350 -0800 @@ -318,7 +318,7 @@ out: static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb) { if (sock_queue_rcv_skb(sk, skb) < 0) { - atomic_inc(&sk->sk_drops); + atomic_inc_unchecked(&sk->sk_drops); kfree_skb(skb); return NET_RX_DROP; } diff -purN a/net/ieee802154/raw.c b/net/ieee802154/raw.c --- a/net/ieee802154/raw.c 2012-02-14 08:38:08.695358324 -0800 +++ b/net/ieee802154/raw.c 2012-02-14 10:50:16.203243349 -0800 @@ -206,7 +206,7 @@ out: static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb) { if (sock_queue_rcv_skb(sk, skb) < 0) { - atomic_inc(&sk->sk_drops); + atomic_inc_unchecked(&sk->sk_drops); kfree_skb(skb); return NET_RX_DROP; } diff -purN a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c --- a/net/ipv4/inetpeer.c 2012-02-14 08:38:08.591358328 -0800 +++ b/net/ipv4/inetpeer.c 2012-02-14 10:50:16.203243349 -0800 @@ -390,7 +390,7 @@ struct inet_peer *inet_getpeer(__be32 da return NULL; n->v4daddr = daddr; atomic_set(&n->refcnt, 1); - atomic_set(&n->rid, 0); + atomic_set_unchecked(&n->rid, 0); n->ip_id_count = secure_ip_id(daddr); n->tcp_ts_stamp = 0; diff -purN a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c --- a/net/ipv4/ip_fragment.c 2012-02-14 08:38:08.591358328 -0800 +++ b/net/ipv4/ip_fragment.c 2012-02-14 10:50:16.203243349 -0800 @@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct return 0; start = qp->rid; - end = 
atomic_inc_return(&peer->rid); + end = atomic_inc_return_unchecked(&peer->rid); qp->rid = end; rc = qp->q.fragments && (end - start) > max; diff -purN a/net/ipv4/raw.c b/net/ipv4/raw.c --- a/net/ipv4/raw.c 2012-02-14 08:38:08.591358328 -0800 +++ b/net/ipv4/raw.c 2012-02-14 10:50:16.203243349 -0800 @@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk, /* Charge it to the socket. */ if (sock_queue_rcv_skb(sk, skb) < 0) { - atomic_inc(&sk->sk_drops); + atomic_inc_unchecked(&sk->sk_drops); kfree_skb(skb); return NET_RX_DROP; } @@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk, int raw_rcv(struct sock *sk, struct sk_buff *skb) { if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) { - atomic_inc(&sk->sk_drops); + atomic_inc_unchecked(&sk->sk_drops); kfree_skb(skb); return NET_RX_DROP; } @@ -954,7 +954,7 @@ static void raw_sock_seq_show(struct seq sk_wmem_alloc_get(sp), sk_rmem_alloc_get(sp), 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops)); + atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops)); } static int raw_seq_show(struct seq_file *seq, void *v) diff -purN a/net/ipv4/route.c b/net/ipv4/route.c --- a/net/ipv4/route.c 2012-02-14 08:38:08.615358327 -0800 +++ b/net/ipv4/route.c 2012-02-14 10:50:16.207243349 -0800 @@ -269,7 +269,7 @@ static inline unsigned int rt_hash(__be3 static inline int rt_genid(struct net *net) { - return atomic_read(&net->ipv4.rt_genid); + return atomic_read_unchecked(&net->ipv4.rt_genid); } #ifdef CONFIG_PROC_FS @@ -889,7 +889,7 @@ static void rt_cache_invalidate(struct n unsigned char shuffle; get_random_bytes(&shuffle, sizeof(shuffle)); - atomic_add(shuffle + 1U, &net->ipv4.rt_genid); + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid); } /* @@ -3357,7 +3357,7 @@ static __net_initdata struct pernet_oper static __net_init int rt_secret_timer_init(struct net *net) { - atomic_set(&net->ipv4.rt_genid, + atomic_set_unchecked(&net->ipv4.rt_genid, (int) 
((num_physpages ^ (num_physpages>>8)) ^ (jiffies ^ (jiffies >> 7)))); diff -purN a/net/ipv4/udp.c b/net/ipv4/udp.c --- a/net/ipv4/udp.c 2012-02-14 08:38:08.579358329 -0800 +++ b/net/ipv4/udp.c 2012-02-14 10:50:16.211243349 -0800 @@ -1068,7 +1068,7 @@ static int __udp_queue_rcv_skb(struct so if (rc == -ENOMEM) { UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, is_udplite); - atomic_inc(&sk->sk_drops); + atomic_inc_unchecked(&sk->sk_drops); } goto drop; } @@ -1799,7 +1799,7 @@ static void udp4_format_sock(struct sock sk_rmem_alloc_get(sp), 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), atomic_read(&sp->sk_refcnt), sp, - atomic_read(&sp->sk_drops), len); + atomic_read_unchecked(&sp->sk_drops), len); } int udp4_seq_show(struct seq_file *seq, void *v) diff -purN a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c --- a/net/ipv6/inet6_connection_sock.c 2012-02-14 08:38:08.503358333 -0800 +++ b/net/ipv6/inet6_connection_sock.c 2012-02-14 10:50:16.211243349 -0800 @@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock * #ifdef CONFIG_XFRM { struct rt6_info *rt = (struct rt6_info *)dst; - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid); + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid); } #endif } @@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check( #ifdef CONFIG_XFRM if (dst) { struct rt6_info *rt = (struct rt6_info *)dst; - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) { + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) { sk->sk_dst_cache = NULL; dst_release(dst); dst = NULL; diff -purN a/net/ipv6/raw.c b/net/ipv6/raw.c --- a/net/ipv6/raw.c 2012-02-14 08:38:08.503358333 -0800 +++ b/net/ipv6/raw.c 2012-02-14 10:50:16.215243349 -0800 @@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct s { if ((raw6_sk(sk)->checksum || sk->sk_filter) && skb_checksum_complete(skb)) { - atomic_inc(&sk->sk_drops); + atomic_inc_unchecked(&sk->sk_drops); kfree_skb(skb); return 
NET_RX_DROP; } /* Charge it to the socket. */ if (sock_queue_rcv_skb(sk,skb)<0) { - atomic_inc(&sk->sk_drops); + atomic_inc_unchecked(&sk->sk_drops); kfree_skb(skb); return NET_RX_DROP; } @@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk struct raw6_sock *rp = raw6_sk(sk); if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) { - atomic_inc(&sk->sk_drops); + atomic_inc_unchecked(&sk->sk_drops); kfree_skb(skb); return NET_RX_DROP; } @@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk if (inet->hdrincl) { if (skb_checksum_complete(skb)) { - atomic_inc(&sk->sk_drops); + atomic_inc_unchecked(&sk->sk_drops); kfree_skb(skb); return NET_RX_DROP; } @@ -518,7 +518,7 @@ csum_copy_err: as some normal condition. */ err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH; - atomic_inc(&sk->sk_drops); + atomic_inc_unchecked(&sk->sk_drops); goto out; } @@ -1241,7 +1241,7 @@ static void raw6_sock_seq_show(struct se 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops)); + atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops)); } static int raw6_seq_show(struct seq_file *seq, void *v) diff -purN a/net/ipv6/udp.c b/net/ipv6/udp.c --- a/net/ipv6/udp.c 2012-02-14 08:38:08.499358333 -0800 +++ b/net/ipv6/udp.c 2012-02-14 10:50:16.219243349 -0800 @@ -391,7 +391,7 @@ int udpv6_queue_rcv_skb(struct sock * sk if (rc == -ENOMEM) { UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, is_udplite); - atomic_inc(&sk->sk_drops); + atomic_inc_unchecked(&sk->sk_drops); } goto drop; } @@ -1255,7 +1255,7 @@ static void udp6_sock_seq_show(struct se sock_i_uid(sp), 0, sock_i_ino(sp), atomic_read(&sp->sk_refcnt), sp, - atomic_read(&sp->sk_drops)); + atomic_read_unchecked(&sp->sk_drops)); } int udp6_seq_show(struct seq_file *seq, void *v) diff -purN a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c --- a/net/iucv/af_iucv.c 2012-02-14 08:38:08.571358328 -0800 +++ b/net/iucv/af_iucv.c 2012-02-14 10:50:16.219243349 -0800 @@ -651,10 
+651,10 @@ static int iucv_sock_autobind(struct soc write_lock_bh(&iucv_sk_list.lock); - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name)); + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name)); while (__iucv_get_sock_by_name(name)) { sprintf(name, "%08x", - atomic_inc_return(&iucv_sk_list.autobind_name)); + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name)); } write_unlock_bh(&iucv_sk_list.lock); diff -purN a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c --- a/net/netfilter/ipvs/ip_vs_conn.c 2012-02-14 08:38:08.699358323 -0800 +++ b/net/netfilter/ipvs/ip_vs_conn.c 2012-02-14 10:50:16.227243348 -0800 @@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s /* if the connection is not template and is created * by sync, preserve the activity flag. */ - cp->flags |= atomic_read(&dest->conn_flags) & + cp->flags |= atomic_read_unchecked(&dest->conn_flags) & (~IP_VS_CONN_F_INACTIVE); else - cp->flags |= atomic_read(&dest->conn_flags); + cp->flags |= atomic_read_unchecked(&dest->conn_flags); cp->dest = dest; IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d " @@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const atomic_set(&cp->refcnt, 1); atomic_set(&cp->n_control, 0); - atomic_set(&cp->in_pkts, 0); + atomic_set_unchecked(&cp->in_pkts, 0); atomic_inc(&ip_vs_conn_count); if (flags & IP_VS_CONN_F_NO_CPORT) @@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip /* Don't drop the entry if its number of incoming packets is not located in [0, 8] */ - i = atomic_read(&cp->in_pkts); + i = atomic_read_unchecked(&cp->in_pkts); if (i > 8 || i < 0) return 0; if (!todrop_rate[i]) return 0; diff -purN a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c --- a/net/netfilter/ipvs/ip_vs_core.c 2012-02-14 08:38:08.703358322 -0800 +++ b/net/netfilter/ipvs/ip_vs_core.c 2012-02-14 10:50:16.227243348 -0800 @@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *sv ret = 
cp->packet_xmit(skb, cp, pp); /* do not touch skb anymore */ - atomic_inc(&cp->in_pkts); + atomic_inc_unchecked(&cp->in_pkts); ip_vs_conn_put(cp); return ret; } @@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk * Sync connection if it is about to close to * encorage the standby servers to update the connections timeout */ - pkts = atomic_add_return(1, &cp->in_pkts); + pkts = atomic_add_return_unchecked(1, &cp->in_pkts); if (af == AF_INET && (ip_vs_sync_state & IP_VS_STATE_MASTER) && (((cp->protocol != IPPROTO_TCP || diff -purN a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c --- a/net/netfilter/ipvs/ip_vs_ctl.c 2012-02-14 08:38:08.703358322 -0800 +++ b/net/netfilter/ipvs/ip_vs_ctl.c 2012-02-14 10:50:16.231243348 -0800 @@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service ip_vs_rs_hash(dest); write_unlock_bh(&__ip_vs_rs_lock); } - atomic_set(&dest->conn_flags, conn_flags); + atomic_set_unchecked(&dest->conn_flags, conn_flags); /* bind the service */ if (!dest->svc) { @@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct se " %-7s %-6d %-10d %-10d\n", &dest->addr.in6, ntohs(dest->port), - ip_vs_fwd_name(atomic_read(&dest->conn_flags)), + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)), atomic_read(&dest->weight), atomic_read(&dest->activeconns), atomic_read(&dest->inactconns)); @@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct se "%-7s %-6d %-10d %-10d\n", ntohl(dest->addr.ip), ntohs(dest->port), - ip_vs_fwd_name(atomic_read(&dest->conn_flags)), + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)), atomic_read(&dest->weight), atomic_read(&dest->activeconns), atomic_read(&dest->inactconns)); @@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip entry.addr = dest->addr.ip; entry.port = dest->port; - entry.conn_flags = atomic_read(&dest->conn_flags); + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags); entry.weight = atomic_read(&dest->weight); entry.u_threshold = dest->u_threshold; 
entry.l_threshold = dest->l_threshold; @@ -2802,7 +2802,7 @@ static int ip_vs_genl_fill_dest(struct s NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port); NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD, - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK); + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK); NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight)); NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold); NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold); diff -purN a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c --- a/net/netfilter/ipvs/ip_vs_sync.c 2012-02-14 08:38:08.703358322 -0800 +++ b/net/netfilter/ipvs/ip_vs_sync.c 2012-02-14 10:50:16.231243348 -0800 @@ -438,7 +438,7 @@ static void ip_vs_process_message(const if (opt) memcpy(&cp->in_seq, opt, sizeof(*opt)); - atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]); + atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]); cp->state = state; cp->old_state = cp->state; /* diff -purN a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c --- a/net/netfilter/ipvs/ip_vs_xmit.c 2012-02-14 08:38:08.703358322 -0800 +++ b/net/netfilter/ipvs/ip_vs_xmit.c 2012-02-14 10:50:16.231243348 -0800 @@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str else rc = NF_ACCEPT; /* do not touch skb anymore */ - atomic_inc(&cp->in_pkts); + atomic_inc_unchecked(&cp->in_pkts); goto out; } @@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, else rc = NF_ACCEPT; /* do not touch skb anymore */ - atomic_inc(&cp->in_pkts); + atomic_inc_unchecked(&cp->in_pkts); goto out; } diff -purN a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c --- a/net/netfilter/nfnetlink_log.c 2012-02-14 08:38:08.699358323 -0800 +++ b/net/netfilter/nfnetlink_log.c 2012-02-14 10:50:16.235243349 -0800 @@ -68,7 +68,7 @@ struct nfulnl_instance { }; static DEFINE_RWLOCK(instances_lock); -static atomic_t global_seq; +static atomic_unchecked_t 
global_seq; #define INSTANCE_BUCKETS 16 static struct hlist_head instance_table[INSTANCE_BUCKETS]; @@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_ins /* global sequence number */ if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL, - htonl(atomic_inc_return(&global_seq))); + htonl(atomic_inc_return_unchecked(&global_seq))); if (data_len) { struct nlattr *nla; diff -purN a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c --- a/net/netlink/af_netlink.c 2012-02-14 08:38:08.419358336 -0800 +++ b/net/netlink/af_netlink.c 2012-02-14 10:50:16.239243349 -0800 @@ -733,7 +733,7 @@ static void netlink_overrun(struct sock sk->sk_error_report(sk); } } - atomic_inc(&sk->sk_drops); + atomic_inc_unchecked(&sk->sk_drops); } static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid) @@ -1972,7 +1972,7 @@ static int netlink_seq_show(struct seq_f sk_wmem_alloc_get(s), nlk->cb, atomic_read(&s->sk_refcnt), - atomic_read(&s->sk_drops) + atomic_read_unchecked(&s->sk_drops) ); } diff -purN a/net/phonet/datagram.c b/net/phonet/datagram.c --- a/net/phonet/datagram.c 2012-02-14 08:38:08.663358324 -0800 +++ b/net/phonet/datagram.c 2012-02-14 10:50:16.243243348 -0800 @@ -162,7 +162,7 @@ static int pn_backlog_rcv(struct sock *s if (err < 0) { kfree_skb(skb); if (err == -ENOMEM) - atomic_inc(&sk->sk_drops); + atomic_inc_unchecked(&sk->sk_drops); } return err ? 
NET_RX_DROP : NET_RX_SUCCESS; } diff -purN a/net/phonet/pep.c b/net/phonet/pep.c --- a/net/phonet/pep.c 2012-02-14 08:38:08.663358324 -0800 +++ b/net/phonet/pep.c 2012-02-14 10:50:16.247243347 -0800 @@ -348,7 +348,7 @@ static int pipe_do_rcv(struct sock *sk, case PNS_PEP_CTRL_REQ: if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) { - atomic_inc(&sk->sk_drops); + atomic_inc_unchecked(&sk->sk_drops); break; } __skb_pull(skb, 4); @@ -362,12 +362,12 @@ static int pipe_do_rcv(struct sock *sk, if (!err) return 0; if (err == -ENOMEM) - atomic_inc(&sk->sk_drops); + atomic_inc_unchecked(&sk->sk_drops); break; } if (pn->rx_credits == 0) { - atomic_inc(&sk->sk_drops); + atomic_inc_unchecked(&sk->sk_drops); err = -ENOBUFS; break; } diff -purN a/net/phonet/socket.c b/net/phonet/socket.c --- a/net/phonet/socket.c 2012-02-14 08:38:08.663358324 -0800 +++ b/net/phonet/socket.c 2012-02-14 10:50:16.251243347 -0800 @@ -483,7 +483,7 @@ static int pn_sock_seq_show(struct seq_f sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk), sock_i_uid(sk), sock_i_ino(sk), atomic_read(&sk->sk_refcnt), sk, - atomic_read(&sk->sk_drops), &len); + atomic_read_unchecked(&sk->sk_drops), &len); } seq_printf(seq, "%*s\n", 127 - len, ""); return 0; diff -purN a/net/rds/cong.c b/net/rds/cong.c --- a/net/rds/cong.c 2012-02-14 08:38:08.683358323 -0800 +++ b/net/rds/cong.c 2012-02-14 10:50:16.251243347 -0800 @@ -77,7 +77,7 @@ * finds that the saved generation number is smaller than the global generation * number, it wakes up the process. 
*/ -static atomic_t rds_cong_generation = ATOMIC_INIT(0); +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0); /* * Congestion monitoring @@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_con rdsdebug("waking map %p for %pI4\n", map, &map->m_addr); rds_stats_inc(s_cong_update_received); - atomic_inc(&rds_cong_generation); + atomic_inc_unchecked(&rds_cong_generation); if (waitqueue_active(&map->m_waitq)) wake_up(&map->m_waitq); if (waitqueue_active(&rds_poll_waitq)) @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated); int rds_cong_updated_since(unsigned long *recent) { - unsigned long gen = atomic_read(&rds_cong_generation); + unsigned long gen = atomic_read_unchecked(&rds_cong_generation); if (likely(*recent == gen)) return 0; diff -purN a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c --- a/net/rxrpc/af_rxrpc.c 2012-02-14 08:38:08.367358337 -0800 +++ b/net/rxrpc/af_rxrpc.c 2012-02-14 10:50:16.251243347 -0800 @@ -38,7 +38,7 @@ static const struct proto_ops rxrpc_rpc_ __be32 rxrpc_epoch; /* current debugging ID */ -atomic_t rxrpc_debug_id; +atomic_unchecked_t rxrpc_debug_id; /* count of skbs currently in use */ atomic_t rxrpc_n_skbs; diff -purN a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c --- a/net/rxrpc/ar-ack.c 2012-02-14 08:38:08.371358337 -0800 +++ b/net/rxrpc/ar-ack.c 2012-02-14 10:50:16.251243347 -0800 @@ -174,7 +174,7 @@ static void rxrpc_resend(struct rxrpc_ca _enter("{%d,%d,%d,%d},", call->acks_hard, call->acks_unacked, - atomic_read(&call->sequence), + atomic_read_unchecked(&call->sequence), CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz)); stop = 0; @@ -198,7 +198,7 @@ static void rxrpc_resend(struct rxrpc_ca /* each Tx packet has a new serial number */ sp->hdr.serial = - htonl(atomic_inc_return(&call->conn->serial)); + htonl(atomic_inc_return_unchecked(&call->conn->serial)); hdr = (struct rxrpc_header *) txb->head; hdr->serial = sp->hdr.serial; @@ -401,7 +401,7 @@ static void rxrpc_rotate_tx_window(struc */ static void 
rxrpc_clear_tx_window(struct rxrpc_call *call) { - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence)); + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence)); } /* @@ -627,7 +627,7 @@ process_further: latest = ntohl(sp->hdr.serial); hard = ntohl(ack.firstPacket); - tx = atomic_read(&call->sequence); + tx = atomic_read_unchecked(&call->sequence); _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }", latest, @@ -1159,7 +1159,7 @@ void rxrpc_process_call(struct work_stru goto maybe_reschedule; send_ACK_with_skew: - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) - + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) - ntohl(ack.serial)); send_ACK: mtu = call->conn->trans->peer->if_mtu; @@ -1171,7 +1171,7 @@ send_ACK: ackinfo.rxMTU = htonl(5692); ackinfo.jumbo_max = htonl(4); - hdr.serial = htonl(atomic_inc_return(&call->conn->serial)); + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial)); _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }", ntohl(hdr.serial), ntohs(ack.maxSkew), @@ -1189,7 +1189,7 @@ send_ACK: send_message: _debug("send message"); - hdr.serial = htonl(atomic_inc_return(&call->conn->serial)); + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial)); _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial)); send_message_2: diff -purN a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c --- a/net/rxrpc/ar-call.c 2012-02-14 08:38:08.367358337 -0800 +++ b/net/rxrpc/ar-call.c 2012-02-14 10:50:16.251243347 -0800 @@ -82,7 +82,7 @@ static struct rxrpc_call *rxrpc_alloc_ca spin_lock_init(&call->lock); rwlock_init(&call->state_lock); atomic_set(&call->usage, 1); - call->debug_id = atomic_inc_return(&rxrpc_debug_id); + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id); call->state = RXRPC_CALL_CLIENT_SEND_REQUEST; memset(&call->sock_node, 0xed, sizeof(call->sock_node)); diff -purN a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c --- 
a/net/rxrpc/ar-connection.c 2012-02-14 08:38:08.367358337 -0800 +++ b/net/rxrpc/ar-connection.c 2012-02-14 10:50:16.259243347 -0800 @@ -205,7 +205,7 @@ static struct rxrpc_connection *rxrpc_al rwlock_init(&conn->lock); spin_lock_init(&conn->state_lock); atomic_set(&conn->usage, 1); - conn->debug_id = atomic_inc_return(&rxrpc_debug_id); + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id); conn->avail_calls = RXRPC_MAXCALLS; conn->size_align = 4; conn->header_size = sizeof(struct rxrpc_header); diff -purN a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c --- a/net/rxrpc/ar-connevent.c 2012-02-14 08:38:08.367358337 -0800 +++ b/net/rxrpc/ar-connevent.c 2012-02-14 10:50:16.263243347 -0800 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct len = iov[0].iov_len + iov[1].iov_len; - hdr.serial = htonl(atomic_inc_return(&conn->serial)); + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial)); _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code); ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len); diff -purN a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c --- a/net/rxrpc/ar-input.c 2012-02-14 08:38:08.367358337 -0800 +++ b/net/rxrpc/ar-input.c 2012-02-14 10:50:16.267243347 -0800 @@ -339,9 +339,9 @@ void rxrpc_fast_process_packet(struct rx /* track the latest serial number on this connection for ACK packet * information */ serial = ntohl(sp->hdr.serial); - hi_serial = atomic_read(&call->conn->hi_serial); + hi_serial = atomic_read_unchecked(&call->conn->hi_serial); while (serial > hi_serial) - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial, + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial, serial); /* request ACK generation for any ACK or DATA packet that requests diff -purN a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h --- a/net/rxrpc/ar-internal.h 2012-02-14 08:38:08.367358337 -0800 +++ b/net/rxrpc/ar-internal.h 2012-02-14 10:50:16.267243347 -0800 @@ -272,8 +272,8 @@ 
struct rxrpc_connection { int error; /* error code for local abort */ int debug_id; /* debug ID for printks */ unsigned call_counter; /* call ID counter */ - atomic_t serial; /* packet serial number counter */ - atomic_t hi_serial; /* highest serial number received */ + atomic_unchecked_t serial; /* packet serial number counter */ + atomic_unchecked_t hi_serial; /* highest serial number received */ u8 avail_calls; /* number of calls available */ u8 size_align; /* data size alignment (for security) */ u8 header_size; /* rxrpc + security header size */ @@ -346,7 +346,7 @@ struct rxrpc_call { spinlock_t lock; rwlock_t state_lock; /* lock for state transition */ atomic_t usage; - atomic_t sequence; /* Tx data packet sequence counter */ + atomic_unchecked_t sequence; /* Tx data packet sequence counter */ u32 abort_code; /* local/remote abort code */ enum { /* current state of call */ RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */ @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(stru */ extern atomic_t rxrpc_n_skbs; extern __be32 rxrpc_epoch; -extern atomic_t rxrpc_debug_id; +extern atomic_unchecked_t rxrpc_debug_id; extern struct workqueue_struct *rxrpc_workqueue; /* diff -purN a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c --- a/net/rxrpc/ar-local.c 2012-02-14 08:38:08.367358337 -0800 +++ b/net/rxrpc/ar-local.c 2012-02-14 10:50:16.267243347 -0800 @@ -44,7 +44,7 @@ struct rxrpc_local *rxrpc_alloc_local(st spin_lock_init(&local->lock); rwlock_init(&local->services_lock); atomic_set(&local->usage, 1); - local->debug_id = atomic_inc_return(&rxrpc_debug_id); + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id); memcpy(&local->srx, srx, sizeof(*srx)); } diff -purN a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c --- a/net/rxrpc/ar-output.c 2012-02-14 08:38:08.371358337 -0800 +++ b/net/rxrpc/ar-output.c 2012-02-14 10:50:16.267243347 -0800 @@ -680,9 +680,9 @@ static int rxrpc_send_data(struct kiocb sp->hdr.cid = call->cid; 
sp->hdr.callNumber = call->call_id; sp->hdr.seq = - htonl(atomic_inc_return(&call->sequence)); + htonl(atomic_inc_return_unchecked(&call->sequence)); sp->hdr.serial = - htonl(atomic_inc_return(&conn->serial)); + htonl(atomic_inc_return_unchecked(&conn->serial)); sp->hdr.type = RXRPC_PACKET_TYPE_DATA; sp->hdr.userStatus = 0; sp->hdr.securityIndex = conn->security_ix; diff -purN a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c --- a/net/rxrpc/ar-peer.c 2012-02-14 08:38:08.367358337 -0800 +++ b/net/rxrpc/ar-peer.c 2012-02-14 10:50:16.271243346 -0800 @@ -86,7 +86,7 @@ static struct rxrpc_peer *rxrpc_alloc_pe INIT_LIST_HEAD(&peer->error_targets); spin_lock_init(&peer->lock); atomic_set(&peer->usage, 1); - peer->debug_id = atomic_inc_return(&rxrpc_debug_id); + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id); memcpy(&peer->srx, srx, sizeof(*srx)); rxrpc_assess_MTU_size(peer); diff -purN a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c --- a/net/rxrpc/ar-proc.c 2012-02-14 08:38:08.367358337 -0800 +++ b/net/rxrpc/ar-proc.c 2012-02-14 10:50:16.275243346 -0800 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(str atomic_read(&conn->usage), rxrpc_conn_states[conn->state], key_serial(conn->key), - atomic_read(&conn->serial), - atomic_read(&conn->hi_serial)); + atomic_read_unchecked(&conn->serial), + atomic_read_unchecked(&conn->hi_serial)); return 0; } diff -purN a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c --- a/net/rxrpc/ar-transport.c 2012-02-14 08:38:08.367358337 -0800 +++ b/net/rxrpc/ar-transport.c 2012-02-14 10:50:16.275243346 -0800 @@ -46,7 +46,7 @@ static struct rxrpc_transport *rxrpc_all spin_lock_init(&trans->client_lock); rwlock_init(&trans->conn_lock); atomic_set(&trans->usage, 1); - trans->debug_id = atomic_inc_return(&rxrpc_debug_id); + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id); if (peer->srx.transport.family == AF_INET) { switch (peer->srx.transport_type) { diff -purN a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c --- 
a/net/rxrpc/rxkad.c 2012-02-14 08:38:08.367358337 -0800 +++ b/net/rxrpc/rxkad.c 2012-02-14 10:50:16.279243346 -0800 @@ -609,7 +609,7 @@ static int rxkad_issue_challenge(struct len = iov[0].iov_len + iov[1].iov_len; - hdr.serial = htonl(atomic_inc_return(&conn->serial)); + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial)); _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial)); ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len); @@ -659,7 +659,7 @@ static int rxkad_send_response(struct rx len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len; - hdr->serial = htonl(atomic_inc_return(&conn->serial)); + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial)); _proto("Tx RESPONSE %%%u", ntohl(hdr->serial)); ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len); diff -purN a/net/sunrpc/sched.c b/net/sunrpc/sched.c --- a/net/sunrpc/sched.c 2012-02-14 08:38:08.331358339 -0800 +++ b/net/sunrpc/sched.c 2012-02-14 10:50:16.283243346 -0800 @@ -234,10 +234,10 @@ static int rpc_wait_bit_killable(void *w #ifdef RPC_DEBUG static void rpc_task_set_debuginfo(struct rpc_task *task) { - static atomic_t rpc_pid; + static atomic_unchecked_t rpc_pid; task->tk_magic = RPC_TASK_MAGIC_ID; - task->tk_pid = atomic_inc_return(&rpc_pid); + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid); } #else static inline void rpc_task_set_debuginfo(struct rpc_task *task) diff -purN a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c --- a/net/sunrpc/xprtrdma/svc_rdma.c 2012-02-14 08:38:08.323358340 -0800 +++ b/net/sunrpc/xprtrdma/svc_rdma.c 2012-02-14 10:50:16.283243346 -0800 @@ -59,15 +59,15 @@ unsigned int svcrdma_max_req_size = RPCR static unsigned int min_max_inline = 4096; static unsigned int max_max_inline = 65536; -atomic_t rdma_stat_recv; -atomic_t rdma_stat_read; -atomic_t rdma_stat_write; -atomic_t rdma_stat_sq_starve; -atomic_t rdma_stat_rq_starve; -atomic_t rdma_stat_rq_poll; -atomic_t rdma_stat_rq_prod; -atomic_t 
rdma_stat_sq_poll; -atomic_t rdma_stat_sq_prod; +atomic_unchecked_t rdma_stat_recv; +atomic_unchecked_t rdma_stat_read; +atomic_unchecked_t rdma_stat_write; +atomic_unchecked_t rdma_stat_sq_starve; +atomic_unchecked_t rdma_stat_rq_starve; +atomic_unchecked_t rdma_stat_rq_poll; +atomic_unchecked_t rdma_stat_rq_prod; +atomic_unchecked_t rdma_stat_sq_poll; +atomic_unchecked_t rdma_stat_sq_prod; /* Temporary NFS request map and context caches */ struct kmem_cache *svc_rdma_map_cachep; @@ -149,63 +149,63 @@ static ctl_table svcrdma_parm_table[] = { .procname = "rdma_stat_read", .data = &rdma_stat_read, - .maxlen = sizeof(atomic_t), + .maxlen = sizeof(atomic_unchecked_t), .mode = 0644, .proc_handler = &read_reset_stat, }, { .procname = "rdma_stat_recv", .data = &rdma_stat_recv, - .maxlen = sizeof(atomic_t), + .maxlen = sizeof(atomic_unchecked_t), .mode = 0644, .proc_handler = &read_reset_stat, }, { .procname = "rdma_stat_write", .data = &rdma_stat_write, - .maxlen = sizeof(atomic_t), + .maxlen = sizeof(atomic_unchecked_t), .mode = 0644, .proc_handler = &read_reset_stat, }, { .procname = "rdma_stat_sq_starve", .data = &rdma_stat_sq_starve, - .maxlen = sizeof(atomic_t), + .maxlen = sizeof(atomic_unchecked_t), .mode = 0644, .proc_handler = &read_reset_stat, }, { .procname = "rdma_stat_rq_starve", .data = &rdma_stat_rq_starve, - .maxlen = sizeof(atomic_t), + .maxlen = sizeof(atomic_unchecked_t), .mode = 0644, .proc_handler = &read_reset_stat, }, { .procname = "rdma_stat_rq_poll", .data = &rdma_stat_rq_poll, - .maxlen = sizeof(atomic_t), + .maxlen = sizeof(atomic_unchecked_t), .mode = 0644, .proc_handler = &read_reset_stat, }, { .procname = "rdma_stat_rq_prod", .data = &rdma_stat_rq_prod, - .maxlen = sizeof(atomic_t), + .maxlen = sizeof(atomic_unchecked_t), .mode = 0644, .proc_handler = &read_reset_stat, }, { .procname = "rdma_stat_sq_poll", .data = &rdma_stat_sq_poll, - .maxlen = sizeof(atomic_t), + .maxlen = sizeof(atomic_unchecked_t), .mode = 0644, .proc_handler = 
&read_reset_stat, }, { .procname = "rdma_stat_sq_prod", .data = &rdma_stat_sq_prod, - .maxlen = sizeof(atomic_t), + .maxlen = sizeof(atomic_unchecked_t), .mode = 0644, .proc_handler = &read_reset_stat, }, diff -purN a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2012-02-14 08:38:08.323358340 -0800 +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2012-02-14 10:50:16.283243346 -0800 @@ -495,7 +495,7 @@ next_sge: svc_rdma_put_context(ctxt, 0); goto out; } - atomic_inc(&rdma_stat_read); + atomic_inc_unchecked(&rdma_stat_read); if (read_wr.num_sge < chl_map->ch[ch_no].count) { chl_map->ch[ch_no].count -= read_wr.num_sge; @@ -606,7 +606,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r dto_q); list_del_init(&ctxt->dto_q); } else { - atomic_inc(&rdma_stat_rq_starve); + atomic_inc_unchecked(&rdma_stat_rq_starve); clear_bit(XPT_DATA, &xprt->xpt_flags); ctxt = NULL; } @@ -626,7 +626,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n", ctxt, rdma_xprt, rqstp, ctxt->wc_status); BUG_ON(ctxt->wc_status != IB_WC_SUCCESS); - atomic_inc(&rdma_stat_recv); + atomic_inc_unchecked(&rdma_stat_recv); /* Build up the XDR from the receive buffers. 
*/ rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len); diff -purN a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2012-02-14 08:38:08.323358340 -0800 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2012-02-14 10:50:16.283243346 -0800 @@ -328,7 +328,7 @@ static int send_write(struct svcxprt_rdm write_wr.wr.rdma.remote_addr = to; /* Post It */ - atomic_inc(&rdma_stat_write); + atomic_inc_unchecked(&rdma_stat_write); if (svc_rdma_send(xprt, &write_wr)) goto err; return 0; diff -purN a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c 2012-02-14 08:38:08.323358340 -0800 +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c 2012-02-14 10:50:16.283243346 -0800 @@ -292,7 +292,7 @@ static void rq_cq_reap(struct svcxprt_rd return; ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP); - atomic_inc(&rdma_stat_rq_poll); + atomic_inc_unchecked(&rdma_stat_rq_poll); while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) { ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id; @@ -314,7 +314,7 @@ static void rq_cq_reap(struct svcxprt_rd } if (ctxt) - atomic_inc(&rdma_stat_rq_prod); + atomic_inc_unchecked(&rdma_stat_rq_prod); set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags); /* @@ -386,7 +386,7 @@ static void sq_cq_reap(struct svcxprt_rd return; ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP); - atomic_inc(&rdma_stat_sq_poll); + atomic_inc_unchecked(&rdma_stat_sq_poll); while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) { if (wc.status != IB_WC_SUCCESS) /* Close the transport */ @@ -404,7 +404,7 @@ static void sq_cq_reap(struct svcxprt_rd } if (ctxt) - atomic_inc(&rdma_stat_sq_prod); + atomic_inc_unchecked(&rdma_stat_sq_prod); } static void sq_comp_handler(struct ib_cq *cq, void *cq_context) diff -purN a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c --- a/net/xfrm/xfrm_policy.c 2012-02-14 08:38:08.375358338 -0800 +++ b/net/xfrm/xfrm_policy.c 
2012-02-14 10:50:16.291243346 -0800 @@ -586,7 +586,7 @@ int xfrm_policy_insert(int dir, struct x hlist_add_head(&policy->bydst, chain); xfrm_pol_hold(policy); net->xfrm.policy_count[dir]++; - atomic_inc(&flow_cache_genid); + atomic_inc_unchecked(&flow_cache_genid); if (delpol) __xfrm_policy_unlink(delpol, dir); policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir); @@ -669,7 +669,7 @@ struct xfrm_policy *xfrm_policy_bysel_ct write_unlock_bh(&xfrm_policy_lock); if (ret && delete) { - atomic_inc(&flow_cache_genid); + atomic_inc_unchecked(&flow_cache_genid); xfrm_policy_kill(ret); } return ret; @@ -710,7 +710,7 @@ struct xfrm_policy *xfrm_policy_byid(str write_unlock_bh(&xfrm_policy_lock); if (ret && delete) { - atomic_inc(&flow_cache_genid); + atomic_inc_unchecked(&flow_cache_genid); xfrm_policy_kill(ret); } return ret; @@ -824,7 +824,7 @@ int xfrm_policy_flush(struct net *net, u } } - atomic_inc(&flow_cache_genid); + atomic_inc_unchecked(&flow_cache_genid); out: write_unlock_bh(&xfrm_policy_lock); return err; @@ -1088,7 +1088,7 @@ int xfrm_policy_delete(struct xfrm_polic write_unlock_bh(&xfrm_policy_lock); if (pol) { if (dir < XFRM_POLICY_MAX) - atomic_inc(&flow_cache_genid); + atomic_inc_unchecked(&flow_cache_genid); xfrm_policy_kill(pol); return 0; } @@ -1537,7 +1537,7 @@ int __xfrm_lookup(struct net *net, struc u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT); restart: - genid = atomic_read(&flow_cache_genid); + genid = atomic_read_unchecked(&flow_cache_genid); policy = NULL; for (pi = 0; pi < ARRAY_SIZE(pols); pi++) pols[pi] = NULL; @@ -1680,7 +1680,7 @@ restart: goto error; } if (nx == -EAGAIN || - genid != atomic_read(&flow_cache_genid)) { + genid != atomic_read_unchecked(&flow_cache_genid)) { xfrm_pols_put(pols, npols); goto restart; } diff -purN a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c --- a/security/integrity/ima/ima_api.c 2012-02-14 08:38:12.143358168 -0800 +++ b/security/integrity/ima/ima_api.c 2012-02-14 
10:50:16.291243346 -0800 @@ -74,7 +74,7 @@ void ima_add_violation(struct inode *ino int result; /* can overflow, only indicator */ - atomic_long_inc(&ima_htable.violations); + atomic_long_inc_unchecked(&ima_htable.violations); entry = kmalloc(sizeof(*entry), GFP_KERNEL); if (!entry) { diff -purN a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c --- a/security/integrity/ima/ima_fs.c 2012-02-14 08:38:12.143358168 -0800 +++ b/security/integrity/ima/ima_fs.c 2012-02-14 10:50:16.291243346 -0800 @@ -27,12 +27,12 @@ static int valid_policy = 1; #define TMPBUFLEN 12 static ssize_t ima_show_htable_value(char __user *buf, size_t count, - loff_t *ppos, atomic_long_t *val) + loff_t *ppos, atomic_long_unchecked_t *val) { char tmpbuf[TMPBUFLEN]; ssize_t len; - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val)); + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val)); return simple_read_from_buffer(buf, count, ppos, tmpbuf, len); } diff -purN a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h --- a/security/integrity/ima/ima.h 2012-02-14 08:38:12.143358168 -0800 +++ b/security/integrity/ima/ima.h 2012-02-14 10:50:16.291243346 -0800 @@ -84,8 +84,8 @@ void ima_add_violation(struct inode *ino extern spinlock_t ima_queue_lock; struct ima_h_table { - atomic_long_t len; /* number of stored measurements in the list */ - atomic_long_t violations; + atomic_long_unchecked_t len; /* number of stored measurements in the list */ + atomic_long_unchecked_t violations; struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE]; }; extern struct ima_h_table ima_htable; diff -purN a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c --- a/security/integrity/ima/ima_queue.c 2012-02-14 08:38:12.143358168 -0800 +++ b/security/integrity/ima/ima_queue.c 2012-02-14 10:50:16.291243346 -0800 @@ -78,7 +78,7 @@ static int ima_add_digest_entry(struct i INIT_LIST_HEAD(&qe->later); list_add_tail_rcu(&qe->later, &ima_measurements); - 
atomic_long_inc(&ima_htable.len); + atomic_long_inc_unchecked(&ima_htable.len); key = ima_hash_key(entry->digest); hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]); return 0; diff -purN a/security/Kconfig b/security/Kconfig --- a/security/Kconfig 2012-02-14 08:38:12.107358170 -0800 +++ b/security/Kconfig 2012-02-14 10:52:31.943237253 -0800 @@ -159,6 +159,25 @@ config LSM_MMAP_MIN_ADDR this low address space will need the permission specific to the systems running LSM. +config PAX_REFCOUNT + bool "Prevent various kernel object reference counter overflows" + depends on X86 || SPARC64 + help + By saying Y here the kernel will detect and prevent overflowing + various (but not all) kinds of object reference counters. Such + overflows can normally occur due to bugs only and are often, if + not always, exploitable. + + The tradeoff is that data structures protected by an overflowed + refcount will never be freed and therefore will leak memory. Note + that this leak also happens even without this protection but in + that case the overflow can eventually trigger the freeing of the + data structure while it is still being used elsewhere, resulting + in the exploitable situation that this feature prevents. + + Since this has a negligible performance impact, you should enable + this feature. 
+ source security/selinux/Kconfig source security/smack/Kconfig source security/tomoyo/Kconfig diff -purN a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h --- a/security/selinux/include/xfrm.h 2012-02-14 08:38:12.115358170 -0800 +++ b/security/selinux/include/xfrm.h 2012-02-14 10:50:16.299243345 -0800 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct s static inline void selinux_xfrm_notify_policyload(void) { - atomic_inc(&flow_cache_genid); + atomic_inc_unchecked(&flow_cache_genid); } #else static inline int selinux_xfrm_enabled(void) diff -purN a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c --- a/sound/pci/ymfpci/ymfpci_main.c 2012-02-14 08:38:12.423358155 -0800 +++ b/sound/pci/ymfpci/ymfpci_main.c 2012-02-14 10:50:16.299243345 -0800 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct sn if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0) break; } - if (atomic_read(&chip->interrupt_sleep_count)) { - atomic_set(&chip->interrupt_sleep_count, 0); + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) { + atomic_set_unchecked(&chip->interrupt_sleep_count, 0); wake_up(&chip->interrupt_sleep); } __end: @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct s continue; init_waitqueue_entry(&wait, current); add_wait_queue(&chip->interrupt_sleep, &wait); - atomic_inc(&chip->interrupt_sleep_count); + atomic_inc_unchecked(&chip->interrupt_sleep_count); schedule_timeout_uninterruptible(msecs_to_jiffies(50)); remove_wait_queue(&chip->interrupt_sleep, &wait); } @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt( snd_ymfpci_writel(chip, YDSXGR_MODE, mode); spin_unlock(&chip->reg_lock); - if (atomic_read(&chip->interrupt_sleep_count)) { - atomic_set(&chip->interrupt_sleep_count, 0); + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) { + atomic_set_unchecked(&chip->interrupt_sleep_count, 0); wake_up(&chip->interrupt_sleep); } } @@ -2369,7 +2369,7 @@ int __devinit snd_ymfpci_create(struct s 
spin_lock_init(&chip->reg_lock); spin_lock_init(&chip->voice_lock); init_waitqueue_head(&chip->interrupt_sleep); - atomic_set(&chip->interrupt_sleep_count, 0); + atomic_set_unchecked(&chip->interrupt_sleep_count, 0); chip->card = card; chip->pci = pci; chip->irq = -1;