@@ -80,7 +80,7 @@ static inline void atomic_set(atomic_t *v, int n)
 /* A 64bit atomic type */

 typedef struct {
-	u64 __aligned(8) counter;
+	long long counter;
 } atomic64_t;

 #define ATOMIC64_INIT(val) { (val) }
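A quick usage sketch, not part of the patch (the counter name and include path are assumed for illustration): a 64-bit atomic is declared like any other static and initialized with ATOMIC64_INIT().

#include <asm/atomic.h>	/* assumed: the header this diff patches */

/* hypothetical per-device statistic */
static atomic64_t bytes_sent = ATOMIC64_INIT(0);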
@@ -91,14 +91,14 @@ typedef struct {
  *
  * Atomically reads the value of @v.
  */
-static inline u64 atomic64_read(const atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
 {
 	/*
 	 * Requires an atomic op to read both 32-bit parts consistently.
 	 * Casting away const is safe since the atomic support routines
 	 * do not write to memory if the value has not been modified.
 	 */
-	return _atomic64_xchg_add((u64 *)&v->counter, 0);
+	return _atomic64_xchg_add((long long *)&v->counter, 0);
 }

 /**
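The comment above is worth unpacking: on a 32-bit core a 64-bit load is two separate word loads, so a plain read can pair the low half of a new value with the high half of an old one. A hedged sketch of the broken version (the function name is invented):

/* NOT safe here: compiles to two 32-bit loads, so a concurrent
 * atomic64_add() can land between them and the caller observes a
 * torn value. This is why atomic64_read() goes through
 * _atomic64_xchg_add() with an addend of 0 instead.
 */
static inline long long torn_read(const atomic64_t *v)
{
	return v->counter;
}

Adding 0 through the support routine takes the same lock as the writers, so it returns the old (and, since nothing changed, current) value as one consistent 64-bit quantity.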
@@ -108,7 +108,7 @@ static inline u64 atomic64_read(const atomic64_t *v)
  *
  * Atomically adds @i to @v.
  */
-static inline void atomic64_add(u64 i, atomic64_t *v)
+static inline void atomic64_add(long long i, atomic64_t *v)
 {
 	_atomic64_xchg_add(&v->counter, i);
 }
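A minimal caller sketch under assumed names: bumping a shared 64-bit statistic from several CPUs with no additional locking.

/* hypothetical counter and helper, for illustration only */
static atomic64_t rx_bytes = ATOMIC64_INIT(0);

static void account_rx(unsigned int len)
{
	atomic64_add(len, &rx_bytes);	/* len promotes to long long */
}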
@@ -120,7 +120,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
  *
  * Atomically adds @i to @v and returns @i + @v
  */
-static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
 {
 	smp_mb();  /* barrier for proper semantics */
 	return _atomic64_xchg_add(&v->counter, i) + i;
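Note that _atomic64_xchg_add() hands back the value the counter held before the addition, so the new value is reconstructed as old + @i; the smp_mb() supplies the full-barrier ordering the *_return operations must have. A hedged sketch of a typical caller (names invented):

static atomic64_t next_seq = ATOMIC64_INIT(0);	/* hypothetical */

/* Hand out distinct, monotonically increasing 64-bit IDs. */
static long long alloc_seq(void)
{
	return atomic64_add_return(1, &next_seq);
}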
@@ -135,7 +135,8 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
  * Atomically adds @a to @v, so long as @v was not already @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+static inline long long atomic64_add_unless(atomic64_t *v, long long a,
+					    long long u)
 {
 	smp_mb();  /* barrier for proper semantics */
 	return _atomic64_xchg_add_unless(&v->counter, a, u) != u;
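The usual consumer of add_unless is a take-a-reference-unless-dead helper; a sketch under that assumption (the struct and names are invented):

struct obj {
	atomic64_t refs;	/* hypothetical 64-bit refcount */
};

/* Returns non-zero on success, zero if refs had already hit 0. */
static int obj_get_unless_zero(struct obj *o)
{
	return atomic64_add_unless(&o->refs, 1, 0);
}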
@@ -151,7 +152,7 @@ static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
  * atomic64_set() can't be just a raw store, since it would be lost if it
  * fell between the load and store of one of the other atomic ops.
  */
-static inline void atomic64_set(atomic64_t *v, u64 n)
+static inline void atomic64_set(atomic64_t *v, long long n)
 {
 	_atomic64_xchg(&v->counter, n);
 }
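The comment above is the crux: every atomic64 op here is a locked load-modify-store, so a raw store could slip between another op's load and its store and then be silently overwritten. An illustrative interleaving, in comments only:

/*
 * Hypothetical race if atomic64_set() were a plain store:
 *
 *   CPU 0: atomic64_add(1, v)	CPU 1: raw v->counter = 100
 *   old = v->counter;		// reads 5
 *				v->counter = 100;
 *   v->counter = old + 1;	// stores 6; the store of 100 is lost
 *
 * Routing the store through _atomic64_xchg() makes it take the same
 * lock as the read-modify-write ops, closing the window.
 */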
@@ -236,11 +237,13 @@ extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
 extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
-extern u64 __atomic64_cmpxchg(volatile u64 *p, int *lock, u64 o, u64 n);
-extern u64 __atomic64_xchg(volatile u64 *p, int *lock, u64 n);
-extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n);
-extern u64 __atomic64_xchg_add_unless(volatile u64 *p,
-				      int *lock, u64 o, u64 n);
+extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
+				    long long o, long long n);
+extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
+				     long long n);
+extern long long __atomic64_xchg_add_unless(volatile long long *p,
+					    int *lock, long long o, long long n);

 /* Return failure from the atomic wrappers. */
 struct __get_user __atomic_bad_address(int __user *addr);
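These support routines all take an extra int *lock parameter because this port implements 64-bit atomics with a lock word on a 32-bit core rather than a native instruction. As a hedged sketch of how such primitives get used, assuming the port's atomic64_cmpxchg() wrapper over __atomic64_cmpxchg() (the helper below is invented):

/* Raise *v to at least n; returns the value observed beforehand. */
static inline long long atomic64_max_sketch(atomic64_t *v, long long n)
{
	long long old, cur = atomic64_read(v);

	/* Standard cmpxchg retry loop: redo until no writer races us. */
	while (cur < n) {
		old = atomic64_cmpxchg(v, cur, n);
		if (old == cur)
			break;		/* our update won */
		cur = old;		/* someone else moved it; retry */
	}
	return cur;
}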