@@ -58,10 +58,10 @@ typedef struct _Py_atomic_int {
     atomic_thread_fence(ORDER)
 
 #define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
-    atomic_store_explicit(&(ATOMIC_VAL)->_value, NEW_VAL, ORDER)
+    atomic_store_explicit(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER)
 
 #define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
-    atomic_load_explicit(&(ATOMIC_VAL)->_value, ORDER)
+    atomic_load_explicit(&((ATOMIC_VAL)->_value), ORDER)
 
 /* Use builtin atomic operations in GCC >= 4.7 */
 #elif defined(HAVE_BUILTIN_ATOMIC)
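
Note: the pattern throughout this change is the same everywhere: the macros
take a pointer to the atomic struct and pass the address of its _value member
to the underlying primitive. A minimal sketch of how the rewritten C11 macro
expands (the variable name is illustrative, not part of the patch):

    #include <stdatomic.h>

    typedef struct _Py_atomic_int {
        atomic_int _value;
    } _Py_atomic_int;

    #define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
        atomic_store_explicit(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER)

    static _Py_atomic_int flag;

    void clear_flag(void)
    {
        /* Expands to:
           atomic_store_explicit(&((&flag)->_value), 0, memory_order_release); */
        _Py_atomic_store_explicit(&flag, 0, memory_order_release);
    }
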
@@ -92,14 +92,14 @@ typedef struct _Py_atomic_int {
     (assert((ORDER) == __ATOMIC_RELAXED \
             || (ORDER) == __ATOMIC_SEQ_CST \
             || (ORDER) == __ATOMIC_RELEASE), \
-     __atomic_store_n(&(ATOMIC_VAL)->_value, NEW_VAL, ORDER))
+     __atomic_store_n(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER))
 
 #define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
     (assert((ORDER) == __ATOMIC_RELAXED \
             || (ORDER) == __ATOMIC_SEQ_CST \
             || (ORDER) == __ATOMIC_ACQUIRE \
             || (ORDER) == __ATOMIC_CONSUME), \
-     __atomic_load_n(&(ATOMIC_VAL)->_value, ORDER))
+     __atomic_load_n(&((ATOMIC_VAL)->_value), ORDER))
 
 /* Only support GCC (for expression statements) and x86 (for simple
  * atomic semantics) and MSVC x86/x64/ARM */
@@ -324,7 +324,7 @@ inline intptr_t _Py_atomic_load_64bit(volatile uintptr_t* value, int order) {
 }
 
 #else
-#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *ATOMIC_VAL
+#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *(ATOMIC_VAL)
 #endif
 
 inline int _Py_atomic_load_32bit(volatile int* value, int order) {
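
Note: parenthesizing the fallback's argument is standard macro hygiene; with
the old *ATOMIC_VAL, a derived pointer expression binds incorrectly. A small
self-contained demonstration (macro names are stand-ins, not from the patch):

    #include <assert.h>

    #define LOAD_OLD(ATOMIC_VAL, ORDER) *ATOMIC_VAL
    #define LOAD_NEW(ATOMIC_VAL, ORDER) *(ATOMIC_VAL)

    int main(void)
    {
        long v[2] = {10, 20};
        long *p = v;
        assert(LOAD_OLD(p + 1, 0) == 11);  /* expands to *p + 1   */
        assert(LOAD_NEW(p + 1, 0) == 20);  /* expands to *(p + 1) */
        return 0;
    }
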
@@ -359,15 +359,15 @@ inline int _Py_atomic_load_32bit(volatile int* value, int order) {
 }
 
 #define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
-  if (sizeof(*ATOMIC_VAL._value) == 8) { \
-    _Py_atomic_store_64bit((volatile long long*)ATOMIC_VAL._value, NEW_VAL, ORDER) } else { \
-    _Py_atomic_store_32bit((volatile long*)ATOMIC_VAL._value, NEW_VAL, ORDER) }
+  if (sizeof((ATOMIC_VAL)->_value) == 8) { \
+    _Py_atomic_store_64bit((volatile long long*)&((ATOMIC_VAL)->_value), NEW_VAL, ORDER) } else { \
+    _Py_atomic_store_32bit((volatile long*)&((ATOMIC_VAL)->_value), NEW_VAL, ORDER) }
 
 #define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
   ( \
-    sizeof(*(ATOMIC_VAL._value)) == 8 ? \
-    _Py_atomic_load_64bit((volatile long long*)ATOMIC_VAL._value, ORDER) : \
-    _Py_atomic_load_32bit((volatile long*)ATOMIC_VAL._value, ORDER) \
+    sizeof((ATOMIC_VAL)->_value) == 8 ? \
+    _Py_atomic_load_64bit((volatile long long*)&((ATOMIC_VAL)->_value), ORDER) : \
+    _Py_atomic_load_32bit((volatile long*)&((ATOMIC_VAL)->_value), ORDER) \
   )
 #elif defined(_M_ARM) || defined(_M_ARM64)
 typedef struct _Py_atomic_int {
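
Note: this hunk is the substantive fix on the MSVC x86/x64 path. The old
macros wrote ATOMIC_VAL._value, which only parses if the caller passes the
struct itself, and then casts the member's *value* to a pointer where
_Py_atomic_store_64bit/_32bit expect an address. The rewrite treats
ATOMIC_VAL as a struct pointer and passes &((ATOMIC_VAL)->_value), matching
the C11 and GCC paths above. A simplified, portable sketch of the fixed
shape (helper and macro names here are stand-ins):

    typedef struct _Py_atomic_int { long _value; } _Py_atomic_int;

    /* Stand-in for _Py_atomic_store_32bit: expects the slot's ADDRESS. */
    static void store_32bit(volatile long *addr, long v) { *addr = v; }

    #define store_explicit(ATOMIC_VAL, NEW_VAL) \
        store_32bit(&((ATOMIC_VAL)->_value), NEW_VAL)

    int main(void)
    {
        _Py_atomic_int x = {0};
        store_explicit(&x, 7);   /* callers pass a struct pointer */
        return x._value == 7 ? 0 : 1;
    }
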
@@ -391,13 +391,13 @@ typedef struct _Py_atomic_int {
 #define _Py_atomic_store_64bit(ATOMIC_VAL, NEW_VAL, ORDER) \
   switch (ORDER) { \
   case _Py_memory_order_acquire: \
-    _InterlockedExchange64_acq((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \
+    _InterlockedExchange64_acq((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)NEW_VAL); \
     break; \
   case _Py_memory_order_release: \
-    _InterlockedExchange64_rel((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \
+    _InterlockedExchange64_rel((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)NEW_VAL); \
     break; \
   default: \
-    _InterlockedExchange64((__int64 volatile*)ATOMIC_VAL, (__int64)NEW_VAL); \
+    _InterlockedExchange64((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)NEW_VAL); \
     break; \
   }
 #else
@@ -407,13 +407,13 @@ typedef struct _Py_atomic_int {
 #define _Py_atomic_store_32bit(ATOMIC_VAL, NEW_VAL, ORDER) \
   switch (ORDER) { \
   case _Py_memory_order_acquire: \
-    _InterlockedExchange_acq((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \
+    _InterlockedExchange_acq((volatile long*)&((ATOMIC_VAL)->_value), (int)NEW_VAL); \
     break; \
   case _Py_memory_order_release: \
-    _InterlockedExchange_rel((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \
+    _InterlockedExchange_rel((volatile long*)&((ATOMIC_VAL)->_value), (int)NEW_VAL); \
     break; \
   default: \
-    _InterlockedExchange((volatile long*)ATOMIC_VAL, (int)NEW_VAL); \
+    _InterlockedExchange((volatile long*)&((ATOMIC_VAL)->_value), (int)NEW_VAL); \
     break; \
   }
 
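Note: the ARM intrinsics (_InterlockedExchange64 and _InterlockedExchange,
plus their _acq/_rel variants) take the address of the slot they exchange.
The old macros cast the macro argument itself to that pointer type; the
fixed ones pass &((ATOMIC_VAL)->_value), so call sites use the same
pointer-to-struct convention as every other path in this header. A hedged
usage sketch (the variable name is illustrative, not from the patch):

    static _Py_atomic_int eval_breaker;

    void signal_pending(void)
    {
        /* The 32-bit branch receives
           (volatile long*)&((&eval_breaker)->_value). */
        _Py_atomic_store_explicit(&eval_breaker, 1, _Py_memory_order_release);
    }
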
@@ -454,7 +454,7 @@ inline intptr_t _Py_atomic_load_64bit(volatile uintptr_t* value, int order) {
 }
 
 #else
-#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *ATOMIC_VAL
+#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) *(ATOMIC_VAL)
 #endif
 
 inline int _Py_atomic_load_32bit(volatile int* value, int order) {
@@ -489,15 +489,15 @@ inline int _Py_atomic_load_32bit(volatile int* value, int order) {
 }
 
 #define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
-  if (sizeof(*ATOMIC_VAL._value) == 8) { \
-    _Py_atomic_store_64bit(ATOMIC_VAL._value, NEW_VAL, ORDER) } else { \
-    _Py_atomic_store_32bit(ATOMIC_VAL._value, NEW_VAL, ORDER) }
+  if (sizeof((ATOMIC_VAL)->_value) == 8) { \
+    _Py_atomic_store_64bit(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER) } else { \
+    _Py_atomic_store_32bit(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER) }
 
 #define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
   ( \
-    sizeof(*(ATOMIC_VAL._value)) == 8 ? \
-    _Py_atomic_load_64bit(ATOMIC_VAL._value, ORDER) : \
-    _Py_atomic_load_32bit(ATOMIC_VAL._value, ORDER) \
+    sizeof((ATOMIC_VAL)->_value) == 8 ? \
+    _Py_atomic_load_64bit(&((ATOMIC_VAL)->_value), ORDER) : \
+    _Py_atomic_load_32bit(&((ATOMIC_VAL)->_value), ORDER) \
   )
 #endif
 #else /* !gcc x86 !_msc_ver */