--- contrib/bind9/lib/isc/ia64/include/isc/atomic.h.orig	2009-06-01 11:47:44.000000000 +0400
+++ contrib/bind9/lib/isc/ia64/include/isc/atomic.h	2009-06-01 12:01:19.000000000 +0400
@@ -30,11 +30,13 @@
  * Open issue: can 'fetchadd' make the code faster for some particular values
  * (e.g., 1 and -1)?
  */
-static inline isc_int32_t
-isc_atomic_xadd(isc_int32_t *p, isc_int32_t val)
 #ifdef __GNUC__
-__attribute__ ((unused))
+static inline isc_int32_t
+isc_atomic_xadd(isc_int32_t *p, isc_int32_t val) __attribute__ ((unused));
 #endif
+
+static inline isc_int32_t
+isc_atomic_xadd(isc_int32_t *p, isc_int32_t val)
 {
 	isc_int32_t prev, swapped;
 
@@ -56,11 +58,13 @@
 /*
  * This routine atomically stores the value 'val' in 'p'.
  */
-static inline void
-isc_atomic_store(isc_int32_t *p, isc_int32_t val)
 #ifdef __GNUC__
-__attribute__ ((unused))
+static inline void
+isc_atomic_store(isc_int32_t *p, isc_int32_t val) __attribute__ ((unused));
 #endif
+
+static inline void
+isc_atomic_store(isc_int32_t *p, isc_int32_t val)
 {
 	__asm__ volatile(
 		"st4.rel %0=%1"
@@ -75,11 +79,14 @@
  * original value is equal to 'cmpval'.  The original value is returned in any
  * case.
  */
+#ifdef __GNUC__
 static inline isc_int32_t
 isc_atomic_cmpxchg(isc_int32_t *p, isc_int32_t cmpval, isc_int32_t val)
-#ifdef __GNUC__
-__attribute__ ((unused))
+__attribute__ ((unused));
 #endif
+
+static inline isc_int32_t
+isc_atomic_cmpxchg(isc_int32_t *p, isc_int32_t cmpval, isc_int32_t val)
 {
 	isc_int32_t ret;
 