@@ -13,6 +13,8 @@
  * Copyright (c) 2007      Sun Microsystems, Inc.  All rights reserved.
  * Copyright (c) 2012-2014 Los Alamos National Security, LLC. All rights
  *                         reserved.
+ * Copyright (c) 2016      Research Organization for Information Science
+ *                         and Technology (RIST). All rights reserved.
  * $COPYRIGHT$
  *
  * Additional copyrights may follow
@@ -158,8 +160,8 @@ static inline int32_t opal_atomic_swap_32( volatile int32_t *addr,
     int32_t oldval;
 
     __asm__ __volatile__("xchg %1, %0" :
-                         "=r" (oldval), "=m" (*addr) :
-                         "0" (newval), "m" (*addr) :
+                         "=r" (oldval), "+m" (*addr) :
+                         "0" (newval) :
                          "memory");
     return oldval;
 }
@@ -174,8 +176,8 @@ static inline int64_t opal_atomic_swap_64( volatile int64_t *addr,
     int64_t oldval;
 
     __asm__ __volatile__("xchgq %1, %0" :
-                         "=r" (oldval) :
-                         "m" (*addr), "0" (newval) :
+                         "=r" (oldval), "+m" (*addr) :
+                         "0" (newval) :
                          "memory");
     return oldval;
 }
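
Note on the two swap hunks above: the old constraints described *addr inconsistently. The 32-bit variant listed it as a write-only "=m" output plus a separate "m" input, and the 64-bit variant listed it as an input only, even though xchg both reads and writes the location. The single read-write "+m" operand tells the compiler exactly that and makes the duplicate input operand unnecessary. A minimal, self-contained sketch of the corrected pattern (the name swap_32 and the main() harness are illustrative only, not Open MPI's API; assumes GCC/Clang inline asm on x86-64, where xchg with a memory operand is implicitly locked):

#include <stdint.h>
#include <stdio.h>

/* Atomically exchange *addr with newval; returns the previous value. */
static inline int32_t swap_32(volatile int32_t *addr, int32_t newval)
{
    int32_t oldval;
    __asm__ __volatile__("xchg %1, %0"
                         : "=r" (oldval), "+m" (*addr) /* "+m": read AND written */
                         : "0" (newval)  /* newval starts in oldval's register */
                         : "memory");
    return oldval;
}

int main(void)
{
    volatile int32_t x = 7;
    int32_t old = swap_32(&x, 42);
    printf("old=%d, x=%d\n", (int) old, (int) x); /* prints old=7, x=42 */
    return 0;
}
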
@@ -203,8 +205,8 @@ static inline int32_t opal_atomic_add_32(volatile int32_t* v, int i)
     int ret = i;
     __asm__ __volatile__(
                          SMPLOCK "xaddl %1,%0"
-                         :"=m" (*v), "+r" (ret)
-                         :"m" (*v)
+                         :"+m" (*v), "+r" (ret)
+                         :
                          :"memory", "cc"
                          );
     return (ret + i);
@@ -224,8 +226,8 @@ static inline int64_t opal_atomic_add_64(volatile int64_t* v, int64_t i)
     int64_t ret = i;
     __asm__ __volatile__(
                          SMPLOCK "xaddq %1,%0"
-                         :"=m" (*v), "+r" (ret)
-                         :"m" (*v)
+                         :"+m" (*v), "+r" (ret)
+                         :
                          :"memory", "cc"
                          );
     return (ret + i);
@@ -245,8 +247,8 @@ static inline int32_t opal_atomic_sub_32(volatile int32_t* v, int i)
     int ret = -i;
     __asm__ __volatile__(
                          SMPLOCK "xaddl %1,%0"
-                         :"=m" (*v), "+r" (ret)
-                         :"m" (*v)
+                         :"+m" (*v), "+r" (ret)
+                         :
                          :"memory", "cc"
                          );
     return (ret - i);
@@ -266,8 +268,8 @@ static inline int64_t opal_atomic_sub_64(volatile int64_t* v, int64_t i)
     int64_t ret = -i;
     __asm__ __volatile__(
                          SMPLOCK "xaddq %1,%0"
-                         :"=m" (*v), "+r" (ret)
-                         :"m" (*v)
+                         :"+m" (*v), "+r" (ret)
+                         :
                          :"memory", "cc"
                          );
     return (ret - i);
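
Note on the four xadd hunks: the same constraint bug, in fetch-and-add form. Declaring *v as a write-only "=m" output plus a separate "m" input lets the compiler assume the two operands are unrelated; "+m" collapses them into the single read-write operand that xadd actually has, leaving the input list empty. A minimal sketch under the same assumptions (hypothetical name fetch_and_add_32; the LOCK prefix is written out instead of Open MPI's SMPLOCK macro; this sketch returns the pre-add value that xadd leaves in the register, whereas opal_atomic_add_32 returns ret + i, the post-add value):

#include <stdint.h>
#include <stdio.h>

/* Atomically add i to *v; returns the value *v held before the add. */
static inline int32_t fetch_and_add_32(volatile int32_t *v, int32_t i)
{
    int32_t ret = i;
    __asm__ __volatile__("lock; xaddl %1,%0"
                         : "+m" (*v), "+r" (ret) /* both operands are read-write */
                         :                       /* no separate inputs needed */
                         : "memory", "cc");      /* xadd also sets flags */
    return ret;
}

int main(void)
{
    volatile int32_t counter = 10;
    int32_t before = fetch_and_add_32(&counter, 5);
    printf("before=%d, after=%d\n", (int) before, (int) counter); /* 10, 15 */
    return 0;
}
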