/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifndef CONFIG_ARC_PLAT_EZNPS

#define atomic_read(v)	READ_ONCE((v)->counter)

#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned int val;						\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val) /* Early clobber to prevent reg reuse */	\
	: [ctr]	"r"	(&v->counter), /* Not "m": llock only supports reg direct addr mode */	\
	  [i]	"ir"	(i)						\
	: "cc");							\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int val;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return val;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned int val, orig;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[orig], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[orig], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val),						\
	  [orig] "=&r" (orig)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return orig;							\
}
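
/*
 * Ordering note: only the _return and fetch_ variants above are bracketed
 * by smp_mb() and thus act as full memory barriers; plain atomic_##op()
 * is a bare RMW with no implied ordering. A minimal, illustrative sketch
 * (release_resource() stands in for any hypothetical cleanup):
 *
 *	atomic_add(1, &v);			// no ordering implied
 *	if (atomic_sub_return(1, &v) == 0)	// fully ordered RMW
 *		release_resource();
 */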

#else	/* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

 /* violating atomic_xxx API locking protocol in UP for optimization's sake */
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#else

static inline void atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
	 * sequence
	 *
	 * Thus atomic_set() despite being 1 insn (and seemingly atomic)
	 * requires the locking.
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	WRITE_ONCE(v->counter, i);
	atomic_ops_unlock(flags);
}
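
/*
 * Illustrative failure mode the locking above prevents: without taking
 * atomic_ops_lock(), a plain store from atomic_set() on one CPU could land
 * between the load and the store of a lock-protected atomic_add() on
 * another CPU; that CPU would then write back a result computed from the
 * stale counter and silently overwrite the value just set.
 */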

#define atomic_set_release(v, i)	atomic_set((v), (i))

#endif

/*
 * Non hardware assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 */

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	atomic_ops_lock(flags);						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long temp;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	temp = v->counter;						\
	temp c_op i;							\
	v->counter = temp;						\
	atomic_ops_unlock(flags);					\
									\
	return temp;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long orig;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	orig = v->counter;						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
									\
	return orig;							\
}

#endif /* !CONFIG_ARC_HAS_LLSC */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
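
/*
 * Each ATOMIC_OPS(op, c_op, asm_op) line above expands to three helpers;
 * e.g. ATOMIC_OPS(add, +=, add) provides (illustrative summary only):
 *
 *	void atomic_add(int i, atomic_t *v);
 *	int  atomic_add_return(int i, atomic_t *v);	// returns new value
 *	int  atomic_fetch_add(int i, atomic_t *v);	// returns old value
 */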

#define atomic_andnot		atomic_andnot
#define atomic_fetch_andnot	atomic_fetch_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)

#else /* CONFIG_ARC_PLAT_EZNPS */

static inline int atomic_read(const atomic_t *v)
{
	int temp;

	__asm__ __volatile__(
	"	ld.di %0, [%1]"
	: "=r"(temp)
	: "r"(&v->counter)
	: "memory");
	return temp;
}

static inline void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__(
	"	st.di %0,[%1]"
	:
	: "r"(i), "r"(&v->counter)
	: "memory");
}

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	:								\
	: "r"(i), "r"(&v->counter), "i"(asm_op)				\
	: "r2", "r3", "memory");					\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int temp = i;						\
									\
	/* Explicit full memory barrier needed before/after */		\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	"	mov %0, r2"						\
	: "+r"(temp)							\
	: "r"(&v->counter), "i"(asm_op)					\
	: "r2", "r3", "memory");					\
									\
	smp_mb();							\
									\
	temp c_op i;							\
									\
	return temp;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned int temp = i;						\
									\
	/* Explicit full memory barrier needed before/after */		\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	"	mov %0, r2"						\
	: "+r"(temp)							\
	: "r"(&v->counter), "i"(asm_op)					\
	: "r2", "r3", "memory");					\
									\
	smp_mb();							\
									\
	return temp;							\
}

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
#define atomic_sub(i, v) atomic_add(-(i), (v))
#define atomic_sub_return(i, v) atomic_add_return(-(i), (v))
#define atomic_fetch_sub(i, v) atomic_fetch_add(-(i), (v))

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
ATOMIC_OPS(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)

#endif /* CONFIG_ARC_PLAT_EZNPS */

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#ifdef CONFIG_GENERIC_ATOMIC64

#include <asm-generic/atomic64.h>

#else	/* Kconfig ensures this is only enabled with needed h/w assist */

/*
 * ARCv2 supports 64-bit exclusive load (LLOCKD) / store (SCONDD)
 *  - The address HAS to be 64-bit aligned
 *  - There are 2 semantics involved here:
 *    = exclusive implies no interim update between load/store to same addr
 *    = both words are observed/updated together: this is guaranteed even
 *      for regular 64-bit load (LDD) / store (STD). Thus atomic64_set()
 *      is NOT required to use LLOCKD+SCONDD, STD suffices
 */
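
/*
 * Minimal, illustrative usage sketch of the 64-bit API defined below
 * ("len" and "total" are placeholder variables):
 *
 *	static atomic64_t bytes = ATOMIC64_INIT(0);
 *
 *	atomic64_add(len, &bytes);	// LLOCKD/SCONDD based RMW
 *	total = atomic64_read(&bytes);	// single LDD, both words read together
 *	atomic64_set(&bytes, 0);	// single STD, both words stored together
 */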

typedef struct {
	aligned_u64 counter;
} atomic64_t;

#define ATOMIC64_INIT(a) { (a) }

static inline long long atomic64_read(const atomic64_t *v)
{
	unsigned long long val;

	__asm__ __volatile__(
	"	ldd   %0, [%1]	\n"
	: "=r"(val)
	: "r"(&v->counter));

	return val;
}

static inline void atomic64_set(atomic64_t *v, long long a)
{
	/*
	 * This could have been a simple assignment in "C" but would need
	 * explicit volatile. Otherwise gcc optimizers could elide the store
	 * which borked atomic64 self-test
	 * In the inline asm version, memory clobber needed for exact same
	 * reason, to tell gcc about the store.
	 *
	 * This however is not needed for sibling atomic64_add() etc since both
	 * load/store are explicitly done in inline asm. As long as API is used
	 * for each access, gcc has no way to optimize away any load/store
	 */
	__asm__ __volatile__(
	"	std   %0, [%1]	\n"
	:
	: "r"(a), "r"(&v->counter)
	: "memory");
}
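
/*
 * For reference, the plain "C" store alluded to in the comment above would
 * simply be:
 *
 *	v->counter = a;
 *
 * but without a volatile access (e.g. WRITE_ONCE()) gcc may elide or reorder
 * it, hence the inline asm with the "memory" clobber.
 */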

#define ATOMIC64_OP(op, op1, op2)					\
static inline void atomic64_##op(long long a, atomic64_t *v)		\
{									\
	unsigned long long val;						\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd  %0, [%1]	\n"				\
	"	" #op1 " %L0, %L0, %L2	\n"				\
	"	" #op2 " %H0, %H0, %H2	\n"				\
	"	scondd  %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(val)							\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");							\
}									\

#define ATOMIC64_OP_RETURN(op, op1, op2)				\
static inline long long atomic64_##op##_return(long long a, atomic64_t *v)	\
{									\
	unsigned long long val;						\
									\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd  %0, [%1]	\n"				\
	"	" #op1 " %L0, %L0, %L2	\n"				\
	"	" #op2 " %H0, %H0, %H2	\n"				\
	"	scondd  %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: [val] "=&r"(val)						\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");	/* memory clobber comes from smp_mb() */	\
									\
	smp_mb();							\
									\
	return val;							\
}

#define ATOMIC64_FETCH_OP(op, op1, op2)					\
static inline long long atomic64_fetch_##op(long long a, atomic64_t *v)	\
{									\
	unsigned long long val, orig;					\
									\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd  %0, [%2]	\n"				\
	"	" #op1 " %L1, %L0, %L3	\n"				\
	"	" #op2 " %H1, %H0, %H3	\n"				\
	"	scondd  %1, [%2]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(orig), "=&r"(val)					\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");	/* memory clobber comes from smp_mb() */	\
									\
	smp_mb();							\
									\
	return orig;							\
}

#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_OP_RETURN(op, op1, op2)				\
	ATOMIC64_FETCH_OP(op, op1, op2)

#define atomic64_andnot		atomic64_andnot
#define atomic64_fetch_andnot	atomic64_fetch_andnot

ATOMIC64_OPS(add, add.f, adc)
ATOMIC64_OPS(sub, sub.f, sbc)
ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, or, or)
ATOMIC64_OPS(xor, xor, xor)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline long long
atomic64_cmpxchg(atomic64_t *ptr, long long expected, long long new)
{
	long long prev;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	brne    %L0, %L2, 2f	\n"
	"	brne    %H0, %H2, 2f	\n"
	"	scondd  %3, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(prev)
	: "r"(ptr), "ir"(expected), "r"(new)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return prev;
}
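
/*
 * Typical retry loop built on atomic64_cmpxchg() (illustrative sketch;
 * new_val() is a hypothetical transform of the old value):
 *
 *	long long old = atomic64_read(&v);
 *	long long prev;
 *
 *	for (;;) {
 *		prev = atomic64_cmpxchg(&v, old, new_val(old));
 *		if (prev == old)
 *			break;		// store went through
 *		old = prev;		// lost the race, retry with fresh value
 *	}
 */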

static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
{
	long long prev;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	scondd  %2, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(prev)
	: "r"(ptr), "r"(new)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return prev;
}

/**
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */

static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long val;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	sub.f   %L0, %L0, 1	# w0 - 1, set C on borrow\n"
	"	sub.c   %H0, %H0, 1	# if C set, w1 - 1\n"
	"	brlt    %H0, 0, 2f	\n"
	"	scondd  %0, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(val)
	: "r"(&v->counter)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return val;
}
#define atomic64_dec_if_positive atomic64_dec_if_positive
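
/*
 * Illustrative use: since the old value minus 1 is returned even when no
 * store is done, a negative result means the counter was already <= 0 and
 * was left untouched:
 *
 *	if (atomic64_dec_if_positive(&v) < 0)
 *		;	// v was not decremented
 */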

/**
 * atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, if it was not @u.
 * Returns the old value of @v
 */
static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a,
						  long long u)
{
	long long old, temp;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%2]	\n"
	"	brne	%L0, %L4, 2f	# continue to add since v != u \n"
	"	breq.d	%H0, %H4, 3f	# return since v == u \n"
	"2:				\n"
	"	add.f   %L1, %L0, %L3	\n"
	"	adc     %H1, %H0, %H3	\n"
	"	scondd  %1, [%2]	\n"
	"	bnz     1b		\n"
	"3:				\n"
	: "=&r"(old), "=&r" (temp)
	: "r"(&v->counter), "r"(a), "r"(u)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return old;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless
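
/*
 * Illustrative use: a "take a reference unless the object is already dead"
 * pattern, with 0 marking a dead object:
 *
 *	if (atomic64_fetch_add_unless(&v, 1, 0) == 0)
 *		;	// v was 0: nothing was added
 */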

#endif	/* !CONFIG_GENERIC_ATOMIC64 */

#endif	/* !__ASSEMBLY__ */

#endif