/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifndef CONFIG_ARC_PLAT_EZNPS

#define atomic_read(v)	READ_ONCE((v)->counter)

#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned int val;						\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val) /* Early clobber to prevent reg reuse */	\
	: [ctr]	"r"	(&v->counter), /* Not "m": llock only supports reg direct addr mode */	\
	  [i]	"ir"	(i)						\
	: "cc");							\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int val;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return val;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned int val, orig;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[orig], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[orig], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val),						\
	  [orig] "=&r"	(orig)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return orig;							\
}

#else	/* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

 /* violating atomic_xxx API locking protocol in UP for optimization sake */
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#else

static inline void atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
	 * sequence
	 *
	 * Thus atomic_set() despite being 1 insn (and seemingly atomic)
	 * requires the locking.
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	WRITE_ONCE(v->counter, i);
	atomic_ops_unlock(flags);
}
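
/*
 * Illustrative sketch (not part of the kernel API; the interleaving below
 * is hypothetical): why the lock-free WRITE_ONCE() variant of atomic_set()
 * is confined to UP.  With spinlock-emulated atomics, a plain store racing
 * with a locked read-modify-write can be silently lost:
 *
 *	CPU0: atomic_add(1, &v)		CPU1: unlocked atomic_set(&v, 5)
 *	atomic_ops_lock(flags);
 *	tmp = v->counter;
 *					v->counter = 5;	// overwritten below
 *	v->counter = tmp + 1;
 *	atomic_ops_unlock(flags);
 *
 * Taking atomic_ops_lock() in atomic_set() serializes it against the
 * emulated RMW sequence above and avoids losing the store.
 */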

#define atomic_set_release(v, i)	atomic_set((v), (i))

#endif

/*
 * Non hardware assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 */

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	atomic_ops_lock(flags);						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long temp;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	temp = v->counter;						\
	temp c_op i;							\
	v->counter = temp;						\
	atomic_ops_unlock(flags);					\
									\
	return temp;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long orig;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	orig = v->counter;						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
									\
	return orig;							\
}

#endif /* !CONFIG_ARC_HAS_LLSC */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
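
/*
 * Illustrative sketch, not part of this header: ATOMIC_OPS(add, +=, add)
 * and ATOMIC_OPS(sub, -=, sub) above expand into atomic_add()/atomic_sub(),
 * atomic_add_return()/atomic_sub_return() and atomic_fetch_add()/
 * atomic_fetch_sub().  A hypothetical caller (names below are made up)
 * would use them as:
 *
 *	static atomic_t nr_pending = ATOMIC_INIT(0);
 *
 *	void submit_one(void)
 *	{
 *		atomic_add(1, &nr_pending);	// no return value, no barrier
 *	}
 *
 *	int complete_one(void)
 *	{
 *		// value-returning form: full barrier before and after
 *		return atomic_sub_return(1, &nr_pending);
 *	}
 */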

#define atomic_andnot		atomic_andnot
#define atomic_fetch_andnot	atomic_fetch_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)
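
/*
 * Illustrative sketch (hypothetical flag word, not defined here): the
 * value-returning bitwise ops generated above return the *old* value and
 * are fully ordered, which is enough for a one-shot "claim" pattern:
 *
 *	#define MY_CLAIMED	0x1
 *	static atomic_t my_flags = ATOMIC_INIT(0);
 *
 *	bool try_claim(void)
 *	{
 *		// old value tells us whether someone else already claimed it
 *		return !(atomic_fetch_or(MY_CLAIMED, &my_flags) & MY_CLAIMED);
 *	}
 */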

#else	/* CONFIG_ARC_PLAT_EZNPS */

static inline int atomic_read(const atomic_t *v)
{
	int temp;

	__asm__ __volatile__(
	"	ld.di %0, [%1]"
	: "=r"(temp)
	: "r"(&v->counter)
	: "memory");
	return temp;
}

static inline void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__(
	"	st.di %0,[%1]"
	:
	: "r"(i), "r"(&v->counter)
	: "memory");
}

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	:								\
	: "r"(i), "r"(&v->counter), "i"(asm_op)				\
	: "r2", "r3", "memory");					\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int temp = i;						\
									\
	/* Explicit full memory barrier needed before/after */		\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	"	mov %0, r2"						\
	: "+r"(temp)							\
	: "r"(&v->counter), "i"(asm_op)					\
	: "r2", "r3", "memory");					\
									\
	smp_mb();							\
									\
	temp c_op i;							\
									\
	return temp;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned int temp = i;						\
									\
	/* Explicit full memory barrier needed before/after */		\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	"	mov %0, r2"						\
	: "+r"(temp)							\
	: "r"(&v->counter), "i"(asm_op)					\
	: "r2", "r3", "memory");					\
									\
	smp_mb();							\
									\
	return temp;							\
}

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
#define atomic_sub(i, v) atomic_add(-(i), (v))
#define atomic_sub_return(i, v) atomic_add_return(-(i), (v))
#define atomic_fetch_sub(i, v) atomic_fetch_add(-(i), (v))

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
ATOMIC_OPS(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)

#endif	/* CONFIG_ARC_PLAT_EZNPS */

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#ifdef CONFIG_GENERIC_ATOMIC64

#include <asm-generic/atomic64.h>

#else	/* Kconfig ensures this is only enabled with needed h/w assist */

/*
 * ARCv2 supports 64-bit exclusive load (LLOCKD) / store (SCONDD)
 *  - The address HAS to be 64-bit aligned
 *  - There are 2 semantics involved here:
 *    = exclusive implies no interim update between load/store to same addr
 *    = both words are observed/updated together: this is guaranteed even
 *      for regular 64-bit load (LDD) / store (STD).  Thus atomic64_set()
 *      is NOT required to use LLOCKD+SCONDD, STD suffices
 */
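
/*
 * Illustrative sketch (hypothetical counter, not defined here): because an
 * aligned 64-bit LDD/STD is single-copy atomic, plain set/read need no
 * LLOCKD/SCONDD loop, while RMW ops below do:
 *
 *	static atomic64_t bytes_done = ATOMIC64_INIT(0);	// aligned_u64 member
 *
 *	void account(u32 len)
 *	{
 *		atomic64_add(len, &bytes_done);		// LLOCKD/SCONDD loop
 *	}
 *
 *	u64 snapshot(void)
 *	{
 *		return atomic64_read(&bytes_done);	// single LDD
 *	}
 */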

typedef struct {
	aligned_u64 counter;
} atomic64_t;

#define ATOMIC64_INIT(a) { (a) }

static inline long long atomic64_read(const atomic64_t *v)
{
	unsigned long long val;

	__asm__ __volatile__(
	"	ldd   %0, [%1]	\n"
	: "=r"(val)
	: "r"(&v->counter));

	return val;
}

static inline void atomic64_set(atomic64_t *v, long long a)
{
	/*
	 * This could have been a simple assignment in "C" but would need
	 * explicit volatile. Otherwise gcc optimizers could elide the store,
	 * which broke the atomic64 self-test.
	 * In the inline asm version, the "memory" clobber is needed for the
	 * exact same reason: to tell gcc about the store.
	 *
	 * This however is not needed for the sibling atomic64_add() etc since
	 * both load and store are explicitly done in inline asm. As long as
	 * the API is used for each access, gcc has no way to optimize away
	 * any load/store.
	 */
	__asm__ __volatile__(
	"	std   %0, [%1]	\n"
	:
	: "r"(a), "r"(&v->counter)
	: "memory");
}
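
/*
 * Illustrative sketch of the pitfall described above (not code from this
 * file): with a plain, non-volatile C assignment
 *
 *	v->counter = a;		// gcc is free to elide or defer this store
 *
 * the compiler sees no side effect it has to preserve.  A volatile-qualified
 * access, or (as above) inline asm with a "memory" clobber, is what forces
 * the store to actually reach v->counter.
 */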

#define ATOMIC64_OP(op, op1, op2)					\
static inline void atomic64_##op(long long a, atomic64_t *v)		\
{									\
	unsigned long long val;						\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd  %0, [%1]	\n"				\
	"	" #op1 " %L0, %L0, %L2	\n"				\
	"	" #op2 " %H0, %H0, %H2	\n"				\
	"	scondd  %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(val)							\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");							\
}									\

#define ATOMIC64_OP_RETURN(op, op1, op2)				\
static inline long long atomic64_##op##_return(long long a, atomic64_t *v)	\
{									\
	unsigned long long val;						\
									\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd  %0, [%1]	\n"				\
	"	" #op1 " %L0, %L0, %L2	\n"				\
	"	" #op2 " %H0, %H0, %H2	\n"				\
	"	scondd  %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: [val] "=&r"(val)						\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");	/* memory clobber comes from smp_mb() */	\
									\
	smp_mb();							\
									\
	return val;							\
}

#define ATOMIC64_FETCH_OP(op, op1, op2)					\
static inline long long atomic64_fetch_##op(long long a, atomic64_t *v)	\
{									\
	unsigned long long val, orig;					\
									\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd  %0, [%2]	\n"				\
	"	" #op1 " %L1, %L0, %L3	\n"				\
	"	" #op2 " %H1, %H0, %H3	\n"				\
	"	scondd  %1, [%2]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(orig), "=&r"(val)					\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");	/* memory clobber comes from smp_mb() */	\
									\
	smp_mb();							\
									\
	return orig;							\
}

#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_OP_RETURN(op, op1, op2)				\
	ATOMIC64_FETCH_OP(op, op1, op2)

#define atomic64_andnot		atomic64_andnot
#define atomic64_fetch_andnot	atomic64_fetch_andnot

ATOMIC64_OPS(add, add.f, adc)
ATOMIC64_OPS(sub, sub.f, sbc)
ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, or, or)
ATOMIC64_OPS(xor, xor, xor)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline long long
atomic64_cmpxchg(atomic64_t *ptr, long long expected, long long new)
{
	long long prev;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	brne    %L0, %L2, 2f	\n"
	"	brne    %H0, %H2, 2f	\n"
	"	scondd  %3, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(prev)
	: "r"(ptr), "ir"(expected), "r"(new)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return prev;
}

static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
{
	long long prev;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	scondd  %2, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(prev)
	: "r"(ptr), "r"(new)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return prev;
}

/**
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */

static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long val;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	sub.f   %L0, %L0, 1	# w0 - 1, set C on borrow\n"
	"	sub.c   %H0, %H0, 1	# if C set, w1 - 1\n"
	"	brlt    %H0, 0, 2f	\n"
	"	scondd  %0, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(val)
	: "r"(&v->counter)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return val;
}
#define atomic64_dec_if_positive atomic64_dec_if_positive
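
/*
 * Illustrative sketch (hypothetical refcount, not defined here): the
 * "old value minus 1" return convention above lets a caller distinguish
 * the 1 -> 0 transition from a counter that was already <= 0 and was
 * left untouched:
 *
 *	static atomic64_t refs = ATOMIC64_INIT(1);
 *
 *	void my_put(void)
 *	{
 *		if (atomic64_dec_if_positive(&refs) == 0)
 *			my_release();	// last reference just dropped
 *	}
 */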

/**
 * atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, if it was not @u.
 * Returns the old value of @v
 */
static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a,
						  long long u)
{
	long long old, temp;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%2]	\n"
	"	brne	%L0, %L4, 2f	# continue to add since v != u \n"
	"	breq.d	%H0, %H4, 3f	# return since v == u \n"
	"2:				\n"
	"	add.f   %L1, %L0, %L3	\n"
	"	adc     %H1, %H0, %H3	\n"
	"	scondd  %1, [%2]	\n"
	"	bnz     1b		\n"
	"3:				\n"
	: "=&r"(old), "=&r" (temp)
	: "r"(&v->counter), "r"(a), "r"(u)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return old;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless
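
/*
 * Illustrative sketch (hypothetical refcount, not defined here): the
 * returned old value tells the caller whether the add actually happened,
 * which is the classic "get a reference unless it already hit zero" idiom:
 *
 *	static atomic64_t refs = ATOMIC64_INIT(1);
 *
 *	bool my_get_unless_zero(void)
 *	{
 *		// old value 0 means the object is dead, no reference taken
 *		return atomic64_fetch_add_unless(&refs, 1, 0) != 0;
 *	}
 */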

#endif	/* !CONFIG_GENERIC_ATOMIC64 */

#endif	/* !__ASSEMBLY__ */

#endif