/* Atomic operations usable in machine independent code */
#ifndef _LINUX_ATOMIC_H
#define _LINUX_ATOMIC_H
#include <asm/atomic.h>
#include <asm/barrier.h>

/*
 * Relaxed variants of xchg, cmpxchg and some atomic operations.
 *
 * We support four variants:
 *
 * - Fully ordered: The default implementation, no suffix required.
 * - Acquire: Provides ACQUIRE semantics, _acquire suffix.
 * - Release: Provides RELEASE semantics, _release suffix.
 * - Relaxed: No ordering guarantees, _relaxed suffix.
 *
 * For compound atomics performing both a load and a store, ACQUIRE
 * semantics apply only to the load and RELEASE semantics only to the
 * store portion of the operation. Note that a failed cmpxchg_acquire
 * does -not- imply any memory ordering constraints.
 *
 * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
 */
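
/*
 * An illustrative sketch (not part of this header): a trivial test-and-set
 * lock needs ACQUIRE ordering only on the successful locking cmpxchg; per
 * the note above, a failed example_trylock() orders nothing, which is fine
 * since the caller must not touch the protected data in that case anyway:
 *
 *	static inline int example_trylock(atomic_t *lock)
 *	{
 *		return atomic_cmpxchg_acquire(lock, 0, 1) == 0;
 *	}
 */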

#ifndef atomic_read_acquire
#define  atomic_read_acquire(v)		smp_load_acquire(&(v)->counter)
#endif

#ifndef atomic_set_release
#define  atomic_set_release(v, i)	smp_store_release(&(v)->counter, (i))
#endif
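
/*
 * An illustrative sketch (not part of this header): these two helpers are
 * enough for simple flag-based message passing, where the RELEASE store
 * publishes the data and the ACQUIRE load pairs with it:
 *
 *	static int example_data;
 *	static atomic_t example_ready = ATOMIC_INIT(0);
 *
 *	static void example_publish(int val)
 *	{
 *		example_data = val;
 *		atomic_set_release(&example_ready, 1);
 *	}
 *
 *	static int example_consume(int *val)
 *	{
 *		if (!atomic_read_acquire(&example_ready))
 *			return 0;
 *		*val = example_data;
 *		return 1;
 *	}
 */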

/*
 * The idea here is to build acquire/release variants by adding explicit
 * barriers on top of the relaxed variant. In the case where the relaxed
 * variant is already fully ordered, no additional barriers are needed.
 *
 * If an architecture has a special barrier for acquire/release, it can
 * instead implement its own __atomic_op_* helpers and reuse the same
 * framework for building the variants.
 */
#ifndef __atomic_op_acquire
#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret  = op##_relaxed(args);		\
	smp_mb__after_atomic();						\
	__ret;								\
})
#endif

#ifndef __atomic_op_release
#define __atomic_op_release(op, args...)				\
({									\
	smp_mb__before_atomic();					\
	op##_relaxed(args);						\
})
#endif

#ifndef __atomic_op_fence
#define __atomic_op_fence(op, args...)					\
({									\
	typeof(op##_relaxed(args)) __ret;				\
	smp_mb__before_atomic();					\
	__ret = op##_relaxed(args);					\
	smp_mb__after_atomic();						\
	__ret;								\
})
#endif
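
/*
 * For example, given an architecture that only provides
 * atomic_add_return_relaxed(), the wrappers below turn
 *
 *	atomic_add_return_acquire(i, v)
 *
 * into (roughly):
 *
 *	({
 *		int __ret = atomic_add_return_relaxed(i, v);
 *		smp_mb__after_atomic();
 *		__ret;
 *	})
 *
 * i.e. the relaxed primitive followed by the architecture's cheapest
 * barrier that upgrades it to ACQUIRE ordering.
 */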

/* atomic_add_return_relaxed */
#ifndef atomic_add_return_relaxed
#define  atomic_add_return_relaxed	atomic_add_return
#define  atomic_add_return_acquire	atomic_add_return
#define  atomic_add_return_release	atomic_add_return

#else /* atomic_add_return_relaxed */

#ifndef atomic_add_return_acquire
#define  atomic_add_return_acquire(...)					\
	__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
#endif

#ifndef atomic_add_return_release
#define  atomic_add_return_release(...)					\
	__atomic_op_release(atomic_add_return, __VA_ARGS__)
#endif

#ifndef atomic_add_return
#define  atomic_add_return(...)						\
	__atomic_op_fence(atomic_add_return, __VA_ARGS__)
#endif
#endif /* atomic_add_return_relaxed */
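
/*
 * An illustrative sketch (not part of this header): a statistics counter
 * whose new value is needed but which orders nothing else can use the
 * relaxed variant and skip the full barriers of atomic_add_return():
 *
 *	static atomic_t example_events = ATOMIC_INIT(0);
 *
 *	static int example_count_event(void)
 *	{
 *		return atomic_add_return_relaxed(1, &example_events);
 *	}
 */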

/* atomic_inc_return_relaxed */
#ifndef atomic_inc_return_relaxed
#define  atomic_inc_return_relaxed	atomic_inc_return
#define  atomic_inc_return_acquire	atomic_inc_return
#define  atomic_inc_return_release	atomic_inc_return

#else /* atomic_inc_return_relaxed */

#ifndef atomic_inc_return_acquire
#define  atomic_inc_return_acquire(...)					\
	__atomic_op_acquire(atomic_inc_return, __VA_ARGS__)
#endif

#ifndef atomic_inc_return_release
#define  atomic_inc_return_release(...)					\
	__atomic_op_release(atomic_inc_return, __VA_ARGS__)
#endif

#ifndef atomic_inc_return
#define  atomic_inc_return(...)						\
	__atomic_op_fence(atomic_inc_return, __VA_ARGS__)
#endif
#endif /* atomic_inc_return_relaxed */

/* atomic_sub_return_relaxed */
#ifndef atomic_sub_return_relaxed
#define  atomic_sub_return_relaxed	atomic_sub_return
#define  atomic_sub_return_acquire	atomic_sub_return
#define  atomic_sub_return_release	atomic_sub_return

#else /* atomic_sub_return_relaxed */

#ifndef atomic_sub_return_acquire
#define  atomic_sub_return_acquire(...)					\
	__atomic_op_acquire(atomic_sub_return, __VA_ARGS__)
#endif

#ifndef atomic_sub_return_release
#define  atomic_sub_return_release(...)					\
	__atomic_op_release(atomic_sub_return, __VA_ARGS__)
#endif

#ifndef atomic_sub_return
#define  atomic_sub_return(...)						\
	__atomic_op_fence(atomic_sub_return, __VA_ARGS__)
#endif
#endif /* atomic_sub_return_relaxed */

/* atomic_dec_return_relaxed */
#ifndef atomic_dec_return_relaxed
#define  atomic_dec_return_relaxed	atomic_dec_return
#define  atomic_dec_return_acquire	atomic_dec_return
#define  atomic_dec_return_release	atomic_dec_return

#else /* atomic_dec_return_relaxed */

#ifndef atomic_dec_return_acquire
#define  atomic_dec_return_acquire(...)					\
	__atomic_op_acquire(atomic_dec_return, __VA_ARGS__)
#endif

#ifndef atomic_dec_return_release
#define  atomic_dec_return_release(...)					\
	__atomic_op_release(atomic_dec_return, __VA_ARGS__)
#endif

#ifndef atomic_dec_return
#define  atomic_dec_return(...)						\
	__atomic_op_fence(atomic_dec_return, __VA_ARGS__)
#endif
#endif /* atomic_dec_return_relaxed */

/* atomic_xchg_relaxed */
#ifndef atomic_xchg_relaxed
#define  atomic_xchg_relaxed		atomic_xchg
#define  atomic_xchg_acquire		atomic_xchg
#define  atomic_xchg_release		atomic_xchg

#else /* atomic_xchg_relaxed */

#ifndef atomic_xchg_acquire
#define  atomic_xchg_acquire(...)					\
	__atomic_op_acquire(atomic_xchg, __VA_ARGS__)
#endif

#ifndef atomic_xchg_release
#define  atomic_xchg_release(...)					\
	__atomic_op_release(atomic_xchg, __VA_ARGS__)
#endif

#ifndef atomic_xchg
#define  atomic_xchg(...)						\
	__atomic_op_fence(atomic_xchg, __VA_ARGS__)
#endif
#endif /* atomic_xchg_relaxed */

/* atomic_cmpxchg_relaxed */
#ifndef atomic_cmpxchg_relaxed
#define  atomic_cmpxchg_relaxed		atomic_cmpxchg
#define  atomic_cmpxchg_acquire		atomic_cmpxchg
#define  atomic_cmpxchg_release		atomic_cmpxchg

#else /* atomic_cmpxchg_relaxed */

#ifndef atomic_cmpxchg_acquire
#define  atomic_cmpxchg_acquire(...)					\
	__atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic_cmpxchg_release
#define  atomic_cmpxchg_release(...)					\
	__atomic_op_release(atomic_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic_cmpxchg
#define  atomic_cmpxchg(...)						\
	__atomic_op_fence(atomic_cmpxchg, __VA_ARGS__)
#endif
#endif /* atomic_cmpxchg_relaxed */

/* cmpxchg_relaxed */
#ifndef cmpxchg_relaxed
#define  cmpxchg_relaxed		cmpxchg
#define  cmpxchg_acquire		cmpxchg
#define  cmpxchg_release		cmpxchg

#else /* cmpxchg_relaxed */

#ifndef cmpxchg_acquire
#define  cmpxchg_acquire(...)						\
	__atomic_op_acquire(cmpxchg, __VA_ARGS__)
#endif

#ifndef cmpxchg_release
#define  cmpxchg_release(...)						\
	__atomic_op_release(cmpxchg, __VA_ARGS__)
#endif

#ifndef cmpxchg
#define  cmpxchg(...)							\
	__atomic_op_fence(cmpxchg, __VA_ARGS__)
#endif
#endif /* cmpxchg_relaxed */
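
/*
 * An illustrative sketch (not part of this header): unlike the atomic_*
 * ops, cmpxchg() works on plain scalar lvalues, not just atomic_t, e.g.
 * claiming a slot exactly once:
 *
 *	static unsigned long example_owner;
 *
 *	static int example_claim(unsigned long me)
 *	{
 *		return cmpxchg(&example_owner, 0UL, me) == 0UL;
 *	}
 */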

/* cmpxchg64_relaxed */
#ifndef cmpxchg64_relaxed
#define  cmpxchg64_relaxed		cmpxchg64
#define  cmpxchg64_acquire		cmpxchg64
#define  cmpxchg64_release		cmpxchg64

#else /* cmpxchg64_relaxed */

#ifndef cmpxchg64_acquire
#define  cmpxchg64_acquire(...)						\
	__atomic_op_acquire(cmpxchg64, __VA_ARGS__)
#endif

#ifndef cmpxchg64_release
#define  cmpxchg64_release(...)						\
	__atomic_op_release(cmpxchg64, __VA_ARGS__)
#endif

#ifndef cmpxchg64
#define  cmpxchg64(...)							\
	__atomic_op_fence(cmpxchg64, __VA_ARGS__)
#endif
#endif /* cmpxchg64_relaxed */

/* xchg_relaxed */
#ifndef xchg_relaxed
#define  xchg_relaxed			xchg
#define  xchg_acquire			xchg
#define  xchg_release			xchg

#else /* xchg_relaxed */

#ifndef xchg_acquire
#define  xchg_acquire(...)		__atomic_op_acquire(xchg, __VA_ARGS__)
#endif

#ifndef xchg_release
#define  xchg_release(...)		__atomic_op_release(xchg, __VA_ARGS__)
#endif

#ifndef xchg
#define  xchg(...)			__atomic_op_fence(xchg, __VA_ARGS__)
#endif
#endif /* xchg_relaxed */

/**
 * atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	return __atomic_add_unless(v, a, u) != u;
}

/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
#ifndef atomic_inc_not_zero
#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
#endif
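
/*
 * An illustrative sketch (not part of this header; the example_obj type
 * and its atomic_t refs field are hypothetical): the classic lookup-side
 * refcount pattern, where a reference may only be taken on an object
 * whose count has not already dropped to zero:
 *
 *	static struct example_obj *example_get(struct example_obj *obj)
 *	{
 *		return atomic_inc_not_zero(&obj->refs) ? obj : NULL;
 *	}
 */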

#ifndef atomic_andnot
static inline void atomic_andnot(int i, atomic_t *v)
{
	atomic_and(~i, v);
}
#endif

static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	atomic_andnot(mask, v);
}

static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	atomic_or(mask, v);
}
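
/*
 * An illustrative sketch (not part of this header): atomic_or() and
 * atomic_andnot() are the non-deprecated way to set and clear flag bits:
 *
 *	#define EXAMPLE_FLAG_BUSY	0x1
 *
 *	static void example_set_busy(atomic_t *flags)
 *	{
 *		atomic_or(EXAMPLE_FLAG_BUSY, flags);
 *	}
 *
 *	static void example_clear_busy(atomic_t *flags)
 *	{
 *		atomic_andnot(EXAMPLE_FLAG_BUSY, flags);
 *	}
 */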

/**
 * atomic_inc_not_zero_hint - increment if not zero
 * @v: pointer of type atomic_t
 * @hint: probable value of the atomic before the increment
 *
 * This version of atomic_inc_not_zero() gives a hint of the probable
 * value of the atomic. This helps the processor avoid reading the memory
 * before starting the atomic read/modify/write cycle, lowering the
 * number of bus transactions on some arches.
 *
 * Returns: 0 if the increment was not done, 1 otherwise.
 */
#ifndef atomic_inc_not_zero_hint
static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
{
	int val, c = hint;

	/* sanity test, should be removed by compiler if hint is a constant */
	if (!hint)
		return atomic_inc_not_zero(v);

	do {
		val = atomic_cmpxchg(v, c, c + 1);
		if (val == c)
			return 1;
		c = val;
	} while (c);

	return 0;
}
#endif
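
/*
 * An illustrative sketch (not part of this header): for an object that
 * usually has exactly one reference, passing a hint of 1 lets the first
 * cmpxchg attempt start from the likely value instead of a read:
 *
 *	static int example_get_hinted(atomic_t *refs)
 *	{
 *		return atomic_inc_not_zero_hint(refs, 1);
 *	}
 */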

#ifndef atomic_inc_unless_negative
static inline int atomic_inc_unless_negative(atomic_t *p)
{
	int v, v1;
	for (v = 0; v >= 0; v = v1) {
		v1 = atomic_cmpxchg(p, v, v + 1);
		if (likely(v1 == v))
			return 1;
	}
	return 0;
}
#endif

#ifndef atomic_dec_unless_positive
static inline int atomic_dec_unless_positive(atomic_t *p)
{
	int v, v1;
	for (v = 0; v <= 0; v = v1) {
		v1 = atomic_cmpxchg(p, v, v - 1);
		if (likely(v1 == v))
			return 1;
	}
	return 0;
}
#endif

/*
 * atomic_dec_if_positive - decrement by 1 if old value is positive
 * @v: pointer of type atomic_t
 *
 * Returns the old value of @v minus 1, even if @v was not actually
 * decremented.
 */
#ifndef atomic_dec_if_positive
static inline int atomic_dec_if_positive(atomic_t *v)
{
	int c, old, dec;
	c = atomic_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}
#endif
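
/*
 * An illustrative sketch (not part of this header): a counting-semaphore
 * style "take one token if any remain", where the sign of the result says
 * whether a token was actually taken (old value 1 yields 0, old value 0
 * yields -1):
 *
 *	static int example_take_token(atomic_t *tokens)
 *	{
 *		return atomic_dec_if_positive(tokens) >= 0;
 *	}
 */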

/**
 * atomic_fetch_or - perform *p |= mask and return old value of *p
 * @mask: mask to OR on the atomic_t
 * @p: pointer to atomic_t
 */
#ifndef atomic_fetch_or
static inline int atomic_fetch_or(int mask, atomic_t *p)
{
	int old, val = atomic_read(p);

	for (;;) {
		old = atomic_cmpxchg(p, val, val | mask);
		if (old == val)
			break;
		val = old;
	}

	return old;
}
#endif
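
/*
 * An illustrative sketch (not part of this header): atomic_fetch_or() is
 * handy for "set a flag and learn whether it was already set", e.g. making
 * sure some deferred work is only queued once:
 *
 *	#define EXAMPLE_PENDING	0x1
 *
 *	static int example_mark_pending(atomic_t *state)
 *	{
 *		return !(atomic_fetch_or(EXAMPLE_PENDING, state) & EXAMPLE_PENDING);
 *	}
 *
 * The caller queues the work only when this returns non-zero.
 */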

#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif

#ifndef atomic64_read_acquire
#define  atomic64_read_acquire(v)	smp_load_acquire(&(v)->counter)
#endif

#ifndef atomic64_set_release
#define  atomic64_set_release(v, i)	smp_store_release(&(v)->counter, (i))
#endif

/* atomic64_add_return_relaxed */
#ifndef atomic64_add_return_relaxed
#define  atomic64_add_return_relaxed	atomic64_add_return
#define  atomic64_add_return_acquire	atomic64_add_return
#define  atomic64_add_return_release	atomic64_add_return

#else /* atomic64_add_return_relaxed */

#ifndef atomic64_add_return_acquire
#define  atomic64_add_return_acquire(...)				\
	__atomic_op_acquire(atomic64_add_return, __VA_ARGS__)
#endif

#ifndef atomic64_add_return_release
#define  atomic64_add_return_release(...)				\
	__atomic_op_release(atomic64_add_return, __VA_ARGS__)
#endif

#ifndef atomic64_add_return
#define  atomic64_add_return(...)					\
	__atomic_op_fence(atomic64_add_return, __VA_ARGS__)
#endif
#endif /* atomic64_add_return_relaxed */

/* atomic64_inc_return_relaxed */
#ifndef atomic64_inc_return_relaxed
#define  atomic64_inc_return_relaxed	atomic64_inc_return
#define  atomic64_inc_return_acquire	atomic64_inc_return
#define  atomic64_inc_return_release	atomic64_inc_return

#else /* atomic64_inc_return_relaxed */

#ifndef atomic64_inc_return_acquire
#define  atomic64_inc_return_acquire(...)				\
	__atomic_op_acquire(atomic64_inc_return, __VA_ARGS__)
#endif

#ifndef atomic64_inc_return_release
#define  atomic64_inc_return_release(...)				\
	__atomic_op_release(atomic64_inc_return, __VA_ARGS__)
#endif

#ifndef atomic64_inc_return
#define  atomic64_inc_return(...)					\
	__atomic_op_fence(atomic64_inc_return, __VA_ARGS__)
#endif
#endif /* atomic64_inc_return_relaxed */


/* atomic64_sub_return_relaxed */
#ifndef atomic64_sub_return_relaxed
#define  atomic64_sub_return_relaxed	atomic64_sub_return
#define  atomic64_sub_return_acquire	atomic64_sub_return
#define  atomic64_sub_return_release	atomic64_sub_return

#else /* atomic64_sub_return_relaxed */

#ifndef atomic64_sub_return_acquire
#define  atomic64_sub_return_acquire(...)				\
	__atomic_op_acquire(atomic64_sub_return, __VA_ARGS__)
#endif

#ifndef atomic64_sub_return_release
#define  atomic64_sub_return_release(...)				\
	__atomic_op_release(atomic64_sub_return, __VA_ARGS__)
#endif

#ifndef atomic64_sub_return
#define  atomic64_sub_return(...)					\
	__atomic_op_fence(atomic64_sub_return, __VA_ARGS__)
#endif
#endif /* atomic64_sub_return_relaxed */

/* atomic64_dec_return_relaxed */
#ifndef atomic64_dec_return_relaxed
#define  atomic64_dec_return_relaxed	atomic64_dec_return
#define  atomic64_dec_return_acquire	atomic64_dec_return
#define  atomic64_dec_return_release	atomic64_dec_return

#else /* atomic64_dec_return_relaxed */

#ifndef atomic64_dec_return_acquire
#define  atomic64_dec_return_acquire(...)				\
	__atomic_op_acquire(atomic64_dec_return, __VA_ARGS__)
#endif

#ifndef atomic64_dec_return_release
#define  atomic64_dec_return_release(...)				\
	__atomic_op_release(atomic64_dec_return, __VA_ARGS__)
#endif

#ifndef atomic64_dec_return
#define  atomic64_dec_return(...)					\
	__atomic_op_fence(atomic64_dec_return, __VA_ARGS__)
#endif
#endif /* atomic64_dec_return_relaxed */

/* atomic64_xchg_relaxed */
#ifndef atomic64_xchg_relaxed
#define  atomic64_xchg_relaxed		atomic64_xchg
#define  atomic64_xchg_acquire		atomic64_xchg
#define  atomic64_xchg_release		atomic64_xchg

#else /* atomic64_xchg_relaxed */

#ifndef atomic64_xchg_acquire
#define  atomic64_xchg_acquire(...)					\
	__atomic_op_acquire(atomic64_xchg, __VA_ARGS__)
#endif

#ifndef atomic64_xchg_release
#define  atomic64_xchg_release(...)					\
	__atomic_op_release(atomic64_xchg, __VA_ARGS__)
#endif

#ifndef atomic64_xchg
#define  atomic64_xchg(...)						\
	__atomic_op_fence(atomic64_xchg, __VA_ARGS__)
#endif
#endif /* atomic64_xchg_relaxed */

/* atomic64_cmpxchg_relaxed */
#ifndef atomic64_cmpxchg_relaxed
#define  atomic64_cmpxchg_relaxed	atomic64_cmpxchg
#define  atomic64_cmpxchg_acquire	atomic64_cmpxchg
#define  atomic64_cmpxchg_release	atomic64_cmpxchg

#else /* atomic64_cmpxchg_relaxed */

#ifndef atomic64_cmpxchg_acquire
#define  atomic64_cmpxchg_acquire(...)					\
	__atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic64_cmpxchg_release
#define  atomic64_cmpxchg_release(...)					\
	__atomic_op_release(atomic64_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic64_cmpxchg
#define  atomic64_cmpxchg(...)						\
	__atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
#endif
#endif /* atomic64_cmpxchg_relaxed */

#ifndef atomic64_andnot
static inline void atomic64_andnot(long long i, atomic64_t *v)
{
	atomic64_and(~i, v);
}
#endif

#include <asm-generic/atomic-long.h>

#endif /* _LINUX_ATOMIC_H */