/* Atomic operations usable in machine independent code */
#ifndef _LINUX_ATOMIC_H
#define _LINUX_ATOMIC_H
#include <asm/atomic.h>
#include <asm/barrier.h>

#ifndef atomic_read_ctrl
static inline int atomic_read_ctrl(const atomic_t *v)
{
	int val = atomic_read(v);
	smp_read_barrier_depends(); /* Enforce control dependency. */
	return val;
}
#endif
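
/*
 * Illustrative sketch (not part of the original header; the flag and
 * owner variables are made up): the conditional store below is ordered
 * after the load by the control dependency that atomic_read_ctrl()
 * enforces:
 *
 *	if (atomic_read_ctrl(&flag))
 *		WRITE_ONCE(owner, 1);
 */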

/*
 * Relaxed variants of xchg, cmpxchg and some atomic operations.
 *
 * We support four variants:
 *
 * - Fully ordered: The default implementation, no suffix required.
 * - Acquire: Provides ACQUIRE semantics, _acquire suffix.
 * - Release: Provides RELEASE semantics, _release suffix.
 * - Relaxed: No ordering guarantees, _relaxed suffix.
 *
 * For compound atomics performing both a load and a store, ACQUIRE
 * semantics apply only to the load and RELEASE semantics only to the
 * store portion of the operation. Note that a failed cmpxchg_acquire
 * does -not- imply any memory ordering constraints.
 *
 * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
 */
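
/*
 * A minimal usage sketch (not part of the original header; the lock
 * variable and helpers are made up): a toy test-and-set lock built on
 * these variants.  The ACQUIRE applies to the load portion of the
 * successful cmpxchg, and the unlock path only needs RELEASE on the
 * store:
 *
 *	static atomic_t toy_lock = ATOMIC_INIT(0);
 *
 *	static void toy_lock_acquire(void)
 *	{
 *		while (atomic_cmpxchg_acquire(&toy_lock, 0, 1) != 0)
 *			cpu_relax();
 *	}
 *
 *	static void toy_lock_release(void)
 *	{
 *		atomic_set_release(&toy_lock, 0);
 *	}
 */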

#ifndef atomic_read_acquire
#define atomic_read_acquire(v)		smp_load_acquire(&(v)->counter)
#endif

#ifndef atomic_set_release
#define atomic_set_release(v, i)	smp_store_release(&(v)->counter, (i))
#endif

/*
 * The idea here is to build acquire/release variants by adding explicit
 * barriers on top of the relaxed variant. In the case where the relaxed
 * variant is already fully ordered, no additional barriers are needed.
 */
#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	smp_mb__after_atomic();						\
	__ret;								\
})

#define __atomic_op_release(op, args...)				\
({									\
	smp_mb__before_atomic();					\
	op##_relaxed(args);						\
})

#define __atomic_op_fence(op, args...)					\
({									\
	typeof(op##_relaxed(args)) __ret;				\
	smp_mb__before_atomic();					\
	__ret = op##_relaxed(args);					\
	smp_mb__after_atomic();						\
	__ret;								\
})
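
/*
 * Illustration (not part of the original header): if an architecture
 * provides only atomic_add_return_relaxed(), the fallbacks below build
 * the remaining forms from it, so atomic_add_return_acquire(i, v)
 * roughly expands to:
 *
 *	({
 *		int __ret = atomic_add_return_relaxed(i, v);
 *		smp_mb__after_atomic();
 *		__ret;
 *	})
 */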

/* atomic_add_return_relaxed */
#ifndef atomic_add_return_relaxed
#define atomic_add_return_relaxed	atomic_add_return
#define atomic_add_return_acquire	atomic_add_return
#define atomic_add_return_release	atomic_add_return

#else /* atomic_add_return_relaxed */

#ifndef atomic_add_return_acquire
#define atomic_add_return_acquire(...)					\
	__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
#endif

#ifndef atomic_add_return_release
#define atomic_add_return_release(...)					\
	__atomic_op_release(atomic_add_return, __VA_ARGS__)
#endif

#ifndef atomic_add_return
#define atomic_add_return(...)						\
	__atomic_op_fence(atomic_add_return, __VA_ARGS__)
#endif
#endif /* atomic_add_return_relaxed */

/* atomic_sub_return_relaxed */
#ifndef atomic_sub_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return
#define atomic_sub_return_acquire	atomic_sub_return
#define atomic_sub_return_release	atomic_sub_return

#else /* atomic_sub_return_relaxed */

#ifndef atomic_sub_return_acquire
#define atomic_sub_return_acquire(...)					\
	__atomic_op_acquire(atomic_sub_return, __VA_ARGS__)
#endif

#ifndef atomic_sub_return_release
#define atomic_sub_return_release(...)					\
	__atomic_op_release(atomic_sub_return, __VA_ARGS__)
#endif

#ifndef atomic_sub_return
#define atomic_sub_return(...)						\
	__atomic_op_fence(atomic_sub_return, __VA_ARGS__)
#endif
#endif /* atomic_sub_return_relaxed */

/* atomic_xchg_relaxed */
#ifndef atomic_xchg_relaxed
#define atomic_xchg_relaxed		atomic_xchg
#define atomic_xchg_acquire		atomic_xchg
#define atomic_xchg_release		atomic_xchg

#else /* atomic_xchg_relaxed */

#ifndef atomic_xchg_acquire
#define atomic_xchg_acquire(...)					\
	__atomic_op_acquire(atomic_xchg, __VA_ARGS__)
#endif

#ifndef atomic_xchg_release
#define atomic_xchg_release(...)					\
	__atomic_op_release(atomic_xchg, __VA_ARGS__)
#endif

#ifndef atomic_xchg
#define atomic_xchg(...)						\
	__atomic_op_fence(atomic_xchg, __VA_ARGS__)
#endif
#endif /* atomic_xchg_relaxed */

/* atomic_cmpxchg_relaxed */
#ifndef atomic_cmpxchg_relaxed
#define atomic_cmpxchg_relaxed		atomic_cmpxchg
#define atomic_cmpxchg_acquire		atomic_cmpxchg
#define atomic_cmpxchg_release		atomic_cmpxchg

#else /* atomic_cmpxchg_relaxed */

#ifndef atomic_cmpxchg_acquire
#define atomic_cmpxchg_acquire(...)					\
	__atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic_cmpxchg_release
#define atomic_cmpxchg_release(...)					\
	__atomic_op_release(atomic_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic_cmpxchg
#define atomic_cmpxchg(...)						\
	__atomic_op_fence(atomic_cmpxchg, __VA_ARGS__)
#endif
#endif /* atomic_cmpxchg_relaxed */

#ifndef atomic64_read_acquire
#define atomic64_read_acquire(v)	smp_load_acquire(&(v)->counter)
#endif

#ifndef atomic64_set_release
#define atomic64_set_release(v, i)	smp_store_release(&(v)->counter, (i))
#endif

/* atomic64_add_return_relaxed */
#ifndef atomic64_add_return_relaxed
#define atomic64_add_return_relaxed	atomic64_add_return
#define atomic64_add_return_acquire	atomic64_add_return
#define atomic64_add_return_release	atomic64_add_return

#else /* atomic64_add_return_relaxed */

#ifndef atomic64_add_return_acquire
#define atomic64_add_return_acquire(...)				\
	__atomic_op_acquire(atomic64_add_return, __VA_ARGS__)
#endif

#ifndef atomic64_add_return_release
#define atomic64_add_return_release(...)				\
	__atomic_op_release(atomic64_add_return, __VA_ARGS__)
#endif

#ifndef atomic64_add_return
#define atomic64_add_return(...)					\
	__atomic_op_fence(atomic64_add_return, __VA_ARGS__)
#endif
#endif /* atomic64_add_return_relaxed */

/* atomic64_sub_return_relaxed */
#ifndef atomic64_sub_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return
#define atomic64_sub_return_acquire	atomic64_sub_return
#define atomic64_sub_return_release	atomic64_sub_return

#else /* atomic64_sub_return_relaxed */

#ifndef atomic64_sub_return_acquire
#define atomic64_sub_return_acquire(...)				\
	__atomic_op_acquire(atomic64_sub_return, __VA_ARGS__)
#endif

#ifndef atomic64_sub_return_release
#define atomic64_sub_return_release(...)				\
	__atomic_op_release(atomic64_sub_return, __VA_ARGS__)
#endif

#ifndef atomic64_sub_return
#define atomic64_sub_return(...)					\
	__atomic_op_fence(atomic64_sub_return, __VA_ARGS__)
#endif
#endif /* atomic64_sub_return_relaxed */

/* atomic64_xchg_relaxed */
#ifndef atomic64_xchg_relaxed
#define atomic64_xchg_relaxed		atomic64_xchg
#define atomic64_xchg_acquire		atomic64_xchg
#define atomic64_xchg_release		atomic64_xchg

#else /* atomic64_xchg_relaxed */

#ifndef atomic64_xchg_acquire
#define atomic64_xchg_acquire(...)					\
	__atomic_op_acquire(atomic64_xchg, __VA_ARGS__)
#endif

#ifndef atomic64_xchg_release
#define atomic64_xchg_release(...)					\
	__atomic_op_release(atomic64_xchg, __VA_ARGS__)
#endif

#ifndef atomic64_xchg
#define atomic64_xchg(...)						\
	__atomic_op_fence(atomic64_xchg, __VA_ARGS__)
#endif
#endif /* atomic64_xchg_relaxed */

/* atomic64_cmpxchg_relaxed */
#ifndef atomic64_cmpxchg_relaxed
#define atomic64_cmpxchg_relaxed	atomic64_cmpxchg
#define atomic64_cmpxchg_acquire	atomic64_cmpxchg
#define atomic64_cmpxchg_release	atomic64_cmpxchg

#else /* atomic64_cmpxchg_relaxed */

#ifndef atomic64_cmpxchg_acquire
#define atomic64_cmpxchg_acquire(...)					\
	__atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic64_cmpxchg_release
#define atomic64_cmpxchg_release(...)					\
	__atomic_op_release(atomic64_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic64_cmpxchg
#define atomic64_cmpxchg(...)						\
	__atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
#endif
#endif /* atomic64_cmpxchg_relaxed */

/* cmpxchg_relaxed */
#ifndef cmpxchg_relaxed
#define cmpxchg_relaxed			cmpxchg
#define cmpxchg_acquire			cmpxchg
#define cmpxchg_release			cmpxchg

#else /* cmpxchg_relaxed */

#ifndef cmpxchg_acquire
#define cmpxchg_acquire(...)						\
	__atomic_op_acquire(cmpxchg, __VA_ARGS__)
#endif

#ifndef cmpxchg_release
#define cmpxchg_release(...)						\
	__atomic_op_release(cmpxchg, __VA_ARGS__)
#endif

#ifndef cmpxchg
#define cmpxchg(...)							\
	__atomic_op_fence(cmpxchg, __VA_ARGS__)
#endif
#endif /* cmpxchg_relaxed */

/* cmpxchg64_relaxed */
#ifndef cmpxchg64_relaxed
#define cmpxchg64_relaxed		cmpxchg64
#define cmpxchg64_acquire		cmpxchg64
#define cmpxchg64_release		cmpxchg64

#else /* cmpxchg64_relaxed */

#ifndef cmpxchg64_acquire
#define cmpxchg64_acquire(...)						\
	__atomic_op_acquire(cmpxchg64, __VA_ARGS__)
#endif

#ifndef cmpxchg64_release
#define cmpxchg64_release(...)						\
	__atomic_op_release(cmpxchg64, __VA_ARGS__)
#endif

#ifndef cmpxchg64
#define cmpxchg64(...)							\
	__atomic_op_fence(cmpxchg64, __VA_ARGS__)
#endif
#endif /* cmpxchg64_relaxed */

/* xchg_relaxed */
#ifndef xchg_relaxed
#define xchg_relaxed			xchg
#define xchg_acquire			xchg
#define xchg_release			xchg

#else /* xchg_relaxed */

#ifndef xchg_acquire
#define xchg_acquire(...)		__atomic_op_acquire(xchg, __VA_ARGS__)
#endif

#ifndef xchg_release
#define xchg_release(...)		__atomic_op_release(xchg, __VA_ARGS__)
#endif

#ifndef xchg
#define xchg(...)			__atomic_op_fence(xchg, __VA_ARGS__)
#endif
#endif /* xchg_relaxed */

/**
 * atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	return __atomic_add_unless(v, a, u) != u;
}

/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
#ifndef atomic_inc_not_zero
#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
#endif
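
/*
 * Example (illustrative sketch only; the object type and refcount field
 * are made up): the usual lookup pattern of taking a reference only while
 * the object is still live:
 *
 *	struct obj {
 *		atomic_t refcnt;
 *	};
 *
 *	static bool obj_tryget(struct obj *o)
 *	{
 *		return atomic_inc_not_zero(&o->refcnt);
 *	}
 */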

#ifndef atomic_andnot
static inline void atomic_andnot(int i, atomic_t *v)
{
	atomic_and(~i, v);
}
#endif

static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	atomic_andnot(mask, v);
}

static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	atomic_or(mask, v);
}

/**
 * atomic_inc_not_zero_hint - increment if not zero
 * @v: pointer of type atomic_t
 * @hint: probable value of the atomic before the increment
 *
 * This version of atomic_inc_not_zero() gives a hint of the probable
 * value of the atomic. This helps the processor avoid reading the memory
 * location before the atomic read/modify/write cycle, lowering the
 * number of bus transactions on some arches.
 *
 * Returns: 0 if the increment was not done, 1 otherwise.
 */
#ifndef atomic_inc_not_zero_hint
static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
{
	int val, c = hint;

	/* sanity test, should be removed by compiler if hint is a constant */
	if (!hint)
		return atomic_inc_not_zero(v);

	do {
		val = atomic_cmpxchg(v, c, c + 1);
		if (val == c)
			return 1;
		c = val;
	} while (c);

	return 0;
}
#endif
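
/*
 * Example (illustrative sketch only; the object and field are made up):
 * if looked-up objects are expected to hold exactly one reference most of
 * the time, passing that expectation as the hint lets the first cmpxchg
 * attempt target the likely value directly:
 *
 *	if (!atomic_inc_not_zero_hint(&obj->refcnt, 1))
 *		return NULL;
 */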

#ifndef atomic_inc_unless_negative
static inline int atomic_inc_unless_negative(atomic_t *p)
{
	int v, v1;
	for (v = 0; v >= 0; v = v1) {
		v1 = atomic_cmpxchg(p, v, v + 1);
		if (likely(v1 == v))
			return 1;
	}
	return 0;
}
#endif

#ifndef atomic_dec_unless_positive
static inline int atomic_dec_unless_positive(atomic_t *p)
{
	int v, v1;
	for (v = 0; v <= 0; v = v1) {
		v1 = atomic_cmpxchg(p, v, v - 1);
		if (likely(v1 == v))
			return 1;
	}
	return 0;
}
#endif

/*
 * atomic_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
#ifndef atomic_dec_if_positive
static inline int atomic_dec_if_positive(atomic_t *v)
{
	int c, old, dec;
	c = atomic_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}
#endif
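
/*
 * Example (illustrative sketch only; the budget counter is made up):
 * consume one credit only when at least one is available:
 *
 *	if (atomic_dec_if_positive(&budget) < 0)
 *		return -EBUSY;
 */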

#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif

#ifndef atomic64_read_ctrl
static inline long long atomic64_read_ctrl(const atomic64_t *v)
{
	long long val = atomic64_read(v);
	smp_read_barrier_depends(); /* Enforce control dependency. */
	return val;
}
#endif

#ifndef atomic64_andnot
static inline void atomic64_andnot(long long i, atomic64_t *v)
{
	atomic64_and(~i, v);
}
#endif

#include <asm-generic/atomic-long.h>

#endif /* _LINUX_ATOMIC_H */