/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC64_64_H
#define _ASM_X86_ATOMIC64_64_H

#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>

/* The 64-bit atomic type */

#define ATOMIC64_INIT(i) { (i) }

/**
 * arch_atomic64_read - read atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically reads the value of @v.
 * Doesn't imply a read memory barrier.
 */
static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
{
	return __READ_ONCE((v)->counter);
}

/**
 * arch_atomic64_set - set atomic64 variable
 * @v: pointer to type atomic64_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	__WRITE_ONCE(v->counter, i);
}
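
/*
 * Illustrative sketch (not part of the original header): how a caller might
 * combine arch_atomic64_read() and arch_atomic64_set().  The example_* name
 * is hypothetical.
 */
static __always_inline s64 example_read_then_reset(atomic64_t *counter)
{
	/* Plain atomic load; no ordering beyond the load itself. */
	s64 old = arch_atomic64_read(counter);

	/*
	 * Plain atomic store; a concurrent update between the read and the
	 * write can be lost -- use xchg()/cmpxchg() if that matters.
	 */
	arch_atomic64_set(counter, 0);
	return old;
}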

/**
 * arch_atomic64_add - add integer to atomic64 variable
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "addq %1,%0"
		     : "=m" (v->counter)
		     : "er" (i), "m" (v->counter) : "memory");
}

/**
 * arch_atomic64_sub - subtract integer from atomic64 variable
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v.
 */
static __always_inline void arch_atomic64_sub(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "subq %1,%0"
		     : "=m" (v->counter)
		     : "er" (i), "m" (v->counter) : "memory");
}

/**
 * arch_atomic64_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __always_inline bool arch_atomic64_sub_and_test(s64 i, atomic64_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, e, "er", i);
}
#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
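
/*
 * Illustrative sketch (not part of the original header): using
 * arch_atomic64_sub_and_test() to drop @nr references at once and detect
 * when the count reaches zero.  example_put_many() is a hypothetical name.
 */
static __always_inline bool example_put_many(atomic64_t *refs, s64 nr)
{
	/* Returns true exactly when this call brought the count to zero. */
	return arch_atomic64_sub_and_test(nr, refs);
}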

/**
 * arch_atomic64_inc - increment atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1.
 */
static __always_inline void arch_atomic64_inc(atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "incq %0"
		     : "=m" (v->counter)
		     : "m" (v->counter) : "memory");
}
#define arch_atomic64_inc arch_atomic64_inc

/**
 * arch_atomic64_dec - decrement atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1.
 */
static __always_inline void arch_atomic64_dec(atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "decq %0"
		     : "=m" (v->counter)
		     : "m" (v->counter) : "memory");
}
#define arch_atomic64_dec arch_atomic64_dec

/**
 * arch_atomic64_dec_and_test - decrement and test
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __always_inline bool arch_atomic64_dec_and_test(atomic64_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, e);
}
#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
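
/*
 * Illustrative sketch (not part of the original header): the classic release
 * pattern built on arch_atomic64_dec_and_test().  example_put() and the
 * example_release callback are hypothetical; real reference counting would
 * normally use refcount_t rather than a bare atomic64_t.
 */
static __always_inline void example_put(atomic64_t *refs,
					void (*example_release)(atomic64_t *))
{
	/* Only the caller that drops the last reference runs the release. */
	if (arch_atomic64_dec_and_test(refs))
		example_release(refs);
}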

/**
 * arch_atomic64_inc_and_test - increment and test
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __always_inline bool arch_atomic64_inc_and_test(atomic64_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, e);
}
#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test

/**
 * arch_atomic64_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when the
 * result is greater than or equal to zero.
 */
static __always_inline bool arch_atomic64_add_negative(s64 i, atomic64_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, s, "er", i);
}
#define arch_atomic64_add_negative arch_atomic64_add_negative
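
/*
 * Illustrative sketch (not part of the original header): using
 * arch_atomic64_add_negative() to charge @amount against a budget counter
 * and treat a negative result as overcommit.  example_charge() is a
 * hypothetical name.
 */
static __always_inline bool example_charge(atomic64_t *budget, s64 amount)
{
	/* True when the subtraction drove the counter below zero. */
	return arch_atomic64_add_negative(-amount, budget);
}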

/**
 * arch_atomic64_add_return - add and return
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
{
	return i + xadd(&v->counter, i);
}
#define arch_atomic64_add_return arch_atomic64_add_return
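
/*
 * Illustrative sketch (not part of the original header): the add-and-return
 * form is the one to use when the *new* value is needed, e.g. to reserve a
 * block of @count sequence numbers.  example_alloc_seq() is a hypothetical
 * name.
 */
static __always_inline s64 example_alloc_seq(atomic64_t *next_seq, s64 count)
{
	/* Returns the first number of a private [first, first + count) range. */
	return arch_atomic64_add_return(count, next_seq) - count;
}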

static __always_inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
{
	return arch_atomic64_add_return(-i, v);
}
#define arch_atomic64_sub_return arch_atomic64_sub_return

static __always_inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
	return xadd(&v->counter, i);
}
#define arch_atomic64_fetch_add arch_atomic64_fetch_add

static __always_inline s64 arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
{
	return xadd(&v->counter, -i);
}
#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
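
/*
 * Illustrative sketch (not part of the original header): in contrast to
 * add_return(), fetch_add() returns the *old* value, which makes a
 * ticket-style allocator a one-liner.  example_next_ticket() is a
 * hypothetical name.
 */
static __always_inline s64 example_next_ticket(atomic64_t *ticket)
{
	/* Each caller gets a unique, monotonically increasing value. */
	return arch_atomic64_fetch_add(1, ticket);
}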

static __always_inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
	return arch_cmpxchg(&v->counter, old, new);
}
#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg

static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
	return arch_try_cmpxchg(&v->counter, old, new);
}
#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
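
/*
 * Illustrative sketch (not part of the original header): the standard
 * try_cmpxchg() retry loop, here used to record a running maximum.  On
 * failure arch_atomic64_try_cmpxchg() reloads the current value into @old,
 * so no separate re-read is needed.  example_track_max() is a hypothetical
 * name.
 */
static __always_inline void example_track_max(atomic64_t *max, s64 sample)
{
	s64 old = arch_atomic64_read(max);

	do {
		if (sample <= old)
			return;		/* nothing to update */
	} while (!arch_atomic64_try_cmpxchg(max, &old, sample));
}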

static __always_inline s64 arch_atomic64_xchg(atomic64_t *v, s64 new)
{
	return arch_xchg(&v->counter, new);
}
#define arch_atomic64_xchg arch_atomic64_xchg

static __always_inline void arch_atomic64_and(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "andq %1,%0"
		     : "+m" (v->counter)
		     : "er" (i)
		     : "memory");
}

static __always_inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read(v);

	do {
	} while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
	return val;
}
#define arch_atomic64_fetch_and arch_atomic64_fetch_and
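
/*
 * Illustrative sketch (not part of the original header): fetch_and() returns
 * the value *before* the AND, so clearing flag bits and asking "were any of
 * them set?" is a single call.  example_clear_flags() is a hypothetical name.
 */
static __always_inline bool example_clear_flags(atomic64_t *flags, s64 mask)
{
	/* The old value tells us whether any of the cleared bits were set. */
	return (arch_atomic64_fetch_and(~mask, flags) & mask) != 0;
}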

static __always_inline void arch_atomic64_or(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "orq %1,%0"
		     : "+m" (v->counter)
		     : "er" (i)
		     : "memory");
}

static __always_inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read(v);

	do {
	} while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
	return val;
}
#define arch_atomic64_fetch_or arch_atomic64_fetch_or

static __always_inline void arch_atomic64_xor(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "xorq %1,%0"
		     : "+m" (v->counter)
		     : "er" (i)
		     : "memory");
}

static __always_inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read(v);

	do {
	} while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
	return val;
}
#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
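
/*
 * Illustrative sketch (not part of the original header): fetch_or() follows
 * the same old-value convention as fetch_and()/fetch_xor(), which yields a
 * "test and set flag" helper that returns true only for the caller that
 * actually set the bit.  example_test_and_set_flag() is a hypothetical name.
 */
static __always_inline bool example_test_and_set_flag(atomic64_t *flags, s64 bit_mask)
{
	/* Old value had the bit clear iff this caller is the one who set it. */
	return !(arch_atomic64_fetch_or(bit_mask, flags) & bit_mask);
}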

#endif /* _ASM_X86_ATOMIC64_64_H */