
/arch/powerpc/include/asm/atomic.h

https://bitbucket.org/cresqo/cm7-p500-kernel
#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

#include <linux/types.h>

#ifdef __KERNEL__
#include <linux/compiler.h>
#include <asm/synch.h>
#include <asm/asm-compat.h>
#include <asm/system.h>

#define ATOMIC_INIT(i)		{ (i) }

static __inline__ int atomic_read(const atomic_t *v)
{
	int t;

	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}
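
/*
 * atomic_read()/atomic_set() compile to a single load/store and impose
 * no ordering; callers needing ordering must add barriers or use the
 * *_return variants. A minimal usage sketch ('stats' and the helpers
 * are hypothetical, not part of this header):
 *
 *	static atomic_t stats = ATOMIC_INIT(0);
 *
 *	void stats_reset(void)
 *	{
 *		atomic_set(&stats, 0);		// single atomic store
 *	}
 *
 *	int stats_snapshot(void)
 *	{
 *		return atomic_read(&stats);	// single atomic load
 *	}
 */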

static __inline__ void atomic_add(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_add\n\
	add	%0,%2,%0\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}
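
/*
 * The lwarx/stwcx. pair is a load-reserve/store-conditional loop: the
 * stwcx. fails and branches back if another CPU touched the reservation
 * granule in between. Roughly, in C-like pseudocode (load_reserve() and
 * store_conditional() are made-up names for the hardware primitive):
 *
 *	do {
 *		old = load_reserve(&v->counter);		// lwarx
 *		new = old + a;					// add
 *	} while (!store_conditional(&v->counter, new));	// stwcx. / bne- 1b
 *
 * PPC405_ERR77 expands to a dcbt before the stwcx. on 405 cores that
 * need the erratum 77 workaround, and to nothing elsewhere.
 */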

static __inline__ int atomic_add_return(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_RELEASE_BARRIER
"1:	lwarx	%0,0,%2		# atomic_add_return\n\
	add	%0,%1,%0\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}
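
/*
 * Unlike atomic_add(), the value-returning variants order surrounding
 * accesses: PPC_RELEASE_BARRIER (lwsync/sync) before the loop,
 * PPC_ACQUIRE_BARRIER (isync-based) after it, plus a "memory" clobber.
 * Illustrative pairing with a hypothetical producer:
 *
 *	buf[i] = item;				// visible before the count
 *	if (atomic_add_return(1, &nr_items) == 1)
 *		wake_consumer();		// hypothetical
 */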

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

static __inline__ void atomic_sub(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_sub\n\
	subf	%0,%2,%0\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}

static __inline__ int atomic_sub_return(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_RELEASE_BARRIER
"1:	lwarx	%0,0,%2		# atomic_sub_return\n\
	subf	%0,%1,%0\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ int atomic_inc_return(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_RELEASE_BARRIER
"1:	lwarx	%0,0,%1		# atomic_inc_return\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1 \n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
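
/*
 * addic updates the carry bit, which is why "xer" sits in the clobber
 * list of the inc/dec variants. A usage sketch for atomic_inc_and_test()
 * ('pending' and finish() are hypothetical):
 *
 *	static atomic_t pending = ATOMIC_INIT(-1);
 *
 *	if (atomic_inc_and_test(&pending))	// -1 -> 0: we are last
 *		finish();
 */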

static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ int atomic_dec_return(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_RELEASE_BARRIER
"1:	lwarx	%0,0,%1		# atomic_dec_return\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}

#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
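
/*
 * atomic_cmpxchg() returns the value found in *v; the swap happened iff
 * that equals the expected old value. The classic retry loop, as an
 * illustrative sketch (atomic_clamped_inc() is made up):
 *
 *	static void atomic_clamped_inc(atomic_t *v, int max)
 *	{
 *		int old, cur = atomic_read(v);
 *
 *		while (cur < max) {
 *			old = atomic_cmpxchg(v, cur, cur + 1);
 *			if (old == cur)
 *				break;		// our update won
 *			cur = old;		// lost the race, retry
 *		}
 *	}
 */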

/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	PPC_RELEASE_BARRIER
"1:	lwarx	%0,0,%1		# atomic_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq-	2f \n\
	add	%0,%2,%0 \n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ACQUIRE_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t != u;
}
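
/*
 * Note the trailing subf: on the success path %0 holds old + a, so
 * subtracting a recovers the old value and "t != u" is evaluated
 * against what was originally in *v; on the early exit (beq- 2f)
 * t == u and the function returns 0.
 */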

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
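
/*
 * atomic_inc_not_zero() is the usual "take a reference unless the
 * object is already dying" primitive. Illustrative sketch (struct obj
 * and obj_get() are hypothetical):
 *
 *	static struct obj *obj_get(struct obj *o)
 *	{
 *		return atomic_inc_not_zero(&o->refcnt) ? o : NULL;
 *	}
 */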

#define atomic_sub_and_test(a, v)	(atomic_sub_return((a), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_dec_return((v)) == 0)
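
/*
 * atomic_dec_and_test() is the matching put side of the refcount idiom:
 * exactly one caller sees the transition to zero and may free the
 * object (obj_put() continues the hypothetical sketch above):
 *
 *	static void obj_put(struct obj *o)
 *	{
 *		if (atomic_dec_and_test(&o->refcnt))
 *			kfree(o);
 *	}
 */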

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_RELEASE_BARRIER
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	cmpwi	%0,1\n\
	addi	%0,%0,-1\n\
	blt-	2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	"\n\
2:"	: "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
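
/*
 * The "b" constraint keeps t out of r0: in "addi %0,%0,-1" a register
 * field of 0 would encode the literal 0, not r0, corrupting the result.
 * Typical use is a counted-resource grab (free_slots is hypothetical):
 *
 *	if (atomic_dec_if_positive(&free_slots) >= 0)
 *		... got a slot ...
 *	else
 *		... none left; the counter was not written ...
 */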

#define smp_mb__before_atomic_dec()     smp_mb()
#define smp_mb__after_atomic_dec()      smp_mb()
#define smp_mb__before_atomic_inc()     smp_mb()
#define smp_mb__after_atomic_inc()      smp_mb()
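
/*
 * atomic_inc()/atomic_dec() have no implicit ordering on powerpc, so
 * these hooks must be full barriers here; architectures whose atomics
 * are already ordered can define them more cheaply. Illustrative use
 * (update_state() and nr_pending are hypothetical):
 *
 *	update_state();
 *	smp_mb__before_atomic_dec();
 *	atomic_dec(&nr_pending);	// ordered after update_state()
 */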

#ifdef __powerpc64__

#define ATOMIC64_INIT(i)	{ (i) }
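
/*
 * The 64-bit variants below mirror the 32-bit ones, with ld/std and
 * ldarx/stdcx. in place of lwz/stw and lwarx/stwcx.; PPC405_ERR77 is
 * absent because the 405 is a 32-bit core.
 */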

static __inline__ long atomic64_read(const atomic64_t *v)
{
	long t;

	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic64_set(atomic64_t *v, long i)
{
	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

static __inline__ void atomic64_add(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%3		# atomic64_add\n\
	add	%0,%2,%0\n\
	stdcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}

static __inline__ long atomic64_add_return(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	PPC_RELEASE_BARRIER
"1:	ldarx	%0,0,%2		# atomic64_add_return\n\
	add	%0,%1,%0\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

static __inline__ void atomic64_sub(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%3		# atomic64_sub\n\
	subf	%0,%2,%0\n\
	stdcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}

static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	PPC_RELEASE_BARRIER
"1:	ldarx	%0,0,%2		# atomic64_sub_return\n\
	subf	%0,%1,%0\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

static __inline__ void atomic64_inc(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ long atomic64_inc_return(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	PPC_RELEASE_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_inc_return\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%1 \n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}

/*
 * atomic64_inc_and_test - increment and test
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)

static __inline__ void atomic64_dec(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ long atomic64_dec_return(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	PPC_RELEASE_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_dec_return\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}

#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 */
static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	PPC_RELEASE_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}
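
/*
 * Unlike the 32-bit version, this folds the test into "addic." (the
 * record form sets CR0 for the blt-), and the plain "r" constraint is
 * fine because addic, unlike addi, always treats its RA field as a
 * register, so r0 is usable here.
 */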

#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long t;

	__asm__ __volatile__ (
	PPC_RELEASE_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_add_unless\n\
	cmpd	0,%0,%3 \n\
	beq-	2f \n\
	add	%0,%2,%0 \n"
"	stdcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ACQUIRE_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t != u;
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#else  /* __powerpc64__ */
#include <asm-generic/atomic64.h>

#endif /* __powerpc64__ */

#include <asm-generic/atomic-long.h>
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */