
/arch/powerpc/include/asm/spinlock.h

https://github.com/aicjofs/android_kernel_lge_v500_20d_f2fs

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * An unsigned int is used for the lock word, as a full 64-bit word is
 * not necessary.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif
#include <asm/asm-compat.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

#define arch_spin_is_locked(x)		((x)->slock != 0)

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	1
#endif

#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
#define CLEAR_IO_SYNC	(get_paca()->io_sync = 0)
#define SYNC_IO		do {						\
				if (unlikely(get_paca()->io_sync)) {	\
					mb();				\
					get_paca()->io_sync = 0;	\
				}					\
			} while (0)
#else
#define CLEAR_IO_SYNC
#define SYNC_IO
#endif
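
/*
 * Illustrative note (editorial sketch, not authoritative): on 64-bit SMP
 * the MMIO accessors set get_paca()->io_sync after a device write, so an
 * unlock of a lock that protected MMIO is upgraded from a plain release
 * barrier to a full mb() by SYNC_IO.  Roughly:
 *
 *	spin_lock(&dev_lock);
 *	writel(val, regs);		// accessor sets paca->io_sync
 *	spin_unlock(&dev_lock);		// SYNC_IO sees io_sync, issues mb()
 */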

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}
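
/*
 * Illustrative note (editorial sketch): a rough C equivalent of the
 * lwarx/stwcx. sequence above, for readability only -- the real code must
 * use the reservation instructions so the update is atomic:
 *
 *	old = lock->slock;
 *	if (old == 0)
 *		lock->slock = LOCK_TOKEN;	// done atomically by stwcx.
 *	return old;				// 0 means we now own the lock
 *
 * The trailing ",1" passed to PPC_LWARX() is the EH hint, telling the
 * core this reservation is being used for a lock acquisition.
 */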

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	return __arch_spin_trylock(lock) == 0;
}

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */

#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR (get_lppaca()->shared_proc)
extern void __spin_yield(arch_spinlock_t *lock);
extern void __rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR */
#define __spin_yield(x)	barrier()
#define __rw_yield(x)	barrier()
#define SHARED_PROCESSOR	0
#endif
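
/*
 * Illustrative note (editorial sketch): on shared-processor LPARs the
 * out-of-line __spin_yield() (arch/powerpc/lib/locks.c) roughly reads the
 * holder's CPU number out of the lock token, checks the holder's lppaca
 * yield count to see whether that virtual processor has been preempted,
 * and if so confers the rest of our timeslice to it with the H_CONFER
 * hypervisor call.  On non-SPLPAR builds it is just barrier(), as above.
 */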

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}
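
/*
 * Illustrative note (editorial sketch): the shape of the loop above is
 * deliberate.  The atomic trylock is attempted once; while the lock is
 * busy we spin on plain loads of lock->slock at reduced SMT thread
 * priority (HMT_low), so we neither hammer the reservation nor steal
 * issue slots from the sibling thread, and we only yield to the
 * hypervisor when running on a shared processor.  HMT_medium() restores
 * normal priority before the atomic operation is retried.
 */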

static inline
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}
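
/*
 * Illustrative note (editorial sketch): arch_spin_lock_flags() is the
 * _irqsave slow path.  While the lock is busy it restores the caller's
 * saved interrupt state ('flags', captured before interrupts were
 * disabled), so pending interrupts can be serviced during the wait, and
 * then re-disables them (via the locally saved 'flags_dis') before
 * retrying the atomic acquisition.
 */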

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	SYNC_IO;
	__asm__ __volatile__("# arch_spin_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	lock->slock = 0;
}
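
/*
 * Illustrative note (editorial sketch): minimal usage of the arch-level
 * API, assuming the __ARCH_SPIN_LOCK_UNLOCKED initializer from
 * asm/spinlock_types.h; 'demo_lock' is a hypothetical name.  Normal
 * kernel code should go through the generic spin_lock()/spin_unlock()
 * wrappers rather than calling these directly.
 *
 *	static arch_spinlock_t demo_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 *
 *	arch_spin_lock(&demo_lock);
 *	// critical section
 *	arch_spin_unlock(&demo_lock);
 */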

#ifdef CONFIG_PPC64
extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
#else
#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
#endif
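
/*
 * Illustrative note (editorial sketch): on 64-bit the unlock_wait helper
 * is out of line (arch/powerpc/lib/locks.c) so that, like
 * arch_spin_lock(), it can drop SMT priority and yield to the hypervisor
 * while waiting for the current holder to release the lock.
 */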

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

#define arch_read_can_lock(rw)		((rw)->lock >= 0)
#define arch_write_can_lock(rw)	(!(rw)->lock)

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif
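
/*
 * Illustrative note (editorial sketch): the rwlock word encodes its state
 * in the sign of the value -- 0 means unlocked, a positive value is the
 * number of active readers, and a negative value (WRLOCK_TOKEN) means
 * write-locked.  On 64-bit the 32-bit word loaded by lwarx must be
 * sign-extended (__DO_SIGN_EXTEND) before the code below can treat it as
 * a signed long.
 */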

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%1,1) "\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}
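
/*
 * Illustrative note (editorial sketch): a rough C equivalent of the
 * read-trylock above, for readability only -- the increment is really
 * performed atomically by the lwarx/stwcx. pair:
 *
 *	new = rw->lock + 1;
 *	if (new > 0)			// no writer holds the lock
 *		rw->lock = new;		// done atomically by stwcx.
 *	return new;			// > 0 means we now hold a read lock
 */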

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}
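
/*
 * Illustrative note (editorial sketch): the write path mirrors
 * __arch_spin_trylock() -- it only succeeds when the word is 0 (no
 * readers and no writer) and then stores the negative WRLOCK_TOKEN,
 * which on 64-bit also records the owning CPU so a shared-processor
 * partition can yield to the right virtual processor.
 */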

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return __arch_read_trylock(rw) > 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __arch_write_trylock(rw) == 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	PPC_RELEASE_BARRIER
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	rw->lock = 0;
}
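
/*
 * Illustrative note (editorial sketch): minimal usage of the arch-level
 * rwlock API, assuming the __ARCH_RW_LOCK_UNLOCKED initializer from
 * asm/spinlock_types.h; 'demo_rwlock' is a hypothetical name.  Normal
 * kernel code should use the generic read_lock()/write_lock() wrappers.
 *
 *	static arch_rwlock_t demo_rwlock = __ARCH_RW_LOCK_UNLOCKED;
 *
 *	arch_read_lock(&demo_rwlock);	// many readers may hold this
 *	// read-side critical section
 *	arch_read_unlock(&demo_rwlock);
 *
 *	arch_write_lock(&demo_rwlock);	// exclusive
 *	// write-side critical section
 *	arch_write_unlock(&demo_rwlock);
 */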

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	__spin_yield(lock)
#define arch_read_relax(lock)	__rw_yield(lock)
#define arch_write_relax(lock)	__rw_yield(lock)

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */