PageRenderTime 31ms CodeModel.GetById 13ms app.highlight 13ms RepoModel.GetById 1ms app.codeStats 0ms

/arch/ppc64/lib/locks.c

https://bitbucket.org/evzijst/gittest
C | 95 lines | 68 code | 8 blank | 19 comment | 14 complexity | c8b2f678b680f784ec52ed60545a8a77 MD5 | raw file
 1/*
 2 * Spin and read/write lock operations.
 3 *
 4 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 5 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 6 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 7 *   Rework to support virtual processors
 8 *
 9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#include <linux/config.h>
16#include <linux/kernel.h>
17#include <linux/spinlock.h>
18#include <linux/module.h>
19#include <linux/stringify.h>
20#include <asm/hvcall.h>
21#include <asm/iSeries/HvCall.h>
22
23/* waiting for a spinlock... */
24#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
25
/*
 * Called while spinning on a contended spinlock: if the holder is a
 * virtual cpu that is not currently dispatched by the hypervisor,
 * confer our remaining timeslice to it so it can release the lock.
 *
 * Lockless protocol: snapshot the lock word, extract the holding cpu,
 * sample its dispatch (yield) count, then re-check that the lock word
 * has not changed before making the hypervisor call.
 */
void __spin_yield(spinlock_t *lock)
{
	unsigned int lock_value, holder_cpu, yield_count;
	struct paca_struct *holder_paca;

	lock_value = lock->lock;
	if (lock_value == 0)
		return;		/* lock is now free; no one to yield to */
	/* the low 16 bits of the lock word encode the holder's cpu number */
	holder_cpu = lock_value & 0xffff;
	BUG_ON(holder_cpu >= NR_CPUS);
	holder_paca = &paca[holder_cpu];
	yield_count = holder_paca->lppaca.yield_count;
	if ((yield_count & 1) == 0)
		return;		/* virtual cpu is currently running */
	/*
	 * Order the yield_count read before re-reading the lock word,
	 * so we never confer on the basis of a stale snapshot.
	 */
	rmb();
	if (lock->lock != lock_value)
		return;		/* something has changed */
	/* confer our cycles to the holder; the hypervisor uses yield_count
	 * to detect that the holder has since been dispatched. */
#ifdef CONFIG_PPC_ISERIES
	HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
		((u64)holder_cpu << 32) | yield_count);
#else
	plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(holder_cpu),
			   yield_count);
#endif
}
51
/*
 * Waiting for a read lock or a write lock on a rwlock...
 * This turns out to be the same for read and write locks, since
 * we only know the holder if it is write-locked.
 *
 * Same lockless confer protocol as __spin_yield(): snapshot, sample
 * the holder's dispatch count, rmb(), re-check, then confer.
 */
void __rw_yield(rwlock_t *rw)
{
	int lock_value;
	unsigned int holder_cpu, yield_count;
	struct paca_struct *holder_paca;

	lock_value = rw->lock;
	if (lock_value >= 0)
		return;		/* no write lock at present */
	/* write-held: low 16 bits of the lock word hold the writer's cpu */
	holder_cpu = lock_value & 0xffff;
	BUG_ON(holder_cpu >= NR_CPUS);
	holder_paca = &paca[holder_cpu];
	yield_count = holder_paca->lppaca.yield_count;
	if ((yield_count & 1) == 0)
		return;		/* virtual cpu is currently running */
	/* order the yield_count read before the lock word re-read */
	rmb();
	if (rw->lock != lock_value)
		return;		/* something has changed */
#ifdef CONFIG_PPC_ISERIES
	HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
		((u64)holder_cpu << 32) | yield_count);
#else
	plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(holder_cpu),
			   yield_count);
#endif
}
83#endif
84
85void spin_unlock_wait(spinlock_t *lock)
86{
87	while (lock->lock) {
88		HMT_low();
89		if (SHARED_PROCESSOR)
90			__spin_yield(lock);
91	}
92	HMT_medium();
93}
94
95EXPORT_SYMBOL(spin_unlock_wait);