
/arch/m68k/kernel/semaphore.c

https://bitbucket.org/evzijst/gittest
/*
 *  Generic semaphore code. Buyer beware. Do your own
 * specific changes in <asm/semaphore-helper.h>
 */

#include <linux/config.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/semaphore-helper.h>

#ifndef CONFIG_RMW_INSNS
spinlock_t semaphore_wake_lock;
#endif

/*
 * Semaphores are implemented using a two-way counter:
 * The "count" variable is decremented for each process
 * that tries to sleep, while the "waking" variable is
 * incremented when the "up()" code goes to wake up waiting
 * processes.
 *
 * Notably, the inline "up()" and "down()" functions can
 * efficiently test if they need to do any extra work (up
 * needs to do something only if count was negative before
 * the increment operation).
 *
 * waking_non_zero() (from asm/semaphore.h) must execute
 * atomically.
 *
 * When __up() is called, the count was negative before
 * incrementing it, and we need to wake up somebody.
 *
 * This routine adds one to the count of processes that need to
 * wake up and exit.  ALL waiting processes actually wake up but
 * only the one that gets to the "waking" field first will gate
 * through and acquire the semaphore.  The others will go back
 * to sleep.
 *
 * Note that these functions are only called when there is
 * contention on the lock, and as such all this is the
 * "non-critical" part of the whole semaphore business. The
 * critical part is the inline stuff in <asm/semaphore.h>
 * where we want to avoid any extra jumps and calls.
 */
void __up(struct semaphore *sem)
{
	wake_one_more(sem);
	wake_up(&sem->wait);
}
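
/*
 * Illustrative sketch only, not part of this file: the "inline stuff"
 * referred to above lives in <asm/semaphore.h> and is written with
 * atomic/asm operations.  Ignoring the races those atomics exist to
 * close, and assuming sem->count is the atomic_t declared there, the
 * fast path behaves roughly like this.  The sketch_*() names are made
 * up for illustration.
 */
static inline void sketch_down(struct semaphore *sem)
{
	/* take the slow path above only if the count went negative */
	if (atomic_dec_return(&sem->count) < 0)
		__down(sem);
}

static inline void sketch_up(struct semaphore *sem)
{
	/* count was negative before the increment: somebody is waiting */
	if (atomic_inc_return(&sem->count) <= 0)
		__up(sem);
}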

/*
 * Perform the "down" function.  Return zero if the semaphore was
 * acquired, negative if the sleeper was signalled out of the function.
 *
 * If called from __down, the return is ignored and the wait loop is
 * not interruptible.  This means that a task waiting on a semaphore
 * using "down()" cannot be killed until someone does an "up()" on
 * the semaphore.
 *
 * If called from __down_interruptible, the return value gets checked
 * upon return.  If the return value is negative then the task continues
 * with the negative value in the return register (it can be tested by
 * the caller).
 *
 * Either form may be used in conjunction with "up()".
 *
 */

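/*
 * Hypothetical caller-side sketch, not part of this file: how the two
 * forms described above are typically paired with "up()".  The
 * example_* names and the -ERESTARTSYS return value are made up for
 * illustration.
 */
static DECLARE_MUTEX(example_sem);	/* semaphore initialised to 1 */

static int example_op(void)
{
	if (down_interruptible(&example_sem))
		return -ERESTARTSYS;	/* a signal arrived before we got the semaphore */
	/* ... critical section ... */
	up(&example_sem);
	return 0;
}

static void example_op_nosig(void)
{
	down(&example_sem);	/* cannot be interrupted; sleeps until up() */
	/* ... critical section ... */
	up(&example_sem);
}
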
#define DOWN_HEAD(task_state)						\
									\
									\
	current->state = (task_state);					\
	add_wait_queue(&sem->wait, &wait);				\
									\
	/*								\
	 * Ok, we're set up.  sem->count is known to be less than zero	\
	 * so we must wait.						\
	 *								\
	 * We can let go the lock for purposes of waiting.		\
	 * We re-acquire it after awaking so as to protect		\
	 * all semaphore operations.					\
	 *								\
	 * If "up()" is called before we call waking_non_zero() then	\
	 * we will catch it right away.  If it is called later then	\
	 * we will have to go through a wakeup cycle to catch it.	\
	 *								\
	 * Multiple waiters contend for the semaphore lock to see	\
	 * who gets to gate through and who has to wait some more.	\
	 */								\
	for (;;) {

#define DOWN_TAIL(task_state)			\
		current->state = (task_state);	\
	}					\
	current->state = TASK_RUNNING;		\
	remove_wait_queue(&sem->wait, &wait);

void __sched __down(struct semaphore * sem)
{
	DECLARE_WAITQUEUE(wait, current);

	DOWN_HEAD(TASK_UNINTERRUPTIBLE)
	if (waking_non_zero(sem))
		break;
	schedule();
	DOWN_TAIL(TASK_UNINTERRUPTIBLE)
}
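
/*
 * For reference, with DOWN_HEAD/DOWN_TAIL expanded the function above
 * is equivalent to:
 *
 *	current->state = TASK_UNINTERRUPTIBLE;
 *	add_wait_queue(&sem->wait, &wait);
 *	for (;;) {
 *		if (waking_non_zero(sem))
 *			break;
 *		schedule();
 *		current->state = TASK_UNINTERRUPTIBLE;
 *	}
 *	current->state = TASK_RUNNING;
 *	remove_wait_queue(&sem->wait, &wait);
 */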

int __sched __down_interruptible(struct semaphore * sem)
{
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	DOWN_HEAD(TASK_INTERRUPTIBLE)

	ret = waking_non_zero_interruptible(sem, current);
	if (ret)
	{
		if (ret == 1)
			/* ret != 0 only if we get interrupted -arca */
			ret = 0;
		break;
	}
	schedule();
	DOWN_TAIL(TASK_INTERRUPTIBLE)
	return ret;
}

int __down_trylock(struct semaphore * sem)
{
	return waking_non_zero_trylock(sem);
}
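
/*
 * Hypothetical caller-side sketch, not part of this file:
 * down_trylock() never sleeps, so it is the form to use where blocking
 * is not allowed.  It returns zero when the semaphore was acquired and
 * non-zero when it was not.  example_try() and the -EBUSY return are
 * made up for illustration.
 */
static int example_try(struct semaphore *sem)
{
	if (down_trylock(sem))
		return -EBUSY;	/* held by somebody else; do not wait */
	/* ... critical section ... */
	up(sem);
	return 0;
}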