
/arch/ia64/kernel/semaphore.c

https://bitbucket.org/evzijst/gittest
/*
 * IA-64 semaphore implementation (derived from x86 version).
 *
 * Copyright (C) 1999-2000, 2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

/*
 * Semaphores are implemented using a two-way counter: The "count"
 * variable is decremented for each process that tries to acquire the
 * semaphore, while the "sleepers" variable is a count of such
 * acquires.
 *
 * Notably, the inline "up()" and "down()" functions can efficiently
 * test if they need to do any extra work (up needs to do something
 * only if count was negative before the increment operation).
 *
 * "sleepers" and the contention routine ordering is protected
 * by the spinlock in the semaphore's waitqueue head.
 *
 * Note that these functions are only called when there is contention
 * on the lock, and as such all this is the "non-critical" part of the
 * whole semaphore business. The critical part is the inline stuff in
 * <asm/semaphore.h> where we want to avoid any extra jumps and calls.
 */
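
/*
 * For orientation, a simplified sketch of those fast paths (this is not
 * the actual inline code from <asm/semaphore.h>, just the shape of it):
 *
 *	down(sem):	atomically decrement sem->count;
 *			if the result is negative, call __down(sem).
 *	up(sem):	atomically increment sem->count;
 *			if the old value was negative, call __up(sem).
 *
 * So the routines in this file only run when a down() actually has to
 * block, or when an up() may have a sleeper to wake.
 */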
#include <linux/sched.h>
#include <linux/init.h>

#include <asm/errno.h>
#include <asm/semaphore.h>

/*
 * Logic:
 *  - Only on a boundary condition do we need to care. When we go
 *    from a negative count to a non-negative, we wake people up.
 *  - When we go from a non-negative count to a negative, we
 *    (a) synchronize with the "sleepers" count and (b) make sure
 *    that we're on the wakeup list before we synchronize so that
 *    we cannot lose wakeup events.
 */
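
/*
 * A worked example of those boundary conditions (two contenders, count
 * starting at 1; the numbers are illustrative, not taken from the code):
 *
 *	A: down()   count 1 -> 0    fast path, A now owns the semaphore
 *	B: down()   count 0 -> -1   negative, so B enters __down() and sleeps
 *	A: up()     count -1 -> 0   crossed the boundary, __up() wakes B
 *	B: back in __down(), atomic_add_negative(sleepers - 1 = 0, count = 0)
 *	   leaves count at 0, which is not negative, so B takes the semaphore.
 */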

void
__up (struct semaphore *sem)
{
	wake_up(&sem->wait);
}

void __sched __down (struct semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	unsigned long flags;

	tsk->state = TASK_UNINTERRUPTIBLE;
	spin_lock_irqsave(&sem->wait.lock, flags);
	add_wait_queue_exclusive_locked(&sem->wait, &wait);

	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock in
		 * the wait_queue_head.
		 */
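		/*
		 * (Illustrative: if we are the only sleeper, sleepers == 1,
		 * so we add 0 and the count still carries only the -1 from
		 * our failed fast-path decrement.  A non-negative result
		 * means an up() has since released the semaphore, so we can
		 * take it without sleeping again.)
		 */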
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irqrestore(&sem->wait.lock, flags);

		schedule();

		spin_lock_irqsave(&sem->wait.lock, flags);
		tsk->state = TASK_UNINTERRUPTIBLE;
	}
	remove_wait_queue_locked(&sem->wait, &wait);
	wake_up_locked(&sem->wait);
	spin_unlock_irqrestore(&sem->wait.lock, flags);
	tsk->state = TASK_RUNNING;
}

int __sched __down_interruptible (struct semaphore * sem)
{
	int retval = 0;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	unsigned long flags;

	tsk->state = TASK_INTERRUPTIBLE;
	spin_lock_irqsave(&sem->wait.lock, flags);
	add_wait_queue_exclusive_locked(&sem->wait, &wait);

	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * With signals pending, this turns into
		 * the trylock failure case - we won't be
		 * sleeping, and we can't get the lock as
		 * it has contention. Just correct the count
		 * and exit.
		 */
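		/*
		 * (Illustrative: if we are the only sleeper, sleepers == 1,
		 * so adding it back cancels the -1 that our fast-path down()
		 * charged to the count, leaving the semaphore as if we had
		 * never tried to take it.)
		 */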
		if (signal_pending(current)) {
			retval = -EINTR;
			sem->sleepers = 0;
			atomic_add(sleepers, &sem->count);
			break;
		}

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock in
		 * wait_queue_head. The "-1" is because we're
		 * still hoping to get the semaphore.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irqrestore(&sem->wait.lock, flags);

		schedule();

		spin_lock_irqsave(&sem->wait.lock, flags);
		tsk->state = TASK_INTERRUPTIBLE;
	}
	remove_wait_queue_locked(&sem->wait, &wait);
	wake_up_locked(&sem->wait);
	spin_unlock_irqrestore(&sem->wait.lock, flags);

	tsk->state = TASK_RUNNING;
	return retval;
}
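
/*
 * Caller-side sketch for the interruptible variant (illustrative; the
 * down_interruptible() wrapper itself lives in <asm/semaphore.h>):
 *
 *	if (down_interruptible(&sem))
 *		return -ERESTARTSYS;	-- a signal got here first
 *	...
 *	up(&sem);
 */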

/*
 * Trylock failed - make sure we correct for having decremented the
 * count.
 */
int
__down_trylock (struct semaphore *sem)
{
	unsigned long flags;
	int sleepers;

	spin_lock_irqsave(&sem->wait.lock, flags);
	sleepers = sem->sleepers + 1;
	sem->sleepers = 0;

	/*
	 * Add "everybody else" and us into it. They aren't
	 * playing, because we own the spinlock in the
	 * wait_queue_head.
	 */
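	/*
	 * (Illustrative: with no other sleepers, sleepers == 0 + 1 == 1,
	 * which exactly cancels the -1 our failed fast-path decrement left
	 * in the count.  A non-negative result may also mean the semaphore
	 * has meanwhile been released, so wake any exclusive waiter.)
	 */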
	if (!atomic_add_negative(sleepers, &sem->count)) {
		wake_up_locked(&sem->wait);
	}

	spin_unlock_irqrestore(&sem->wait.lock, flags);
	return 1;	/* tell the caller the trylock failed */
}
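
/*
 * Putting it together, a hedged sketch of how the API built on these slow
 * paths is typically used (the wrappers come from <asm/semaphore.h>; the
 * identifier my_sem below is made up for illustration):
 *
 *	static DECLARE_MUTEX(my_sem);	-- binary semaphore, count starts at 1
 *
 *	down(&my_sem);			-- may sleep; slow path is __down()
 *	... critical section ...
 *	up(&my_sem);			-- slow path is __up() when someone waits
 *
 *	if (!down_trylock(&my_sem)) {	-- zero return means we got it
 *		...
 *		up(&my_sem);
 *	}
 */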