
/arch/m32r/kernel/process.c

https://bitbucket.org/evzijst/gittest
/*
 *  linux/arch/m32r/kernel/process.c
 *
 *  Copyright (c) 2001, 2002  Hiroyuki Kondo, Hirokazu Takata,
 *                            Hitoshi Yamamoto
 *  Taken from sh version.
 *    Copyright (C) 1995  Linus Torvalds
 *    SuperH version:  Copyright (C) 1999, 2000  Niibe Yutaka & Kaz Kojima
 */

#undef DEBUG_PROCESS
#ifdef DEBUG_PROCESS
#define DPRINTK(fmt, args...)  printk("%s:%d:%s: " fmt, __FILE__, __LINE__, \
  __FUNCTION__, ##args)
#else
#define DPRINTK(fmt, args...)
#endif

/*
 * This file handles the architecture-dependent parts of process handling..
 */

#include <linux/fs.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/hardirq.h>

#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/elf.h>
#include <asm/m32r.h>

#include <linux/err.h>

static int hlt_counter=0;

/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	return tsk->thread.lr;
}

/*
 * Power management idle function, if any..
 */
void (*pm_idle)(void) = NULL;

void disable_hlt(void)
{
	hlt_counter++;
}

EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
	hlt_counter--;
}

EXPORT_SYMBOL(enable_hlt);

/*
 * We use this if we don't have any better
 * idle routine..
 */
void default_idle(void)
{
	/* M32R_FIXME: Please use "cpu_sleep" mode.  */
	cpu_relax();
}

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle (void)
{
	/* M32R_FIXME */
	cpu_relax();
}

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle (void)
{
	/* endless idle loop with no priority at all */
	while (1) {
		while (!need_resched()) {
			void (*idle)(void) = pm_idle;

			if (!idle)
				idle = default_idle;

			idle();
		}
		schedule();
	}
}

void machine_restart(char *__unused)
{
	printk("Please push reset button!\n");
	while (1)
		cpu_relax();
}

EXPORT_SYMBOL(machine_restart);

void machine_halt(void)
{
	printk("Please push reset button!\n");
	while (1)
		cpu_relax();
}

EXPORT_SYMBOL(machine_halt);

void machine_power_off(void)
{
	/* M32R_FIXME */
}

EXPORT_SYMBOL(machine_power_off);

static int __init idle_setup (char *str)
{
	if (!strncmp(str, "poll", 4)) {
		printk("using poll in idle threads.\n");
		pm_idle = poll_idle;
	} else if (!strncmp(str, "sleep", 5)) {
		printk("using sleep in idle threads.\n");
		pm_idle = default_idle;
	}

	return 1;
}

__setup("idle=", idle_setup);
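
/*
 * Usage note (editor's addition, not in the original file): idle_setup()
 * is hooked to the kernel command line through __setup(), so the idle
 * routine is chosen at boot time, for example:
 *
 *	idle=poll	-> pm_idle = poll_idle
 *	idle=sleep	-> pm_idle = default_idle
 *
 * Any other (or missing) "idle=" value leaves pm_idle NULL, and
 * cpu_idle() then falls back to default_idle().
 */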

void show_regs(struct pt_regs * regs)
{
	printk("\n");
	printk("BPC[%08lx]:PSW[%08lx]:LR [%08lx]:FP [%08lx]\n", \
	  regs->bpc, regs->psw, regs->lr, regs->fp);
	printk("BBPC[%08lx]:BBPSW[%08lx]:SPU[%08lx]:SPI[%08lx]\n", \
	  regs->bbpc, regs->bbpsw, regs->spu, regs->spi);
	printk("R0 [%08lx]:R1 [%08lx]:R2 [%08lx]:R3 [%08lx]\n", \
	  regs->r0, regs->r1, regs->r2, regs->r3);
	printk("R4 [%08lx]:R5 [%08lx]:R6 [%08lx]:R7 [%08lx]\n", \
	  regs->r4, regs->r5, regs->r6, regs->r7);
	printk("R8 [%08lx]:R9 [%08lx]:R10[%08lx]:R11[%08lx]\n", \
	  regs->r8, regs->r9, regs->r10, regs->r11);
	printk("R12[%08lx]\n", \
	  regs->r12);

#if defined(CONFIG_ISA_M32R2) && defined(CONFIG_ISA_DSP_LEVEL2)
	printk("ACC0H[%08lx]:ACC0L[%08lx]\n", \
	  regs->acc0h, regs->acc0l);
	printk("ACC1H[%08lx]:ACC1L[%08lx]\n", \
	  regs->acc1h, regs->acc1l);
#elif defined(CONFIG_ISA_M32R2) || defined(CONFIG_ISA_M32R)
	printk("ACCH[%08lx]:ACCL[%08lx]\n", \
	  regs->acch, regs->accl);
#else
#error unknown isa configuration
#endif
}

/*
 * Create a kernel thread
 */

/*
 * This is the mechanism for creating a new kernel thread.
 *
 * NOTE! Only a kernel-only process (i.e. the swapper or direct
 * descendants who haven't done an "execve()") should use this: it will
 * work within a system call from a "real" process, but the process
 * memory space will not be freed until both the parent and the child
 * have exited.
 */
static void kernel_thread_helper(void *nouse, int (*fn)(void *), void *arg)
{
	fn(arg);
	do_exit(-1);
}

int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof (regs));
	regs.r1 = (unsigned long)fn;
	regs.r2 = (unsigned long)arg;

	regs.bpc = (unsigned long)kernel_thread_helper;

	regs.psw = M32R_PSW_BIE;

	/* Ok, create the new process. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL,
		NULL);
}
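
/*
 * Illustrative usage (editor's addition, not in the original file): a
 * typical in-kernel caller of this era would spawn a worker thread like
 * the sketch below; the function name and flag choice are hypothetical.
 *
 *	static int my_worker(void *unused)
 *	{
 *		daemonize("my_worker");
 *		while (!signal_pending(current))
 *			schedule();
 *		return 0;
 *	}
 *
 *	pid = kernel_thread(my_worker, NULL, CLONE_FS | CLONE_FILES);
 */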

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	/* Nothing to do. */
	DPRINTK("pid = %d\n", current->pid);
}

void flush_thread(void)
{
	DPRINTK("pid = %d\n", current->pid);
	memset(&current->thread.debug_trap, 0, sizeof(struct debug_trap));
}

void release_thread(struct task_struct *dead_task)
{
	/* do nothing */
	DPRINTK("pid = %d\n", dead_task->pid);
}

/* Fill in the fpu structure for a core dump.. */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
	return 0; /* Task didn't use the fpu at all. */
}

int copy_thread(int nr, unsigned long clone_flags, unsigned long spu,
	unsigned long unused, struct task_struct *tsk, struct pt_regs *regs)
{
	struct pt_regs *childregs;
	unsigned long sp = (unsigned long)tsk->thread_info + THREAD_SIZE;
	extern void ret_from_fork(void);

	/* Copy registers */
	sp -= sizeof (struct pt_regs);
	childregs = (struct pt_regs *)sp;
	*childregs = *regs;

	childregs->spu = spu;
	childregs->r0 = 0;	/* Child gets zero as return value */
	regs->r0 = tsk->pid;
	tsk->thread.sp = (unsigned long)childregs;
	tsk->thread.lr = (unsigned long)ret_from_fork;

	return 0;
}
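
/*
 * Layout sketch (editor's addition), as implied by copy_thread() above:
 * the child's pt_regs frame is placed at the very top of its kernel
 * stack, and thread.sp/thread.lr are primed so that the first switch to
 * the child resumes in ret_from_fork() with that frame in place:
 *
 *	(unsigned long)tsk->thread_info + THREAD_SIZE
 *		+----------------------+
 *		|   struct pt_regs     |  <- childregs == tsk->thread.sp
 *		+----------------------+
 *		|   free kernel stack  |
 *		+----------------------+  <- tsk->thread_info
 */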

/*
 * fill in the user structure for a core dump..
 */
void dump_thread(struct pt_regs * regs, struct user * dump)
{
	/* M32R_FIXME */
}

/*
 * Capture the user space registers if the task is not running (in user space)
 */
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
	/* M32R_FIXME */
	return 1;
}

asmlinkage int sys_fork(unsigned long r0, unsigned long r1, unsigned long r2,
	unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6,
	struct pt_regs regs)
{
#ifdef CONFIG_MMU
	return do_fork(SIGCHLD, regs.spu, &regs, 0, NULL, NULL);
#else
	return -EINVAL;
#endif /* CONFIG_MMU */
}

asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
			 unsigned long parent_tidptr,
			 unsigned long child_tidptr,
			 unsigned long r4, unsigned long r5, unsigned long r6,
			 struct pt_regs regs)
{
	if (!newsp)
		newsp = regs.spu;

	return do_fork(clone_flags, newsp, &regs, 0,
		       (int __user *)parent_tidptr, (int __user *)child_tidptr);
}
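
/*
 * Calling-convention note (editor's addition): like the other handlers
 * in this file, sys_clone() receives its arguments in registers r0..r6,
 * so a userspace call such as (illustrative only)
 *
 *	clone(child_fn, child_stack, CLONE_VM | SIGCHLD, arg);
 *
 * arrives here with clone_flags and the new stack pointer in the first
 * two argument slots. A newsp of 0 means "keep the parent's user stack
 * pointer" (regs.spu), which is what fork-style callers rely on.
 */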

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage int sys_vfork(unsigned long r0, unsigned long r1, unsigned long r2,
	unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6,
	struct pt_regs regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.spu, &regs, 0,
			NULL, NULL);
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(char __user *ufilename, char __user * __user *uargv,
			  char __user * __user *uenvp,
			  unsigned long r3, unsigned long r4, unsigned long r5,
			  unsigned long r6, struct pt_regs regs)
{
	int error;
	char *filename;

	filename = getname(ufilename);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;

	error = do_execve(filename, uargv, uenvp, &regs);
	if (error == 0) {
		task_lock(current);
		current->ptrace &= ~PT_DTRACE;
		task_unlock(current);
	}
	putname(filename);
out:
	return error;
}
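
/*
 * Usage note (editor's addition): this is the kernel-side entry for
 * execve(2). A userspace caller reaching it might look like:
 *
 *	char *argv[] = { "/bin/sh", "-c", "echo hello", NULL };
 *	char *envp[] = { NULL };
 *	execve("/bin/sh", argv, envp);
 *
 * On success (error == 0) the saved registers now describe the entry
 * point of the new program, and the PT_DTRACE single-step flag is
 * cleared before returning to it.
 */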

/*
 * These bracket the sleeping functions..
 */
#define first_sched	((unsigned long) scheduling_functions_start_here)
#define last_sched	((unsigned long) scheduling_functions_end_here)

unsigned long get_wchan(struct task_struct *p)
{
	/* M32R_FIXME */
	return (0);
}