
/arch/ppc/kernel/entry.S

https://bitbucket.org/evzijst/gittest
Assembly | 969 lines | 947 code | 22 blank | 0 comment | 17 complexity | 84f7170c82b8193a84f8cefef91e8e17 MD5
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/offsets.h>
#include <asm/unistd.h>

#undef SHOW_SYSCALLS
#undef SHOW_SYSCALLS_TASK

/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
 */
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)	li r,(x)
#endif

#ifdef CONFIG_BOOKE
#include "head_booke.h"
	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	mtspr	MCHECK_SPRG,r8
	BOOKE_LOAD_MCHECK_STACK
	lwz	r0,GPR10-INT_FRAME_SIZE(r8)
	stw	r0,GPR10(r11)
	lwz	r0,GPR11-INT_FRAME_SIZE(r8)
	stw	r0,GPR11(r11)
	mfspr	r8,MCHECK_SPRG
	b	transfer_to_handler_full

	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	mtspr	CRIT_SPRG,r8
	BOOKE_LOAD_CRIT_STACK
	lwz	r0,GPR10-INT_FRAME_SIZE(r8)
	stw	r0,GPR10(r11)
	lwz	r0,GPR11-INT_FRAME_SIZE(r8)
	stw	r0,GPR11(r11)
	mfspr	r8,CRIT_SPRG
	/* fall through */
#endif

#ifdef CONFIG_40x
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	lwz	r0,crit_r10@l(0)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(0)
	stw	r0,GPR11(r11)
	/* fall through */
#endif

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
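/*
 * Register conventions on entry (set up by the exception prologue in
 * head*.S): r11 points to the exception frame, r12 = SRR0 (interrupted
 * NIP), r9 = SRR1 (interrupted MSR), r10 = the MSR value to run the
 * handler with, and LR points to a two-word block holding the handler
 * address and the address to return to when the handler is done.
 */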
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	SAVE_NVGPRS(r11)
	/* fall through */

	.globl	transfer_to_handler
transfer_to_handler:
	stw	r2,GPR2(r11)
	stw	r12,_NIP(r11)
	stw	r9,_MSR(r11)
	andi.	r2,r9,MSR_PR
	mfctr	r12
	mfspr	r2,SPRN_XER
	stw	r12,_CTR(r11)
	stw	r2,_XER(r11)
	mfspr	r12,SPRN_SPRG3
	addi	r2,r12,-THREAD
	tovirt(r2,r2)			/* set r2 to current */
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   single-step bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IC@h
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif
	b	3f
2:	/* if from kernel, check interrupted DOZE/NAP mode and
         * check for stack overflow
         */
#ifdef CONFIG_6xx
	mfspr	r11,SPRN_HID0
	mtcr	r11
BEGIN_FTR_SECTION
	bt-	8,power_save_6xx_restore	/* Check DOZE */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
BEGIN_FTR_SECTION
	bt-	9,power_save_6xx_restore	/* Check NAP */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
#endif /* CONFIG_6xx */
	.globl transfer_to_handler_cont
transfer_to_handler_cont:
	lwz	r11,THREAD_INFO-THREAD(r12)
	cmplw	r1,r11			/* if r1 <= current->thread_info */
	ble-	stack_ovf		/* then the kernel stack overflowed */
3:
	mflr	r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
	FIX_SRR1(r10,r12)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9
	SYNC
	RFI				/* jump to handler, enable MMU */

/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r11,_end@h
	ori	r11,r11,_end@l
	cmplw	r1,r11
	ble	3b			/* r1 <= &_end is OK */
	SAVE_NVGPRS(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	FIX_SRR1(r10,r12)
	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI

/*
 * Handle a system call.
 */
	.stabs	"arch/ppc/kernel/",N_SO,0,0,0f
	.stabs	"entry.S",N_SO,0,0,0f
0:

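/*
 * System call entry.  On entry, r0 holds the syscall number and r3-r8
 * hold up to six arguments; the result is returned in r3, with an error
 * reported by negating the value and setting the CR0 SO bit (see
 * ret_from_syscall below).
 */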
_GLOBAL(DoSyscall)
	stw	r0,THREAD+LAST_SYSCALL(r2)
	stw	r3,ORIG_GPR3(r1)
	li	r12,0
	stw	r12,RESULT(r1)
	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
	rlwinm	r11,r11,0,4,2
	stw	r11,_CCR(r1)
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall
#endif /* SHOW_SYSCALLS */
	rlwinm	r10,r1,0,0,18	/* current_thread_info() */
	lwz	r11,TI_LOCAL_FLAGS(r10)
	rlwinm	r11,r11,0,~_TIFL_FORCE_NOERROR
	stw	r11,TI_LOCAL_FLAGS(r10)
	lwz	r11,TI_FLAGS(r10)
	andi.	r11,r11,_TIF_SYSCALL_TRACE
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2
	bge-	66f
	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	blrl			/* Call handler */
	.globl	ret_from_syscall
ret_from_syscall:
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall_exit
#endif
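	/* A return value in the range -_LAST_ERRNO..-1 means the syscall
	 * failed: negate it to get the positive errno and set the CR0 SO
	 * bit, unless _TIFL_FORCE_NOERROR was set for this call. */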
	mr	r6,r3
	li	r11,-_LAST_ERRNO
	cmplw	0,r3,r11
	rlwinm	r12,r1,0,0,18	/* current_thread_info() */
	blt+	30f
	lwz	r11,TI_LOCAL_FLAGS(r12)
	andi.	r11,r11,_TIFL_FORCE_NOERROR
	bne	30f
	neg	r3,r3
	lwz	r10,_CCR(r1)	/* Set SO bit in CR */
	oris	r10,r10,0x1000
	stw	r10,_CCR(r1)

	/* disable interrupts so current_thread_info()->flags can't change */
30:	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	SYNC
	MTMSRD(r10)
	lwz	r9,TI_FLAGS(r12)
	andi.	r0,r9,(_TIF_SYSCALL_TRACE|_TIF_SIGPENDING|_TIF_NEED_RESCHED)
	bne-	syscall_exit_work
syscall_exit_cont:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up.  The single
	   step bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IC@h
	bnel-	load_dbcr0
#endif
	stwcx.	r0,0,r1			/* to clear the reservation */
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	mtcr	r5
	lwz	r7,_NIP(r1)
	lwz	r8,_MSR(r1)
	FIX_SRR1(r8, r0)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	SYNC
	RFI

66:	li	r3,-ENOSYS
	b	ret_from_syscall

	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0
	b	ret_from_syscall

/* Traced system call support */
syscall_dotrace:
	SAVE_NVGPRS(r1)
	li	r0,0xc00
	stw	r0,TRAP(r1)
	bl	do_syscall_trace
	lwz	r0,GPR0(r1)	/* Restore original registers */
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	REST_NVGPRS(r1)
	b	syscall_dotrace_cont

syscall_exit_work:
	stw	r6,RESULT(r1)	/* Save result */
	stw	r3,GPR3(r1)	/* Update return value */
	andi.	r0,r9,_TIF_SYSCALL_TRACE
	beq	5f
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* re-enable interrupts */
	lwz	r4,TRAP(r1)
	andi.	r4,r4,1
	beq	4f
	SAVE_NVGPRS(r1)
	li	r4,0xc00
	stw	r4,TRAP(r1)
4:
	bl	do_syscall_trace
	REST_NVGPRS(r1)
2:
	lwz	r3,GPR3(r1)
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	SYNC
	MTMSRD(r10)		/* disable interrupts again */
	rlwinm	r12,r1,0,0,18	/* current_thread_info() */
	lwz	r9,TI_FLAGS(r12)
5:
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne	1f
	lwz	r5,_MSR(r1)
	andi.	r5,r5,MSR_PR
	beq	syscall_exit_cont
	andi.	r0,r9,_TIF_SIGPENDING
	beq	syscall_exit_cont
	b	do_user_signal
1:
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* re-enable interrupts */
	bl	schedule
	b	2b

#ifdef SHOW_SYSCALLS
do_show_syscall:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	lis	r3,7f@ha
	addi	r3,r3,7f@l
	lwz	r4,GPR0(r1)
	lwz	r5,GPR3(r1)
	lwz	r6,GPR4(r1)
	lwz	r7,GPR5(r1)
	lwz	r8,GPR6(r1)
	lwz	r9,GPR7(r1)
	bl	printk
	lis	r3,77f@ha
	addi	r3,r3,77f@l
	lwz	r4,GPR8(r1)
	mr	r5,r2
	bl	printk
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

do_show_syscall_exit:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	stw	r3,RESULT(r1)	/* Save result */
	mr	r4,r3
	lis	r3,79f@ha
	addi	r3,r3,79f@l
	bl	printk
	lwz	r3,RESULT(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

7:	.string	"syscall %d(%x, %x, %x, %x, %x, "
77:	.string	"%x), current=%p\n"
79:	.string	" -> %x\n"
	.align	2,0

#ifdef SHOW_SYSCALLS_TASK
	.data
	.globl	show_syscalls_task
show_syscalls_task:
	.long	-1
	.text
#endif
#endif /* SHOW_SYSCALLS */

/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r13 - r31) before calling
 * the C code.
 */
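/*
 * Clearing the low bit of TRAP(r1) records that the full register set
 * (including r13-r31) is now saved in the exception frame; code such as
 * syscall_exit_work and do_user_signal tests that bit before deciding
 * whether it still needs to call SAVE_NVGPRS.
 */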
	.globl	ppc_sigsuspend
ppc_sigsuspend:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_sigsuspend

	.globl	ppc_rt_sigsuspend
ppc_rt_sigsuspend:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30
	stw	r0,TRAP(r1)
	b	sys_rt_sigsuspend

	.globl	ppc_fork
ppc_fork:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_fork

	.globl	ppc_vfork
ppc_vfork:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_vfork

	.globl	ppc_clone
ppc_clone:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_clone

	.globl	ppc_swapcontext
ppc_swapcontext:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_swapcontext

/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
	.globl	handle_page_fault
handle_page_fault:
	stw	r4,_DAR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpwi	r3,0
	beq+	ret_from_except
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	clrrwi	r0,r0,1
	stw	r0,TRAP(r1)
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except_full

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/ppc/kernel/process.c
 */
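/*
 * The previous task is returned in r3 (the "last" value used by
 * switch_to()), and r2 is updated to point at the new current task.
 */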
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
	beq+	1f
	andc	r11,r11,r0
	MTMSRD(r11)
	isync
1:	stw	r11,_MSR(r1)
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	CLR_TOP32(r0)
	mtspr	SPRN_SPRG3,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
#endif /* CONFIG_SPE */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr

	.globl	sigreturn_exit
sigreturn_exit:
	subi	r1,r3,STACK_FRAME_OVERHEAD
	rlwinm	r12,r1,0,0,18	/* current_thread_info() */
	lwz	r9,TI_FLAGS(r12)
	andi.	r0,r9,_TIF_SYSCALL_TRACE
	bnel-	do_syscall_trace
	/* fall through */

	.globl	ret_from_except_full
ret_from_except_full:
	REST_NVGPRS(r1)
	/* fall through */

	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	MTMSRD(r10)		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r0,r3,MSR_PR
	beq	resume_kernel

user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	rlwinm	r9,r1,0,0,18
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED)
	bne	do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value.  The single
	   step bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IC@h
	bnel-	load_dbcr0
#endif

#ifdef CONFIG_PREEMPT
	b	restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info->preempt_count */
	rlwinm	r9,r1,0,0,18
	lwz	r0,TI_PREEMPT(r9)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore
	lwz	r0,TI_FLAGS(r9)
	andi.	r0,r0,_TIF_NEED_RESCHED
	beq+	restore
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore		/* don't schedule if so */
1:	bl	preempt_schedule_irq
	rlwinm	r9,r1,0,0,18
	lwz	r3,TI_FLAGS(r9)
	andi.	r0,r3,_TIF_NEED_RESCHED
	bne-	1b
#else
resume_kernel:
#endif /* CONFIG_PREEMPT */

	/* interrupts are hard-disabled at this point */
restore:
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

	PPC405_ERR77(0,r1)
	stwcx.	r0,0,r1			/* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	lwz	r9,_MSR(r1)
	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */

	lwz	r10,_CCR(r1)
	lwz	r11,_LINK(r1)
	mtcrf	0xFF,r10
	mtlr	r11

	/*
	 * Once we put values in SRR0 and SRR1, we are in a state
	 * where exceptions are not recoverable, since taking an
	 * exception will trash SRR0 and SRR1.  Therefore we clear the
	 * MSR:RI bit to indicate this.  If we do take an exception,
	 * we can't return to the point of the exception but we
	 * can restart the exception exit path at the label
	 * exc_exit_restart below.  -- paulus
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
	SYNC
	MTMSRD(r10)		/* clear the RI bit */
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r9,_MSR(r1)
	lwz	r12,_NIP(r1)
	FIX_SRR1(r9,r10)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r9
	REST_4GPRS(9, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	SYNC
	RFI

#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
	/*
	 * This is a bit different on 4xx/Book-E because it doesn't have
	 * the RI bit in the MSR.
	 * The TLB miss handler checks if we have interrupted
	 * the exception exit path and restarts it if so
	 * (well maybe one day it will... :).
	 */
	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
	REST_2GPRS(9, r1)
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
exc_exit_start:
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	REST_2GPRS(11, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	PPC405_ERR77_SYNC
	rfi
	b	.			/* prevent prefetch past rfi */

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	REST_NVGPRS(r1)
	lwz	r3,_MSR(r1)
	andi.	r3,r3,MSR_PR
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	bne	user_exc_return

	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

	PPC405_ERR77(0,r1)
	stwcx.	r0,0,r1			/* to clear the reservation */

	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
#ifdef CONFIG_40x
	/* avoid any possible TLB misses here by turning off MSR:DR; we
	 * assume the instructions here are mapped by a pinned TLB entry */
	li	r10,MSR_IR
	mtmsr	r10
	isync
	tophys(r1, r1)
#endif
	lwz	r9,_DEAR(r1)
	lwz	r10,_ESR(r1)
	mtspr	SPRN_DEAR,r9
	mtspr	SPRN_ESR,r10
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
	mtspr	SPRN_CSRR0,r11
	mtspr	SPRN_CSRR1,r12
	lwz	r9,GPR9(r1)
	lwz	r12,GPR12(r1)
	lwz	r10,GPR10(r1)
	lwz	r11,GPR11(r1)
	lwz	r1,GPR1(r1)
	PPC405_ERR77_SYNC
	rfci
	b	.		/* prevent prefetch past rfci */

#ifdef CONFIG_BOOKE
/*
 * Return from a machine check interrupt, similar to a critical
 * interrupt.
 */
	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	REST_NVGPRS(r1)
	lwz	r3,_MSR(r1)
	andi.	r3,r3,MSR_PR
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	bne	user_exc_return

	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

	stwcx.	r0,0,r1			/* to clear the reservation */

	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
	lwz	r9,_DEAR(r1)
	lwz	r10,_ESR(r1)
	mtspr	SPRN_DEAR,r9
	mtspr	SPRN_ESR,r10
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
	mtspr	SPRN_MCSRR0,r11
	mtspr	SPRN_MCSRR1,r12
	lwz	r9,GPR9(r1)
	lwz	r12,GPR12(r1)
	lwz	r10,GPR10(r1)
	lwz	r11,GPR11(r1)
	lwz	r1,GPR1(r1)
	RFMCI
#endif /* CONFIG_BOOKE */

/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.  Note that r0
 * has the dbcr0 value to set upon entry to this.
 */
load_dbcr0:
	mfmsr	r10		/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	mtmsr	r10
	isync
	mfspr	r10,SPRN_DBCR0
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
	stw	r10,0(r11)
	mtspr	SPRN_DBCR0,r0
	lwz	r10,4(r11)
	addi	r10,r10,1
	stw	r10,4(r11)
	li	r11,-1
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
	blr

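/*
 * global_dbcr0 is a two-word block: word 0 holds the saved global DBCR0
 * value, word 1 counts the tasks currently using their own DBCR0
 * (incremented in load_dbcr0 above, decremented in the 4xx/Book-E debug
 * path of transfer_to_handler).
 */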
	.comm	global_dbcr0,8
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	bl	schedule
recheck:
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC
	MTMSRD(r10)		/* disable interrupts */
	rlwinm	r9,r1,0,0,18
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_SIGPENDING
	beq	restore_user
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	lwz	r3,TRAP(r1)
	andi.	r0,r3,1
	beq	2f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,TRAP(r1)
2:	li	r3,0
	addi	r4,r1,STACK_FRAME_OVERHEAD
	bl	do_signal
	REST_NVGPRS(r1)
	b	recheck

/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
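/*
 * If the interrupted address falls inside the exc_exit_restart window,
 * count the event in ee_restarts and restart the exit sequence there;
 * otherwise the context cannot be recovered and the process is killed.
 * (The 601 has no RI bit, so on that CPU we just assume all is well.)
 */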
nonrecoverable:
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
	bge	3f
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
	blt	3f
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	addi	r12,r12,1
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
	blr
3:	/* OK, we can't recover, kill this process */
	/* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
	lwz	r3,TRAP(r1)
	andi.	r0,r3,1
	beq	4f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,TRAP(r1)
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	nonrecoverable_exception
	/* shouldn't return */
	b	4b

	.comm	ee_restarts,4

/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_OF
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
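/*
 * We save the caller's MSR on the stack, enter RTAS through SRR0/SRR1
 * with an MSR that has IR/DR (and EE) cleared so it runs with the MMU
 * off, and come back at 1: in real mode to restore the original MSR.
 * While RTAS is running, SPRG2 holds the physical address of our stack
 * frame; it is cleared again on the way out (see machine_check_in_rtas
 * below).
 */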
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	lis	r4,rtas_data@ha
	lwz	r4,rtas_data@l(r4)
	lis	r6,1f@ha	/* physical return address for rtas */
	addi	r6,r6,1f@l
	tophys(r6,r6)
	tophys(r7,r1)
	lis	r8,rtas_entry@ha
	lwz	r8,rtas_entry@l(r8)
	mfmsr	r9
	stw	r9,8(r1)
	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
	SYNC			/* disable interrupts so SRR0/1 */
	MTMSRD(r0)		/* don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtlr	r6
	CLR_TOP32(r7)
	mtspr	SPRN_SPRG2,r7
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI
1:	tophys(r9,r1)
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	FIX_SRR1(r9,r0)
	addi	r1,r1,INT_FRAME_SIZE
	li	r0,0
	mtspr	SPRN_SPRG2,r0
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI			/* return to caller */

	.globl	machine_check_in_rtas
machine_check_in_rtas:
	twi	31,0,0
	/* XXX load up BATs and panic */

#endif /* CONFIG_PPC_OF */