
/arch/ppc/kernel/head.S

https://bitbucket.org/evzijst/gittest
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 *  This file contains the low-level support and setup for the
 *  PowerPC platform, including trap and interrupt dispatch.
 *  (The PPC 8xx embedded CPUs use head_8xx.S instead.)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/offsets.h>

#ifdef CONFIG_APUS
#include <asm/amigappc.h>
#endif

#ifdef CONFIG_PPC64BRIDGE
#define LOAD_BAT(n, reg, RA, RB)	\
	ld	RA,(n*32)+0(reg);	\
	ld	RB,(n*32)+8(reg);	\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_IBAT##n##L,RB;	\
	ld	RA,(n*32)+16(reg);	\
	ld	RB,(n*32)+24(reg);	\
	mtspr	SPRN_DBAT##n##U,RA;	\
	mtspr	SPRN_DBAT##n##L,RB;	\

#else /* CONFIG_PPC64BRIDGE */

/* The 601 only has IBATs; cr0.eq is set on the 601 when using this macro */
#define LOAD_BAT(n, reg, RA, RB)	\
	/* see the comment for clear_bats() -- Cort */ \
	li	RA,0;			\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_DBAT##n##U,RA;	\
	lwz	RA,(n*16)+0(reg);	\
	lwz	RB,(n*16)+4(reg);	\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_IBAT##n##L,RB;	\
	beq	1f;			\
	lwz	RA,(n*16)+8(reg);	\
	lwz	RB,(n*16)+12(reg);	\
	mtspr	SPRN_DBAT##n##U,RA;	\
	mtspr	SPRN_DBAT##n##L,RB;	\
1:
#endif /* CONFIG_PPC64BRIDGE */
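
/*
 * For reference: LOAD_BAT pulls its values from the BATS array filled in
 * by MMU_init.  On 32-bit each entry is four words at offsets n*16+0/4/8/12
 * (IBATnU, IBATnL, DBATnU, DBATnL); under CONFIG_PPC64BRIDGE each entry is
 * four doublewords at offsets n*32+0/8/16/24.
 */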

	.text
	.stabs	"arch/ppc/kernel/",N_SO,0,0,0f
	.stabs	"head.S",N_SO,0,0,0f
0:
	.globl	_stext
_stext:

/*
 * _start is defined this way because the XCOFF loader in the OpenFirmware
 * on the powermac expects the entry point to be a procedure descriptor.
 */
	.text
	.globl	_start
_start:
	/*
	 * These are here for legacy reasons; the kernel used to
	 * need to look like a COFF function entry for the pmac,
	 * but we're always started by some kind of bootloader now.
	 *  -- Cort
	 */
	nop	/* used by __secondary_hold on prep (mtx) and chrp smp */
	nop	/* used by __secondary_hold on prep (mtx) and chrp smp */
	nop

/* PMAC
 * Enter here with the kernel text, data and bss loaded starting at
 * 0, running with virtual == physical mapping.
 * r5 points to the prom entry point (the client interface handler
 * address).  Address translation is turned on, with the prom
 * managing the hash table.  Interrupts are disabled.  The stack
 * pointer (r1) points to just below the end of the half-meg region
 * from 0x380000 - 0x400000, which is mapped in already.
 *
 * If we are booted from MacOS via BootX, we enter with the kernel
 * image loaded somewhere, and the following values in registers:
 *  r3: 'BooX' (0x426f6f58)
 *  r4: virtual address of boot_infos_t
 *  r5: 0
 *
 * APUS
 *   r3: 'APUS'
 *   r4: physical address of memory base
 *   Linux/m68k style BootInfo structure at &_end.
 *
 * PREP
 * This is jumped to on prep systems right after the kernel is relocated
 * to its proper place in memory by the boot loader.  The expected layout
 * of the regs is:
 *   r3: ptr to residual data
 *   r4: initrd_start, or 0 if there is no initrd
 *   r5: initrd_end (unused if r4 is 0)
 *   r6: start of command line string
 *   r7: end of command line string
 *
 * This just gets a minimal mmu environment set up so we can call
 * start_here() to do the real work.
 * -- Cort
 */

	.globl	__start
__start:
/*
 * We have to do any OF calls before we map ourselves to KERNELBASE,
 * because OF may have I/O devices mapped into that area
 * (particularly on CHRP).
 */
	mr	r31,r3			/* save parameters */
	mr	r30,r4
	mr	r29,r5
	mr	r28,r6
	mr	r27,r7
	li	r24,0			/* cpu # */

/*
 * early_init() does the early machine identification and does
 * the necessary low-level setup and clears the BSS
 *  -- Cort <cort@fsmlabs.com>
 */
	bl	early_init

/*
 * On POWER4, we first need to tweak some CPU configuration registers
 * like real mode cache inhibit or exception base
 */
#ifdef CONFIG_POWER4
	bl	__970_cpu_preinit
#endif /* CONFIG_POWER4 */

#ifdef CONFIG_APUS
/* On APUS the __va/__pa constants need to be set to the correct
 * values before continuing.
 */
	mr	r4,r30
	bl	fix_mem_constants
#endif /* CONFIG_APUS */

/* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains
 * the physical address we are running at, returned by early_init()
 */
	bl	mmu_off
__after_mmu_off:
#ifndef CONFIG_POWER4
	bl	clear_bats
	bl	flush_tlbs

	bl	initial_bats
#if !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT)
	bl	setup_disp_bat
#endif
#else /* CONFIG_POWER4 */
	bl	reloc_offset
	bl	initial_mm_power4
#endif /* CONFIG_POWER4 */

/*
 * Call setup_cpu for CPU 0 and initialize 6xx Idle
 */
	bl	reloc_offset
	li	r24,0			/* cpu# */
	bl	call_setup_cpu		/* Call setup_cpu for this CPU */
#ifdef CONFIG_6xx
	bl	reloc_offset
	bl	init_idle_6xx
#endif /* CONFIG_6xx */
#ifdef CONFIG_POWER4
	bl	reloc_offset
	bl	init_idle_power4
#endif /* CONFIG_POWER4 */


#ifndef CONFIG_APUS
/*
 * We need to run with _start at physical address 0.
 * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
 * the exception vectors at 0 (and therefore this copy
 * overwrites OF's exception vectors with our own).
 * If the MMU is already turned on, we copy stuff to KERNELBASE,
 * otherwise we copy it to 0.
 */
	bl	reloc_offset
	mr	r26,r3
	addis	r4,r3,KERNELBASE@h	/* current address of _start */
	cmpwi	0,r4,0			/* are we already running at 0? */
	bne	relocate_kernel
#endif /* CONFIG_APUS */
/*
 * we now have the 1st 16M of ram mapped with the bats.
 * prep needs the mmu to be turned on here, but pmac already has it on.
 * this shouldn't bother the pmac since it just gets turned on again
 * as we jump to our code at KERNELBASE. -- Cort
 * Actually no, pmac doesn't have it on any more. BootX enters with MMU
 * off, and in other cases, we now turn it off before changing BATs above.
 */
turn_on_mmu:
	mfmsr	r0
	ori	r0,r0,MSR_DR|MSR_IR
	mtspr	SPRN_SRR1,r0
	lis	r0,start_here@h
	ori	r0,r0,start_here@l
	mtspr	SPRN_SRR0,r0
	SYNC
	RFI				/* enables MMU */

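/*
 * (For reference: the SRR0/SRR1 + RFI sequence above is the standard way
 * to change the MSR and jump atomically -- RFI loads the PC from SRR0 and
 * the MSR from SRR1, so translation comes on exactly as we land at
 * start_here.  The same pattern recurs several times below.)
 */
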
/*
 * We need __secondary_hold as a place to hold the other cpus on
 * an SMP machine, even when we are running a UP kernel.
 */
	. = 0xc0			/* for prep bootloader */
	li	r3,1			/* MTX only has 1 cpu */
	.globl	__secondary_hold
__secondary_hold:
	/* tell the master we're here */
	stw	r3,4(0)
#ifdef CONFIG_SMP
100:	lwz	r4,0(0)
	/* wait until we're told to start */
	cmpw	0,r4,r3
	bne	100b
	/* our cpu # was at addr 0 - go */
	mr	r24,r3			/* cpu # */
	b	__secondary_start
#else
	b	.
#endif /* CONFIG_SMP */

/*
 * Exception entry code.  This code runs with address translation
 * turned off, i.e. using physical addresses.
 * We assume sprg3 has the physical address of the current
 * task's thread_struct.
 */
#define EXCEPTION_PROLOG	\
	mtspr	SPRN_SPRG0,r10;	\
	mtspr	SPRN_SPRG1,r11;	\
	mfcr	r10;		\
	EXCEPTION_PROLOG_1;	\
	EXCEPTION_PROLOG_2

#define EXCEPTION_PROLOG_1	\
	mfspr	r11,SPRN_SRR1;		/* check whether user or kernel */ \
	andi.	r11,r11,MSR_PR;	\
	tophys(r11,r1);			/* use tophys(r1) if kernel */ \
	beq	1f;		\
	mfspr	r11,SPRN_SPRG3;	\
	lwz	r11,THREAD_INFO-THREAD(r11);	\
	addi	r11,r11,THREAD_SIZE;	\
	tophys(r11,r11);	\
1:	subi	r11,r11,INT_FRAME_SIZE	/* alloc exc. frame */


#define EXCEPTION_PROLOG_2	\
	CLR_TOP32(r11);		\
	stw	r10,_CCR(r11);		/* save registers */ \
	stw	r12,GPR12(r11);	\
	stw	r9,GPR9(r11);	\
	mfspr	r10,SPRN_SPRG0;	\
	stw	r10,GPR10(r11);	\
	mfspr	r12,SPRN_SPRG1;	\
	stw	r12,GPR11(r11);	\
	mflr	r10;		\
	stw	r10,_LINK(r11);	\
	mfspr	r12,SPRN_SRR0;	\
	mfspr	r9,SPRN_SRR1;	\
	stw	r1,GPR1(r11);	\
	stw	r1,0(r11);	\
	tovirt(r1,r11);			/* set new kernel sp */	\
	li	r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \
	MTMSRD(r10);			/* (except for mach check in rtas) */ \
	stw	r0,GPR0(r11);	\
	SAVE_4GPRS(3, r11);	\
	SAVE_2GPRS(7, r11)

/*
 * Note: code which follows this uses cr0.eq (set if from kernel),
 * r11, r12 (SRR0), and r9 (SRR1).
 *
 * Note2: once we have set r1 we are in a position to take exceptions
 * again, and we could thus set MSR:RI at that point.
 */

/*
 * Exception vectors.
 */
#define EXCEPTION(n, label, hdlr, xfer)		\
	. = n;					\
label:						\
	EXCEPTION_PROLOG;			\
	addi	r3,r1,STACK_FRAME_OVERHEAD;	\
	xfer(n, hdlr)

#define EXC_XFER_TEMPLATE(n, hdlr, trap, copyee, tfer, ret)	\
	li	r10,trap;					\
	stw	r10,TRAP(r11);					\
	li	r10,MSR_KERNEL;					\
	copyee(r10, r9);					\
	bl	tfer;						\
i##n:								\
	.long	hdlr;						\
	.long	ret

#define COPY_EE(d, s)		rlwimi d,s,0,16,16
#define NOCOPY(d, s)

#define EXC_XFER_STD(n, hdlr)		\
	EXC_XFER_TEMPLATE(n, hdlr, n, NOCOPY, transfer_to_handler_full,	\
			  ret_from_except_full)

#define EXC_XFER_LITE(n, hdlr)		\
	EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \
			  ret_from_except)

#define EXC_XFER_EE(n, hdlr)		\
	EXC_XFER_TEMPLATE(n, hdlr, n, COPY_EE, transfer_to_handler_full, \
			  ret_from_except_full)

#define EXC_XFER_EE_LITE(n, hdlr)	\
	EXC_XFER_TEMPLATE(n, hdlr, n+1, COPY_EE, transfer_to_handler, \
			  ret_from_except)
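
/*
 * Worked example (for reference, not assembled separately):
 * EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE) expands to
 * roughly
 *
 *	. = 0x500
 * HardwareInterrupt:
 *	EXCEPTION_PROLOG
 *	addi	r3,r1,STACK_FRAME_OVERHEAD
 *	li	r10,0x501
 *	stw	r10,TRAP(r11)
 *	li	r10,MSR_KERNEL
 *	bl	transfer_to_handler
 * i0x500:	.long	do_IRQ
 *	.long	ret_from_except
 *
 * i.e. the handler and return addresses are emitted as data words right
 * after the bl, where transfer_to_handler picks them up via the link
 * register.  COPY_EE copies MSR_EE from the saved SRR1 (r9) into the new
 * MSR; the i0xNNN labels are also what intercept_table at the end of
 * this file refers to.
 */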

/* System reset */
/* core99 pmac starts the secondary CPUs here by changing the vector, and
   putting it back to what it was (UnknownException) when done.  */
#if defined(CONFIG_GEMINI) && defined(CONFIG_SMP)
	. = 0x100
	b	__secondary_start_gemini
#else
	EXCEPTION(0x100, Reset, UnknownException, EXC_XFER_STD)
#endif

/* Machine check */
/*
 * On CHRP, this is complicated by the fact that we could get a
 * machine check inside RTAS, and we have no guarantee that certain
 * critical registers will have the values we expect.  The set of
 * registers that might have bad values includes all the GPRs
 * and all the BATs.  We indicate that we are in RTAS by putting
 * a non-zero value, the address of the exception frame to use,
 * in SPRG2.  The machine check handler checks SPRG2 and uses its
 * value if it is non-zero.  If we ever needed to free up SPRG2,
 * we could use a field in the thread_info or thread_struct instead.
 * (Other exception handlers assume that r1 is a valid kernel stack
 * pointer when we take an exception from supervisor mode.)
 *	-- paulus.
 */
	. = 0x200
	mtspr	SPRN_SPRG0,r10
	mtspr	SPRN_SPRG1,r11
	mfcr	r10
#ifdef CONFIG_PPC_CHRP
	mfspr	r11,SPRN_SPRG2
	cmpwi	0,r11,0
	bne	7f
#endif /* CONFIG_PPC_CHRP */
	EXCEPTION_PROLOG_1
7:	EXCEPTION_PROLOG_2
	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_CHRP
	mfspr	r4,SPRN_SPRG2
	cmpwi	cr1,r4,0
	bne	cr1,1f
#endif
	EXC_XFER_STD(0x200, MachineCheckException)
#ifdef CONFIG_PPC_CHRP
1:	b	machine_check_in_rtas
#endif

/* Data access exception. */
	. = 0x300
#ifdef CONFIG_PPC64BRIDGE
	b	DataAccess
DataAccessCont:
#else
DataAccess:
	EXCEPTION_PROLOG
#endif /* CONFIG_PPC64BRIDGE */
	mfspr	r10,SPRN_DSISR
	andis.	r0,r10,0xa470		/* weird error? */
	bne	1f			/* if not, try to put a PTE */
	mfspr	r4,SPRN_DAR		/* into the hash table */
	rlwinm	r3,r10,32-15,21,21	/* DSISR_STORE -> _PAGE_RW */
	bl	hash_page
1:	stw	r10,_DSISR(r11)
	mr	r5,r10
	mfspr	r4,SPRN_DAR
	EXC_XFER_EE_LITE(0x300, handle_page_fault)

#ifdef CONFIG_PPC64BRIDGE
/* SLB fault on data access. */
	. = 0x380
	b	DataSegment
#endif /* CONFIG_PPC64BRIDGE */

/* Instruction access exception. */
	. = 0x400
#ifdef CONFIG_PPC64BRIDGE
	b	InstructionAccess
InstructionAccessCont:
#else
InstructionAccess:
	EXCEPTION_PROLOG
#endif /* CONFIG_PPC64BRIDGE */
	andis.	r0,r9,0x4000		/* no pte found? */
	beq	1f			/* if so, try to put a PTE */
	li	r3,0			/* into the hash table */
	mr	r4,r12			/* SRR0 is fault address */
	bl	hash_page
1:	mr	r4,r12
	mr	r5,r9
	EXC_XFER_EE_LITE(0x400, handle_page_fault)

#ifdef CONFIG_PPC64BRIDGE
/* SLB fault on instruction access. */
	. = 0x480
	b	InstructionSegment
#endif /* CONFIG_PPC64BRIDGE */

/* External interrupt */
	EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)

/* Alignment exception */
	. = 0x600
Alignment:
	EXCEPTION_PROLOG
	mfspr	r4,SPRN_DAR
	stw	r4,_DAR(r11)
	mfspr	r5,SPRN_DSISR
	stw	r5,_DSISR(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE(0x600, AlignmentException)

/* Program check exception */
	EXCEPTION(0x700, ProgramCheck, ProgramCheckException, EXC_XFER_STD)

/* Floating-point unavailable */
	. = 0x800
FPUnavailable:
	EXCEPTION_PROLOG
	bne	load_up_fpu		/* if from user, just load it up */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE_LITE(0x800, KernelFP)

/* Decrementer */
	EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)

	EXCEPTION(0xa00, Trap_0a, UnknownException, EXC_XFER_EE)
	EXCEPTION(0xb00, Trap_0b, UnknownException, EXC_XFER_EE)

/* System call */
	. = 0xc00
SystemCall:
	EXCEPTION_PROLOG
	EXC_XFER_EE_LITE(0xc00, DoSyscall)

/* Single step - not used on 601 */
	EXCEPTION(0xd00, SingleStep, SingleStepException, EXC_XFER_STD)
	EXCEPTION(0xe00, Trap_0e, UnknownException, EXC_XFER_EE)

/*
 * The AltiVec unavailable trap is at 0x0f20.  Foo.
 * We effectively remap it to 0x3000.
 * We include an AltiVec unavailable exception vector even if
 * not configured for AltiVec, so that you can't panic a
 * non-AltiVec kernel running on a machine with AltiVec just
 * by executing an AltiVec instruction.
 */
	. = 0xf00
	b	Trap_0f

	. = 0xf20
	b	AltiVecUnavailable

Trap_0f:
	EXCEPTION_PROLOG
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE(0xf00, UnknownException)

/*
 * Handle TLB miss for instruction on 603/603e.
 * Note: we get an alternate set of r0 - r3 to use automatically.
 */
	. = 0x1000
InstructionTLBMiss:
/*
 * r0:	stored ctr
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	mfctr	r0
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_IMISS
	lis	r1,KERNELBASE@h		/* check if kernel address */
	cmplw	0,r3,r1
	mfspr	r2,SPRN_SPRG3
	li	r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */
	lwz	r2,PGDIR(r2)
	blt+	112f
	lis	r2,swapper_pg_dir@ha	/* if kernel address, use */
	addi	r2,r2,swapper_pg_dir@l	/* kernel page table */
	mfspr	r1,SPRN_SRR1		/* and MSR_PR bit from SRR1 */
	rlwinm	r1,r1,32-12,29,29	/* shift MSR_PR to _PAGE_USER posn */
112:	tophys(r2,r2)
	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	InstructionAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r3,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r3		/* check access & ~permission */
	bne-	InstructionAddressInvalid /* return if access not permitted */
	ori	r3,r3,_PAGE_ACCESSED	/* set _PAGE_ACCESSED in pte */
	/*
	 * NOTE! We are assuming this is not an SMP system, otherwise
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	stw	r3,0(r2)		/* update PTE (accessed bit) */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwinm	r1,r3,32-10,31,31	/* _PAGE_RW -> PP lsb */
	rlwinm	r2,r3,32-7,31,31	/* _PAGE_DIRTY -> PP lsb */
	and	r1,r1,r2		/* writable if _RW and _DIRTY */
	rlwimi	r3,r3,32-1,30,30	/* _PAGE_USER -> PP msb */
	rlwimi	r3,r3,32-1,31,31	/* _PAGE_USER -> PP lsb */
	ori	r1,r1,0xe14		/* clear out reserved bits and M */
	andc	r1,r3,r1		/* PP = user? (rw&dirty? 2: 3): 0 */
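	/*
	 * (PP recap, for reference: with Ks=0/Ku=1 in the segment
	 * registers, PP=00 means supervisor read/write with no user
	 * access, PP=10 user read/write, PP=11 user read-only --
	 * hence "PP = user? (rw&dirty? 2: 3): 0" above.)
	 */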
	mtspr	SPRN_RPA,r1
	mfspr	r3,SPRN_IMISS
	tlbli	r3
	mfspr	r3,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r3
	rfi
InstructionAddressInvalid:
	mfspr	r3,SPRN_SRR1
	rlwinm	r1,r3,9,6,6	/* Get load/store bit */

	addis	r1,r1,0x2000
	mtspr	SPRN_DSISR,r1	/* (shouldn't be needed) */
	mtctr	r0		/* Restore CTR */
	andi.	r2,r3,0xFFFF	/* Clear upper bits of SRR1 */
	or	r2,r2,r1
	mtspr	SPRN_SRR1,r2
	mfspr	r1,SPRN_IMISS	/* Get failing address */
	rlwinm.	r2,r2,0,31,31	/* Check for little endian access */
	rlwimi	r2,r2,1,30,30	/* change 1 -> 3 */
	xor	r1,r1,r2
	mtspr	SPRN_DAR,r1	/* Set fault address */
	mfmsr	r0		/* Restore "normal" registers */
	xoris	r0,r0,MSR_TGPR>>16
	mtcrf	0x80,r3		/* Restore CR0 */
	mtmsr	r0
	b	InstructionAccess

/*
 * Handle TLB miss for DATA Load operation on 603/603e
 */
	. = 0x1100
DataLoadTLBMiss:
/*
 * r0:	stored ctr
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	mfctr	r0
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_DMISS
	lis	r1,KERNELBASE@h		/* check if kernel address */
	cmplw	0,r3,r1
	mfspr	r2,SPRN_SPRG3
	li	r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */
	lwz	r2,PGDIR(r2)
	blt+	112f
	lis	r2,swapper_pg_dir@ha	/* if kernel address, use */
	addi	r2,r2,swapper_pg_dir@l	/* kernel page table */
	mfspr	r1,SPRN_SRR1		/* and MSR_PR bit from SRR1 */
	rlwinm	r1,r1,32-12,29,29	/* shift MSR_PR to _PAGE_USER posn */
112:	tophys(r2,r2)
	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	DataAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r3,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r3		/* check access & ~permission */
	bne-	DataAddressInvalid	/* return if access not permitted */
	ori	r3,r3,_PAGE_ACCESSED	/* set _PAGE_ACCESSED in pte */
	/*
	 * NOTE! We are assuming this is not an SMP system, otherwise
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	stw	r3,0(r2)		/* update PTE (accessed bit) */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwinm	r1,r3,32-10,31,31	/* _PAGE_RW -> PP lsb */
	rlwinm	r2,r3,32-7,31,31	/* _PAGE_DIRTY -> PP lsb */
	and	r1,r1,r2		/* writable if _RW and _DIRTY */
	rlwimi	r3,r3,32-1,30,30	/* _PAGE_USER -> PP msb */
	rlwimi	r3,r3,32-1,31,31	/* _PAGE_USER -> PP lsb */
	ori	r1,r1,0xe14		/* clear out reserved bits and M */
	andc	r1,r3,r1		/* PP = user? (rw&dirty? 2: 3): 0 */
	mtspr	SPRN_RPA,r1
	mfspr	r3,SPRN_DMISS
	tlbld	r3
	mfspr	r3,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r3
	rfi
DataAddressInvalid:
	mfspr	r3,SPRN_SRR1
	rlwinm	r1,r3,9,6,6	/* Get load/store bit */
	addis	r1,r1,0x2000
	mtspr	SPRN_DSISR,r1
	mtctr	r0		/* Restore CTR */
	andi.	r2,r3,0xFFFF	/* Clear upper bits of SRR1 */
	mtspr	SPRN_SRR1,r2
	mfspr	r1,SPRN_DMISS	/* Get failing address */
	rlwinm.	r2,r2,0,31,31	/* Check for little endian access */
	beq	20f		/* Jump if big endian */
	xori	r1,r1,3
20:	mtspr	SPRN_DAR,r1	/* Set fault address */
	mfmsr	r0		/* Restore "normal" registers */
	xoris	r0,r0,MSR_TGPR>>16
	mtcrf	0x80,r3		/* Restore CR0 */
	mtmsr	r0
	b	DataAccess

/*
 * Handle TLB miss for DATA Store on 603/603e
 */
	. = 0x1200
DataStoreTLBMiss:
/*
 * r0:	stored ctr
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	mfctr	r0
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_DMISS
	lis	r1,KERNELBASE@h		/* check if kernel address */
	cmplw	0,r3,r1
	mfspr	r2,SPRN_SPRG3
	li	r1,_PAGE_RW|_PAGE_USER|_PAGE_PRESENT /* access flags */
	lwz	r2,PGDIR(r2)
	blt+	112f
	lis	r2,swapper_pg_dir@ha	/* if kernel address, use */
	addi	r2,r2,swapper_pg_dir@l	/* kernel page table */
	mfspr	r1,SPRN_SRR1		/* and MSR_PR bit from SRR1 */
	rlwinm	r1,r1,32-12,29,29	/* shift MSR_PR to _PAGE_USER posn */
112:	tophys(r2,r2)
	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	DataAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r3,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r3		/* check access & ~permission */
	bne-	DataAddressInvalid	/* return if access not permitted */
	ori	r3,r3,_PAGE_ACCESSED|_PAGE_DIRTY
	/*
	 * NOTE! We are assuming this is not an SMP system, otherwise
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	stw	r3,0(r2)		/* update PTE (accessed/dirty bits) */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwimi	r3,r3,32-1,30,30	/* _PAGE_USER -> PP msb */
	li	r1,0xe15		/* clear out reserved bits and M */
	andc	r1,r3,r1		/* PP = user? 2: 0 */
	mtspr	SPRN_RPA,r1
	mfspr	r3,SPRN_DMISS
	tlbld	r3
	mfspr	r3,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r3
	rfi

#ifndef CONFIG_ALTIVEC
#define AltivecAssistException	UnknownException
#endif

	EXCEPTION(0x1300, Trap_13, InstructionBreakpoint, EXC_XFER_EE)
	EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_EE)
	EXCEPTION(0x1500, Trap_15, UnknownException, EXC_XFER_EE)
#ifdef CONFIG_POWER4
	EXCEPTION(0x1600, Trap_16, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x1700, Trap_17, AltivecAssistException, EXC_XFER_EE)
	EXCEPTION(0x1800, Trap_18, TAUException, EXC_XFER_STD)
#else /* !CONFIG_POWER4 */
	EXCEPTION(0x1600, Trap_16, AltivecAssistException, EXC_XFER_EE)
	EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD)
	EXCEPTION(0x1800, Trap_18, UnknownException, EXC_XFER_EE)
#endif /* CONFIG_POWER4 */
	EXCEPTION(0x1900, Trap_19, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x1a00, Trap_1a, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x1b00, Trap_1b, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x1c00, Trap_1c, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x1d00, Trap_1d, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x1e00, Trap_1e, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x1f00, Trap_1f, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_EE)
	EXCEPTION(0x2100, Trap_21, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x2200, Trap_22, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x2300, Trap_23, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x2400, Trap_24, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x2500, Trap_25, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x2600, Trap_26, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x2700, Trap_27, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x2800, Trap_28, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x2900, Trap_29, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x2a00, Trap_2a, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x2b00, Trap_2b, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x2c00, Trap_2c, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x2d00, Trap_2d, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x2e00, Trap_2e, UnknownException, EXC_XFER_EE)
	EXCEPTION(0x2f00, MOLTrampoline, UnknownException, EXC_XFER_EE_LITE)

	.globl mol_trampoline
	.set mol_trampoline, i0x2f00

	. = 0x3000

AltiVecUnavailable:
	EXCEPTION_PROLOG
#ifdef CONFIG_ALTIVEC
	bne	load_up_altivec		/* if from user, just load it up */
#endif /* CONFIG_ALTIVEC */
	EXC_XFER_EE_LITE(0xf20, AltivecUnavailException)

#ifdef CONFIG_PPC64BRIDGE
DataAccess:
	EXCEPTION_PROLOG
	b	DataAccessCont

InstructionAccess:
	EXCEPTION_PROLOG
	b	InstructionAccessCont

DataSegment:
	EXCEPTION_PROLOG
	addi	r3,r1,STACK_FRAME_OVERHEAD
	mfspr	r4,SPRN_DAR
	stw	r4,_DAR(r11)
	EXC_XFER_STD(0x380, UnknownException)

InstructionSegment:
	EXCEPTION_PROLOG
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_STD(0x480, UnknownException)
#endif /* CONFIG_PPC64BRIDGE */

/*
 * This task wants to use the FPU now.
 * On UP, disable FP for the task which had the FPU previously,
 * and save its floating-point registers in its thread_struct.
 * Load up this task's FP registers from its thread_struct,
 * enable the FPU for the current task and return to the task.
 */
load_up_fpu:
	mfmsr	r5
	ori	r5,r5,MSR_FP
#ifdef CONFIG_PPC64BRIDGE
	clrldi	r5,r5,1			/* turn off 64-bit mode */
#endif /* CONFIG_PPC64BRIDGE */
	SYNC
	MTMSRD(r5)			/* enable use of fpu now */
	isync
/*
 * For SMP, we don't do lazy FPU switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_fpu in switch_to.
 */
#ifndef CONFIG_SMP
	tophys(r6,0)			/* get __pa constant */
	addis	r3,r6,last_task_used_math@ha
	lwz	r4,last_task_used_math@l(r3)
	cmpwi	0,r4,0
	beq	1f
	add	r4,r4,r6
	addi	r4,r4,THREAD		/* want last_task_used_math->thread */
	SAVE_32FPRS(0, r4)
	mffs	fr0
	stfd	fr0,THREAD_FPSCR-4(r4)
	lwz	r5,PT_REGS(r4)
	add	r5,r5,r6
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	li	r10,MSR_FP|MSR_FE0|MSR_FE1
	andc	r4,r4,r10		/* disable FP for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* enable use of FP after return */
	mfspr	r5,SPRN_SPRG3		/* current task's THREAD (phys) */
	lwz	r4,THREAD_FPEXC_MODE(r5)
	ori	r9,r9,MSR_FP		/* enable FP for current */
	or	r9,r9,r4
	lfd	fr0,THREAD_FPSCR-4(r5)
	mtfsf	0xff,fr0
	REST_32FPRS(0, r5)
#ifndef CONFIG_SMP
	subi	r4,r5,THREAD
	sub	r4,r4,r6
	stw	r4,last_task_used_math@l(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	/* we haven't used ctr or xer or lr */
	/* fall through to fast_exception_return */

	.globl	fast_exception_return
fast_exception_return:
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	1f			/* if not, we've got problems */
2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	REST_GPR(10, r11)
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	SYNC
	RFI

/* check if the exception happened in a restartable section */
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	cmplw	r12,r3
	bge	3f
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	cmplw	r12,r4
	blt	3f
	lis	r3,fee_restarts@ha
	tophys(r3,r3)
	lwz	r5,fee_restarts@l(r3)
	addi	r5,r5,1
	stw	r5,fee_restarts@l(r3)
	mr	r12,r4		/* restart at exc_exit_restart */
	b	2b

	.comm	fee_restarts,4

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
BEGIN_FTR_SECTION
	b	2b
END_FTR_SECTION_IFSET(CPU_FTR_601)
	li	r10,-1
	stw	r10,TRAP(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r10,MSR_KERNEL
	bl	transfer_to_handler_full
	.long	nonrecoverable_exception
	.long	ret_from_except

/*
 * FP unavailable trap from kernel - print a message, but let
 * the task use FP in the kernel until it returns to user mode.
 */
KernelFP:
	lwz	r3,_MSR(r1)
	ori	r3,r3,MSR_FP
	stw	r3,_MSR(r1)		/* enable use of FP after return */
	lis	r3,86f@h
	ori	r3,r3,86f@l
	mr	r4,r2			/* current */
	lwz	r5,_NIP(r1)
	bl	printk
	b	ret_from_except
86:	.string	"floating point used in kernel (task=%p, pc=%x)\n"
	.align	4,0

#ifdef CONFIG_ALTIVEC
/* Note that the AltiVec support is closely modeled after the FP
 * support.  Changes to one are likely to be applicable to the
 * other!  */
load_up_altivec:
/*
 * Disable AltiVec for the task which had AltiVec previously,
 * and save its AltiVec registers in its thread_struct.
 * Enables AltiVec for use in the kernel on return.
 * On SMP we know the AltiVec units are free, since we give them up on
 * every switch.  -- Kumar
 */
	mfmsr	r5
	oris	r5,r5,MSR_VEC@h
	MTMSRD(r5)			/* enable use of AltiVec now */
	isync
/*
 * For SMP, we don't do lazy AltiVec switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_altivec in switch_to.
 */
#ifndef CONFIG_SMP
	tophys(r6,0)
	addis	r3,r6,last_task_used_altivec@ha
	lwz	r4,last_task_used_altivec@l(r3)
	cmpwi	0,r4,0
	beq	1f
	add	r4,r4,r6
	addi	r4,r4,THREAD	/* want THREAD of last_task_used_altivec */
	SAVE_32VR(0,r10,r4)
	mfvscr	vr0
	li	r10,THREAD_VSCR
	stvx	vr0,r10,r4
	lwz	r5,PT_REGS(r4)
	add	r5,r5,r6
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r10,MSR_VEC@h
	andc	r4,r4,r10	/* disable altivec for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* enable use of AltiVec after return */
	oris	r9,r9,MSR_VEC@h
	mfspr	r5,SPRN_SPRG3		/* current task's THREAD (phys) */
	li	r4,1
	li	r10,THREAD_VSCR
	stw	r4,THREAD_USED_VR(r5)
	lvx	vr0,r10,r5
	mtvscr	vr0
	REST_32VR(0,r10,r5)
#ifndef CONFIG_SMP
	subi	r4,r5,THREAD
	sub	r4,r4,r6
	stw	r4,last_task_used_altivec@l(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	/* we haven't used ctr or xer or lr */
	b	fast_exception_return

/*
 * AltiVec unavailable trap from kernel - print a message, but let
 * the task use AltiVec in the kernel until it returns to user mode.
 */
KernelAltiVec:
	lwz	r3,_MSR(r1)
	oris	r3,r3,MSR_VEC@h
	stw	r3,_MSR(r1)	/* enable use of AltiVec after return */
	lis	r3,87f@h
	ori	r3,r3,87f@l
	mr	r4,r2		/* current */
	lwz	r5,_NIP(r1)
	bl	printk
	b	ret_from_except
87:	.string	"AltiVec used in kernel  (task=%p, pc=%x)  \n"
	.align	4,0

/*
 * giveup_altivec(tsk)
 * Disable AltiVec for the task given as the argument,
 * and save the AltiVec registers in its thread_struct.
 * Enables AltiVec for use in the kernel on return.
 */

	.globl	giveup_altivec
giveup_altivec:
	mfmsr	r5
	oris	r5,r5,MSR_VEC@h
	SYNC
	MTMSRD(r5)			/* enable use of AltiVec now */
	isync
	cmpwi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	lwz	r5,PT_REGS(r3)
	cmpwi	0,r5,0
	SAVE_32VR(0, r4, r3)
	mfvscr	vr0
	li	r4,THREAD_VSCR
	stvx	vr0,r4,r3
	beq	1f
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r3,MSR_VEC@h
	andc	r4,r4,r3		/* disable AltiVec for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	lis	r4,last_task_used_altivec@ha
	stw	r5,last_task_used_altivec@l(r4)
#endif /* CONFIG_SMP */
	blr
#endif /* CONFIG_ALTIVEC */

/*
 * giveup_fpu(tsk)
 * Disable FP for the task given as the argument,
 * and save the floating-point registers in its thread_struct.
 * Enables the FPU for use in the kernel on return.
 */
	.globl	giveup_fpu
giveup_fpu:
	mfmsr	r5
	ori	r5,r5,MSR_FP
	SYNC_601
	ISYNC_601
	MTMSRD(r5)			/* enable use of fpu now */
	SYNC_601
	isync
	cmpwi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	lwz	r5,PT_REGS(r3)
	cmpwi	0,r5,0
	SAVE_32FPRS(0, r3)
	mffs	fr0
	stfd	fr0,THREAD_FPSCR-4(r3)
	beq	1f
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	li	r3,MSR_FP|MSR_FE0|MSR_FE1
	andc	r4,r4,r3		/* disable FP for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	lis	r4,last_task_used_math@ha
	stw	r5,last_task_used_math@l(r4)
#endif /* CONFIG_SMP */
	blr

/*
 * This code is jumped to from the startup code to copy
 * the kernel image to physical address 0.
 */
relocate_kernel:
	addis	r9,r26,klimit@ha	/* fetch klimit */
	lwz	r25,klimit@l(r9)
	addis	r25,r25,-KERNELBASE@h
	li	r3,0			/* Destination base address */
	li	r6,0			/* Destination offset */
	li	r5,0x4000		/* # bytes of memory to copy */
	bl	copy_and_flush		/* copy the first 0x4000 bytes */
	addi	r0,r3,4f@l		/* jump to the address of 4f */
	mtctr	r0			/* in copy and do the rest. */
	bctr				/* jump to the copy */
4:	mr	r5,r25
	bl	copy_and_flush		/* copy the rest */
	b	turn_on_mmu

/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 */
copy_and_flush:
	addi	r5,r5,-4
	addi	r6,r6,-4
4:	li	r0,L1_CACHE_LINE_SIZE/4
	mtctr	r0
3:	addi	r6,r6,4			/* copy a cache line */
	lwzx	r0,r6,r4
	stwx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory */
	sync
	icbi	r6,r3			/* flush the icache line */
	cmplw	0,r6,r5
	blt	4b
	sync				/* additional sync needed on g4 */
	isync
	addi	r5,r5,4
	addi	r6,r6,4
	blr
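
/*
 * (For reference: after each cache line is copied, dcbst pushes the new
 * data from the d-cache to memory and icbi invalidates any stale i-cache
 * copy, so the relocated kernel executes fresh instructions; the final
 * sync/isync order these operations before we return.)
 */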

#ifdef CONFIG_APUS
/*
 * On APUS the physical base address of the kernel is not known at compile
 * time, which means the __pa/__va constants used are incorrect. In the
 * __init section is recorded the virtual addresses of instructions using
 * these constants, so all that has to be done is fix these before
 * continuing the kernel boot.
 *
 * r4 = The physical address of the kernel base.
 */
fix_mem_constants:
	mr	r10,r4
	addis	r10,r10,-KERNELBASE@h	/* virt_to_phys constant */
	neg	r11,r10			/* phys_to_virt constant */

	lis	r12,__vtop_table_begin@h
	ori	r12,r12,__vtop_table_begin@l
	add	r12,r12,r10		/* table begin phys address */
	lis	r13,__vtop_table_end@h
	ori	r13,r13,__vtop_table_end@l
	add	r13,r13,r10		/* table end phys address */
	subi	r12,r12,4
	subi	r13,r13,4
1:	lwzu	r14,4(r12)		/* virt address of instruction */
	add	r14,r14,r10		/* phys address of instruction */
	lwz	r15,0(r14)		/* instruction, now insert top */
	rlwimi	r15,r10,16,16,31	/* half of vp const in low half */
	stw	r15,0(r14)		/* of instruction and restore. */
	dcbst	r0,r14			/* write it to memory */
	sync
	icbi	r0,r14			/* flush the icache line */
	cmpw	r12,r13
	bne	1b
	sync				/* additional sync needed on g4 */
	isync

/*
 * Map the memory where the exception handlers will
 * be copied to when hash constants have been patched.
 */
#ifdef CONFIG_APUS_FAST_EXCEPT
	lis	r8,0xfff0
#else
	lis	r8,0
#endif
	ori	r8,r8,0x2		/* 128KB, supervisor */
	mtspr	SPRN_DBAT3U,r8
	mtspr	SPRN_DBAT3L,r8

	lis	r12,__ptov_table_begin@h
	ori	r12,r12,__ptov_table_begin@l
	add	r12,r12,r10		/* table begin phys address */
	lis	r13,__ptov_table_end@h
	ori	r13,r13,__ptov_table_end@l
	add	r13,r13,r10		/* table end phys address */
	subi	r12,r12,4
	subi	r13,r13,4
1:	lwzu	r14,4(r12)		/* virt address of instruction */
	add	r14,r14,r10		/* phys address of instruction */
	lwz	r15,0(r14)		/* instruction, now insert top */
	rlwimi	r15,r11,16,16,31	/* half of pv const in low half*/
	stw	r15,0(r14)		/* of instruction and restore. */
	dcbst	r0,r14			/* write it to memory */
	sync
	icbi	r0,r14			/* flush the icache line */
	cmpw	r12,r13
	bne	1b

	sync				/* additional sync needed on g4 */
	isync				/* No speculative loading until now */
	blr

/***********************************************************************
 *  Please note that on APUS the exception handlers are located at the
 *  physical address 0xfff0000. For this reason, the exception handlers
 *  cannot use relative branches to access the code below.
 ***********************************************************************/
#endif /* CONFIG_APUS */

#ifdef CONFIG_SMP
#ifdef CONFIG_GEMINI
	.globl	__secondary_start_gemini
__secondary_start_gemini:
	mfspr	r4,SPRN_HID0
	ori	r4,r4,HID0_ICFI
	li	r3,0
	ori	r3,r3,HID0_ICE
	andc	r4,r4,r3
	mtspr	SPRN_HID0,r4
	sync
	bl	gemini_prom_init
	b	__secondary_start
#endif /* CONFIG_GEMINI */
	.globl	__secondary_start_psurge
__secondary_start_psurge:
	li	r24,1			/* cpu # */
	b	__secondary_start_psurge99
	.globl	__secondary_start_psurge2
__secondary_start_psurge2:
	li	r24,2			/* cpu # */
	b	__secondary_start_psurge99
	.globl	__secondary_start_psurge3
__secondary_start_psurge3:
	li	r24,3			/* cpu # */
	b	__secondary_start_psurge99
__secondary_start_psurge99:
	/* we come in here with IR=0 and DR=1, and DBAT 0
	   set to map the 0xf0000000 - 0xffffffff region */
	mfmsr	r0
	rlwinm	r0,r0,0,28,26		/* clear DR (0x10) */
	SYNC
	mtmsr	r0
	isync

	.globl	__secondary_start
__secondary_start:
#ifdef CONFIG_PPC64BRIDGE
	mfmsr	r0
	clrldi	r0,r0,1			/* make sure it's in 32-bit mode */
	SYNC
	MTMSRD(r0)
	isync
#endif
	/* Copy some CPU settings from CPU 0 */
	bl	__restore_cpu_setup

	lis	r3,-KERNELBASE@h
	mr	r4,r24
	bl	identify_cpu
	bl	call_setup_cpu		/* Call setup_cpu for this CPU */
#ifdef CONFIG_6xx
	lis	r3,-KERNELBASE@h
	bl	init_idle_6xx
#endif /* CONFIG_6xx */
#ifdef CONFIG_POWER4
	lis	r3,-KERNELBASE@h
	bl	init_idle_power4
#endif /* CONFIG_POWER4 */

	/* get current_thread_info and current */
	lis	r1,secondary_ti@ha
	tophys(r1,r1)
	lwz	r1,secondary_ti@l(r1)
	tophys(r2,r1)
	lwz	r2,TI_TASK(r2)

	/* stack */
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	li	r0,0
	tophys(r3,r1)
	stw	r0,0(r3)

	/* load up the MMU */
	bl	load_up_mmu

	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* phys address of our thread_struct */
	CLR_TOP32(r4)
	mtspr	SPRN_SPRG3,r4
	li	r3,0
	mtspr	SPRN_SPRG2,r3	/* 0 => not in RTAS */

	/* enable MMU and jump to start_secondary */
	li	r4,MSR_KERNEL
	FIX_SRR1(r4,r5)
	lis	r3,start_secondary@h
	ori	r3,r3,start_secondary@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	SYNC
	RFI
#endif /* CONFIG_SMP */

/*
 * These generic dummy functions are kept for CPUs not
 * included in CONFIG_6xx
 */
_GLOBAL(__setup_cpu_power3)
	blr
_GLOBAL(__setup_cpu_generic)
	blr

#if !defined(CONFIG_6xx) && !defined(CONFIG_POWER4)
_GLOBAL(__save_cpu_setup)
	blr
_GLOBAL(__restore_cpu_setup)
	blr
#endif /* !defined(CONFIG_6xx) && !defined(CONFIG_POWER4) */


/*
 * Load stuff into the MMU.  Intended to be called with
 * IR=0 and DR=0.
 */
load_up_mmu:
	sync			/* Force all PTE updates to finish */
	isync
	tlbia			/* Clear all TLB entries */
	sync			/* wait for tlbia/tlbie to finish */
	TLBSYNC			/* ... on all CPUs */
	/* Load the SDR1 register (hash table base & size) */
	lis	r6,_SDR1@ha
	tophys(r6,r6)
	lwz	r6,_SDR1@l(r6)
	mtspr	SPRN_SDR1,r6
#ifdef CONFIG_PPC64BRIDGE
	/* clear the ASR so we only use the pseudo-segment registers. */
	li	r6,0
	mtasr	r6
#endif /* CONFIG_PPC64BRIDGE */
	li	r0,16		/* load up segment register values */
	mtctr	r0		/* for context 0 */
	lis	r3,0x2000	/* Ku = 1, VSID = 0 */
	li	r4,0
3:	mtsrin	r3,r4
	addi	r3,r3,0x111	/* increment VSID */
	addis	r4,r4,0x1000	/* address of next segment */
	bdnz	3b
#ifndef CONFIG_POWER4
/* Load the BAT registers with the values set up by MMU_init.
   MMU_init takes care of whether we're on a 601 or not. */
	mfpvr	r3
	srwi	r3,r3,16
	cmpwi	r3,1
	lis	r3,BATS@ha
	addi	r3,r3,BATS@l
	tophys(r3,r3)
	LOAD_BAT(0,r3,r4,r5)
	LOAD_BAT(1,r3,r4,r5)
	LOAD_BAT(2,r3,r4,r5)
	LOAD_BAT(3,r3,r4,r5)
#endif /* CONFIG_POWER4 */
	blr

/*
 * This is where the main kernel code starts.
 */
start_here:
	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l
	/* Set up for using our exception vectors */
	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* init task's THREAD */
	CLR_TOP32(r4)
	mtspr	SPRN_SPRG3,r4
	li	r3,0
	mtspr	SPRN_SPRG2,r3	/* 0 => not in RTAS */

	/* stack */
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
/*
 * Do early bootinfo parsing, platform-specific initialization,
 * and set up the MMU.
 */
	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27
	bl	machine_init
	bl	MMU_init

#ifdef CONFIG_APUS
	/* Copy exception code to exception vector base on APUS. */
	lis	r4,KERNELBASE@h
#ifdef CONFIG_APUS_FAST_EXCEPT
	lis	r3,0xfff0		/* Copy to 0xfff00000 */
#else
	lis	r3,0			/* Copy to 0x00000000 */
#endif
	li	r5,0x4000		/* # bytes of memory to copy */
	li	r6,0
	bl	copy_and_flush		/* copy the first 0x4000 bytes */
#endif /* CONFIG_APUS */

/*
 * Go back to running unmapped so we can load up new values
 * for SDR1 (hash table pointer) and the segment registers
 * and change to using our exception vectors.
 */
	lis	r4,2f@h
	ori	r4,r4,2f@l
	tophys(r4,r4)
	li	r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	FIX_SRR1(r3,r5)
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	SYNC
	RFI
/* Load up the kernel context */
2:	bl	load_up_mmu

#ifdef CONFIG_BDI_SWITCH
	/* Add helper information for the Abatron bdiGDB debugger.
	 * We do this here because we know the mmu is disabled, and
	 * will be enabled for real in just a few instructions.
	 */
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r5, 0xf0(r0)	/* This must match your Abatron config */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	tophys(r5, r5)
	stw	r6, 0(r5)
#endif /* CONFIG_BDI_SWITCH */

/* Now turn on the MMU for real! */
	li	r4,MSR_KERNEL
	FIX_SRR1(r4,r5)
	lis	r3,start_kernel@h
	ori	r3,r3,start_kernel@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	SYNC
	RFI

/*
 * Set up the segment registers for a new context.
 */
_GLOBAL(set_context)
	mulli	r3,r3,897	/* multiply context by skew factor */
	rlwinm	r3,r3,4,8,27	/* VSID = (context & 0xfffff) << 4 */
	addis	r3,r3,0x6000	/* Set Ks, Ku bits */
	li	r0,NUM_USER_SEGMENTS
	mtctr	r0

#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is passed as second argument.
	 */
	lis	r5, KERNELBASE@h
	lwz	r5, 0xf0(r5)
	stw	r4, 0x4(r5)
#endif
	li	r4,0
	isync
3:
#ifdef CONFIG_PPC64BRIDGE
	slbie	r4
#endif /* CONFIG_PPC64BRIDGE */
	mtsrin	r3,r4
	addi	r3,r3,0x111	/* next VSID */
	rlwinm	r3,r3,0,8,3	/* clear out any overflow from VSID field */
	addis	r4,r4,0x1000	/* address of next segment */
	bdnz	3b
	sync
	isync
	blr
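
/*
 * (For reference: the skew factor 897 and the per-segment step of 0x111,
 * masked back into the VSID field by the rlwinm above, appear to be
 * chosen to spread the VSIDs of successive contexts and segments across
 * the hash table rather than letting them cluster; 0x6000 sets the Ks
 * and Ku protection-key bits in each segment register.)
 */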

/*
 * An undocumented "feature" of the 604e requires that the v bit
 * be cleared before changing BAT values.
 *
 * Also, newer IBM firmware does not clear BATs 3 and 4 so
 * this makes sure it's done.
 *  -- Cort
 */
clear_bats:
	li	r10,0
	mfspr	r9,SPRN_PVR
	rlwinm	r9,r9,16,16,31		/* r9 = 1 for 601, 4 for 604 */
	cmpwi	r9, 1
	beq	1f

	mtspr	SPRN_DBAT0U,r10
	mtspr	SPRN_DBAT0L,r10
	mtspr	SPRN_DBAT1U,r10
	mtspr	SPRN_DBAT1L,r10
	mtspr	SPRN_DBAT2U,r10
	mtspr	SPRN_DBAT2L,r10
	mtspr	SPRN_DBAT3U,r10
	mtspr	SPRN_DBAT3L,r10
1:
	mtspr	SPRN_IBAT0U,r10
	mtspr	SPRN_IBAT0L,r10
	mtspr	SPRN_IBAT1U,r10
	mtspr	SPRN_IBAT1L,r10
	mtspr	SPRN_IBAT2U,r10
	mtspr	SPRN_IBAT2L,r10
	mtspr	SPRN_IBAT3U,r10
	mtspr	SPRN_IBAT3L,r10
BEGIN_FTR_SECTION
	/* Here's a tweak: at this point, CPU setup has
	 * not been called yet, so HIGH_BAT_EN may not be
	 * set in HID0 for the 745x processors. However, it
	 * seems that doesn't affect our ability to actually
	 * write to these SPRs.
	 */
	mtspr	SPRN_DBAT4U,r10
	mtspr	SPRN_DBAT4L,r10
	mtspr	SPRN_DBAT5U,r10
	mtspr	SPRN_DBAT5L,r10
	mtspr	SPRN_DBAT6U,r10
	mtspr	SPRN_DBAT6L,r10
	mtspr	SPRN_DBAT7U,r10
	mtspr	SPRN_DBAT7L,r10
	mtspr	SPRN_IBAT4U,r10
	mtspr	SPRN_IBAT4L,r10
	mtspr	SPRN_IBAT5U,r10
	mtspr	SPRN_IBAT5L,r10
	mtspr	SPRN_IBAT6U,r10
	mtspr	SPRN_IBAT6L,r10
	mtspr	SPRN_IBAT7U,r10
	mtspr	SPRN_IBAT7L,r10
END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)
	blr

flush_tlbs:
	lis	r10, 0x40
1:	addic.	r10, r10, -0x1000
	tlbie	r10
	blt	1b
	sync
	blr

mmu_off:
	addi	r4, r3, __after_mmu_off - _start
	mfmsr	r3
	andi.	r0,r3,MSR_DR|MSR_IR		/* MMU enabled? */
	beqlr
	andc	r3,r3,r0
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	sync
	RFI
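
/*
 * (For reference: mmu_off computes the physical address of __after_mmu_off
 * from the runtime base passed in r3, clears MSR_IR/MSR_DR in the saved
 * MSR, and RFIs there, so execution continues untranslated.)
 */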

#ifndef CONFIG_POWER4
/*
 * Use the first pair of BAT registers to map the 1st 16MB
 * of RAM to KERNELBASE.  From this point on we can't safely
 * call OF any more.
 */
initial_bats:
	lis	r11,KERNELBASE@h
#ifndef CONFIG_PPC64BRIDGE
	mfspr	r9,SPRN_PVR
	rlwinm	r9,r9,16,16,31		/* r9 = 1 for 601, 4 for 604 */
	cmpwi	0,r9,1
	bne	4f
	ori	r11,r11,4		/* set up BAT registers for 601 */
	li	r8,0x7f			/* valid, block length = 8MB */
	oris	r9,r11,0x800000@h	/* set up BAT reg for 2nd 8M */
	oris	r10,r8,0x800000@h	/* set up BAT reg for 2nd 8M */
	mtspr	SPRN_IBAT0U,r11		/* N.B. 601 has valid bit in */
	mtspr	SPRN_IBAT0L,r8		/* lower BAT register */
	mtspr	SPRN_IBAT1U,r9
	mtspr	SPRN_IBAT1L,r10
	isync
	blr
#endif /* CONFIG_PPC64BRIDGE */

4:	tophys(r8,r11)
#ifdef CONFIG_SMP
	ori	r8,r8,0x12		/* R/W access, M=1 */
#else
	ori	r8,r8,2			/* R/W access */
#endif /* CONFIG_SMP */
#ifdef CONFIG_APUS
	ori	r11,r11,BL_8M<<2|0x2	/* set up 8MB BAT registers for 604 */
#else
	ori	r11,r11,BL_256M<<2|0x2	/* set up BAT registers for 604 */
#endif /* CONFIG_APUS */

#ifdef CONFIG_PPC64BRIDGE
	/* clear out the high 32 bits in the BAT */
	clrldi	r11,r11,32
	clrldi	r8,r8,32
#endif /* CONFIG_PPC64BRIDGE */
	mtspr	SPRN_DBAT0L,r8		/* N.B. 6xx (not 601) have valid */
	mtspr	SPRN_DBAT0U,r11		/* bit in upper BAT register */
	mtspr	SPRN_IBAT0L,r8
	mtspr	SPRN_IBAT0U,r11
	isync
	blr

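/*
 * (BAT value recap, for reference: the upper BAT word holds the effective
 * address (BEPI), the block-length mask and the valid bits -- except on
 * the 601, where the valid bit lives in the lower word, as noted above;
 * the lower word holds the physical address (BRPN) plus WIMG/PP, so 0x12
 * above means PP=0b10 (read/write) with M=1 for SMP coherency, and 0x2 is
 * read/write without M.)
 */
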
#if !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT)
setup_disp_bat:
	/*
	 * Set up the display BAT prepared for us in prom.c
	 */
	mflr	r8
	bl	reloc_offset
	mtlr	r8
	addis	r8,r3,disp_BAT@ha
	addi	r8,r8,disp_BAT@l
	lwz	r11,0(r8)
	lwz	r8,4(r8)
	mfspr	r9,SPRN_PVR
	rlwinm	r9,r9,16,16,31		/* r9 = 1 for 601, 4 for 604 */
	cmpwi	0,r9,1
	beq	1f
	mtspr	SPRN_DBAT3L,r8
	mtspr	SPRN_DBAT3U,r11
	blr
1:	mtspr	SPRN_IBAT3L,r8
	mtspr	SPRN_IBAT3U,r11
	blr

#endif /* !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT) */

#else /* CONFIG_POWER4 */
/*
 * Load up the SDR1 and segment register values now
 * since we don't have the BATs.
 * Also make sure we are running in 32-bit mode.
 */

initial_mm_power4:
	addis	r14,r3,_SDR1@ha		/* get the value from _SDR1 */
	lwz	r14,_SDR1@l(r14)	/* assume hash table below 4GB */
	mtspr	SPRN_SDR1,r14
	slbia
	lis	r4,0x2000		/* set pseudo-segment reg 12 */
	ori	r5,r4,0x0ccc
	mtsr	12,r5
#if 0
	ori	r5,r4,0x0888		/* set pseudo-segment reg 8 */
	mtsr	8,r5			/* (for access to serial port) */
#endif
#ifdef CONFIG_BOOTX_TEXT
	ori	r5,r4,0x0999		/* set pseudo-segment reg 9 */
	mtsr	9,r5			/* (for access to screen) */
#endif
	mfmsr	r0
	clrldi	r0,r0,1
	sync
	mtmsr	r0
	isync
	blr

#endif /* CONFIG_POWER4 */

#ifdef CONFIG_8260
/* Jump into the system reset for the rom.
 * We first disable the MMU, and then jump to the ROM reset address.
 *
 * r3 is the board info structure, r4 is the location for starting.
 * I use this for building a small kernel that can load other kernels,
 * rather than trying to write or rely on a rom monitor that can tftp load.
 */
	.globl	m8260_gorom
m8260_gorom:
	mfmsr	r0
	rlwinm	r0,r0,0,17,15	/* clear MSR_EE in r0 */
	sync
	mtmsr	r0
	sync
	mfspr	r11, SPRN_HID0
	lis	r10, 0
	ori	r10,r10,HID0_ICE|HID0_DCE
	andc	r11, r11, r10
	mtspr	SPRN_HID0, r11
	isync
	li	r5, MSR_ME|MSR_RI
	lis	r6,2f@h
	addis	r6,r6,-KERNELBASE@h
	ori	r6,r6,2f@l
	mtspr	SPRN_SRR0,r6
	mtspr	SPRN_SRR1,r5
	isync
	sync
	rfi
2:
	mtlr	r4
	blr
#endif


/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the data segment,
 * which is page-aligned.
 */
	.data
	.globl	sdata
sdata:
	.globl	empty_zero_page
empty_zero_page:
	.space	4096

	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	4096

/*
 * This space gets a copy of optional info passed to us by the bootstrap.
 * It is used to pass parameters into the kernel like root=/dev/sda1, etc.
 */
	.globl	cmd_line
cmd_line:
	.space	512

	.globl intercept_table
intercept_table:
	.long 0, 0, i0x200, i0x300, i0x400, 0, i0x600, i0x700
	.long i0x800, 0, 0, 0, 0, i0xd00, 0, 0
	.long 0, 0, 0, i0x1300, 0, 0, 0, 0
	.long 0, 0, 0, 0, 0, 0, 0, 0
	.long 0, 0, 0, 0, 0, 0, 0, 0
	.long 0, 0, 0, 0, 0, 0, 0, 0

/* Room for two PTE pointers, usually the kernel and current user pointers
 * to their respective root page table.
 */
abatron_pteptrs:
	.space	8