
/arch/sparc64/kernel/traps.c

https://bitbucket.org/evzijst/gittest
C | 2118 lines | 1662 code | 256 blank | 200 comment | 255 complexity | 429fcf8b37c9667e80ab350d1cfc84cc MD5

Large files are truncated in this view; the full file is available in the repository.

   1/* $Id: traps.c,v 1.85 2002/02/09 19:49:31 davem Exp $
   2 * arch/sparc64/kernel/traps.c
   3 *
   4 * Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
   5 * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
   6 */
   7
   8/*
   9 * I like traps on v9, :))))
  10 */
  11
  12#include <linux/config.h>
  13#include <linux/module.h>
  14#include <linux/sched.h>  /* for jiffies */
  15#include <linux/kernel.h>
  16#include <linux/kallsyms.h>
  17#include <linux/signal.h>
  18#include <linux/smp.h>
  19#include <linux/smp_lock.h>
  20#include <linux/mm.h>
  21#include <linux/init.h>
  22
  23#include <asm/delay.h>
  24#include <asm/system.h>
  25#include <asm/ptrace.h>
  26#include <asm/oplib.h>
  27#include <asm/page.h>
  28#include <asm/pgtable.h>
  29#include <asm/unistd.h>
  30#include <asm/uaccess.h>
  31#include <asm/fpumacro.h>
  32#include <asm/lsu.h>
  33#include <asm/dcu.h>
  34#include <asm/estate.h>
  35#include <asm/chafsr.h>
  36#include <asm/psrcompat.h>
  37#include <asm/processor.h>
  38#include <asm/timer.h>
  39#include <asm/kdebug.h>
  40#ifdef CONFIG_KMOD
  41#include <linux/kmod.h>
  42#endif
  43
  44struct notifier_block *sparc64die_chain;
  45static DEFINE_SPINLOCK(die_notifier_lock);
  46
  47int register_die_notifier(struct notifier_block *nb)
  48{
  49	int err = 0;
  50	unsigned long flags;
  51	spin_lock_irqsave(&die_notifier_lock, flags);
  52	err = notifier_chain_register(&sparc64die_chain, nb);
  53	spin_unlock_irqrestore(&die_notifier_lock, flags);
  54	return err;
  55}
  56
  57/* When an irrecoverable trap occurs at tl > 0, the trap entry
  58 * code logs the trap state registers at every level in the trap
  59 * stack.  It is found at (pt_regs + sizeof(pt_regs)) and the layout
  60 * is as follows:
  61 */
  62struct tl1_traplog {
  63	struct {
  64		unsigned long tstate;
  65		unsigned long tpc;
  66		unsigned long tnpc;
  67		unsigned long tt;
  68	} trapstack[4];
  69	unsigned long tl;
  70};
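
/* For reference, the TL>0 handlers below locate this log with plain
 * pointer arithmetic on the saved pt_regs, e.g.:
 *
 *	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
 *
 * i.e. the log sits immediately after the struct pt_regs on the trap stack.
 */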
  71
  72static void dump_tl1_traplog(struct tl1_traplog *p)
  73{
  74	int i;
  75
  76	printk("TRAPLOG: Error at trap level 0x%lx, dumping trap stack.\n",
  77	       p->tl);
  78	for (i = 0; i < 4; i++) {
  79		printk(KERN_CRIT
  80		       "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
  81		       "TNPC[%016lx] TT[%lx]\n",
  82		       i + 1,
  83		       p->trapstack[i].tstate, p->trapstack[i].tpc,
  84		       p->trapstack[i].tnpc, p->trapstack[i].tt);
  85	}
  86}
  87
  88void do_call_debug(struct pt_regs *regs) 
  89{ 
  90	notify_die(DIE_CALL, "debug call", regs, 0, 255, SIGINT); 
  91}
  92
  93void bad_trap(struct pt_regs *regs, long lvl)
  94{
  95	char buffer[32];
  96	siginfo_t info;
  97
  98	if (notify_die(DIE_TRAP, "bad trap", regs,
  99		       0, lvl, SIGTRAP) == NOTIFY_STOP)
 100		return;
 101
 102	if (lvl < 0x100) {
 103		sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl);
 104		die_if_kernel(buffer, regs);
 105	}
 106
 107	lvl -= 0x100;
 108	if (regs->tstate & TSTATE_PRIV) {
 109		sprintf(buffer, "Kernel bad sw trap %lx", lvl);
 110		die_if_kernel(buffer, regs);
 111	}
 112	if (test_thread_flag(TIF_32BIT)) {
 113		regs->tpc &= 0xffffffff;
 114		regs->tnpc &= 0xffffffff;
 115	}
 116	info.si_signo = SIGILL;
 117	info.si_errno = 0;
 118	info.si_code = ILL_ILLTRP;
 119	info.si_addr = (void __user *)regs->tpc;
 120	info.si_trapno = lvl;
 121	force_sig_info(SIGILL, &info, current);
 122}
 123
 124void bad_trap_tl1(struct pt_regs *regs, long lvl)
 125{
 126	char buffer[32];
 127	
 128	if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
 129		       0, lvl, SIGTRAP) == NOTIFY_STOP)
 130		return;
 131
 132	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
 133
 134	sprintf (buffer, "Bad trap %lx at tl>0", lvl);
 135	die_if_kernel (buffer, regs);
 136}
 137
 138#ifdef CONFIG_DEBUG_BUGVERBOSE
 139void do_BUG(const char *file, int line)
 140{
 141	bust_spinlocks(1);
 142	printk("kernel BUG at %s:%d!\n", file, line);
 143}
 144#endif
 145
 146void instruction_access_exception(struct pt_regs *regs,
 147				  unsigned long sfsr, unsigned long sfar)
 148{
 149	siginfo_t info;
 150
 151	if (notify_die(DIE_TRAP, "instruction access exception", regs,
 152		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
 153		return;
 154
 155	if (regs->tstate & TSTATE_PRIV) {
 156		printk("instruction_access_exception: SFSR[%016lx] SFAR[%016lx], going.\n",
 157		       sfsr, sfar);
 158		die_if_kernel("Iax", regs);
 159	}
 160	if (test_thread_flag(TIF_32BIT)) {
 161		regs->tpc &= 0xffffffff;
 162		regs->tnpc &= 0xffffffff;
 163	}
 164	info.si_signo = SIGSEGV;
 165	info.si_errno = 0;
 166	info.si_code = SEGV_MAPERR;
 167	info.si_addr = (void __user *)regs->tpc;
 168	info.si_trapno = 0;
 169	force_sig_info(SIGSEGV, &info, current);
 170}
 171
 172void instruction_access_exception_tl1(struct pt_regs *regs,
 173				      unsigned long sfsr, unsigned long sfar)
 174{
 175	if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
 176		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
 177		return;
 178
 179	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
 180	instruction_access_exception(regs, sfsr, sfar);
 181}
 182
 183void data_access_exception(struct pt_regs *regs,
 184			   unsigned long sfsr, unsigned long sfar)
 185{
 186	siginfo_t info;
 187
 188	if (notify_die(DIE_TRAP, "data access exception", regs,
 189		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
 190		return;
 191
 192	if (regs->tstate & TSTATE_PRIV) {
 193		/* Test if this comes from uaccess places. */
 194		unsigned long fixup;
 195		unsigned long g2 = regs->u_regs[UREG_G2];
 196
 197		if ((fixup = search_extables_range(regs->tpc, &g2))) {
 198			/* Ouch, somebody is trying ugly VM hole tricks on us... */
 199#ifdef DEBUG_EXCEPTIONS
 200			printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
 201			printk("EX_TABLE: insn<%016lx> fixup<%016lx> "
 202			       "g2<%016lx>\n", regs->tpc, fixup, g2);
 203#endif
 204			regs->tpc = fixup;
 205			regs->tnpc = regs->tpc + 4;
 206			regs->u_regs[UREG_G2] = g2;
 207			return;
 208		}
 209		/* Shit... */
 210		printk("data_access_exception: SFSR[%016lx] SFAR[%016lx], going.\n",
 211		       sfsr, sfar);
 212		die_if_kernel("Dax", regs);
 213	}
 214
 215	info.si_signo = SIGSEGV;
 216	info.si_errno = 0;
 217	info.si_code = SEGV_MAPERR;
 218	info.si_addr = (void __user *)sfar;
 219	info.si_trapno = 0;
 220	force_sig_info(SIGSEGV, &info, current);
 221}
 222
 223#ifdef CONFIG_PCI
 224/* This is really pathetic... */
 225extern volatile int pci_poke_in_progress;
 226extern volatile int pci_poke_cpu;
 227extern volatile int pci_poke_faulted;
 228#endif
 229
 230/* When access exceptions happen, we must do this. */
 231static void spitfire_clean_and_reenable_l1_caches(void)
 232{
 233	unsigned long va;
 234
 235	if (tlb_type != spitfire)
 236		BUG();
 237
 238	/* Clean 'em. */
 239	for (va =  0; va < (PAGE_SIZE << 1); va += 32) {
 240		spitfire_put_icache_tag(va, 0x0);
 241		spitfire_put_dcache_tag(va, 0x0);
 242	}
 243
 244	/* Re-enable in LSU. */
 245	__asm__ __volatile__("flush %%g6\n\t"
 246			     "membar #Sync\n\t"
 247			     "stxa %0, [%%g0] %1\n\t"
 248			     "membar #Sync"
 249			     : /* no outputs */
 250			     : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
 251				    LSU_CONTROL_IM | LSU_CONTROL_DM),
 252			     "i" (ASI_LSU_CONTROL)
 253			     : "memory");
 254}
 255
 256void do_iae(struct pt_regs *regs)
 257{
 258	siginfo_t info;
 259
 260	spitfire_clean_and_reenable_l1_caches();
 261
 262	if (notify_die(DIE_TRAP, "instruction access exception", regs,
 263		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
 264		return;
 265
 266	info.si_signo = SIGBUS;
 267	info.si_errno = 0;
 268	info.si_code = BUS_OBJERR;
 269	info.si_addr = (void *)0;
 270	info.si_trapno = 0;
 271	force_sig_info(SIGBUS, &info, current);
 272}
 273
 274void do_dae(struct pt_regs *regs)
 275{
 276	siginfo_t info;
 277
 278#ifdef CONFIG_PCI
 279	if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
 280		spitfire_clean_and_reenable_l1_caches();
 281
 282		pci_poke_faulted = 1;
 283
 284		/* Why the fuck did they have to change this? */
 285		if (tlb_type == cheetah || tlb_type == cheetah_plus)
 286			regs->tpc += 4;
 287
 288		regs->tnpc = regs->tpc + 4;
 289		return;
 290	}
 291#endif
 292	spitfire_clean_and_reenable_l1_caches();
 293
 294	if (notify_die(DIE_TRAP, "data access exception", regs,
 295		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
 296		return;
 297
 298	info.si_signo = SIGBUS;
 299	info.si_errno = 0;
 300	info.si_code = BUS_OBJERR;
 301	info.si_addr = (void *)0;
 302	info.si_trapno = 0;
 303	force_sig_info(SIGBUS, &info, current);
 304}
 305
 306static char ecc_syndrome_table[] = {
 307	0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
 308	0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
 309	0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
 310	0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
 311	0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
 312	0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
 313	0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
 314	0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
 315	0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
 316	0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
 317	0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
 318	0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
 319	0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
 320	0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
 321	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
 322	0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
 323	0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
 324	0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
 325	0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
 326	0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
 327	0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
 328	0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
 329	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
 330	0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
 331	0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
 332	0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
 333	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
 334	0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
 335	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
 336	0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
 337	0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
 338	0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
 339};
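
/* For reference, cee_log() below indexes this table with the low eight
 * bits of a UDB error status word (the ECC syndrome) and hands the
 * resulting code to the PROM to name the failing memory module:
 *
 *	scode = ecc_syndrome_table[udb_reg & 0xff];
 *	prom_getunumber(scode, afar, memmod_str, sizeof(memmod_str));
 */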
 340
 341/* cee_trap in entry.S encodes AFSR/UDBH/UDBL error status
 342 * in the following format.  The AFAR is left as is, with
 343 * reserved bits cleared, and is a raw 40-bit physical
 344 * address.
 345 */
 346#define CE_STATUS_UDBH_UE		(1UL << (43 + 9))
 347#define CE_STATUS_UDBH_CE		(1UL << (43 + 8))
 348#define CE_STATUS_UDBH_ESYNDR		(0xffUL << 43)
 349#define CE_STATUS_UDBH_SHIFT		43
 350#define CE_STATUS_UDBL_UE		(1UL << (33 + 9))
 351#define CE_STATUS_UDBL_CE		(1UL << (33 + 8))
 352#define CE_STATUS_UDBL_ESYNDR		(0xffUL << 33)
 353#define CE_STATUS_UDBL_SHIFT		33
 354#define CE_STATUS_AFSR_MASK		(0x1ffffffffUL)
 355#define CE_STATUS_AFSR_ME		(1UL << 32)
 356#define CE_STATUS_AFSR_PRIV		(1UL << 31)
 357#define CE_STATUS_AFSR_ISAP		(1UL << 30)
 358#define CE_STATUS_AFSR_ETP		(1UL << 29)
 359#define CE_STATUS_AFSR_IVUE		(1UL << 28)
 360#define CE_STATUS_AFSR_TO		(1UL << 27)
 361#define CE_STATUS_AFSR_BERR		(1UL << 26)
 362#define CE_STATUS_AFSR_LDP		(1UL << 25)
 363#define CE_STATUS_AFSR_CP		(1UL << 24)
 364#define CE_STATUS_AFSR_WP		(1UL << 23)
 365#define CE_STATUS_AFSR_EDP		(1UL << 22)
 366#define CE_STATUS_AFSR_UE		(1UL << 21)
 367#define CE_STATUS_AFSR_CE		(1UL << 20)
 368#define CE_STATUS_AFSR_ETS		(0xfUL << 16)
 369#define CE_STATUS_AFSR_ETS_SHIFT	16
 370#define CE_STATUS_AFSR_PSYND		(0xffffUL << 0)
 371#define CE_STATUS_AFSR_PSYND_SHIFT	0
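
/* Unpacking sketch (used in cee_log below): shifting ce_status right by
 * CE_STATUS_UDBL_SHIFT or CE_STATUS_UDBH_SHIFT and masking with 0x3ff
 * yields a 10-bit UDB status word -- bit 9 is UE, bit 8 is CE, and bits
 * 7:0 are the ECC syndrome used to index ecc_syndrome_table[] above:
 *
 *	udb_reg = (ce_status >> CE_STATUS_UDBL_SHIFT) & 0x3ffUL;
 */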
 372
 373/* Layout of Ecache TAG Parity Syndrome of AFSR */
 374#define AFSR_ETSYNDROME_7_0		0x1UL /* E$-tag bus bits  <7:0> */
 375#define AFSR_ETSYNDROME_15_8		0x2UL /* E$-tag bus bits <15:8> */
 376#define AFSR_ETSYNDROME_21_16		0x4UL /* E$-tag bus bits <21:16> */
 377#define AFSR_ETSYNDROME_24_22		0x8UL /* E$-tag bus bits <24:22> */
 378
 379static char *syndrome_unknown = "<Unknown>";
 380
 381asmlinkage void cee_log(unsigned long ce_status,
 382			unsigned long afar,
 383			struct pt_regs *regs)
 384{
 385	char memmod_str[64];
 386	char *p;
 387	unsigned short scode, udb_reg;
 388
 389	printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
 390	       "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx]\n",
 391	       smp_processor_id(),
 392	       (ce_status & CE_STATUS_AFSR_MASK),
 393	       afar,
 394	       ((ce_status >> CE_STATUS_UDBL_SHIFT) & 0x3ffUL),
 395	       ((ce_status >> CE_STATUS_UDBH_SHIFT) & 0x3ffUL));
 396
 397	udb_reg = ((ce_status >> CE_STATUS_UDBL_SHIFT) & 0x3ffUL);
 398	if (udb_reg & (1 << 8)) {
 399		scode = ecc_syndrome_table[udb_reg & 0xff];
 400		if (prom_getunumber(scode, afar,
 401				    memmod_str, sizeof(memmod_str)) == -1)
 402			p = syndrome_unknown;
 403		else
 404			p = memmod_str;
 405		printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
 406		       "Memory Module \"%s\"\n",
 407		       smp_processor_id(), scode, p);
 408	}
 409
 410	udb_reg = ((ce_status >> CE_STATUS_UDBH_SHIFT) & 0x3ffUL);
 411	if (udb_reg & (1 << 8)) {
 412		scode = ecc_syndrome_table[udb_reg & 0xff];
 413		if (prom_getunumber(scode, afar,
 414				    memmod_str, sizeof(memmod_str)) == -1)
 415			p = syndrome_unknown;
 416		else
 417			p = memmod_str;
 418		printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
 419		       "Memory Module \"%s\"\n",
 420		       smp_processor_id(), scode, p);
 421	}
 422}
 423
 424/* Cheetah error trap handling. */
 425static unsigned long ecache_flush_physbase;
 426static unsigned long ecache_flush_linesize;
 427static unsigned long ecache_flush_size;
 428
 429/* WARNING: The error trap handlers in assembly know the precise
 430 *	    layout of the following structure.
 431 *
 432 * C-level handlers below use this information to log the error
 433 * and then determine how to recover (if possible).
 434 */
 435struct cheetah_err_info {
 436/*0x00*/u64 afsr;
 437/*0x08*/u64 afar;
 438
 439	/* D-cache state */
 440/*0x10*/u64 dcache_data[4];	/* The actual data	*/
 441/*0x30*/u64 dcache_index;	/* D-cache index	*/
 442/*0x38*/u64 dcache_tag;		/* D-cache tag/valid	*/
 443/*0x40*/u64 dcache_utag;	/* D-cache microtag	*/
 444/*0x48*/u64 dcache_stag;	/* D-cache snooptag	*/
 445
 446	/* I-cache state */
 447/*0x50*/u64 icache_data[8];	/* The actual insns + predecode	*/
 448/*0x90*/u64 icache_index;	/* I-cache index	*/
 449/*0x98*/u64 icache_tag;		/* I-cache phys tag	*/
 450/*0xa0*/u64 icache_utag;	/* I-cache microtag	*/
 451/*0xa8*/u64 icache_stag;	/* I-cache snooptag	*/
 452/*0xb0*/u64 icache_upper;	/* I-cache upper-tag	*/
 453/*0xb8*/u64 icache_lower;	/* I-cache lower-tag	*/
 454
 455	/* E-cache state */
 456/*0xc0*/u64 ecache_data[4];	/* 32 bytes from staging registers */
 457/*0xe0*/u64 ecache_index;	/* E-cache index	*/
 458/*0xe8*/u64 ecache_tag;		/* E-cache tag/state	*/
 459
 460/*0xf0*/u64 __pad[32 - 30];
 461};
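
/* The __pad entry rounds this record up to 32 * sizeof(u64) == 256 bytes.
 * The assembly error handlers rely on this exact layout (see the WARNING
 * above), and cheetah_get_error_log() below indexes an array of these
 * records, two per cpu.
 */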
 462#define CHAFSR_INVALID		((u64)-1L)
 463
 464/* This table is ordered in priority of errors and matches the
 465 * AFAR overwrite policy as well.
 466 */
 467
 468struct afsr_error_table {
 469	unsigned long mask;
 470	const char *name;
 471};
 472
 473static const char CHAFSR_PERR_msg[] =
 474	"System interface protocol error";
 475static const char CHAFSR_IERR_msg[] =
 476	"Internal processor error";
 477static const char CHAFSR_ISAP_msg[] =
  478	"System request parity error on incoming address";
 479static const char CHAFSR_UCU_msg[] =
 480	"Uncorrectable E-cache ECC error for ifetch/data";
 481static const char CHAFSR_UCC_msg[] =
 482	"SW Correctable E-cache ECC error for ifetch/data";
 483static const char CHAFSR_UE_msg[] =
 484	"Uncorrectable system bus data ECC error for read";
 485static const char CHAFSR_EDU_msg[] =
 486	"Uncorrectable E-cache ECC error for stmerge/blkld";
 487static const char CHAFSR_EMU_msg[] =
 488	"Uncorrectable system bus MTAG error";
 489static const char CHAFSR_WDU_msg[] =
 490	"Uncorrectable E-cache ECC error for writeback";
 491static const char CHAFSR_CPU_msg[] =
 492	"Uncorrectable ECC error for copyout";
 493static const char CHAFSR_CE_msg[] =
 494	"HW corrected system bus data ECC error for read";
 495static const char CHAFSR_EDC_msg[] =
 496	"HW corrected E-cache ECC error for stmerge/blkld";
 497static const char CHAFSR_EMC_msg[] =
 498	"HW corrected system bus MTAG ECC error";
 499static const char CHAFSR_WDC_msg[] =
 500	"HW corrected E-cache ECC error for writeback";
 501static const char CHAFSR_CPC_msg[] =
 502	"HW corrected ECC error for copyout";
 503static const char CHAFSR_TO_msg[] =
 504	"Unmapped error from system bus";
 505static const char CHAFSR_BERR_msg[] =
 506	"Bus error response from system bus";
 507static const char CHAFSR_IVC_msg[] =
 508	"HW corrected system bus data ECC error for ivec read";
 509static const char CHAFSR_IVU_msg[] =
 510	"Uncorrectable system bus data ECC error for ivec read";
 511static struct afsr_error_table __cheetah_error_table[] = {
 512	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
 513	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
 514	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
 515	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
 516	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
 517	{	CHAFSR_UE,	CHAFSR_UE_msg		},
 518	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
 519	{	CHAFSR_EMU,	CHAFSR_EMU_msg		},
 520	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
 521	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
 522	{	CHAFSR_CE,	CHAFSR_CE_msg		},
 523	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
 524	{	CHAFSR_EMC,	CHAFSR_EMC_msg		},
 525	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
 526	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
 527	{	CHAFSR_TO,	CHAFSR_TO_msg		},
 528	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
 529	/* These two do not update the AFAR. */
 530	{	CHAFSR_IVC,	CHAFSR_IVC_msg		},
 531	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
 532	{	0,		NULL			},
 533};
 534static const char CHPAFSR_DTO_msg[] =
 535	"System bus unmapped error for prefetch/storequeue-read";
 536static const char CHPAFSR_DBERR_msg[] =
 537	"System bus error for prefetch/storequeue-read";
 538static const char CHPAFSR_THCE_msg[] =
 539	"Hardware corrected E-cache Tag ECC error";
 540static const char CHPAFSR_TSCE_msg[] =
 541	"SW handled correctable E-cache Tag ECC error";
 542static const char CHPAFSR_TUE_msg[] =
 543	"Uncorrectable E-cache Tag ECC error";
 544static const char CHPAFSR_DUE_msg[] =
 545	"System bus uncorrectable data ECC error due to prefetch/store-fill";
 546static struct afsr_error_table __cheetah_plus_error_table[] = {
 547	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
 548	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
 549	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
 550	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
 551	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
 552	{	CHAFSR_UE,	CHAFSR_UE_msg		},
 553	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
 554	{	CHAFSR_EMU,	CHAFSR_EMU_msg		},
 555	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
 556	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
 557	{	CHAFSR_CE,	CHAFSR_CE_msg		},
 558	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
 559	{	CHAFSR_EMC,	CHAFSR_EMC_msg		},
 560	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
 561	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
 562	{	CHAFSR_TO,	CHAFSR_TO_msg		},
 563	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
 564	{	CHPAFSR_DTO,	CHPAFSR_DTO_msg		},
 565	{	CHPAFSR_DBERR,	CHPAFSR_DBERR_msg	},
 566	{	CHPAFSR_THCE,	CHPAFSR_THCE_msg	},
 567	{	CHPAFSR_TSCE,	CHPAFSR_TSCE_msg	},
 568	{	CHPAFSR_TUE,	CHPAFSR_TUE_msg		},
 569	{	CHPAFSR_DUE,	CHPAFSR_DUE_msg		},
 570	/* These two do not update the AFAR. */
 571	{	CHAFSR_IVC,	CHAFSR_IVC_msg		},
 572	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
 573	{	0,		NULL			},
 574};
 575static const char JPAFSR_JETO_msg[] =
 576	"System interface protocol error, hw timeout caused";
 577static const char JPAFSR_SCE_msg[] =
 578	"Parity error on system snoop results";
 579static const char JPAFSR_JEIC_msg[] =
 580	"System interface protocol error, illegal command detected";
 581static const char JPAFSR_JEIT_msg[] =
 582	"System interface protocol error, illegal ADTYPE detected";
 583static const char JPAFSR_OM_msg[] =
 584	"Out of range memory error has occurred";
 585static const char JPAFSR_ETP_msg[] =
 586	"Parity error on L2 cache tag SRAM";
 587static const char JPAFSR_UMS_msg[] =
 588	"Error due to unsupported store";
 589static const char JPAFSR_RUE_msg[] =
 590	"Uncorrectable ECC error from remote cache/memory";
 591static const char JPAFSR_RCE_msg[] =
 592	"Correctable ECC error from remote cache/memory";
 593static const char JPAFSR_BP_msg[] =
 594	"JBUS parity error on returned read data";
 595static const char JPAFSR_WBP_msg[] =
 596	"JBUS parity error on data for writeback or block store";
 597static const char JPAFSR_FRC_msg[] =
 598	"Foreign read to DRAM incurring correctable ECC error";
 599static const char JPAFSR_FRU_msg[] =
 600	"Foreign read to DRAM incurring uncorrectable ECC error";
 601static struct afsr_error_table __jalapeno_error_table[] = {
 602	{	JPAFSR_JETO,	JPAFSR_JETO_msg		},
 603	{	JPAFSR_SCE,	JPAFSR_SCE_msg		},
 604	{	JPAFSR_JEIC,	JPAFSR_JEIC_msg		},
 605	{	JPAFSR_JEIT,	JPAFSR_JEIT_msg		},
 606	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
 607	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
 608	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
 609	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
 610	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
 611	{	CHAFSR_UE,	CHAFSR_UE_msg		},
 612	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
 613	{	JPAFSR_OM,	JPAFSR_OM_msg		},
 614	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
 615	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
 616	{	CHAFSR_CE,	CHAFSR_CE_msg		},
 617	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
 618	{	JPAFSR_ETP,	JPAFSR_ETP_msg		},
 619	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
 620	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
 621	{	CHAFSR_TO,	CHAFSR_TO_msg		},
 622	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
 623	{	JPAFSR_UMS,	JPAFSR_UMS_msg		},
 624	{	JPAFSR_RUE,	JPAFSR_RUE_msg		},
 625	{	JPAFSR_RCE,	JPAFSR_RCE_msg		},
 626	{	JPAFSR_BP,	JPAFSR_BP_msg		},
 627	{	JPAFSR_WBP,	JPAFSR_WBP_msg		},
 628	{	JPAFSR_FRC,	JPAFSR_FRC_msg		},
 629	{	JPAFSR_FRU,	JPAFSR_FRU_msg		},
 630	/* These two do not update the AFAR. */
 631	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
 632	{	0,		NULL			},
 633};
 634static struct afsr_error_table *cheetah_error_table;
 635static unsigned long cheetah_afsr_errors;
 636
 637/* This is allocated at boot time based upon the largest hardware
 638 * cpu ID in the system.  We allocate two entries per cpu, one for
 639 * TL==0 logging and one for TL >= 1 logging.
 640 */
 641struct cheetah_err_info *cheetah_error_log;
 642
 643static __inline__ struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
 644{
 645	struct cheetah_err_info *p;
 646	int cpu = smp_processor_id();
 647
 648	if (!cheetah_error_log)
 649		return NULL;
 650
 651	p = cheetah_error_log + (cpu * 2);
 652	if ((afsr & CHAFSR_TL1) != 0UL)
 653		p++;
 654
 655	return p;
 656}
 657
 658extern unsigned int tl0_icpe[], tl1_icpe[];
 659extern unsigned int tl0_dcpe[], tl1_dcpe[];
 660extern unsigned int tl0_fecc[], tl1_fecc[];
 661extern unsigned int tl0_cee[], tl1_cee[];
 662extern unsigned int tl0_iae[], tl1_iae[];
 663extern unsigned int tl0_dae[], tl1_dae[];
 664extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
 665extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
 666extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
 667extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
 668extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];
 669
 670void __init cheetah_ecache_flush_init(void)
 671{
 672	unsigned long largest_size, smallest_linesize, order, ver;
 673	int node, i, instance;
 674
 675	/* Scan all cpu device tree nodes, note two values:
 676	 * 1) largest E-cache size
 677	 * 2) smallest E-cache line size
 678	 */
 679	largest_size = 0UL;
 680	smallest_linesize = ~0UL;
 681
 682	instance = 0;
 683	while (!cpu_find_by_instance(instance, &node, NULL)) {
 684		unsigned long val;
 685
 686		val = prom_getintdefault(node, "ecache-size",
 687					 (2 * 1024 * 1024));
 688		if (val > largest_size)
 689			largest_size = val;
 690		val = prom_getintdefault(node, "ecache-line-size", 64);
 691		if (val < smallest_linesize)
 692			smallest_linesize = val;
 693		instance++;
 694	}
 695
 696	if (largest_size == 0UL || smallest_linesize == ~0UL) {
 697		prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
 698			    "parameters.\n");
 699		prom_halt();
 700	}
 701
 702	ecache_flush_size = (2 * largest_size);
 703	ecache_flush_linesize = smallest_linesize;
 704
 705	/* Discover a physically contiguous chunk of physical
 706	 * memory in 'sp_banks' of size ecache_flush_size calculated
 707	 * above.  Store the physical base of this area at
 708	 * ecache_flush_physbase.
 709	 */
 710	for (node = 0; ; node++) {
 711		if (sp_banks[node].num_bytes == 0)
 712			break;
 713		if (sp_banks[node].num_bytes >= ecache_flush_size) {
 714			ecache_flush_physbase = sp_banks[node].base_addr;
 715			break;
 716		}
 717	}
 718
 719	/* Note: Zero would be a valid value of ecache_flush_physbase so
 720	 * don't use that as the success test. :-)
 721	 */
 722	if (sp_banks[node].num_bytes == 0) {
  723		prom_printf("cheetah_ecache_flush_init: Cannot find %ld byte "
 724			    "contiguous physical memory.\n", ecache_flush_size);
 725		prom_halt();
 726	}
 727
 728	/* Now allocate error trap reporting scoreboard. */
 729	node = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
 730	for (order = 0; order < MAX_ORDER; order++) {
 731		if ((PAGE_SIZE << order) >= node)
 732			break;
 733	}
 734	cheetah_error_log = (struct cheetah_err_info *)
 735		__get_free_pages(GFP_KERNEL, order);
 736	if (!cheetah_error_log) {
 737		prom_printf("cheetah_ecache_flush_init: Failed to allocate "
 738			    "error logging scoreboard (%d bytes).\n", node);
 739		prom_halt();
 740	}
 741	memset(cheetah_error_log, 0, PAGE_SIZE << order);
 742
 743	/* Mark all AFSRs as invalid so that the trap handler will
  744	 * log new information there.
 745	 */
 746	for (i = 0; i < 2 * NR_CPUS; i++)
 747		cheetah_error_log[i].afsr = CHAFSR_INVALID;
 748
 749	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
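	/* The upper 32 bits of %ver (manufacturer and implementation fields)
	 * select which AFSR error table applies: 0x003e0016 gets the
	 * Jalapeno table, 0x003e0015 the Cheetah+ table, anything else the
	 * plain Cheetah table.
	 */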
 750	if ((ver >> 32) == 0x003e0016) {
 751		cheetah_error_table = &__jalapeno_error_table[0];
 752		cheetah_afsr_errors = JPAFSR_ERRORS;
 753	} else if ((ver >> 32) == 0x003e0015) {
 754		cheetah_error_table = &__cheetah_plus_error_table[0];
 755		cheetah_afsr_errors = CHPAFSR_ERRORS;
 756	} else {
 757		cheetah_error_table = &__cheetah_error_table[0];
 758		cheetah_afsr_errors = CHAFSR_ERRORS;
 759	}
 760
 761	/* Now patch trap tables. */
 762	memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
 763	memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
 764	memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
 765	memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
 766	memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
 767	memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
 768	memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
 769	memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
 770	if (tlb_type == cheetah_plus) {
 771		memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
 772		memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
 773		memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
 774		memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
 775	}
 776	flushi(PAGE_OFFSET);
 777}
 778
 779static void cheetah_flush_ecache(void)
 780{
 781	unsigned long flush_base = ecache_flush_physbase;
 782	unsigned long flush_linesize = ecache_flush_linesize;
 783	unsigned long flush_size = ecache_flush_size;
 784
 785	__asm__ __volatile__("1: subcc	%0, %4, %0\n\t"
 786			     "   bne,pt	%%xcc, 1b\n\t"
 787			     "    ldxa	[%2 + %0] %3, %%g0\n\t"
 788			     : "=&r" (flush_size)
 789			     : "0" (flush_size), "r" (flush_base),
 790			       "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
 791}
 792
 793static void cheetah_flush_ecache_line(unsigned long physaddr)
 794{
 795	unsigned long alias;
 796
 797	physaddr &= ~(8UL - 1UL);
 798	physaddr = (ecache_flush_physbase +
 799		    (physaddr & ((ecache_flush_size>>1UL) - 1UL)));
 800	alias = physaddr + (ecache_flush_size >> 1UL);
 801	__asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
 802			     "ldxa [%1] %2, %%g0\n\t"
 803			     "membar #Sync"
 804			     : /* no outputs */
 805			     : "r" (physaddr), "r" (alias),
 806			       "i" (ASI_PHYS_USE_EC));
 807}
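
/* A worked example of the mapping above, assuming a 4MB E-cache (so
 * ecache_flush_size == 8MB): physaddr is aligned down to 8 bytes and then
 * folded into the lower 4MB half of the flush area; 'alias' is the same
 * offset in the upper half.  Loading both through ASI_PHYS_USE_EC
 * displaces whatever the E-cache currently holds at that index, pushing
 * the target line out.
 */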
 808
 809/* Unfortunately, the diagnostic access to the I-cache tags we need to
 810 * use to clear the thing interferes with I-cache coherency transactions.
 811 *
 812 * So we must only flush the I-cache when it is disabled.
 813 */
 814static void __cheetah_flush_icache(void)
 815{
 816	unsigned long i;
 817
 818	/* Clear the valid bits in all the tags. */
 819	for (i = 0; i < (1 << 15); i += (1 << 5)) {
 820		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
 821				     "membar #Sync"
 822				     : /* no outputs */
 823				     : "r" (i | (2 << 3)), "i" (ASI_IC_TAG));
 824	}
 825}
 826
 827static void cheetah_flush_icache(void)
 828{
 829	unsigned long dcu_save;
 830
 831	/* Save current DCU, disable I-cache. */
 832	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
 833			     "or %0, %2, %%g1\n\t"
 834			     "stxa %%g1, [%%g0] %1\n\t"
 835			     "membar #Sync"
 836			     : "=r" (dcu_save)
 837			     : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
 838			     : "g1");
 839
 840	__cheetah_flush_icache();
 841
 842	/* Restore DCU register */
 843	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
 844			     "membar #Sync"
 845			     : /* no outputs */
 846			     : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
 847}
 848
 849static void cheetah_flush_dcache(void)
 850{
 851	unsigned long i;
 852
 853	for (i = 0; i < (1 << 16); i += (1 << 5)) {
 854		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
 855				     "membar #Sync"
 856				     : /* no outputs */
 857				     : "r" (i), "i" (ASI_DCACHE_TAG));
 858	}
 859}
 860
 861/* In order to make the even parity correct we must do two things.
 862 * First, we clear DC_data_parity and set DC_utag to an appropriate value.
 863 * Next, we clear out all 32-bytes of data for that line.  Data of
 864 * all-zero + tag parity value of zero == correct parity.
 865 */
 866static void cheetah_plus_zap_dcache_parity(void)
 867{
 868	unsigned long i;
 869
 870	for (i = 0; i < (1 << 16); i += (1 << 5)) {
 871		unsigned long tag = (i >> 14);
 872		unsigned long j;
 873
 874		__asm__ __volatile__("membar	#Sync\n\t"
 875				     "stxa	%0, [%1] %2\n\t"
 876				     "membar	#Sync"
 877				     : /* no outputs */
 878				     : "r" (tag), "r" (i),
 879				       "i" (ASI_DCACHE_UTAG));
 880		for (j = i; j < i + (1 << 5); j += (1 << 3))
 881			__asm__ __volatile__("membar	#Sync\n\t"
 882					     "stxa	%%g0, [%0] %1\n\t"
 883					     "membar	#Sync"
 884					     : /* no outputs */
 885					     : "r" (j), "i" (ASI_DCACHE_DATA));
 886	}
 887}
 888
 889/* Conversion tables used to frob Cheetah AFSR syndrome values into
 890 * something palatable to the memory controller driver get_unumber
 891 * routine.
 892 */
 893#define MT0	137
 894#define MT1	138
 895#define MT2	139
 896#define NONE	254
 897#define MTC0	140
 898#define MTC1	141
 899#define MTC2	142
 900#define MTC3	143
 901#define C0	128
 902#define C1	129
 903#define C2	130
 904#define C3	131
 905#define C4	132
 906#define C5	133
 907#define C6	134
 908#define C7	135
 909#define C8	136
 910#define M2	144
 911#define M3	145
 912#define M4	146
 913#define M	147
 914static unsigned char cheetah_ecc_syntab[] = {
 915/*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
 916/*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
 917/*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
 918/*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
 919/*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
 920/*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
 921/*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
 922/*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
 923/*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
 924/*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
 925/*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
 926/*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
 927/*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
 928/*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
 929/*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
 930/*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
 931/*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
 932/*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
 933/*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
 934/*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
 935/*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
 936/*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
 937/*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
 938/*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
 939/*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
 940/*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
 941/*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
 942/*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
 943/*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
 944/*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
 945/*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
 946/*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
 947};
 948static unsigned char cheetah_mtag_syntab[] = {
 949       NONE, MTC0,
 950       MTC1, NONE,
 951       MTC2, NONE,
 952       NONE, MT0,
 953       MTC3, NONE,
 954       NONE, MT1,
 955       NONE, MT2,
 956       NONE, NONE
 957};
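
/* Both tables are consumed by cheetah_log_errors() below: the AFSR E_SYND
 * field indexes cheetah_ecc_syntab[] and the M_SYND field indexes
 * cheetah_mtag_syntab[], and the resulting code (either a small number or
 * one of the C0-C8 / MTn / MTCn / M-M4 / NONE values defined above) is
 * handed to chmc_getunumber() to identify the memory module.
 */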
 958
  959/* Return the highest priority error condition mentioned. */
 960static __inline__ unsigned long cheetah_get_hipri(unsigned long afsr)
 961{
 962	unsigned long tmp = 0;
 963	int i;
 964
 965	for (i = 0; cheetah_error_table[i].mask; i++) {
 966		if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
 967			return tmp;
 968	}
 969	return tmp;
 970}
 971
 972static const char *cheetah_get_string(unsigned long bit)
 973{
 974	int i;
 975
 976	for (i = 0; cheetah_error_table[i].mask; i++) {
 977		if ((bit & cheetah_error_table[i].mask) != 0UL)
 978			return cheetah_error_table[i].name;
 979	}
 980	return "???";
 981}
 982
 983extern int chmc_getunumber(int, unsigned long, char *, int);
 984
 985static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
 986			       unsigned long afsr, unsigned long afar, int recoverable)
 987{
 988	unsigned long hipri;
 989	char unum[256];
 990
 991	printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
 992	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
 993	       afsr, afar,
 994	       (afsr & CHAFSR_TL1) ? 1 : 0);
 995	printk("%s" "ERROR(%d): TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
 996	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
 997	       regs->tpc, regs->tnpc, regs->tstate);
 998	printk("%s" "ERROR(%d): M_SYND(%lx),  E_SYND(%lx)%s%s\n",
 999	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1000	       (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
1001	       (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
1002	       (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
1003	       (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
1004	hipri = cheetah_get_hipri(afsr);
1005	printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
1006	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1007	       hipri, cheetah_get_string(hipri));
1008
1009	/* Try to get unumber if relevant. */
1010#define ESYND_ERRORS	(CHAFSR_IVC | CHAFSR_IVU | \
1011			 CHAFSR_CPC | CHAFSR_CPU | \
1012			 CHAFSR_UE  | CHAFSR_CE  | \
1013			 CHAFSR_EDC | CHAFSR_EDU  | \
1014			 CHAFSR_UCC | CHAFSR_UCU  | \
1015			 CHAFSR_WDU | CHAFSR_WDC)
1016#define MSYND_ERRORS	(CHAFSR_EMC | CHAFSR_EMU)
1017	if (afsr & ESYND_ERRORS) {
1018		int syndrome;
1019		int ret;
1020
1021		syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
1022		syndrome = cheetah_ecc_syntab[syndrome];
1023		ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
1024		if (ret != -1)
1025			printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
1026			       (recoverable ? KERN_WARNING : KERN_CRIT),
1027			       smp_processor_id(), unum);
1028	} else if (afsr & MSYND_ERRORS) {
1029		int syndrome;
1030		int ret;
1031
1032		syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
1033		syndrome = cheetah_mtag_syntab[syndrome];
1034		ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
1035		if (ret != -1)
1036			printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
1037			       (recoverable ? KERN_WARNING : KERN_CRIT),
1038			       smp_processor_id(), unum);
1039	}
1040
1041	/* Now dump the cache snapshots. */
1042	printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx]\n",
1043	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1044	       (int) info->dcache_index,
1045	       info->dcache_tag,
1046	       info->dcache_utag,
1047	       info->dcache_stag);
1048	printk("%s" "ERROR(%d): D-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
1049	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1050	       info->dcache_data[0],
1051	       info->dcache_data[1],
1052	       info->dcache_data[2],
1053	       info->dcache_data[3]);
1054	printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx] "
1055	       "u[%016lx] l[%016lx]\n",
1056	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1057	       (int) info->icache_index,
1058	       info->icache_tag,
1059	       info->icache_utag,
1060	       info->icache_stag,
1061	       info->icache_upper,
1062	       info->icache_lower);
1063	printk("%s" "ERROR(%d): I-cache INSN0[%016lx] INSN1[%016lx] INSN2[%016lx] INSN3[%016lx]\n",
1064	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1065	       info->icache_data[0],
1066	       info->icache_data[1],
1067	       info->icache_data[2],
1068	       info->icache_data[3]);
1069	printk("%s" "ERROR(%d): I-cache INSN4[%016lx] INSN5[%016lx] INSN6[%016lx] INSN7[%016lx]\n",
1070	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1071	       info->icache_data[4],
1072	       info->icache_data[5],
1073	       info->icache_data[6],
1074	       info->icache_data[7]);
1075	printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016lx]\n",
1076	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1077	       (int) info->ecache_index, info->ecache_tag);
1078	printk("%s" "ERROR(%d): E-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
1079	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1080	       info->ecache_data[0],
1081	       info->ecache_data[1],
1082	       info->ecache_data[2],
1083	       info->ecache_data[3]);
1084
1085	afsr = (afsr & ~hipri) & cheetah_afsr_errors;
1086	while (afsr != 0UL) {
1087		unsigned long bit = cheetah_get_hipri(afsr);
1088
1089		printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
1090		       (recoverable ? KERN_WARNING : KERN_CRIT),
1091		       bit, cheetah_get_string(bit));
1092
1093		afsr &= ~bit;
1094	}
1095
1096	if (!recoverable)
1097		printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
1098}
1099
1100static int cheetah_recheck_errors(struct cheetah_err_info *logp)
1101{
1102	unsigned long afsr, afar;
1103	int ret = 0;
1104
1105	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
1106			     : "=r" (afsr)
1107			     : "i" (ASI_AFSR));
1108	if ((afsr & cheetah_afsr_errors) != 0) {
1109		if (logp != NULL) {
1110			__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
1111					     : "=r" (afar)
1112					     : "i" (ASI_AFAR));
1113			logp->afsr = afsr;
1114			logp->afar = afar;
1115		}
1116		ret = 1;
1117	}
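	/* The AFSR error bits are write-one-to-clear, so storing back the
	 * value just read acknowledges and clears the errors observed here
	 * before returning.
	 */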
1118	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
1119			     "membar #Sync\n\t"
1120			     : : "r" (afsr), "i" (ASI_AFSR));
1121
1122	return ret;
1123}
1124
1125void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1126{
1127	struct cheetah_err_info local_snapshot, *p;
1128	int recoverable;
1129
1130	/* Flush E-cache */
1131	cheetah_flush_ecache();
1132
1133	p = cheetah_get_error_log(afsr);
1134	if (!p) {
1135		prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
1136			    afsr, afar);
1137		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1138			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1139		prom_halt();
1140	}
1141
1142	/* Grab snapshot of logged error. */
1143	memcpy(&local_snapshot, p, sizeof(local_snapshot));
1144
1145	/* If the current trap snapshot does not match what the
1146	 * trap handler passed along into our args, big trouble.
1147	 * In such a case, mark the local copy as invalid.
1148	 *
1149	 * Else, it matches and we mark the afsr in the non-local
1150	 * copy as invalid so we may log new error traps there.
1151	 */
1152	if (p->afsr != afsr || p->afar != afar)
1153		local_snapshot.afsr = CHAFSR_INVALID;
1154	else
1155		p->afsr = CHAFSR_INVALID;
1156
1157	cheetah_flush_icache();
1158	cheetah_flush_dcache();
1159
1160	/* Re-enable I-cache/D-cache */
1161	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1162			     "or %%g1, %1, %%g1\n\t"
1163			     "stxa %%g1, [%%g0] %0\n\t"
1164			     "membar #Sync"
1165			     : /* no outputs */
1166			     : "i" (ASI_DCU_CONTROL_REG),
1167			       "i" (DCU_DC | DCU_IC)
1168			     : "g1");
1169
1170	/* Re-enable error reporting */
1171	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1172			     "or %%g1, %1, %%g1\n\t"
1173			     "stxa %%g1, [%%g0] %0\n\t"
1174			     "membar #Sync"
1175			     : /* no outputs */
1176			     : "i" (ASI_ESTATE_ERROR_EN),
1177			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
1178			     : "g1");
1179
1180	/* Decide if we can continue after handling this trap and
1181	 * logging the error.
1182	 */
1183	recoverable = 1;
1184	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
1185		recoverable = 0;
1186
1187	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
1188	 * error was logged while we had error reporting traps disabled.
1189	 */
1190	if (cheetah_recheck_errors(&local_snapshot)) {
1191		unsigned long new_afsr = local_snapshot.afsr;
1192
1193		/* If we got a new asynchronous error, die... */
1194		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
1195				CHAFSR_WDU | CHAFSR_CPU |
1196				CHAFSR_IVU | CHAFSR_UE |
1197				CHAFSR_BERR | CHAFSR_TO))
1198			recoverable = 0;
1199	}
1200
1201	/* Log errors. */
1202	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1203
1204	if (!recoverable)
1205		panic("Irrecoverable Fast-ECC error trap.\n");
1206
1207	/* Flush E-cache to kick the error trap handlers out. */
1208	cheetah_flush_ecache();
1209}
1210
1211/* Try to fix a correctable error by pushing the line out from
1212 * the E-cache.  Recheck error reporting registers to see if the
1213 * problem is intermittent.
1214 */
1215static int cheetah_fix_ce(unsigned long physaddr)
1216{
1217	unsigned long orig_estate;
1218	unsigned long alias1, alias2;
1219	int ret;
1220
1221	/* Make sure correctable error traps are disabled. */
1222	__asm__ __volatile__("ldxa	[%%g0] %2, %0\n\t"
1223			     "andn	%0, %1, %%g1\n\t"
1224			     "stxa	%%g1, [%%g0] %2\n\t"
1225			     "membar	#Sync"
1226			     : "=&r" (orig_estate)
1227			     : "i" (ESTATE_ERROR_CEEN),
1228			       "i" (ASI_ESTATE_ERROR_EN)
1229			     : "g1");
1230
1231	/* We calculate alias addresses that will force the
1232	 * cache line in question out of the E-cache.  Then
1233	 * we bring it back in with an atomic instruction so
1234	 * that we get it in some modified/exclusive state,
1235	 * then we displace it again to try and get proper ECC
1236	 * pushed back into the system.
1237	 */
1238	physaddr &= ~(8UL - 1UL);
1239	alias1 = (ecache_flush_physbase +
1240		  (physaddr & ((ecache_flush_size >> 1) - 1)));
1241	alias2 = alias1 + (ecache_flush_size >> 1);
1242	__asm__ __volatile__("ldxa	[%0] %3, %%g0\n\t"
1243			     "ldxa	[%1] %3, %%g0\n\t"
1244			     "casxa	[%2] %3, %%g0, %%g0\n\t"
1245			     "membar	#StoreLoad | #StoreStore\n\t"
1246			     "ldxa	[%0] %3, %%g0\n\t"
1247			     "ldxa	[%1] %3, %%g0\n\t"
1248			     "membar	#Sync"
1249			     : /* no outputs */
1250			     : "r" (alias1), "r" (alias2),
1251			       "r" (physaddr), "i" (ASI_PHYS_USE_EC));
1252
1253	/* Did that trigger another error? */
1254	if (cheetah_recheck_errors(NULL)) {
1255		/* Try one more time. */
1256		__asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
1257				     "membar #Sync"
1258				     : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
1259		if (cheetah_recheck_errors(NULL))
1260			ret = 2;
1261		else
1262			ret = 1;
1263	} else {
1264		/* No new error, intermittent problem. */
1265		ret = 0;
1266	}
1267
1268	/* Restore error enables. */
1269	__asm__ __volatile__("stxa	%0, [%%g0] %1\n\t"
1270			     "membar	#Sync"
1271			     : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));
1272
1273	return ret;
1274}
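
/* Return values of cheetah_fix_ce(): 0 means no error reappeared after
 * the displacement (treated as intermittent), 1 means it reappeared but
 * cleared after one more read of the line, 2 means it persisted even
 * then.  The caller in cheetah_cee_handler() currently ignores this
 * value (see the XXX note there).
 */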
1275
1276/* Return non-zero if PADDR is a valid physical memory address. */
1277static int cheetah_check_main_memory(unsigned long paddr)
1278{
1279	int i;
1280
1281	for (i = 0; ; i++) {
1282		if (sp_banks[i].num_bytes == 0)
1283			break;
1284		if (paddr >= sp_banks[i].base_addr &&
1285		    paddr < (sp_banks[i].base_addr + sp_banks[i].num_bytes))
1286			return 1;
1287	}
1288	return 0;
1289}
1290
1291void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1292{
1293	struct cheetah_err_info local_snapshot, *p;
1294	int recoverable, is_memory;
1295
1296	p = cheetah_get_error_log(afsr);
1297	if (!p) {
1298		prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
1299			    afsr, afar);
1300		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1301			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1302		prom_halt();
1303	}
1304
1305	/* Grab snapshot of logged error. */
1306	memcpy(&local_snapshot, p, sizeof(local_snapshot));
1307
1308	/* If the current trap snapshot does not match what the
1309	 * trap handler passed along into our args, big trouble.
1310	 * In such a case, mark the local copy as invalid.
1311	 *
1312	 * Else, it matches and we mark the afsr in the non-local
1313	 * copy as invalid so we may log new error traps there.
1314	 */
1315	if (p->afsr != afsr || p->afar != afar)
1316		local_snapshot.afsr = CHAFSR_INVALID;
1317	else
1318		p->afsr = CHAFSR_INVALID;
1319
1320	is_memory = cheetah_check_main_memory(afar);
1321
1322	if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
1323		/* XXX Might want to log the results of this operation
1324		 * XXX somewhere... -DaveM
1325		 */
1326		cheetah_fix_ce(afar);
1327	}
1328
1329	{
1330		int flush_all, flush_line;
1331
1332		flush_all = flush_line = 0;
1333		if ((afsr & CHAFSR_EDC) != 0UL) {
1334			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
1335				flush_line = 1;
1336			else
1337				flush_all = 1;
1338		} else if ((afsr & CHAFSR_CPC) != 0UL) {
1339			if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
1340				flush_line = 1;
1341			else
1342				flush_all = 1;
1343		}
1344
1345		/* Trap handler only disabled I-cache, flush it. */
1346		cheetah_flush_icache();
1347
1348		/* Re-enable I-cache */
1349		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1350				     "or %%g1, %1, %%g1\n\t"
1351				     "stxa %%g1, [%%g0] %0\n\t"
1352				     "membar #Sync"
1353				     : /* no outputs */
1354				     : "i" (ASI_DCU_CONTROL_REG),
1355				     "i" (DCU_IC)
1356				     : "g1");
1357
1358		if (flush_all)
1359			cheetah_flush_ecache();
1360		else if (flush_line)
1361			cheetah_flush_ecache_line(afar);
1362	}
1363
1364	/* Re-enable error reporting */
1365	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1366			     "or %%g1, %1, %%g1\n\t"
1367			     "stxa %%g1, [%%g0] %0\n\t"
1368			     "membar #Sync"
1369			     : /* no outputs */
1370			     : "i" (ASI_ESTATE_ERROR_EN),
1371			       "i" (ESTATE_ERROR_CEEN)
1372			     : "g1");
1373
1374	/* Decide if we can continue after handling this trap and
1375	 * logging the error.
1376	 */
1377	recoverable = 1;
1378	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
1379		recoverable = 0;
1380
1381	/* Re-check AFSR/AFAR */
1382	(void) cheetah_recheck_errors(&local_snapshot);
1383
1384	/* Log errors. */
1385	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1386
1387	if (!recoverable)
1388		panic("Irrecoverable Correctable-ECC error trap.\n");
1389}
1390
1391void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1392{
1393	struct cheetah_err_info local_snapshot, *p;
1394	int recoverable, is_memory;
1395
1396#ifdef CONFIG_PCI
1397	/* Check for the special PCI poke sequence. */
1398	if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
1399		cheetah_flush_icache();
1400		cheetah_flush_dcache();
1401
1402		/* Re-enable I-cache/D-cache */
1403		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1404				     "or %%g1, %1, %%g1\n\t"
1405				     "stxa %%g1, [%%g0] %0\n\t"
1406				     "membar #Sync"
1407				     : /* no outputs */
1408				     : "i" (ASI_DCU_CONTROL_REG),
1409				       "i" (DCU_DC | DCU_IC)
1410				     : "g1");
1411
1412		/* Re-enable error reporting */
1413		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1414				     "or %%g1, %1, %%g1\n\t"
1415				     "stxa %%g1, [%%g0] %0\n\t"
1416				     "membar #Sync"
1417				     : /* no outputs */
1418				     : "i" (ASI_ESTATE_ERROR_EN),
1419				       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
1420				     : "g1");
1421
1422		(void) cheetah_recheck_errors(NULL);
1423
1424		pci_poke_faulted = 1;
1425		regs->tpc += 4;
1426		regs->tnpc = regs->tpc + 4;
1427		return;
1428	}
1429#endif
1430
1431	p = cheetah_get_error_log(afsr);
1432	if (!p) {
1433		prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
1434			    afsr, afar);
1435		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1436			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1437		prom_halt();
1438	}
1439
1440	/* Grab snapshot of logged error. */
1441	memcpy(&local_snapshot, p, sizeof(local_snapshot));
1442
1443	/* If the current trap snapshot does not match what the
1444	 * trap handler passed along into our args, big trouble.
1445	 * In such a case, mark the local copy as invalid.
1446	 *
1447	 * Else, it matches and we mark the afsr in the non-local
1448	 * copy as invalid so we may log new error traps there.
1449	 */
1450	if (p->afsr != afsr || p->afar != afar)
1451		local_snapshot.afsr = CHAFSR_INVALID;
1452	else
1453		p->afsr = CHAFSR_INVALID;
1454
1455	is_memory = cheetah_check_main_memory(afar);
1456
1457	{
1458		int flush_all, flush_line;
1459
1460		flush_all = flush_line = 0;
1461		if ((afsr & CHAFSR_EDU) != 0UL) {
1462			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU)
1463				flush_line = 1;
1464			else
1465				flush_all = 1;
1466		} else if ((afsr & CHAFSR_BERR) != 0UL) {
1467			if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR)
1468				flush_line = 1;
1469			else
1470				flush_all = 1;
1471		}
1472
1473		cheetah_flush_icache();
1474		cheetah_flush_dcache();
1475
1476		/* Re-enable I/D caches */
1477		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1478				     "or %%g1, %1, %%g1\n\t"
1479				     "stxa %%g1, [%%g0] %0\n\t"
1480				     "membar #Sync"
1481				     : /* no outputs */
1482				     : "i" (ASI_DCU_CONTROL_REG),
1483				     "i" (DCU_IC | DCU_DC)
1484				     : "g1");
1485
1486		if (flush_all)
1487			cheetah_flush_ecache();
1488		else if (flush_line)
1489			cheetah_flush_ecache_line(afar);
1490	}
1491
1492	/* Re-enable error reporting */
1493	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1494			     "or %%g1, %1, %%g1\n\t"
1495			     "stxa %%g1, [%%g0] %0\n\t"
1496			     "membar #Sync"
1497			     : /* no outputs */
1498			     : "i" (ASI_ESTATE_ERROR_EN),
1499			     "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
1500			     : "g1");
1501
1502	/* Decide if we can continue after handling this trap and
1503	 * logging the error.
1504	 */
1505	recoverable = 1;
1506	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
1507		recoverable = 0;
1508
1509	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
1510	 * error was logged while we had error reporting traps disabled.
1511	 */
1512	if (cheetah_recheck_errors(&local_snapshot)) {
1513		unsigned long new_afsr = local_snapshot.afsr;
1514
1515		/* If we got a new asynchronous error, die... */
1516		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
1517				CHAFSR_WDU | CHAFSR_CPU |
1518				CHAFSR_IVU | CHAFSR_UE |
1519				CHAFSR_BERR | CHAFSR_TO))
1520			recoverable = 0;
1521	}
1522
1523	/* Log errors. */
1524	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1525
1526	/* "Recoverable" here means we try to yank the page from ever
1527	 * being newly used again.  This depends upon a few things:
1528	 * 1) Must be main memory, and AFAR must be valid.
1529	 * 2) If we trapped from user, OK.
1530	 * 3) Else, if we trapped from kernel we must find exception
1531	 *    table entry (ie. we have to have been accessing user
1532	 *    space).
1533	 *
1534	 * If AFAR is not in main memory, or we trapped from kernel
1535	 * and cannot find an exception table entry, it is unacceptable
1536	 * to try and continue.
1537	 */
1538	if (recoverable && is_memory) {
1539		if ((regs->tstate & TSTATE_PRIV) == 0UL) {
1540			/* OK, usermode access. */
1541			recoverable = 1;
1542		} else {
1543			unsigned long g2 = regs->u_regs[UREG_G2];
1544			unsigned long fixup = search_extables_range(regs->tpc, &g2);
1545
1546			if (fixup != 0UL) {
1547				/* OK, kernel access to userspace. */
1548				recoverable = 1;
1549
1550			} else {
1551				/* BAD, privileged state is corrupted. */
1552				recoverable = 0;
1553			}
1554
1555			if (recoverable) {
1556				if (pfn_valid(afar >> PAGE_SHIFT))
1557					get_page(pfn_to_page(afar >> PAGE_SHIFT));
1558				else
1559					recoverable = 0;
1560
1561				/* Only perform fixup if we still have a
1562				 * recoverable condition.
1563				 */
1564				if (recoverable) {
1565					regs->tpc = fixup;
1566					regs->tnpc = regs->tpc + 4;
1567					regs->u_regs[UREG_G2] = g2;
1568				}
1569			}
1570		}
1571	} else {
1572		recoverable = 0;
1573	}
1574
1575	if (!recoverable)
1576		panic("Irrecoverable deferred error trap.\n");
1577}
1578
1579/* Handle a D/I cache parity error trap.  TYPE is encoded as:
1580 *
1581 * Bit0:	0=dcache,1=icache
1582 * Bit1:	0=recoverable,1=unrecoverable
1583 *
1584 * The hardware has disabled both the I-cache and D-cache in
1585 * the %dcr register.  
1586 */
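/* Concretely: type 0 is a recoverable D-cache parity error, 1 a
 * recoverable I-cache error, 2 an unrecoverable D-cache error, and 3 an
 * unrecoverable I-cache error; the unrecoverable cases panic after the
 * KERN_EMERG report below.
 */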
1587void cheetah_plus_parity_error(int type, struct pt_regs *regs)
1588{
1589	if (type & 0x1)
1590		__cheetah_flush_icache();
1591	else
1592		cheetah_plus_zap_dcache_parity();
1593	cheetah_flush_dcache();
1594
1595	/* Re-enable I-cache/D-cache */
1596	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1597			     "or %%g1, %1, %%g1\n\t"
1598			     "stxa %%g1, [%%g0] %0\n\t"
1599			     "membar #Sync"
1600			     : /* no outputs */
1601			     : "i" (ASI_DCU_CONTROL_REG),
1602			       "i" (DCU_DC | DCU_IC)
1603			     : "g1");
1604
1605	if (type & 0x2) {
1606		printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
1607		       smp_processor_id(),
1608		       (type & 0x1) ? 'I' : 'D',
1609		       regs->tpc);
1610		panic("Irrecoverable Cheetah+ parity error.");
1611	}
1612
1613	printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
1614	       smp_processor_id(),
1615	       (type & 0x1) ? 'I' : 'D',
1616	       regs->tpc);
1617}
1618
1619void do_fpe_common(struct pt_regs *regs)
1620{
1621	if (regs->tstate & TSTATE_PRIV) {
1622		regs->tpc = regs->tnpc;
1623		regs->tnpc += 4;
1624	} else {
1625		unsigned long fsr = current_thread_info()->xfsr[0];
1626		sigi

The file is truncated at this point; the remainder is available in the full file in the repository.