
/arch/mips/kernel/traps.c

http://github.com/mirrors/linux


   1/*
   2 * This file is subject to the terms and conditions of the GNU General Public
   3 * License.  See the file "COPYING" in the main directory of this archive
   4 * for more details.
   5 *
   6 * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
   7 * Copyright (C) 1995, 1996 Paul M. Antoine
   8 * Copyright (C) 1998 Ulf Carlsson
   9 * Copyright (C) 1999 Silicon Graphics, Inc.
  10 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
  11 * Copyright (C) 2002, 2003, 2004, 2005, 2007  Maciej W. Rozycki
  12 * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
  13 * Copyright (C) 2014, Imagination Technologies Ltd.
  14 */
  15#include <linux/bitops.h>
  16#include <linux/bug.h>
  17#include <linux/compiler.h>
  18#include <linux/context_tracking.h>
  19#include <linux/cpu_pm.h>
  20#include <linux/kexec.h>
  21#include <linux/init.h>
  22#include <linux/kernel.h>
  23#include <linux/module.h>
  24#include <linux/extable.h>
  25#include <linux/mm.h>
  26#include <linux/sched/mm.h>
  27#include <linux/sched/debug.h>
  28#include <linux/smp.h>
  29#include <linux/spinlock.h>
  30#include <linux/kallsyms.h>
  31#include <linux/memblock.h>
  32#include <linux/interrupt.h>
  33#include <linux/ptrace.h>
  34#include <linux/kgdb.h>
  35#include <linux/kdebug.h>
  36#include <linux/kprobes.h>
  37#include <linux/notifier.h>
  38#include <linux/kdb.h>
  39#include <linux/irq.h>
  40#include <linux/perf_event.h>
  41
  42#include <asm/addrspace.h>
  43#include <asm/bootinfo.h>
  44#include <asm/branch.h>
  45#include <asm/break.h>
  46#include <asm/cop2.h>
  47#include <asm/cpu.h>
  48#include <asm/cpu-type.h>
  49#include <asm/dsp.h>
  50#include <asm/fpu.h>
  51#include <asm/fpu_emulator.h>
  52#include <asm/idle.h>
  53#include <asm/isa-rev.h>
  54#include <asm/mips-cps.h>
  55#include <asm/mips-r2-to-r6-emul.h>
  56#include <asm/mipsregs.h>
  57#include <asm/mipsmtregs.h>
  58#include <asm/module.h>
  59#include <asm/msa.h>
  60#include <asm/pgtable.h>
  61#include <asm/ptrace.h>
  62#include <asm/sections.h>
  63#include <asm/siginfo.h>
  64#include <asm/tlbdebug.h>
  65#include <asm/traps.h>
  66#include <linux/uaccess.h>
  67#include <asm/watch.h>
  68#include <asm/mmu_context.h>
  69#include <asm/types.h>
  70#include <asm/stacktrace.h>
  71#include <asm/tlbex.h>
  72#include <asm/uasm.h>
  73
  74extern void check_wait(void);
  75extern asmlinkage void rollback_handle_int(void);
  76extern asmlinkage void handle_int(void);
  77extern asmlinkage void handle_adel(void);
  78extern asmlinkage void handle_ades(void);
  79extern asmlinkage void handle_ibe(void);
  80extern asmlinkage void handle_dbe(void);
  81extern asmlinkage void handle_sys(void);
  82extern asmlinkage void handle_bp(void);
  83extern asmlinkage void handle_ri(void);
  84extern asmlinkage void handle_ri_rdhwr_tlbp(void);
  85extern asmlinkage void handle_ri_rdhwr(void);
  86extern asmlinkage void handle_cpu(void);
  87extern asmlinkage void handle_ov(void);
  88extern asmlinkage void handle_tr(void);
  89extern asmlinkage void handle_msa_fpe(void);
  90extern asmlinkage void handle_fpe(void);
  91extern asmlinkage void handle_ftlb(void);
  92extern asmlinkage void handle_msa(void);
  93extern asmlinkage void handle_mdmx(void);
  94extern asmlinkage void handle_watch(void);
  95extern asmlinkage void handle_mt(void);
  96extern asmlinkage void handle_dsp(void);
  97extern asmlinkage void handle_mcheck(void);
  98extern asmlinkage void handle_reserved(void);
  99extern void tlb_do_page_fault_0(void);
 100
 101void (*board_be_init)(void);
 102int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
 103void (*board_nmi_handler_setup)(void);
 104void (*board_ejtag_handler_setup)(void);
 105void (*board_bind_eic_interrupt)(int irq, int regset);
 106void (*board_ebase_setup)(void);
 107void(*board_cache_error_setup)(void);
 108
 109static void show_raw_backtrace(unsigned long reg29)
 110{
 111	unsigned long *sp = (unsigned long *)(reg29 & ~3);
 112	unsigned long addr;
 113
 114	printk("Call Trace:");
 115#ifdef CONFIG_KALLSYMS
 116	printk("\n");
 117#endif
 118	while (!kstack_end(sp)) {
 119		unsigned long __user *p =
 120			(unsigned long __user *)(unsigned long)sp++;
 121		if (__get_user(addr, p)) {
 122			printk(" (Bad stack address)");
 123			break;
 124		}
 125		if (__kernel_text_address(addr))
 126			print_ip_sym(addr);
 127	}
 128	printk("\n");
 129}
 130
 131#ifdef CONFIG_KALLSYMS
 132int raw_show_trace;
 133static int __init set_raw_show_trace(char *str)
 134{
 135	raw_show_trace = 1;
 136	return 1;
 137}
 138__setup("raw_show_trace", set_raw_show_trace);
 139#endif
 140
 141static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
 142{
 143	unsigned long sp = regs->regs[29];
 144	unsigned long ra = regs->regs[31];
 145	unsigned long pc = regs->cp0_epc;
 146
 147	if (!task)
 148		task = current;
 149
 150	if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) {
 151		show_raw_backtrace(sp);
 152		return;
 153	}
 154	printk("Call Trace:\n");
 155	do {
 156		print_ip_sym(pc);
 157		pc = unwind_stack(task, &sp, pc, &ra);
 158	} while (pc);
 159	pr_cont("\n");
 160}
 161
 162/*
 163 * This routine abuses get_user()/put_user() to reference pointers
 164 * with at least a bit of error checking ...
 165 */
 166static void show_stacktrace(struct task_struct *task,
 167	const struct pt_regs *regs)
 168{
 169	const int field = 2 * sizeof(unsigned long);
 170	long stackdata;
 171	int i;
 172	unsigned long __user *sp = (unsigned long __user *)regs->regs[29];
 173
 174	printk("Stack :");
 175	i = 0;
 176	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
 177		if (i && ((i % (64 / field)) == 0)) {
 178			pr_cont("\n");
 179			printk("       ");
 180		}
 181		if (i > 39) {
 182			pr_cont(" ...");
 183			break;
 184		}
 185
 186		if (__get_user(stackdata, sp++)) {
 187			pr_cont(" (Bad stack address)");
 188			break;
 189		}
 190
 191		pr_cont(" %0*lx", field, stackdata);
 192		i++;
 193	}
 194	pr_cont("\n");
 195	show_backtrace(task, regs);
 196}
 197
 198void show_stack(struct task_struct *task, unsigned long *sp)
 199{
 200	struct pt_regs regs;
 201	mm_segment_t old_fs = get_fs();
 202
 203	regs.cp0_status = KSU_KERNEL;
 204	if (sp) {
 205		regs.regs[29] = (unsigned long)sp;
 206		regs.regs[31] = 0;
 207		regs.cp0_epc = 0;
 208	} else {
 209		if (task && task != current) {
 210			regs.regs[29] = task->thread.reg29;
 211			regs.regs[31] = 0;
 212			regs.cp0_epc = task->thread.reg31;
 213		} else {
 214			prepare_frametrace(&regs);
 215		}
 216	}
 217	/*
 218	 * show_stack() deals exclusively with kernel mode, so be sure to access
 219	 * the stack in the kernel (not user) address space.
 220	 */
 221	set_fs(KERNEL_DS);
 222	show_stacktrace(task, &regs);
 223	set_fs(old_fs);
 224}
 225
 226static void show_code(unsigned int __user *pc)
 227{
 228	long i;
 229	unsigned short __user *pc16 = NULL;
 230
 231	printk("Code:");
 232
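	/*
	 * If bit 0 of the EPC value is set, the faulting code was a
	 * compressed (MIPS16e/microMIPS) instruction stream, so dump
	 * 16-bit halfwords from the word-aligned address instead of
	 * 32-bit words.
	 */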
 233	if ((unsigned long)pc & 1)
 234		pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
 235	for(i = -3 ; i < 6 ; i++) {
 236		unsigned int insn;
 237		if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
 238			pr_cont(" (Bad address in epc)\n");
 239			break;
 240		}
 241		pr_cont("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
 242	}
 243	pr_cont("\n");
 244}
 245
 246static void __show_regs(const struct pt_regs *regs)
 247{
 248	const int field = 2 * sizeof(unsigned long);
 249	unsigned int cause = regs->cp0_cause;
 250	unsigned int exccode;
 251	int i;
 252
 253	show_regs_print_info(KERN_DEFAULT);
 254
 255	/*
 256	 * Saved main processor registers
 257	 */
 258	for (i = 0; i < 32; ) {
 259		if ((i % 4) == 0)
 260			printk("$%2d   :", i);
 261		if (i == 0)
 262			pr_cont(" %0*lx", field, 0UL);
 263		else if (i == 26 || i == 27)
 264			pr_cont(" %*s", field, "");
 265		else
 266			pr_cont(" %0*lx", field, regs->regs[i]);
 267
 268		i++;
 269		if ((i % 4) == 0)
 270			pr_cont("\n");
 271	}
 272
 273#ifdef CONFIG_CPU_HAS_SMARTMIPS
 274	printk("Acx    : %0*lx\n", field, regs->acx);
 275#endif
 276	if (MIPS_ISA_REV < 6) {
 277		printk("Hi    : %0*lx\n", field, regs->hi);
 278		printk("Lo    : %0*lx\n", field, regs->lo);
 279	}
 280
 281	/*
 282	 * Saved cp0 registers
 283	 */
 284	printk("epc   : %0*lx %pS\n", field, regs->cp0_epc,
 285	       (void *) regs->cp0_epc);
 286	printk("ra    : %0*lx %pS\n", field, regs->regs[31],
 287	       (void *) regs->regs[31]);
 288
 289	printk("Status: %08x	", (uint32_t) regs->cp0_status);
 290
 291	if (cpu_has_3kex) {
 292		if (regs->cp0_status & ST0_KUO)
 293			pr_cont("KUo ");
 294		if (regs->cp0_status & ST0_IEO)
 295			pr_cont("IEo ");
 296		if (regs->cp0_status & ST0_KUP)
 297			pr_cont("KUp ");
 298		if (regs->cp0_status & ST0_IEP)
 299			pr_cont("IEp ");
 300		if (regs->cp0_status & ST0_KUC)
 301			pr_cont("KUc ");
 302		if (regs->cp0_status & ST0_IEC)
 303			pr_cont("IEc ");
 304	} else if (cpu_has_4kex) {
 305		if (regs->cp0_status & ST0_KX)
 306			pr_cont("KX ");
 307		if (regs->cp0_status & ST0_SX)
 308			pr_cont("SX ");
 309		if (regs->cp0_status & ST0_UX)
 310			pr_cont("UX ");
 311		switch (regs->cp0_status & ST0_KSU) {
 312		case KSU_USER:
 313			pr_cont("USER ");
 314			break;
 315		case KSU_SUPERVISOR:
 316			pr_cont("SUPERVISOR ");
 317			break;
 318		case KSU_KERNEL:
 319			pr_cont("KERNEL ");
 320			break;
 321		default:
 322			pr_cont("BAD_MODE ");
 323			break;
 324		}
 325		if (regs->cp0_status & ST0_ERL)
 326			pr_cont("ERL ");
 327		if (regs->cp0_status & ST0_EXL)
 328			pr_cont("EXL ");
 329		if (regs->cp0_status & ST0_IE)
 330			pr_cont("IE ");
 331	}
 332	pr_cont("\n");
 333
 334	exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
 335	printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);
 336
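	/*
	 * ExcCodes 1-5 (TLB modified, TLB load, TLB store, address
	 * error on load/fetch, address error on store) are the ones
	 * that update BadVAddr, so only print it for those.
	 */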
 337	if (1 <= exccode && exccode <= 5)
 338		printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);
 339
 340	printk("PrId  : %08x (%s)\n", read_c0_prid(),
 341	       cpu_name_string());
 342}
 343
 344/*
 345 * FIXME: really the generic show_regs should take a const pointer argument.
 346 */
 347void show_regs(struct pt_regs *regs)
 348{
 349	__show_regs(regs);
 350	dump_stack();
 351}
 352
 353void show_registers(struct pt_regs *regs)
 354{
 355	const int field = 2 * sizeof(unsigned long);
 356	mm_segment_t old_fs = get_fs();
 357
 358	__show_regs(regs);
 359	print_modules();
 360	printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
 361	       current->comm, current->pid, current_thread_info(), current,
 362	      field, current_thread_info()->tp_value);
 363	if (cpu_has_userlocal) {
 364		unsigned long tls;
 365
 366		tls = read_c0_userlocal();
 367		if (tls != current_thread_info()->tp_value)
 368			printk("*HwTLS: %0*lx\n", field, tls);
 369	}
 370
 371	if (!user_mode(regs))
 372		/* Necessary for getting the correct stack content */
 373		set_fs(KERNEL_DS);
 374	show_stacktrace(current, regs);
 375	show_code((unsigned int __user *) regs->cp0_epc);
 376	printk("\n");
 377	set_fs(old_fs);
 378}
 379
 380static DEFINE_RAW_SPINLOCK(die_lock);
 381
 382void __noreturn die(const char *str, struct pt_regs *regs)
 383{
 384	static int die_counter;
 385	int sig = SIGSEGV;
 386
 387	oops_enter();
 388
 389	if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
 390		       SIGSEGV) == NOTIFY_STOP)
 391		sig = 0;
 392
 393	console_verbose();
 394	raw_spin_lock_irq(&die_lock);
 395	bust_spinlocks(1);
 396
 397	printk("%s[#%d]:\n", str, ++die_counter);
 398	show_registers(regs);
 399	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
 400	raw_spin_unlock_irq(&die_lock);
 401
 402	oops_exit();
 403
 404	if (in_interrupt())
 405		panic("Fatal exception in interrupt");
 406
 407	if (panic_on_oops)
 408		panic("Fatal exception");
 409
 410	if (regs && kexec_should_crash(current))
 411		crash_kexec(regs);
 412
 413	do_exit(sig);
 414}
 415
 416extern struct exception_table_entry __start___dbe_table[];
 417extern struct exception_table_entry __stop___dbe_table[];
 418
 419__asm__(
 420"	.section	__dbe_table, \"a\"\n"
 421"	.previous			\n");
 422
 423/* Given an address, look for it in the exception tables. */
 424static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
 425{
 426	const struct exception_table_entry *e;
 427
 428	e = search_extable(__start___dbe_table,
 429			   __stop___dbe_table - __start___dbe_table, addr);
 430	if (!e)
 431		e = search_module_dbetables(addr);
 432	return e;
 433}
 434
 435asmlinkage void do_be(struct pt_regs *regs)
 436{
 437	const int field = 2 * sizeof(unsigned long);
 438	const struct exception_table_entry *fixup = NULL;
 439	int data = regs->cp0_cause & 4;
 440	int action = MIPS_BE_FATAL;
 441	enum ctx_state prev_state;
 442
 443	prev_state = exception_enter();
 444	/* XXX For now.	 Fixme, this searches the wrong table ...  */
 445	if (data && !user_mode(regs))
 446		fixup = search_dbe_tables(exception_epc(regs));
 447
 448	if (fixup)
 449		action = MIPS_BE_FIXUP;
 450
 451	if (board_be_handler)
 452		action = board_be_handler(regs, fixup != NULL);
 453	else
 454		mips_cm_error_report();
 455
 456	switch (action) {
 457	case MIPS_BE_DISCARD:
 458		goto out;
 459	case MIPS_BE_FIXUP:
 460		if (fixup) {
 461			regs->cp0_epc = fixup->nextinsn;
 462			goto out;
 463		}
 464		break;
 465	default:
 466		break;
 467	}
 468
 469	/*
 470	 * Assume it would be too dangerous to continue ...
 471	 */
 472	printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
 473	       data ? "Data" : "Instruction",
 474	       field, regs->cp0_epc, field, regs->regs[31]);
 475	if (notify_die(DIE_OOPS, "bus error", regs, 0, current->thread.trap_nr,
 476		       SIGBUS) == NOTIFY_STOP)
 477		goto out;
 478
 479	die_if_kernel("Oops", regs);
 480	force_sig(SIGBUS);
 481
 482out:
 483	exception_exit(prev_state);
 484}
 485
 486/*
 487 * ll/sc, rdhwr, sync emulation
 488 */
 489
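/*
 * These masks pick apart a classic 32-bit MIPS instruction word:
 * bits [31:26] opcode, [25:21] rs/base, [20:16] rt, [15:11] rd,
 * [15:0] immediate/offset and [5:0] function code.
 */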
 490#define OPCODE 0xfc000000
 491#define BASE   0x03e00000
 492#define RT     0x001f0000
 493#define OFFSET 0x0000ffff
 494#define LL     0xc0000000
 495#define SC     0xe0000000
 496#define SPEC0  0x00000000
 497#define SPEC3  0x7c000000
 498#define RD     0x0000f800
 499#define FUNC   0x0000003f
 500#define SYNC   0x0000000f
 501#define RDHWR  0x0000003b
 502
 503/*  microMIPS definitions   */
 504#define MM_POOL32A_FUNC 0xfc00ffff
 505#define MM_RDHWR        0x00006b3c
 506#define MM_RS           0x001f0000
 507#define MM_RT           0x03e00000
 508
 509/*
 510 * The ll_bit is cleared by r*_switch.S
 511 */
 512
 513unsigned int ll_bit;
 514struct task_struct *ll_task;
 515
 516static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
 517{
 518	unsigned long value, __user *vaddr;
 519	long offset;
 520
 521	/*
  522	 * analyse the ll instruction that just caused an RI exception
  523	 * and compute the referenced address (vaddr).
 524	 */
 525
 526	/* sign extend offset */
 527	offset = opcode & OFFSET;
 528	offset <<= 16;
 529	offset >>= 16;
 530
 531	vaddr = (unsigned long __user *)
 532		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
 533
 534	if ((unsigned long)vaddr & 3)
 535		return SIGBUS;
 536	if (get_user(value, vaddr))
 537		return SIGSEGV;
 538
 539	preempt_disable();
 540
 541	if (ll_task == NULL || ll_task == current) {
 542		ll_bit = 1;
 543	} else {
 544		ll_bit = 0;
 545	}
 546	ll_task = current;
 547
 548	preempt_enable();
 549
 550	regs->regs[(opcode & RT) >> 16] = value;
 551
 552	return 0;
 553}
 554
 555static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
 556{
 557	unsigned long __user *vaddr;
 558	unsigned long reg;
 559	long offset;
 560
 561	/*
  562	 * analyse the sc instruction that just caused an RI exception
  563	 * and compute the referenced address (vaddr).
 564	 */
 565
 566	/* sign extend offset */
 567	offset = opcode & OFFSET;
 568	offset <<= 16;
 569	offset >>= 16;
 570
 571	vaddr = (unsigned long __user *)
 572		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
 573	reg = (opcode & RT) >> 16;
 574
 575	if ((unsigned long)vaddr & 3)
 576		return SIGBUS;
 577
 578	preempt_disable();
 579
 580	if (ll_bit == 0 || ll_task != current) {
 581		regs->regs[reg] = 0;
 582		preempt_enable();
 583		return 0;
 584	}
 585
 586	preempt_enable();
 587
 588	if (put_user(regs->regs[reg], vaddr))
 589		return SIGSEGV;
 590
 591	regs->regs[reg] = 1;
 592
 593	return 0;
 594}
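
/*
 * Net effect of the two helpers above: an emulated sc stores and
 * returns 1 in rt only if the current task performed the most recent
 * emulated ll and ll_bit is still set (it is cleared on context
 * switch by r*_switch.S); otherwise rt is set to 0.  Spurious sc
 * failures are architecturally permitted, so the usual ll/sc retry
 * loop still works.
 */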
 595
 596/*
  597 * ll uses the opcode of lwc0 and sc uses the opcode of swc0.  That is, both
 598 * opcodes are supposed to result in coprocessor unusable exceptions if
 599 * executed on ll/sc-less processors.  That's the theory.  In practice a
 600 * few processors such as NEC's VR4100 throw reserved instruction exceptions
 601 * instead, so we're doing the emulation thing in both exception handlers.
 602 */
 603static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
 604{
 605	if ((opcode & OPCODE) == LL) {
 606		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
 607				1, regs, 0);
 608		return simulate_ll(regs, opcode);
 609	}
 610	if ((opcode & OPCODE) == SC) {
 611		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
 612				1, regs, 0);
 613		return simulate_sc(regs, opcode);
 614	}
 615
 616	return -1;			/* Must be something else ... */
 617}
 618
 619/*
 620 * Simulate trapping 'rdhwr' instructions to provide user accessible
 621 * registers not implemented in hardware.
 622 */
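/*
 * The hardware register numbers follow the MIPS RDHWR convention:
 * 0 = CPUNum, 1 = SYNCI_Step, 2 = CC, 3 = CCRes, 29 = ULR.  The
 * userspace TLS ABI reads ULR with "rdhwr $3, $29", so this
 * trap-and-emulate path is what makes TLS work on CPUs lacking a
 * usable RDHWR/UserLocal implementation.
 */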
 623static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
 624{
 625	struct thread_info *ti = task_thread_info(current);
 626
 627	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
 628			1, regs, 0);
 629	switch (rd) {
 630	case MIPS_HWR_CPUNUM:		/* CPU number */
 631		regs->regs[rt] = smp_processor_id();
 632		return 0;
 633	case MIPS_HWR_SYNCISTEP:	/* SYNCI length */
 634		regs->regs[rt] = min(current_cpu_data.dcache.linesz,
 635				     current_cpu_data.icache.linesz);
 636		return 0;
 637	case MIPS_HWR_CC:		/* Read count register */
 638		regs->regs[rt] = read_c0_count();
 639		return 0;
 640	case MIPS_HWR_CCRES:		/* Count register resolution */
 641		switch (current_cpu_type()) {
 642		case CPU_20KC:
 643		case CPU_25KF:
 644			regs->regs[rt] = 1;
 645			break;
 646		default:
 647			regs->regs[rt] = 2;
 648		}
 649		return 0;
 650	case MIPS_HWR_ULR:		/* Read UserLocal register */
 651		regs->regs[rt] = ti->tp_value;
 652		return 0;
 653	default:
 654		return -1;
 655	}
 656}
 657
 658static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
 659{
 660	if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
 661		int rd = (opcode & RD) >> 11;
 662		int rt = (opcode & RT) >> 16;
 663
 664		simulate_rdhwr(regs, rd, rt);
 665		return 0;
 666	}
 667
 668	/* Not ours.  */
 669	return -1;
 670}
 671
 672static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned int opcode)
 673{
 674	if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
 675		int rd = (opcode & MM_RS) >> 16;
 676		int rt = (opcode & MM_RT) >> 21;
 677		simulate_rdhwr(regs, rd, rt);
 678		return 0;
 679	}
 680
 681	/* Not ours.  */
 682	return -1;
 683}
 684
 685static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
 686{
 687	if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
 688		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
 689				1, regs, 0);
 690		return 0;
 691	}
 692
 693	return -1;			/* Must be something else ... */
 694}
 695
 696asmlinkage void do_ov(struct pt_regs *regs)
 697{
 698	enum ctx_state prev_state;
 699
 700	prev_state = exception_enter();
 701	die_if_kernel("Integer overflow", regs);
 702
 703	force_sig_fault(SIGFPE, FPE_INTOVF, (void __user *)regs->cp0_epc);
 704	exception_exit(prev_state);
 705}
 706
 707#ifdef CONFIG_MIPS_FP_SUPPORT
 708
 709/*
 710 * Send SIGFPE according to FCSR Cause bits, which must have already
  711 * been masked against Enable bits.  This is important as Inexact can
 712 * happen together with Overflow or Underflow, and `ptrace' can set
 713 * any bits.
 714 */
 715void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
 716		     struct task_struct *tsk)
 717{
 718	int si_code = FPE_FLTUNK;
 719
 720	if (fcr31 & FPU_CSR_INV_X)
 721		si_code = FPE_FLTINV;
 722	else if (fcr31 & FPU_CSR_DIV_X)
 723		si_code = FPE_FLTDIV;
 724	else if (fcr31 & FPU_CSR_OVF_X)
 725		si_code = FPE_FLTOVF;
 726	else if (fcr31 & FPU_CSR_UDF_X)
 727		si_code = FPE_FLTUND;
 728	else if (fcr31 & FPU_CSR_INE_X)
 729		si_code = FPE_FLTRES;
 730
 731	force_sig_fault_to_task(SIGFPE, si_code, fault_addr, tsk);
 732}
 733
 734int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
 735{
 736	int si_code;
 737	struct vm_area_struct *vma;
 738
 739	switch (sig) {
 740	case 0:
 741		return 0;
 742
 743	case SIGFPE:
 744		force_fcr31_sig(fcr31, fault_addr, current);
 745		return 1;
 746
 747	case SIGBUS:
 748		force_sig_fault(SIGBUS, BUS_ADRERR, fault_addr);
 749		return 1;
 750
 751	case SIGSEGV:
 752		down_read(&current->mm->mmap_sem);
 753		vma = find_vma(current->mm, (unsigned long)fault_addr);
 754		if (vma && (vma->vm_start <= (unsigned long)fault_addr))
 755			si_code = SEGV_ACCERR;
 756		else
 757			si_code = SEGV_MAPERR;
 758		up_read(&current->mm->mmap_sem);
 759		force_sig_fault(SIGSEGV, si_code, fault_addr);
 760		return 1;
 761
 762	default:
 763		force_sig(sig);
 764		return 1;
 765	}
 766}
 767
 768static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
 769		       unsigned long old_epc, unsigned long old_ra)
 770{
 771	union mips_instruction inst = { .word = opcode };
 772	void __user *fault_addr;
 773	unsigned long fcr31;
 774	int sig;
 775
 776	/* If it's obviously not an FP instruction, skip it */
 777	switch (inst.i_format.opcode) {
 778	case cop1_op:
 779	case cop1x_op:
 780	case lwc1_op:
 781	case ldc1_op:
 782	case swc1_op:
 783	case sdc1_op:
 784		break;
 785
 786	default:
 787		return -1;
 788	}
 789
 790	/*
 791	 * do_ri skipped over the instruction via compute_return_epc, undo
 792	 * that for the FPU emulator.
 793	 */
 794	regs->cp0_epc = old_epc;
 795	regs->regs[31] = old_ra;
 796
 797	/* Run the emulator */
 798	sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
 799				       &fault_addr);
 800
 801	/*
 802	 * We can't allow the emulated instruction to leave any
 803	 * enabled Cause bits set in $fcr31.
 804	 */
 805	fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
 806	current->thread.fpu.fcr31 &= ~fcr31;
 807
 808	/* Restore the hardware register state */
 809	own_fpu(1);
 810
 811	/* Send a signal if required.  */
 812	process_fpemu_return(sig, fault_addr, fcr31);
 813
 814	return 0;
 815}
 816
 817/*
 818 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
 819 */
 820asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
 821{
 822	enum ctx_state prev_state;
 823	void __user *fault_addr;
 824	int sig;
 825
 826	prev_state = exception_enter();
 827	if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
 828		       SIGFPE) == NOTIFY_STOP)
 829		goto out;
 830
 831	/* Clear FCSR.Cause before enabling interrupts */
 832	write_32bit_cp1_register(CP1_STATUS, fcr31 & ~mask_fcr31_x(fcr31));
 833	local_irq_enable();
 834
 835	die_if_kernel("FP exception in kernel code", regs);
 836
 837	if (fcr31 & FPU_CSR_UNI_X) {
 838		/*
 839		 * Unimplemented operation exception.  If we've got the full
 840		 * software emulator on-board, let's use it...
 841		 *
 842		 * Force FPU to dump state into task/thread context.  We're
 843		 * moving a lot of data here for what is probably a single
 844		 * instruction, but the alternative is to pre-decode the FP
 845		 * register operands before invoking the emulator, which seems
 846		 * a bit extreme for what should be an infrequent event.
 847		 */
 848
 849		/* Run the emulator */
 850		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
 851					       &fault_addr);
 852
 853		/*
 854		 * We can't allow the emulated instruction to leave any
 855		 * enabled Cause bits set in $fcr31.
 856		 */
 857		fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
 858		current->thread.fpu.fcr31 &= ~fcr31;
 859
 860		/* Restore the hardware register state */
 861		own_fpu(1);	/* Using the FPU again.	 */
 862	} else {
 863		sig = SIGFPE;
 864		fault_addr = (void __user *) regs->cp0_epc;
 865	}
 866
 867	/* Send a signal if required.  */
 868	process_fpemu_return(sig, fault_addr, fcr31);
 869
 870out:
 871	exception_exit(prev_state);
 872}
 873
 874/*
 875 * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
 876 * emulated more than some threshold number of instructions, force migration to
 877 * a "CPU" that has FP support.
 878 */
 879static void mt_ase_fp_affinity(void)
 880{
 881#ifdef CONFIG_MIPS_MT_FPAFF
 882	if (mt_fpemul_threshold > 0 &&
 883	     ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
 884		/*
 885		 * If there's no FPU present, or if the application has already
 886		 * restricted the allowed set to exclude any CPUs with FPUs,
 887		 * we'll skip the procedure.
 888		 */
 889		if (cpumask_intersects(&current->cpus_mask, &mt_fpu_cpumask)) {
 890			cpumask_t tmask;
 891
 892			current->thread.user_cpus_allowed
 893				= current->cpus_mask;
 894			cpumask_and(&tmask, &current->cpus_mask,
 895				    &mt_fpu_cpumask);
 896			set_cpus_allowed_ptr(current, &tmask);
 897			set_thread_flag(TIF_FPUBOUND);
 898		}
 899	}
 900#endif /* CONFIG_MIPS_MT_FPAFF */
 901}
 902
 903#else /* !CONFIG_MIPS_FP_SUPPORT */
 904
 905static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
 906		       unsigned long old_epc, unsigned long old_ra)
 907{
 908	return -1;
 909}
 910
 911#endif /* !CONFIG_MIPS_FP_SUPPORT */
 912
 913void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
 914	const char *str)
 915{
 916	char b[40];
 917
 918#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
 919	if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr,
 920			 SIGTRAP) == NOTIFY_STOP)
 921		return;
 922#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
 923
 924	if (notify_die(DIE_TRAP, str, regs, code, current->thread.trap_nr,
 925		       SIGTRAP) == NOTIFY_STOP)
 926		return;
 927
 928	/*
 929	 * A short test says that IRIX 5.3 sends SIGTRAP for all trap
 930	 * insns, even for trap and break codes that indicate arithmetic
 931	 * failures.  Weird ...
 932	 * But should we continue the brokenness???  --macro
 933	 */
 934	switch (code) {
 935	case BRK_OVERFLOW:
 936	case BRK_DIVZERO:
 937		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
 938		die_if_kernel(b, regs);
 939		force_sig_fault(SIGFPE,
 940				code == BRK_DIVZERO ? FPE_INTDIV : FPE_INTOVF,
 941				(void __user *) regs->cp0_epc);
 942		break;
 943	case BRK_BUG:
 944		die_if_kernel("Kernel bug detected", regs);
 945		force_sig(SIGTRAP);
 946		break;
 947	case BRK_MEMU:
 948		/*
 949		 * This breakpoint code is used by the FPU emulator to retake
 950		 * control of the CPU after executing the instruction from the
 951		 * delay slot of an emulated branch.
 952		 *
  953		 * Terminate if the exception was recognized as a delay slot return,
 954		 * otherwise handle as normal.
 955		 */
 956		if (do_dsemulret(regs))
 957			return;
 958
 959		die_if_kernel("Math emu break/trap", regs);
 960		force_sig(SIGTRAP);
 961		break;
 962	default:
 963		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
 964		die_if_kernel(b, regs);
 965		if (si_code) {
 966			force_sig_fault(SIGTRAP, si_code, NULL);
 967		} else {
 968			force_sig(SIGTRAP);
 969		}
 970	}
 971}
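
/*
 * Compilers targeting MIPS typically guard integer division with a
 * conditional trap (teq) carrying code BRK_DIVZERO, so the
 * BRK_DIVZERO/BRK_OVERFLOW cases above are what turn a divide by zero
 * into SIGFPE with FPE_INTDIV for userspace rather than a plain
 * SIGTRAP.
 */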
 972
 973asmlinkage void do_bp(struct pt_regs *regs)
 974{
 975	unsigned long epc = msk_isa16_mode(exception_epc(regs));
 976	unsigned int opcode, bcode;
 977	enum ctx_state prev_state;
 978	mm_segment_t seg;
 979
 980	seg = get_fs();
 981	if (!user_mode(regs))
 982		set_fs(KERNEL_DS);
 983
 984	prev_state = exception_enter();
 985	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
 986	if (get_isa16_mode(regs->cp0_epc)) {
 987		u16 instr[2];
 988
 989		if (__get_user(instr[0], (u16 __user *)epc))
 990			goto out_sigsegv;
 991
 992		if (!cpu_has_mmips) {
 993			/* MIPS16e mode */
 994			bcode = (instr[0] >> 5) & 0x3f;
 995		} else if (mm_insn_16bit(instr[0])) {
 996			/* 16-bit microMIPS BREAK */
 997			bcode = instr[0] & 0xf;
 998		} else {
 999			/* 32-bit microMIPS BREAK */
1000			if (__get_user(instr[1], (u16 __user *)(epc + 2)))
1001				goto out_sigsegv;
1002			opcode = (instr[0] << 16) | instr[1];
1003			bcode = (opcode >> 6) & ((1 << 20) - 1);
1004		}
1005	} else {
1006		if (__get_user(opcode, (unsigned int __user *)epc))
1007			goto out_sigsegv;
1008		bcode = (opcode >> 6) & ((1 << 20) - 1);
1009	}
1010
1011	/*
 1012	 * There is an ancient bug in MIPS assemblers: the break code is
 1013	 * encoded starting at bit 16 instead of bit 6 of the opcode.
 1014	 * Gas is bug-compatible, but not always, grrr...
 1015	 * We handle both cases with a simple heuristic.  --macro
1016	 */
1017	if (bcode >= (1 << 10))
1018		bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10);
1019
1020	/*
 1021	 * notify the kprobe handlers, if the instruction is likely to
1022	 * pertain to them.
1023	 */
1024	switch (bcode) {
1025	case BRK_UPROBE:
1026		if (notify_die(DIE_UPROBE, "uprobe", regs, bcode,
1027			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1028			goto out;
1029		else
1030			break;
1031	case BRK_UPROBE_XOL:
1032		if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, bcode,
1033			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1034			goto out;
1035		else
1036			break;
1037	case BRK_KPROBE_BP:
1038		if (notify_die(DIE_BREAK, "debug", regs, bcode,
1039			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1040			goto out;
1041		else
1042			break;
1043	case BRK_KPROBE_SSTEPBP:
1044		if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
1045			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1046			goto out;
1047		else
1048			break;
1049	default:
1050		break;
1051	}
1052
1053	do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break");
1054
1055out:
1056	set_fs(seg);
1057	exception_exit(prev_state);
1058	return;
1059
1060out_sigsegv:
1061	force_sig(SIGSEGV);
1062	goto out;
1063}
1064
1065asmlinkage void do_tr(struct pt_regs *regs)
1066{
1067	u32 opcode, tcode = 0;
1068	enum ctx_state prev_state;
1069	u16 instr[2];
1070	mm_segment_t seg;
1071	unsigned long epc = msk_isa16_mode(exception_epc(regs));
1072
1073	seg = get_fs();
1074	if (!user_mode(regs))
1075		set_fs(KERNEL_DS);
1076
1077	prev_state = exception_enter();
1078	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
1079	if (get_isa16_mode(regs->cp0_epc)) {
1080		if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
1081		    __get_user(instr[1], (u16 __user *)(epc + 2)))
1082			goto out_sigsegv;
1083		opcode = (instr[0] << 16) | instr[1];
1084		/* Immediate versions don't provide a code.  */
1085		if (!(opcode & OPCODE))
1086			tcode = (opcode >> 12) & ((1 << 4) - 1);
1087	} else {
1088		if (__get_user(opcode, (u32 __user *)epc))
1089			goto out_sigsegv;
1090		/* Immediate versions don't provide a code.  */
1091		if (!(opcode & OPCODE))
1092			tcode = (opcode >> 6) & ((1 << 10) - 1);
1093	}
1094
1095	do_trap_or_bp(regs, tcode, 0, "Trap");
1096
1097out:
1098	set_fs(seg);
1099	exception_exit(prev_state);
1100	return;
1101
1102out_sigsegv:
1103	force_sig(SIGSEGV);
1104	goto out;
1105}
1106
1107asmlinkage void do_ri(struct pt_regs *regs)
1108{
1109	unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
1110	unsigned long old_epc = regs->cp0_epc;
1111	unsigned long old31 = regs->regs[31];
1112	enum ctx_state prev_state;
1113	unsigned int opcode = 0;
1114	int status = -1;
1115
1116	/*
1117	 * Avoid any kernel code. Just emulate the R2 instruction
1118	 * as quickly as possible.
1119	 */
1120	if (mipsr2_emulation && cpu_has_mips_r6 &&
1121	    likely(user_mode(regs)) &&
1122	    likely(get_user(opcode, epc) >= 0)) {
1123		unsigned long fcr31 = 0;
1124
1125		status = mipsr2_decoder(regs, opcode, &fcr31);
1126		switch (status) {
1127		case 0:
1128		case SIGEMT:
1129			return;
1130		case SIGILL:
1131			goto no_r2_instr;
1132		default:
1133			process_fpemu_return(status,
1134					     &current->thread.cp0_baduaddr,
1135					     fcr31);
1136			return;
1137		}
1138	}
1139
1140no_r2_instr:
1141
1142	prev_state = exception_enter();
1143	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
1144
1145	if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
1146		       SIGILL) == NOTIFY_STOP)
1147		goto out;
1148
1149	die_if_kernel("Reserved instruction in kernel code", regs);
1150
1151	if (unlikely(compute_return_epc(regs) < 0))
1152		goto out;
1153
1154	if (!get_isa16_mode(regs->cp0_epc)) {
1155		if (unlikely(get_user(opcode, epc) < 0))
1156			status = SIGSEGV;
1157
1158		if (!cpu_has_llsc && status < 0)
1159			status = simulate_llsc(regs, opcode);
1160
1161		if (status < 0)
1162			status = simulate_rdhwr_normal(regs, opcode);
1163
1164		if (status < 0)
1165			status = simulate_sync(regs, opcode);
1166
1167		if (status < 0)
1168			status = simulate_fp(regs, opcode, old_epc, old31);
1169	} else if (cpu_has_mmips) {
1170		unsigned short mmop[2] = { 0 };
1171
1172		if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
1173			status = SIGSEGV;
1174		if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
1175			status = SIGSEGV;
1176		opcode = mmop[0];
1177		opcode = (opcode << 16) | mmop[1];
1178
1179		if (status < 0)
1180			status = simulate_rdhwr_mm(regs, opcode);
1181	}
1182
1183	if (status < 0)
1184		status = SIGILL;
1185
1186	if (unlikely(status > 0)) {
1187		regs->cp0_epc = old_epc;		/* Undo skip-over.  */
1188		regs->regs[31] = old31;
1189		force_sig(status);
1190	}
1191
1192out:
1193	exception_exit(prev_state);
1194}
1195
1196/*
1197 * No lock; only written during early bootup by CPU 0.
1198 */
1199static RAW_NOTIFIER_HEAD(cu2_chain);
1200
1201int __ref register_cu2_notifier(struct notifier_block *nb)
1202{
1203	return raw_notifier_chain_register(&cu2_chain, nb);
1204}
1205
1206int cu2_notifier_call_chain(unsigned long val, void *v)
1207{
1208	return raw_notifier_call_chain(&cu2_chain, val, v);
1209}
1210
1211static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
1212	void *data)
1213{
1214	struct pt_regs *regs = data;
1215
1216	die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
1217			      "instruction", regs);
1218	force_sig(SIGILL);
1219
1220	return NOTIFY_OK;
1221}
1222
1223#ifdef CONFIG_MIPS_FP_SUPPORT
1224
1225static int enable_restore_fp_context(int msa)
1226{
1227	int err, was_fpu_owner, prior_msa;
1228	bool first_fp;
1229
1230	/* Initialize context if it hasn't been used already */
1231	first_fp = init_fp_ctx(current);
1232
1233	if (first_fp) {
1234		preempt_disable();
1235		err = own_fpu_inatomic(1);
1236		if (msa && !err) {
1237			enable_msa();
1238			set_thread_flag(TIF_USEDMSA);
1239			set_thread_flag(TIF_MSA_CTX_LIVE);
1240		}
1241		preempt_enable();
1242		return err;
1243	}
1244
1245	/*
1246	 * This task has formerly used the FP context.
1247	 *
1248	 * If this thread has no live MSA vector context then we can simply
1249	 * restore the scalar FP context. If it has live MSA vector context
1250	 * (that is, it has or may have used MSA since last performing a
1251	 * function call) then we'll need to restore the vector context. This
1252	 * applies even if we're currently only executing a scalar FP
1253	 * instruction. This is because if we were to later execute an MSA
1254	 * instruction then we'd either have to:
1255	 *
1256	 *  - Restore the vector context & clobber any registers modified by
1257	 *    scalar FP instructions between now & then.
1258	 *
1259	 * or
1260	 *
1261	 *  - Not restore the vector context & lose the most significant bits
1262	 *    of all vector registers.
1263	 *
1264	 * Neither of those options is acceptable. We cannot restore the least
1265	 * significant bits of the registers now & only restore the most
1266	 * significant bits later because the most significant bits of any
1267	 * vector registers whose aliased FP register is modified now will have
1268	 * been zeroed. We'd have no way to know that when restoring the vector
1269	 * context & thus may load an outdated value for the most significant
1270	 * bits of a vector register.
1271	 */
1272	if (!msa && !thread_msa_context_live())
1273		return own_fpu(1);
1274
1275	/*
1276	 * This task is using or has previously used MSA. Thus we require
1277	 * that Status.FR == 1.
1278	 */
1279	preempt_disable();
1280	was_fpu_owner = is_fpu_owner();
1281	err = own_fpu_inatomic(0);
1282	if (err)
1283		goto out;
1284
1285	enable_msa();
1286	write_msa_csr(current->thread.fpu.msacsr);
1287	set_thread_flag(TIF_USEDMSA);
1288
1289	/*
1290	 * If this is the first time that the task is using MSA and it has
 1291	 * previously used scalar FP in this time slice then we already have
1292	 * FP context which we shouldn't clobber. We do however need to clear
1293	 * the upper 64b of each vector register so that this task has no
1294	 * opportunity to see data left behind by another.
1295	 */
1296	prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
1297	if (!prior_msa && was_fpu_owner) {
1298		init_msa_upper();
1299
1300		goto out;
1301	}
1302
1303	if (!prior_msa) {
1304		/*
1305		 * Restore the least significant 64b of each vector register
1306		 * from the existing scalar FP context.
1307		 */
1308		_restore_fp(current);
1309
1310		/*
1311		 * The task has not formerly used MSA, so clear the upper 64b
1312		 * of each vector register such that it cannot see data left
1313		 * behind by another task.
1314		 */
1315		init_msa_upper();
1316	} else {
1317		/* We need to restore the vector context. */
1318		restore_msa(current);
1319
1320		/* Restore the scalar FP control & status register */
1321		if (!was_fpu_owner)
1322			write_32bit_cp1_register(CP1_STATUS,
1323						 current->thread.fpu.fcr31);
1324	}
1325
1326out:
1327	preempt_enable();
1328
1329	return 0;
1330}
1331
1332#else /* !CONFIG_MIPS_FP_SUPPORT */
1333
1334static int enable_restore_fp_context(int msa)
1335{
1336	return SIGILL;
1337}
1338
1339#endif /* CONFIG_MIPS_FP_SUPPORT */
1340
1341asmlinkage void do_cpu(struct pt_regs *regs)
1342{
1343	enum ctx_state prev_state;
1344	unsigned int __user *epc;
1345	unsigned long old_epc, old31;
1346	unsigned int opcode;
1347	unsigned int cpid;
1348	int status;
1349
1350	prev_state = exception_enter();
1351	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
1352
1353	if (cpid != 2)
1354		die_if_kernel("do_cpu invoked from kernel context!", regs);
1355
1356	switch (cpid) {
1357	case 0:
1358		epc = (unsigned int __user *)exception_epc(regs);
1359		old_epc = regs->cp0_epc;
1360		old31 = regs->regs[31];
1361		opcode = 0;
1362		status = -1;
1363
1364		if (unlikely(compute_return_epc(regs) < 0))
1365			break;
1366
1367		if (!get_isa16_mode(regs->cp0_epc)) {
1368			if (unlikely(get_user(opcode, epc) < 0))
1369				status = SIGSEGV;
1370
1371			if (!cpu_has_llsc && status < 0)
1372				status = simulate_llsc(regs, opcode);
1373		}
1374
1375		if (status < 0)
1376			status = SIGILL;
1377
1378		if (unlikely(status > 0)) {
1379			regs->cp0_epc = old_epc;	/* Undo skip-over.  */
1380			regs->regs[31] = old31;
1381			force_sig(status);
1382		}
1383
1384		break;
1385
1386#ifdef CONFIG_MIPS_FP_SUPPORT
1387	case 3:
1388		/*
1389		 * The COP3 opcode space and consequently the CP0.Status.CU3
1390		 * bit and the CP0.Cause.CE=3 encoding have been removed as
1391		 * of the MIPS III ISA.  From the MIPS IV and MIPS32r2 ISAs
1392		 * up the space has been reused for COP1X instructions, that
1393		 * are enabled by the CP0.Status.CU1 bit and consequently
1394		 * use the CP0.Cause.CE=1 encoding for Coprocessor Unusable
1395		 * exceptions.  Some FPU-less processors that implement one
1396		 * of these ISAs however use this code erroneously for COP1X
1397		 * instructions.  Therefore we redirect this trap to the FP
1398		 * emulator too.
1399		 */
1400		if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) {
1401			force_sig(SIGILL);
1402			break;
1403		}
1404		/* Fall through.  */
1405
1406	case 1: {
1407		void __user *fault_addr;
1408		unsigned long fcr31;
1409		int err, sig;
1410
1411		err = enable_restore_fp_context(0);
1412
1413		if (raw_cpu_has_fpu && !err)
1414			break;
1415
1416		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
1417					       &fault_addr);
1418
1419		/*
1420		 * We can't allow the emulated instruction to leave
1421		 * any enabled Cause bits set in $fcr31.
1422		 */
1423		fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
1424		current->thread.fpu.fcr31 &= ~fcr31;
1425
1426		/* Send a signal if required.  */
1427		if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
1428			mt_ase_fp_affinity();
1429
1430		break;
1431	}
1432#else /* CONFIG_MIPS_FP_SUPPORT */
1433	case 1:
1434	case 3:
1435		force_sig(SIGILL);
1436		break;
1437#endif /* CONFIG_MIPS_FP_SUPPORT */
1438
1439	case 2:
1440		raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
1441		break;
1442	}
1443
1444	exception_exit(prev_state);
1445}
1446
1447asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr)
1448{
1449	enum ctx_state prev_state;
1450
1451	prev_state = exception_enter();
1452	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
1453	if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
1454		       current->thread.trap_nr, SIGFPE) == NOTIFY_STOP)
1455		goto out;
1456
1457	/* Clear MSACSR.Cause before enabling interrupts */
1458	write_msa_csr(msacsr & ~MSA_CSR_CAUSEF);
1459	local_irq_enable();
1460
1461	die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
1462	force_sig(SIGFPE);
1463out:
1464	exception_exit(prev_state);
1465}
1466
1467asmlinkage void do_msa(struct pt_regs *regs)
1468{
1469	enum ctx_state prev_state;
1470	int err;
1471
1472	prev_state = exception_enter();
1473
1474	if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
1475		force_sig(SIGILL);
1476		goto out;
1477	}
1478
1479	die_if_kernel("do_msa invoked from kernel context!", regs);
1480
1481	err = enable_restore_fp_context(1);
1482	if (err)
1483		force_sig(SIGILL);
1484out:
1485	exception_exit(prev_state);
1486}
1487
1488asmlinkage void do_mdmx(struct pt_regs *regs)
1489{
1490	enum ctx_state prev_state;
1491
1492	prev_state = exception_enter();
1493	force_sig(SIGILL);
1494	exception_exit(prev_state);
1495}
1496
1497/*
1498 * Called with interrupts disabled.
1499 */
1500asmlinkage void do_watch(struct pt_regs *regs)
1501{
1502	enum ctx_state prev_state;
1503
1504	prev_state = exception_enter();
1505	/*
1506	 * Clear WP (bit 22) bit of cause register so we don't loop
1507	 * forever.
1508	 */
1509	clear_c0_cause(CAUSEF_WP);
1510
1511	/*
1512	 * If the current thread has the watch registers loaded, save
1513	 * their values and send SIGTRAP.  Otherwise another thread
1514	 * left the registers set, clear them and continue.
1515	 */
1516	if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
1517		mips_read_watch_registers();
1518		local_irq_enable();
1519		force_sig_fault(SIGTRAP, TRAP_HWBKPT, NULL);
1520	} else {
1521		mips_clear_watch_registers();
1522		local_irq_enable();
1523	}
1524	exception_exit(prev_state);
1525}
1526
1527asmlinkage void do_mcheck(struct pt_regs *regs)
1528{
1529	int multi_match = regs->cp0_status & ST0_TS;
1530	enum ctx_state prev_state;
1531	mm_segment_t old_fs = get_fs();
1532
1533	prev_state = exception_enter();
1534	show_regs(regs);
1535
1536	if (multi_match) {
1537		dump_tlb_regs();
1538		pr_info("\n");
1539		dump_tlb_all();
1540	}
1541
1542	if (!user_mode(regs))
1543		set_fs(KERNEL_DS);
1544
1545	show_code((unsigned int __user *) regs->cp0_epc);
1546
1547	set_fs(old_fs);
1548
1549	/*
1550	 * Some chips may have other causes of machine check (e.g. SB1
1551	 * graduation timer)
1552	 */
1553	panic("Caught Machine Check exception - %scaused by multiple "
1554	      "matching entries in the TLB.",
1555	      (multi_match) ? "" : "not ");
1556}
1557
1558asmlinkage void do_mt(struct pt_regs *regs)
1559{
1560	int subcode;
1561
1562	subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
1563			>> VPECONTROL_EXCPT_SHIFT;
1564	switch (subcode) {
1565	case 0:
1566		printk(KERN_DEBUG "Thread Underflow\n");
1567		break;
1568	case 1:
1569		printk(KERN_DEBUG "Thread Overflow\n");
1570		break;
1571	case 2:
1572		printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
1573		break;
1574	case 3:
1575		printk(KERN_DEBUG "Gating Storage Exception\n");
1576		break;
1577	case 4:
1578		printk(KERN_DEBUG "YIELD Scheduler Exception\n");
1579		break;
1580	case 5:
1581		printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
1582		break;
1583	default:
1584		printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
1585			subcode);
1586		break;
1587	}
1588	die_if_kernel("MIPS MT Thread exception in kernel", regs);
1589
1590	force_sig(SIGILL);
1591}
1592
1593
1594asmlinkage void do_dsp(struct pt_regs *regs)
1595{
1596	if (cpu_has_dsp)
1597		panic("Unexpected DSP exception");
1598
1599	force_sig(SIGILL);
1600}
1601
1602asmlinkage void do_reserved(struct pt_regs *regs)
1603{
1604	/*
1605	 * Game over - no way to handle this if it ever occurs.	 Most probably
1606	 * caused by a new unknown cpu type or after another deadly
1607	 * hard/software error.
1608	 */
1609	show_regs(regs);
1610	panic("Caught reserved exception %ld - should not happen.",
1611	      (regs->cp0_cause & 0x7f) >> 2);
1612}
1613
1614static int __initdata l1parity = 1;
1615static int __init nol1parity(char *s)
1616{
1617	l1parity = 0;
1618	return 1;
1619}
1620__setup("nol1par", nol1parity);
1621static int __initdata l2parity = 1;
1622static int __init nol2parity(char *s)
1623{
1624	l2parity = 0;
1625	return 1;
1626}
1627__setup("nol2par", nol2parity);
1628
1629/*
 1630 * Some MIPS CPUs can enable/disable cache parity detection, but they
 1631 * do it in different ways.
1632 */
1633static inline void parity_protection_init(void)
1634{
1635#define ERRCTL_PE	0x80000000
1636#define ERRCTL_L2P	0x00800000
1637
1638	if (mips_cm_revision() >= CM_REV_CM3) {
1639		ulong gcr_ectl, cp0_ectl;
1640
1641		/*
1642		 * With CM3 systems we need to ensure that the L1 & L2
1643		 * parity enables are set to the same value, since this
1644		 * is presumed by the hardware engineers.
1645		 *
1646		 * If the user disabled either of L1 or L2 ECC checking,
1647		 * disable both.
1648		 */
1649		l1parity &= l2parity;
1650		l2parity &= l1parity;
1651
1652		/* Probe L1 ECC support */
1653		cp0_ectl = read_c0_ecc();
1654		write_c0_ecc(cp0_ectl | ERRCTL_PE);
1655		back_to_back_c0_hazard();
1656		cp0_ectl = read_c0_ecc();
1657
1658		/* Probe L2 ECC support */
1659		gcr_ectl = read_gcr_err_control();
1660
1661		if (!(gcr_ectl & CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT) ||
1662		    !(cp0_ectl & ERRCTL_PE)) {
1663			/*
1664			 * One of L1 or L2 ECC checking isn't supported,
1665			 * so we cannot enable either.
1666			 */
1667			l1parity = l2parity = 0;
1668		}
1669
1670		/* Configure L1 ECC checking */
1671		if (l1parity)
1672			cp0_ectl |= ERRCTL_PE;
1673		else
1674			cp0_ectl &= ~ERRCTL_PE;
1675		write_c0_ecc(cp0_ectl);
1676		back_to_back_c0_hazard();
1677		WARN_ON(!!(read_c0_ecc() & ERRCTL_PE) != l1parity);
1678
1679		/* Configure L2 ECC checking */
1680		if (l2parity)
1681			gcr_ectl |= CM_GCR_ERR_CONTROL_L2_ECC_EN;
1682		else
1683			gcr_ectl &= ~CM_GCR_ERR_CONTROL_L2_ECC_EN;
1684		write_gcr_err_control(gcr_ectl);
1685		gcr_ectl = read_gcr_err_control();
1686		gcr_ectl &= CM_GCR_ERR_CONTROL_L2_ECC_EN;
1687		WARN_ON(!!gcr_ectl != l2parity);
1688
1689		pr_info("Cache parity protection %sabled\n",
1690			l1parity ? "en" : "dis");
1691		return;
1692	}
1693
1694	switch (current_cpu_type()) {
1695	case CPU_24K:
1696	case CPU_34K:
1697	case CPU_74K:
1698	case CPU_1004K:
1699	case CPU_1074K:
1700	case CPU_INTERAPTIV:
1701	case CPU_PROAPTIV:
1702	case CPU_P5600:
1703	case CPU_QEMU_GENERIC:
1704	case CPU_P6600:
1705		{
1706			unsigned long errctl;
1707			unsigned int l1parity_present, l2parity_present;
1708
1709			errctl = read_c0_ecc();
1710			errctl &= ~(ERRCTL_PE|ERRCTL_L2P);
1711
1712			/* probe L1 parity support */
1713			write_c0_ecc(errctl | ERRCTL_PE);
1714			back_to_back_c0_hazard();
1715			l1parity_present = (read_c0_ecc() & ERRCTL_PE);
1716
1717			/* probe L2 parity support */
1718			write_c0_ecc(errctl|ERRCTL_L2P);
1719			back_to_back_c0_hazard();
1720			l2parity_present = (read_c0_ecc() & ERRCTL_L2P);
1721
1722			if (l1parity_present && l2parity_present) {
1723				if (l1parity)
1724					errctl |= ERRCTL_PE;
1725				if (l1parity ^ l2parity)
1726					errctl |= ERRCTL_L2P;
1727			} else if (l1parity_present) {
1728				if (l1parity)
1729					errctl |= ERRCTL_PE;
1730			} else if (l2parity_present) {
1731				if (l2parity)
1732					errctl |= ERRCTL_L2P;
1733			} else {
1734				/* No parity available */
1735			}
1736
1737			printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);
1738
1739			write_c0_ecc(errctl);
1740			back_to_back_c0_hazard();
1741			errctl = read_c0_ecc();
1742			printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);
1743
1744			if (l1parity_present)
1745				printk(KERN_INFO "Cache parity protection %sabled\n",
1746				       (errctl & ERRCTL_PE) ? "en" : "dis");
1747
1748			if (l2parity_present) {
1749				if (l1parity_present && l1parity)
1750					errctl ^= ERRCTL_L2P;
1751				printk(KERN_INFO "L2 cache parity protection %sabled\n",
1752				       (errctl & ERRCTL_L2P) ? "en" : "dis");
1753			}
1754		}
1755		break;
1756
1757	case CPU_5KC:
1758	case CPU_5KE:
1759	case CPU_LOONGSON32:
1760		write_c0_ecc(0x80000000);
1761		back_to_back_c0_hazard();
1762		/* Set the PE bit (bit 31) in the c0_errctl register. */
1763		printk(KERN_INFO "Cache parity protection %sabled\n",
1764		       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
1765		break;
1766	case CPU_20KC:
1767	case CPU_25KF:
1768		/* Clear the DE bit (bit 16) in the c0_status register. */
1769		printk(KERN_INFO "Enable cache parity protection for "
1770		       "MIPS 20KC/25KF CPUs.\n");
1771		clear_c0_status(ST0_DE);
1772		break;
1773	default:
1774		break;
1775	}
1776}
1777
1778asmlinkage void cache_parity_error(void)
1779{
1780	const int field = 2 * sizeof(unsigned long);
1781	unsigned int reg_val;
1782
1783	/* For the moment, report the problem and hang. */
1784	printk("Cache error exception:\n");
1785	printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
1786	reg_val = read_c0_cacheerr();
1787	printk("c0_cacheerr == %08x\n", reg_val);
1788
1789	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
1790	       reg_val & (1<<30) ? "secondary" : "primary",
1791	       reg_val & (1<<31) ? "data" : "insn");
1792	if ((cpu_has_mips_r2_r6) &&
1793	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
1794		pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
1795			reg_val & (1<<29) ? "ED " : "",
1796			reg_val & (1<<28) ? "ET " : "",
1797			reg_val & (1<<27) ? "ES " : "",
1798			reg_val & (1<<26) ? "EE " : "",
1799			reg_val & (1<<25) ? "EB " : "",
1800			reg_val & (1<<24) ? "EI " : "",
1801			reg_val & (1<<23) ? "E1 " : "",
1802			reg_val & (1<<22) ? "E0 " : "");
1803	} else {
1804		pr_err("Error bits: %s%s%s%s%s%s%s\n",
1805			reg_val & (1<<29) ? "ED " : "",
1806			reg_val & (1<<28) ? "ET " : "",
1807			reg_val & (1<<26) ? "EE " : "",
1808			reg_val & (1<<25) ? "EB " : "",
1809			reg_val & (1<<24) ? "EI " : "",
1810			reg_val & (1<<23) ? "E1 " : "",
1811			reg_val & (1<<22) ? "E0 " : "");
1812	}
1813	printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));
1814
1815#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
1816	if (reg_val & (1<<22))
1817		printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());
1818
1819	if (reg_val & (1<<23))
1820		printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
1821#endif
1822
1823	panic("Can't handle the cache error!");
1824}
1825
1826asmlinkage void do_ftlb(void)
1827{
1828	const int field = 2 * sizeof(unsigned long);
1829	unsigned int reg_val;
1830
1831	/* For the moment, report the problem and hang. */
1832	if ((cpu_has_mips_r2_r6) &&
1833	    (((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS) ||
1834	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_LOONGSON))) {
1835		pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
1836		       read_c0_ecc());
1837		pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
1838		reg_val = read_c0_cacheerr();
1839		pr_err("c0_cacheerr == %08x\n", reg_val);
1840
1841		if ((reg_val & 0xc0000000) == 0xc0000000) {
1842			pr_err("Decoded c0_cacheerr: FTLB parity error\n");
1843		} else {
1844			pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
1845			       reg_val & (1<<30) ? "secondary" : "primary",
1846			       reg_val & (1<<31) ? "data" : "insn");
1847		}
1848	} else {
1849		pr_err("FTLB error exception\n");
1850	}
1851	/* Just print the cacheerr bits for now */
1852	cache_parity_error();
1853}
1854
1855/*
1856 * SDBBP EJTAG debug exception handler.
1857 * We skip the instruction and return to the next instruction.
1858 */
1859void ejtag_exception_handler(struct pt_regs *regs)
1860{
1861	const int field = 2 * sizeof(unsigned long);
1862	unsigned long depc, old_epc, old_ra;
1863	unsigned int debug;
1864
1865	printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
1866	depc = read_c0_depc();
1867	debug = read_c0_debug();
1868	printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
1869	if (debug & 0x80000000) {
1870		/*
1871		 * In branch delay slot.
1872		 * We cheat a little bit here and use EPC to calculate the
1873		 * debug return address (DEPC). EPC is restored after the
1874		 * calculation.
1875		 */
1876		old_epc = regs->cp0_epc;
1877		old_ra = regs->regs[31];
1878		regs->cp0_epc = depc;
1879		compute_return_epc(regs);
1880		depc = regs->cp0_epc;
1881		regs->cp0_epc = old_epc;
1882		regs->regs[31] = old_ra;
1883	} else
1884		depc += 4;
1885	write_c0_depc(depc);
1886
1887#if 0
1888	printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
1889	write_c0_debug(debug | 0x100);
1890#endif
1891}
1892
1893/*
1894 * NMI exception handler.
1895 * No lock; only written during early bootup by CPU 0.
1896 */
1897static RAW_NOTIFIER_HEAD(nmi_chain);
1898
1899int register_nmi_notifier(struct notifier_block *nb)
1900{
1901	return raw_notifier_chain_register(&nmi_chain, nb);
1902}
1903
1904void __noreturn nmi_exception_handler(struct pt_regs *regs)
1905{
1906	char str[100];
1907
1908	nmi_enter();
1909	raw_notifier_call_chain(&nmi_chain, 0, regs);
1910	bust_spinlocks(1);
1911	snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
1912		 smp_processor_id(), regs->cp0_epc);
1913	regs->cp0_epc = read_c0_errorepc();
1914	die(str, regs);
1915	nmi_exit();
1916}
1917
1918#define VECTORSPACING 0x100	/* for EI/VI mode */
1919
1920unsigned long ebase;
1921EXPORT_SYMBOL_GPL(ebase);
1922unsigned long exception_handlers[32];
1923unsigned long vi_handlers[64];
1924
1925void __init *set_except_vector(int n, void *addr)
1926{
1927	unsigned long handler = (unsigned long) addr;
1928	unsigned long old_handler;
1929
1930#ifdef CONFIG_CPU_MICROMIPS
1931	/*
1932	 * Only the TLB handlers are cache aligned with an even
1933	 * address. All other handlers are on an odd address and
1934	 * require no modification. Otherwise, MIPS32 mode will
1935	 * be entered when handling any TLB exceptions. That
1936	 * would be bad...since we must stay in microMIPS mode.
1937	 */
1938	if (!(handler & 0x1))
1939		handler |= 1;
1940#endif
1941	old_handler = xchg(&exception_handlers[n], handler);
1942
1943	if (n == 0 && cpu_has_divec) {
1944#ifdef CONFIG_CPU_MICROMIPS
1945		unsigned long jump_mask = ~((1 << 27) - 1);
1946#else
1947		unsigned long jump_mask = ~((1 << 28) - 1);
1948#endif
1949		u32 *buf = (u32 *)(ebase + 0x200);
1950		unsigned int k0 = 26;
1951		if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
1952			uasm_i_j(&buf, handler & ~jump_mask);
1953			uasm_i_nop(&buf);
1954		} else {
1955			UASM_i_LA(&buf, k0, handler);
1956			uasm_i_jr(&buf, k0);
1957			uasm_i_nop(&buf);
1958		}
1959		local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
1960	}
1961	return (void *)old_handler;
1962}
1963
1964static void do_default_vi(void)
1965{
1966	show_regs(get_irq_regs());
1967	panic("Caught unexpected vectored interrupt.");
1968}
1969
1970static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
1971{
1972	unsigned long handler;
1973	unsigned long old_handler = vi_handlers[n];
1974	int srssets = current_cpu_data.srsets;
1975	u16 *h;
1976	unsigned char *b;
1977
1978	BUG_ON(!cpu_has_veic && !cpu_has_vint);
1979
1980	if (addr == NULL) {
1981		handler = (unsigned long) do_default_vi;
1982		srs = 0;
1983	} else
1984		handler = (unsigned long) addr;
1985	vi_handlers[n] = handler;
1986
1987	b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
1988
1989	if (srs >= srssets)
1990		panic("Shadow register set %d not supported", srs);
1991
1992	if (cpu_has_veic) {
1993		if (board_bind_eic_interrupt)
1994			board_bind_eic_interrupt(n, srs);
1995	} else if (cpu_has_vint) {
1996		/* SRSMap is only defined if shadow sets are implemented */
1997		if (srssets > 1)
1998			change_c0_srsmap(0xf << n*4, srs << n*4);
1999	}
2000
2001	if (srs == 0) {
2002		/*
2003		 * If no shadow set is selected then use the default handler
2004		 * that does normal register saving and standard interrupt exit
2005		 */
2006		extern char except_vec_vi, except_vec_vi_lui;
2007		extern char except_vec_vi_ori, except_vec_vi_end;
2008		extern char rollback_except_vec_vi;
2009		char *vec_start = using_rollback_handler() ?
2010			&rollback_except_vec_vi : &except_vec_vi;
2011#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
2012		const int lui_offset = &except_vec_vi_lui - vec_start + 2;
2013		const int ori_offset = &except_vec_vi_ori - vec_start + 2;
2014#else
2015		const int lui_offset = &except_vec_vi_lui - vec_start;
2016		const int ori_offset = &ex

Large file truncated; view the full file in the repository.