PageRenderTime 50ms CodeModel.GetById 20ms RepoModel.GetById 0ms app.codeStats 0ms

/arch/parisc/kernel/traps.c

https://bitbucket.org/sola/android_board_snowball_kernel
C | 885 lines | 575 code | 156 blank | 154 comment | 80 complexity | 9adfe893ee9ae922a937f0cae9ea1aa7 MD5 | raw file
Possible License(s): GPL-2.0, LGPL-2.0, AGPL-1.0
  1. /*
  2. * linux/arch/parisc/traps.c
  3. *
  4. * Copyright (C) 1991, 1992 Linus Torvalds
  5. * Copyright (C) 1999, 2000 Philipp Rumpf <prumpf@tux.org>
  6. */
  7. /*
  8. * 'Traps.c' handles hardware traps and faults after we have saved some
  9. * state in 'asm.s'.
  10. */
  11. #include <linux/sched.h>
  12. #include <linux/kernel.h>
  13. #include <linux/string.h>
  14. #include <linux/errno.h>
  15. #include <linux/ptrace.h>
  16. #include <linux/timer.h>
  17. #include <linux/delay.h>
  18. #include <linux/mm.h>
  19. #include <linux/module.h>
  20. #include <linux/smp.h>
  21. #include <linux/spinlock.h>
  22. #include <linux/init.h>
  23. #include <linux/interrupt.h>
  24. #include <linux/console.h>
  25. #include <linux/bug.h>
  26. #include <asm/assembly.h>
  27. #include <asm/system.h>
  28. #include <asm/uaccess.h>
  29. #include <asm/io.h>
  30. #include <asm/irq.h>
  31. #include <asm/traps.h>
  32. #include <asm/unaligned.h>
  33. #include <asm/atomic.h>
  34. #include <asm/smp.h>
  35. #include <asm/pdc.h>
  36. #include <asm/pdc_chassis.h>
  37. #include <asm/unwind.h>
  38. #include <asm/tlbflush.h>
  39. #include <asm/cacheflush.h>
  40. #include "../math-emu/math-emu.h" /* for handle_fpe() */
#define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */
			  /*  dumped to the console via printk) */

/* NOTE(review): not referenced in this file; presumably guards
 * software page-table dirty-bit updates done in assembly/fault paths
 * — confirm against entry.S before relying on that. Only exists when
 * another CPU (or spinlock debugging) can observe the update. */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
DEFINE_SPINLOCK(pa_dbit_lock);
#endif

/* Forward declaration: defined below, needed by show_regs(). */
static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
	struct pt_regs *regs);
  48. static int printbinary(char *buf, unsigned long x, int nbits)
  49. {
  50. unsigned long mask = 1UL << (nbits - 1);
  51. while (mask != 0) {
  52. *buf++ = (mask & x ? '1' : '0');
  53. mask >>= 1;
  54. }
  55. *buf = '\0';
  56. return nbits;
  57. }
/* Native register width printk format. */
#ifdef CONFIG_64BIT
#define RFMT "%016lx"
#else
#define RFMT "%08lx"
#endif
#define FFMT "%016llx"	/* fpregs are 64-bit always */

/* Print four consecutive entries of register array @r on one line,
 * prefixed with the name @f and the index range, at printk level @lvl. */
#define PRINTREGS(lvl,r,f,fmt,x) \
	printk("%s%s%02d-%02d " fmt " " fmt " " fmt " " fmt "\n", \
		lvl, f, (x), (x+3), (r)[(x)+0], (r)[(x)+1], \
		(r)[(x)+2], (r)[(x)+3])
/*
 * print_gr - dump the PSW and all 32 general registers from @regs.
 * @level: printk level prefix (KERN_DEBUG or KERN_CRIT)
 *
 * gr[0] doubles as the saved PSW slot (see handle_interruption,
 * which masks PSW_* bits in regs->gr[0]); it is printed as a bit
 * string under a header naming each flag, MSB first.
 */
static void print_gr(char *level, struct pt_regs *regs)
{
	int i;
	char buf[64];

	printk("%s\n", level);
	printk("%s YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
	printbinary(buf, regs->gr[0], 32);
	printk("%sPSW: %s %s\n", level, buf, print_tainted());

	/* 32 general registers, four per line. */
	for (i = 0; i < 32; i += 4)
		PRINTREGS(level, regs->gr, "r", RFMT, i);
}
/*
 * print_fr - dump FPSR, FPER1 and all 32 floating point registers.
 * @level: printk level prefix
 *
 * The status/exception words must be read live from fr0 with fstd;
 * regs->fr[] is printed afterwards (fr0 in there is stale, see below).
 */
static void print_fr(char *level, struct pt_regs *regs)
{
	int i;
	char buf[64];
	struct { u32 sw[2]; } s;

	/* FR are 64bit everywhere. Need to use asm to get the content
	 * of fpsr/fper1, and we assume that we won't have a FP Identify
	 * in our way, otherwise we're screwed.
	 * The fldd is used to restore the T-bit if there was one, as the
	 * store clears it anyway.
	 * PA2.0 book says "thou shall not use fstw on FPSR/FPERs" - T-Bone */
	asm volatile ("fstd %%fr0,0(%1) \n\t"
		"fldd 0(%1),%%fr0 \n\t"
		: "=m" (s) : "r" (&s) : "r0");

	printk("%s\n", level);
	/* Header naming the FPSR bits, MSB first. */
	printk("%s VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level);
	printbinary(buf, s.sw[0], 32);
	printk("%sFPSR: %s\n", level, buf);
	printk("%sFPER1: %08x\n", level, s.sw[1]);

	/* here we'll print fr0 again, tho it'll be meaningless */
	for (i = 0; i < 32; i += 4)
		PRINTREGS(level, regs->fr, "fr", FFMT, i);
}
/*
 * show_regs - print the full trap frame in @regs.
 *
 * User-mode frames log at KERN_DEBUG, kernel-mode at KERN_CRIT.
 * FP registers are printed only for user mode; for kernel mode the
 * IAOQ/RP values are symbolised with %pS and a backtrace is appended
 * via parisc_show_stack().
 */
void show_regs(struct pt_regs *regs)
{
	int i, user;
	char *level;
	unsigned long cr30, cr31;

	user = user_mode(regs);
	level = user ? KERN_DEBUG : KERN_CRIT;

	print_gr(level, regs);

	/* Eight space registers, four per line. */
	for (i = 0; i < 8; i += 4)
		PRINTREGS(level, regs->sr, "sr", RFMT, i);

	if (user)
		print_fr(level, regs);

	/* cr30/cr31 are read live from the CPU, not from the frame. */
	cr30 = mfctl(30);
	cr31 = mfctl(31);
	printk("%s\n", level);
	printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
		level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
	printk("%s IIR: %08lx ISR: " RFMT " IOR: " RFMT "\n",
		level, regs->iir, regs->isr, regs->ior);
	printk("%s CPU: %8d CR30: " RFMT " CR31: " RFMT "\n",
		level, current_thread_info()->cpu, cr30, cr31);
	printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);

	if (user) {
		printk("%s IAOQ[0]: " RFMT "\n", level, regs->iaoq[0]);
		printk("%s IAOQ[1]: " RFMT "\n", level, regs->iaoq[1]);
		printk("%s RP(r2): " RFMT "\n", level, regs->gr[2]);
	} else {
		printk("%s IAOQ[0]: %pS\n", level, (void *) regs->iaoq[0]);
		printk("%s IAOQ[1]: %pS\n", level, (void *) regs->iaoq[1]);
		printk("%s RP(r2): %pS\n", level, (void *) regs->gr[2]);

		parisc_show_stack(current, NULL, regs);
	}
}
/*
 * dump_stack - backtrace the current task (generic kernel entry point).
 */
void dump_stack(void)
{
	show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);
  140. static void do_show_stack(struct unwind_frame_info *info)
  141. {
  142. int i = 1;
  143. printk(KERN_CRIT "Backtrace:\n");
  144. while (i <= 16) {
  145. if (unwind_once(info) < 0 || info->ip == 0)
  146. break;
  147. if (__kernel_text_address(info->ip)) {
  148. printk(KERN_CRIT " [<" RFMT ">] %pS\n",
  149. info->ip, (void *) info->ip);
  150. i++;
  151. }
  152. }
  153. printk(KERN_CRIT "\n");
  154. }
/*
 * parisc_show_stack - produce a backtrace for @task.
 * @task: task to unwind, or NULL for current
 * @sp:   unused; the unwinder derives the stack pointer itself
 * @regs: trap frame to start from, or NULL to synthesise one
 *
 * When unwinding the current task without a trap frame, a minimal
 * pt_regs is fabricated from the live r30 (stack pointer), the
 * address of the HERE label and our own return address, giving the
 * unwinder a valid starting point.
 */
static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
	struct pt_regs *regs)
{
	struct unwind_frame_info info;
	struct task_struct *t;

	t = task ? task : current;
	if (regs) {
		unwind_frame_init(&info, t, regs);
		goto show_stack;
	}

	if (t == current) {
		unsigned long sp;

HERE:
		/* Capture the current stack pointer (r30). */
		asm volatile ("copy %%r30, %0" : "=r"(sp));
		{
			struct pt_regs r;

			memset(&r, 0, sizeof(struct pt_regs));
			/* &&HERE: GCC label-as-value, serves as the "pc". */
			r.iaoq[0] = (unsigned long)&&HERE;
			r.gr[2] = (unsigned long)__builtin_return_address(0);
			r.gr[30] = sp;

			unwind_frame_init(&info, current, &r);
		}
	} else {
		unwind_frame_init_from_blocked_task(&info, t);
	}

show_stack:
	do_show_stack(&info);
}
  183. void show_stack(struct task_struct *t, unsigned long *sp)
  184. {
  185. return parisc_show_stack(t, sp, NULL);
  186. }
  187. int is_valid_bugaddr(unsigned long iaoq)
  188. {
  189. return 1;
  190. }
/*
 * die_if_kernel - report a fatal condition; oops if it hit the kernel.
 * @str:  short description of the fault
 * @regs: trapped register state
 * @err:  trap code; 0 means "stay quiet" for user mode
 *
 * User-mode faults are merely logged (and the frame dumped when
 * PRINT_USER_FAULTS is set) and the function returns.  Kernel-mode
 * faults print the oops banner, dump state, taint the kernel and
 * kill the current task — panicking instead when the fault happened
 * in interrupt context or panic_on_oops is set.  Recursion through
 * this path spins forever with IRQs enabled.
 */
void die_if_kernel(char *str, struct pt_regs *regs, long err)
{
	if (user_mode(regs)) {
		if (err == 0)
			return; /* STFU */

		printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
			current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);
#ifdef PRINT_USER_FAULTS
		/* XXX for debugging only */
		show_regs(regs);
#endif
		return;
	}

	oops_in_progress = 1;
	oops_enter();

	/* Amuse the user in a SPARC fashion */
	if (err) printk(KERN_CRIT
			" _______________________________ \n"
			" < Your System ate a SPARC! Gah! >\n"
			" ------------------------------- \n"
			" \\ ^__^\n"
			" (__)\\ )\\/\\\n"
			" U ||----w |\n"
			" || ||\n");

	/* unlock the pdc lock if necessary */
	pdc_emergency_unlock();

	/* maybe the kernel hasn't booted very far yet and hasn't been able
	 * to initialize the serial or STI console. In that case we should
	 * re-enable the pdc console, so that the user will be able to
	 * identify the problem. */
	if (!console_drivers)
		pdc_console_restart();

	if (err)
		printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
			current->comm, task_pid_nr(current), str, err);

	/* Wot's wrong wif bein' racy? */
	if (current->thread.flags & PARISC_KERNEL_DEATH) {
		printk(KERN_CRIT "%s() recursion detected.\n", __func__);
		local_irq_enable();
		while (1);
	}
	current->thread.flags |= PARISC_KERNEL_DEATH;

	show_regs(regs);
	dump_stack();
	add_taint(TAINT_DIE);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops) {
		printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
		ssleep(5);
		panic("Fatal exception");
	}

	oops_exit();
	do_exit(SIGSEGV);
}
  246. int syscall_ipi(int (*syscall) (struct pt_regs *), struct pt_regs *regs)
  247. {
  248. return syscall(regs);
  249. }
/* gdb uses break 4,8 */
#define GDB_BREAK_INSN 0x10004

/*
 * handle_gdb_break - send SIGTRAP for a breakpoint-style event.
 * @regs: trapped register state
 * @wot:  si_code to report (TRAP_BRKPT, TRAP_TRACE, TRAP_HWBKPT,
 *        TRAP_BRANCH)
 *
 * The low two bits of iaoq are masked off to form si_addr (they hold
 * non-address state; the same masking is done in handle_break()).
 */
static void handle_gdb_break(struct pt_regs *regs, int wot)
{
	struct siginfo si;

	si.si_signo = SIGTRAP;
	si.si_errno = 0;
	si.si_code = wot;
	si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
	force_sig_info(SIGTRAP, &si, current);
}
/*
 * handle_break - process a break-instruction trap (interruption 9).
 *
 * A kernel-mode PARISC_BUG_BREAK_INSN is routed through report_bug():
 * a WARN resumes at the next instruction; anything unrecognised dies.
 * All remaining breaks (after optional logging of non-gdb ones) are
 * delivered to the current process as SIGTRAP/TRAP_BRKPT.
 */
static void handle_break(struct pt_regs *regs)
{
	unsigned iir = regs->iir;

	if (unlikely(iir == PARISC_BUG_BREAK_INSN && !user_mode(regs))) {
		/* check if a BUG() or WARN() trapped here. */
		enum bug_trap_type tt;
		tt = report_bug(regs->iaoq[0] & ~3, regs);
		if (tt == BUG_TRAP_TYPE_WARN) {
			/* Skip the trapping instruction: advance both
			 * halves of the instruction address queue. */
			regs->iaoq[0] += 4;
			regs->iaoq[1] += 4;
			return; /* return to next instruction when WARN_ON().  */
		}
		die_if_kernel("Unknown kernel breakpoint", regs,
			(tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
	}

#ifdef PRINT_USER_FAULTS
	if (unlikely(iir != GDB_BREAK_INSN)) {
		/* Decode the break's immediate fields for the log. */
		printk(KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
			iir & 31, (iir>>13) & ((1<<13)-1),
			task_pid_nr(current), current->comm);
		show_regs(regs);
	}
#endif

	/* send standard GDB signal */
	handle_gdb_break(regs, TRAP_BRKPT);
}
  287. static void default_trap(int code, struct pt_regs *regs)
  288. {
  289. printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
  290. show_regs(regs);
  291. }
/* Low Priority Machine Check handler hook, called from
 * handle_interruption() case 5.  Initialised to default_trap(); being
 * a writable pointer it can presumably be repointed by CPU setup code
 * elsewhere — confirm against the processor init path. */
void (*cpu_lpmc) (int code, struct pt_regs *regs) __read_mostly = default_trap;
/*
 * transfer_pim_to_trap_frame - rebuild a pt_regs from saved PIM data.
 * @regs: trap frame to fill in
 *
 * After an HPMC the machine state is available in the Processor
 * Internal Memory image at hpmc_pim_data.  The image layout differs
 * between narrow (PA1.1, pdc_hpmc_pim_11) and wide (PA2.0 / pcxu+,
 * pdc_hpmc_pim_20) CPUs; decode whichever one applies into a normal
 * trap frame so the usual dump/terminate paths can run.
 */
void transfer_pim_to_trap_frame(struct pt_regs *regs)
{
	register int i;
	extern unsigned int hpmc_pim_data[];
	struct pdc_hpmc_pim_11 *pim_narrow;
	struct pdc_hpmc_pim_20 *pim_wide;

	if (boot_cpu_data.cpu_type >= pcxu) {

		pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;

		/*
		 * Note: The following code will probably generate a
		 * bunch of truncation error warnings from the compiler.
		 * Could be handled with an ifdef, but perhaps there
		 * is a better way.
		 */

		/* gr[0] doubles as the PSW slot in this file's frames. */
		regs->gr[0] = pim_wide->cr[22];

		for (i = 1; i < 32; i++)
			regs->gr[i] = pim_wide->gr[i];

		for (i = 0; i < 32; i++)
			regs->fr[i] = pim_wide->fr[i];

		for (i = 0; i < 8; i++)
			regs->sr[i] = pim_wide->sr[i];

		regs->iasq[0] = pim_wide->cr[17];
		regs->iasq[1] = pim_wide->iasq_back;
		regs->iaoq[0] = pim_wide->cr[18];
		regs->iaoq[1] = pim_wide->iaoq_back;

		regs->sar = pim_wide->cr[11];
		regs->iir = pim_wide->cr[19];
		regs->isr = pim_wide->cr[20];
		regs->ior = pim_wide->cr[21];
	}
	else {

		/* Narrow (PA1.1) layout: same fields, 32-bit image. */
		pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;

		regs->gr[0] = pim_narrow->cr[22];

		for (i = 1; i < 32; i++)
			regs->gr[i] = pim_narrow->gr[i];

		for (i = 0; i < 32; i++)
			regs->fr[i] = pim_narrow->fr[i];

		for (i = 0; i < 8; i++)
			regs->sr[i] = pim_narrow->sr[i];

		regs->iasq[0] = pim_narrow->cr[17];
		regs->iasq[1] = pim_narrow->iasq_back;
		regs->iaoq[0] = pim_narrow->cr[18];
		regs->iaoq[1] = pim_narrow->iaoq_back;

		regs->sar = pim_narrow->cr[11];
		regs->iir = pim_narrow->cr[19];
		regs->isr = pim_narrow->cr[20];
		regs->ior = pim_narrow->cr[21];
	}

	/*
	 * The following fields only have meaning if we came through
	 * another path. So just zero them here.
	 */

	regs->ksp = 0;
	regs->kpc = 0;
	regs->orig_r28 = 0;
}
/*
 * parisc_terminate - last resort when everything else has gone
 * clearly wrong.  Called for faults in kernel space and for HPMCs.
 * @msg:    message eventually handed to panic()
 * @regs:   register state at failure (rebuilt from PIM for HPMC)
 * @code:   interruption code; 1 == HPMC
 * @offset: faulting address, printed for diagnosis
 *
 * Quiesces interrupts, serialises concurrent terminators, dumps a
 * backtrace and registers, then panics.  Never returns.
 */
void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
{
	static DEFINE_SPINLOCK(terminate_lock);

	oops_in_progress = 1;

	set_eiem(0);
	local_irq_disable();
	spin_lock(&terminate_lock);

	/* unlock the pdc lock if necessary */
	pdc_emergency_unlock();

	/* restart pdc console if necessary */
	if (!console_drivers)
		pdc_console_restart();

	/* Not all paths will gutter the processor... */
	switch(code){

	case 1:
		/* HPMC: the trap frame must first be recovered from PIM. */
		transfer_pim_to_trap_frame(regs);
		break;

	default:
		/* Fall through */
		break;

	}

	{
		/* show_stack(NULL, (unsigned long *)regs->gr[30]); */
		struct unwind_frame_info info;
		unwind_frame_init(&info, current, regs);
		do_show_stack(&info);
	}

	printk("\n");
	printk(KERN_CRIT "%s: Code=%d regs=%p (Addr=" RFMT ")\n",
		msg, code, regs, offset);
	show_regs(regs);

	spin_unlock(&terminate_lock);

	/* put soft power button back under hardware control;
	 * if the user had pressed it once at any time, the
	 * system will shut down immediately right here. */
	pdc_soft_power_button(0);

	/* Call kernel panic() so reboot timeouts work properly
	 * FIXME: This function should be on the list of
	 * panic notifiers, and we should call panic
	 * directly from the location that we wish.
	 * e.g. We should not call panic from
	 * parisc_terminate, but rather the other way around.
	 * This hack works, prints the panic message twice,
	 * and it enables reboot timers!
	 */
	panic(msg);
}
/*
 * handle_interruption - common C entry point for parisc interruptions.
 * @code: interruption number (1 = HPMC ... 28 = unaligned reference)
 * @regs: trapped register state
 *
 * Each interruption class is dispatched in the switch below; cases
 * that fall out of the switch with fault_address/fault_space set are
 * handed to do_page_fault().  Unhandled kernel-mode interruptions end
 * in parisc_terminate().
 */
void notrace handle_interruption(int code, struct pt_regs *regs)
{
	unsigned long fault_address = 0;
	unsigned long fault_space = 0;
	struct siginfo si;

	if (code == 1)
		pdc_console_restart(); /* switch back to pdc if HPMC */
	else
		local_irq_enable();

	/* Security check:
	 * If the priority level is still user, and the
	 * faulting space is not equal to the active space
	 * then the user is attempting something in a space
	 * that does not belong to them. Kill the process.
	 *
	 * This is normally the situation when the user
	 * attempts to jump into the kernel space at the
	 * wrong offset, be it at the gateway page or a
	 * random location.
	 *
	 * We cannot normally signal the process because it
	 * could *be* on the gateway page, and processes
	 * executing on the gateway page can't have signals
	 * delivered.
	 *
	 * We merely readjust the address into the users
	 * space, at a destination address of zero, and
	 * allow processing to continue.
	 */
	if (((unsigned long)regs->iaoq[0] & 3) &&
		((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
		/* Kill the user process later */
		regs->iaoq[0] = 0 | 3;
		regs->iaoq[1] = regs->iaoq[0] + 4;
		regs->iasq[0] = regs->iasq[1] = regs->sr[7];
		regs->gr[0] &= ~PSW_B;
		return;
	}

#if 0
	printk(KERN_CRIT "Interruption # %d\n", code);
#endif

	switch(code) {

	case 1:
		/* High-priority machine check (HPMC) */

		/* set up a new led state on systems shipped with a LED State panel */
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);

		parisc_terminate("High Priority Machine Check (HPMC)",
			regs, code, 0);
		/* NOT REACHED */

	case 2:
		/* Power failure interrupt */
		printk(KERN_CRIT "Power failure interrupt !\n");
		return;

	case 3:
		/* Recovery counter trap */
		regs->gr[0] &= ~PSW_R;
		if (user_space(regs))
			handle_gdb_break(regs, TRAP_TRACE);
		/* else this must be the start of a syscall - just let it run */
		return;

	case 5:
		/* Low-priority machine check */
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);

		flush_cache_all();
		flush_tlb_all();
		cpu_lpmc(5, regs);
		return;

	case 6:
		/* Instruction TLB miss fault/Instruction page fault */
		fault_address = regs->iaoq[0];
		fault_space = regs->iasq[0];
		break;

	case 8:
		/* Illegal instruction trap */
		die_if_kernel("Illegal instruction", regs, code);
		si.si_code = ILL_ILLOPC;
		goto give_sigill;

	case 9:
		/* Break instruction trap */
		handle_break(regs);
		return;

	case 10:
		/* Privileged operation trap */
		die_if_kernel("Privileged operation", regs, code);
		si.si_code = ILL_PRVOPC;
		goto give_sigill;

	case 11:
		/* Privileged register trap */
		if ((regs->iir & 0xffdfffe0) == 0x034008a0) {

			/* This is a MFCTL cr26/cr27 to gr instruction.
			 * PCXS traps on this, so we need to emulate it.
			 */

			/* Bit 21 selects cr27 over cr26; the low five
			 * bits of the IIR name the target register. */
			if (regs->iir & 0x00200000)
				regs->gr[regs->iir & 0x1f] = mfctl(27);
			else
				regs->gr[regs->iir & 0x1f] = mfctl(26);

			/* Step the instruction address queue forward. */
			regs->iaoq[0] = regs->iaoq[1];
			regs->iaoq[1] += 4;
			regs->iasq[0] = regs->iasq[1];
			return;
		}

		die_if_kernel("Privileged register usage", regs, code);
		si.si_code = ILL_PRVREG;
	give_sigill:
		si.si_signo = SIGILL;
		si.si_errno = 0;
		si.si_addr = (void __user *) regs->iaoq[0];
		force_sig_info(SIGILL, &si, current);
		return;

	case 12:
		/* Overflow Trap, let the userland signal handler do the cleanup */
		si.si_signo = SIGFPE;
		si.si_code = FPE_INTOVF;
		si.si_addr = (void __user *) regs->iaoq[0];
		force_sig_info(SIGFPE, &si, current);
		return;

	case 13:
		/* Conditional Trap
		   The condition succeeds in an instruction which traps
		   on condition  */
		if(user_mode(regs)){
			si.si_signo = SIGFPE;
			/* Set to zero, and let the userspace app figure it out from
			   the insn pointed to by si_addr */
			si.si_code = 0;
			si.si_addr = (void __user *) regs->iaoq[0];
			force_sig_info(SIGFPE, &si, current);
			return;
		}
		/* The kernel doesn't want to handle condition codes */
		break;

	case 14:
		/* Assist Exception Trap, i.e. floating point exception. */
		die_if_kernel("Floating point exception", regs, 0); /* quiet */
		handle_fpe(regs);
		return;

	case 15:
		/* Data TLB miss fault/Data page fault */
		/* Fall through */
	case 16:
		/* Non-access instruction TLB miss fault */
		/* The instruction TLB entry needed for the target address of the FIC
		   is absent, and hardware can't find it, so we get to cleanup */
		/* Fall through */
	case 17:
		/* Non-access data TLB miss fault/Non-access data page fault */
		/* FIXME:
		   Still need to add slow path emulation code here!
		   If the insn used a non-shadow register, then the tlb
		   handlers could not have their side-effect (e.g. probe
		   writing to a target register) emulated since rfir would
		   erase the changes to said register. Instead we have to
		   setup everything, call this function we are in, and emulate
		   by hand. Technically we need to emulate:
		   fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw
		*/
		fault_address = regs->ior;
		fault_space = regs->isr;
		break;

	case 18:
		/* PCXS only -- later cpu's split this into types 26,27 & 28 */
		/* Check for unaligned access */
		if (check_unaligned(regs)) {
			handle_unaligned(regs);
			return;
		}
		/* Fall Through */
	case 26:
		/* PCXL: Data memory access rights trap */
		fault_address = regs->ior;
		fault_space = regs->isr;
		break;

	case 19:
		/* Data memory break trap */
		regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
		/* fall thru */
	case 21:
		/* Page reference trap */
		handle_gdb_break(regs, TRAP_HWBKPT);
		return;

	case 25:
		/* Taken branch trap */
		regs->gr[0] &= ~PSW_T;
		if (user_space(regs))
			handle_gdb_break(regs, TRAP_BRANCH);
		/* else this must be the start of a syscall - just let it
		 * run.
		 */
		return;

	case 7:
		/* Instruction access rights */
		/* PCXL: Instruction memory protection trap */

		/*
		 * This could be caused by either: 1) a process attempting
		 * to execute within a vma that does not have execute
		 * permission, or 2) an access rights violation caused by a
		 * flush only translation set up by ptep_get_and_clear().
		 * So we check the vma permissions to differentiate the two.
		 * If the vma indicates we have execute permission, then
		 * the cause is the latter one. In this case, we need to
		 * call do_page_fault() to fix the problem.
		 */
		if (user_mode(regs)) {
			struct vm_area_struct *vma;

			down_read(&current->mm->mmap_sem);
			vma = find_vma(current->mm,regs->iaoq[0]);
			if (vma && (regs->iaoq[0] >= vma->vm_start)
				&& (vma->vm_flags & VM_EXEC)) {

				fault_address = regs->iaoq[0];
				fault_space = regs->iasq[0];

				up_read(&current->mm->mmap_sem);
				break; /* call do_page_fault() */
			}
			up_read(&current->mm->mmap_sem);
		}
		/* Fall Through */
	case 27:
		/* Data memory protection ID trap */
		if (code == 27 && !user_mode(regs) &&
			fixup_exception(regs))
			return;

		die_if_kernel("Protection id trap", regs, code);
		si.si_code = SEGV_MAPERR;
		si.si_signo = SIGSEGV;
		si.si_errno = 0;
		if (code == 7)
			si.si_addr = (void __user *) regs->iaoq[0];
		else
			si.si_addr = (void __user *) regs->ior;
		force_sig_info(SIGSEGV, &si, current);
		return;

	case 28:
		/* Unaligned data reference trap */
		handle_unaligned(regs);
		return;

	default:
		if (user_mode(regs)) {
#ifdef PRINT_USER_FAULTS
			printk(KERN_DEBUG "\nhandle_interruption() pid=%d command='%s'\n",
				task_pid_nr(current), current->comm);
			show_regs(regs);
#endif
			/* SIGBUS, for lack of a better one. */
			si.si_signo = SIGBUS;
			si.si_code = BUS_OBJERR;
			si.si_errno = 0;
			si.si_addr = (void __user *) regs->ior;
			force_sig_info(SIGBUS, &si, current);
			return;
		}
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);

		parisc_terminate("Unexpected interruption", regs, code, 0);
		/* NOT REACHED */
	}

	if (user_mode(regs)) {
		/* Compare the space IDs: a mismatch means the fault was
		 * outside the process's own address space. */
		if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
#ifdef PRINT_USER_FAULTS
			if (fault_space == 0)
				printk(KERN_DEBUG "User Fault on Kernel Space ");
			else
				printk(KERN_DEBUG "User Fault (long pointer) (fault %d) ",
					code);
			printk(KERN_CONT "pid=%d command='%s'\n",
				task_pid_nr(current), current->comm);
			show_regs(regs);
#endif
			si.si_signo = SIGSEGV;
			si.si_errno = 0;
			si.si_code = SEGV_MAPERR;
			si.si_addr = (void __user *) regs->ior;
			force_sig_info(SIGSEGV, &si, current);
			return;
		}
	}
	else {

		/*
		 * The kernel should never fault on its own address space.
		 */

		if (fault_space == 0)
		{
			pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
			parisc_terminate("Kernel Fault", regs, code, fault_address);
		}
	}

	do_page_fault(regs, code, fault_address);
}
  687. int __init check_ivt(void *iva)
  688. {
  689. extern u32 os_hpmc_size;
  690. extern const u32 os_hpmc[];
  691. int i;
  692. u32 check = 0;
  693. u32 *ivap;
  694. u32 *hpmcp;
  695. u32 length;
  696. if (strcmp((char *)iva, "cows can fly"))
  697. return -1;
  698. ivap = (u32 *)iva;
  699. for (i = 0; i < 8; i++)
  700. *ivap++ = 0;
  701. /* Compute Checksum for HPMC handler */
  702. length = os_hpmc_size;
  703. ivap[7] = length;
  704. hpmcp = (u32 *)os_hpmc;
  705. for (i=0; i<length/4; i++)
  706. check += *hpmcp++;
  707. for (i=0; i<8; i++)
  708. check += ivap[i];
  709. ivap[5] = -check;
  710. return 0;
  711. }
/* Interruption vector tables, defined in assembly. */
#ifndef CONFIG_64BIT
extern const void fault_vector_11;
#endif
extern const void fault_vector_20;

/*
 * trap_init - select and validate the interruption vector table.
 *
 * PA2.0 (pcxu and later) CPUs use the wide fault vector; older CPUs
 * get the narrow one, which cannot happen under a 64-bit kernel —
 * there the else-branch panics instead.  Note the #ifdef sits inside
 * the if/else, so the braces-free layout below is load-bearing.
 */
void __init trap_init(void)
{
	void *iva;

	if (boot_cpu_data.cpu_type >= pcxu)
		iva = (void *) &fault_vector_20;
	else
#ifdef CONFIG_64BIT
		panic("Can't boot 64-bit OS on PA1.1 processor!");
#else
		iva = (void *) &fault_vector_11;
#endif

	if (check_ivt(iva))
		panic("IVT invalid");
}
  729. }