/kern_oII/arch/s390/kernel/traps.c

http://omnia2droid.googlecode.com/ · C · 767 lines · 650 code · 62 blank · 55 comment · 111 complexity · 9f78f115fd47d3aed15ba29281a3be97 MD5 · raw file

  1. /*
  2. * arch/s390/kernel/traps.c
  3. *
  4. * S390 version
  5. * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
  6. * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
  7. * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
  8. *
  9. * Derived from "arch/i386/kernel/traps.c"
  10. * Copyright (C) 1991, 1992 Linus Torvalds
  11. */
  12. /*
  13. * 'Traps.c' handles hardware traps and faults after we have saved some
  14. * state in 'asm.s'.
  15. */
  16. #include <linux/sched.h>
  17. #include <linux/kernel.h>
  18. #include <linux/string.h>
  19. #include <linux/errno.h>
  20. #include <linux/ptrace.h>
  21. #include <linux/timer.h>
  22. #include <linux/mm.h>
  23. #include <linux/smp.h>
  24. #include <linux/init.h>
  25. #include <linux/interrupt.h>
  26. #include <linux/seq_file.h>
  27. #include <linux/delay.h>
  28. #include <linux/module.h>
  29. #include <linux/kdebug.h>
  30. #include <linux/kallsyms.h>
  31. #include <linux/reboot.h>
  32. #include <linux/kprobes.h>
  33. #include <linux/bug.h>
  34. #include <linux/utsname.h>
  35. #include <asm/system.h>
  36. #include <asm/uaccess.h>
  37. #include <asm/io.h>
  38. #include <asm/atomic.h>
  39. #include <asm/mathemu.h>
  40. #include <asm/cpcmd.h>
  41. #include <asm/s390_ext.h>
  42. #include <asm/lowcore.h>
  43. #include <asm/debug.h>
  44. #include "entry.h"
/* Dispatch table for program-check interrupts, indexed by interruption code. */
pgm_check_handler_t *pgm_check_table[128];

#ifdef CONFIG_SYSCTL
#ifdef CONFIG_PROCESS_DEBUG
/* Sysctl switch for logging user process faults; defaults on with
 * CONFIG_PROCESS_DEBUG, off otherwise. */
int sysctl_userprocess_debug = 1;
#else
int sysctl_userprocess_debug = 0;
#endif
#endif

/* Fault handlers defined elsewhere in the architecture code. */
extern pgm_check_handler_t do_protection_exception;
extern pgm_check_handler_t do_dat_exception;
extern pgm_check_handler_t do_asce_exception;

/* Read the current stack pointer: r15 is the stack register on s390. */
#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })

#ifndef CONFIG_64BIT
#define LONG "%08lx "		/* format for one stack word, 31 bit */
#define FOURLONG "%08lx %08lx %08lx %08lx\n"
static int kstack_depth_to_print = 12;
#else /* CONFIG_64BIT */
#define LONG "%016lx "		/* format for one stack word, 64 bit */
#define FOURLONG "%016lx %016lx %016lx %016lx\n"
static int kstack_depth_to_print = 20;
#endif /* CONFIG_64BIT */
  66. /*
  67. * For show_trace we have three different stacks to consider:
  68. * - the panic stack which is used if the kernel stack has overflown
  69. * - the asynchronous interrupt stack (cpu related)
  70. * - the synchronous kernel stack (process related)
  71. * The stack trace can start at any of the three stacks and can potentially
  72. * touch all of them. The order is: panic stack, async stack, sync stack.
  73. */
/*
 * Walk one stack area [low, high) starting at sp and print the saved
 * return addresses.  Returns the stack pointer at which the walk left
 * the area, so the caller can continue on the next stack.
 */
static unsigned long
__show_trace(unsigned long sp, unsigned long low, unsigned long high)
{
	struct stack_frame *sf;
	struct pt_regs *regs;

	while (1) {
		sp = sp & PSW_ADDR_INSN;
		/* Stop once sp leaves the area or a full frame no longer fits. */
		if (sp < low || sp > high - sizeof(*sf))
			return sp;
		sf = (struct stack_frame *) sp;
		/* gprs[8] is the saved return address (r14) in this frame. */
		printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
		print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
		/* Follow the backchain. */
		while (1) {
			low = sp;
			sp = sf->back_chain & PSW_ADDR_INSN;
			if (!sp)
				break;
			/* Chain must move strictly upward and stay in the area. */
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *) sp;
			printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
			print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
		}
		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long) (sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *) sp;
		printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
		print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
		low = sp;
		/* Continue with the interrupted context's stack pointer. */
		sp = regs->gprs[15];
	}
}
/*
 * Print a call trace for "task" (or for the current context when both
 * task and stack are NULL).  Walks the panic stack (when stack checking
 * is enabled), then the async interrupt stack, then the kernel stack.
 */
static void show_trace(struct task_struct *task, unsigned long *stack)
{
	register unsigned long __r15 asm ("15");
	unsigned long sp;

	sp = (unsigned long) stack;
	if (!sp)
		/* No explicit start: the task's saved ksp, else our own r15. */
		sp = task ? task->thread.ksp : __r15;
	printk("Call Trace:\n");
#ifdef CONFIG_CHECK_STACK
	sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
			  S390_lowcore.panic_stack);
#endif
	sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
			  S390_lowcore.async_stack);
	if (task)
		__show_trace(sp, (unsigned long) task_stack_page(task),
			     (unsigned long) task_stack_page(task) + THREAD_SIZE);
	else
		__show_trace(sp, S390_lowcore.thread_info,
			     S390_lowcore.thread_info + THREAD_SIZE);
	if (!task)
		task = current;
	debug_show_held_locks(task);
}
/*
 * Dump raw stack words (up to kstack_depth_to_print) followed by a call
 * trace.  sp may be NULL to mean the task's saved stack pointer or, with
 * task also NULL, the current r15.
 */
void show_stack(struct task_struct *task, unsigned long *sp)
{
	register unsigned long * __r15 asm ("15");
	unsigned long *stack;
	int i;

	if (!sp)
		stack = task ? (unsigned long *) task->thread.ksp : __r15;
	else
		stack = sp;
	for (i = 0; i < kstack_depth_to_print; i++) {
		/* Stop at the THREAD_SIZE-aligned end of the stack area. */
		if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
			break;
		/* Start a new output line every 32 bytes of stack. */
		if (i && ((i * sizeof (long) % 32) == 0))
			printk("\n ");
		printk(LONG, *stack++);
	}
	printk("\n");
	show_trace(task, sp);
}
/*
 * Print the last breaking-event address stashed in regs->args[0].
 * Only meaningful on 64 bit; a no-op on 31 bit.
 */
static void show_last_breaking_event(struct pt_regs *regs)
{
#ifdef CONFIG_64BIT
	printk("Last Breaking-Event-Address:\n");
	printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
	print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
#endif
}
/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	/* Banner: cpu, taint flags, kernel release and build version. */
	printk("CPU: %d %s %s %.*s\n",
	       task_thread_info(current)->cpu, print_tainted(),
	       init_utsname()->release,
	       (int)strcspn(init_utsname()->version, " "),
	       init_utsname()->version);
	printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
	       current->comm, current->pid, current,
	       (void *) current->thread.ksp);
	show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);
  176. static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
  177. {
  178. return (regs->psw.mask & bits) / ((~bits + 1) & bits);
  179. }
/* Print the PSW, its decoded mask fields and all general registers. */
void show_registers(struct pt_regs *regs)
{
	char *mode;

	mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
	printk("%s PSW : %p %p",
	       mode, (void *) regs->psw.mask,
	       (void *) regs->psw.addr);
	print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
	/* Decode the individual PSW mask fields one by one. */
	printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
	       "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
	       mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
	       mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
	       mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
	       mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
	       mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
#ifdef CONFIG_64BIT
	printk(" EA:%x", mask_bits(regs, PSW_BASE_BITS));
#endif
	printk("\n%s GPRS: " FOURLONG, mode,
	       regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
	printk(" " FOURLONG,
	       regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
	printk(" " FOURLONG,
	       regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
	printk(" " FOURLONG,
	       regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
	show_code(regs);
}
/*
 * Full register dump used from die(): module list, cpu/process banner,
 * register state and, for kernel-mode pt_regs, a stack backtrace.
 */
void show_regs(struct pt_regs *regs)
{
	print_modules();
	printk("CPU: %d %s %s %.*s\n",
	       task_thread_info(current)->cpu, print_tainted(),
	       init_utsname()->release,
	       (int)strcspn(init_utsname()->version, " "),
	       init_utsname()->version);
	printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
	       current->comm, current->pid, current,
	       (void *) current->thread.ksp);
	show_registers(regs);
	/* Show stack backtrace if pt_regs is from kernel mode */
	if (!(regs->psw.mask & PSW_MASK_PSTATE))
		show_trace(NULL, (unsigned long *) regs->gprs[15]);
	show_last_breaking_event(regs);
}
/* This is called from fs/proc/array.c */
/*
 * Write the saved user-space register state of "task" (PSW, general
 * purpose registers and access registers) to the given seq_file.
 */
void task_show_regs(struct seq_file *m, struct task_struct *task)
{
	struct pt_regs *regs;

	regs = task_pt_regs(task);
	seq_printf(m, "task: %p, ksp: %p\n",
		   task, (void *)task->thread.ksp);
	seq_printf(m, "User PSW : %p %p\n",
		   (void *) regs->psw.mask, (void *)regs->psw.addr);
	seq_printf(m, "User GPRS: " FOURLONG,
		   regs->gprs[0], regs->gprs[1],
		   regs->gprs[2], regs->gprs[3]);
	seq_printf(m, " " FOURLONG,
		   regs->gprs[4], regs->gprs[5],
		   regs->gprs[6], regs->gprs[7]);
	seq_printf(m, " " FOURLONG,
		   regs->gprs[8], regs->gprs[9],
		   regs->gprs[10], regs->gprs[11]);
	seq_printf(m, " " FOURLONG,
		   regs->gprs[12], regs->gprs[13],
		   regs->gprs[14], regs->gprs[15]);
	seq_printf(m, "User ACRS: %08x %08x %08x %08x\n",
		   task->thread.acrs[0], task->thread.acrs[1],
		   task->thread.acrs[2], task->thread.acrs[3]);
	seq_printf(m, " %08x %08x %08x %08x\n",
		   task->thread.acrs[4], task->thread.acrs[5],
		   task->thread.acrs[6], task->thread.acrs[7]);
	seq_printf(m, " %08x %08x %08x %08x\n",
		   task->thread.acrs[8], task->thread.acrs[9],
		   task->thread.acrs[10], task->thread.acrs[11]);
	seq_printf(m, " %08x %08x %08x %08x\n",
		   task->thread.acrs[12], task->thread.acrs[13],
		   task->thread.acrs[14], task->thread.acrs[15]);
}
/* Serializes concurrent oopses so their output does not interleave. */
static DEFINE_SPINLOCK(die_lock);

/*
 * Oops entry point: print the oops banner and register state, taint the
 * kernel and terminate the current task — or panic when the oops
 * happened in interrupt context or panic_on_oops is set.
 */
void die(const char * str, struct pt_regs * regs, long err)
{
	static int die_counter;	/* distinguishes successive oopses in the log */

	oops_enter();
	debug_stop_all();
	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
	printk("%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");
	notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
	show_regs(regs);
	bust_spinlocks(0);
	add_taint(TAINT_DIE);
	spin_unlock_irq(&die_lock);
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception: panic_on_oops");
	oops_exit();
	do_exit(SIGSEGV);
}
  291. static void inline
  292. report_user_fault(long interruption_code, struct pt_regs *regs)
  293. {
  294. #if defined(CONFIG_SYSCTL)
  295. if (!sysctl_userprocess_debug)
  296. return;
  297. #endif
  298. #if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG)
  299. printk("User process fault: interruption code 0x%lX\n",
  300. interruption_code);
  301. show_regs(regs);
  302. #endif
  303. }
  304. int is_valid_bugaddr(unsigned long addr)
  305. {
  306. return 1;
  307. }
/*
 * Common trap delivery path: notify the die chain, then either send
 * "signr" to a user-mode task or, for kernel mode, try an
 * exception-table fixup, then report_bug(), and finally die().
 */
static void __kprobes inline do_trap(long interruption_code, int signr,
				     char *str, struct pt_regs *regs,
				     siginfo_t *info)
{
	/*
	 * We got all needed information from the lowcore and can
	 * now safely switch on interrupts.
	 */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		local_irq_enable();

	if (notify_die(DIE_TRAP, str, regs, interruption_code,
		       interruption_code, signr) == NOTIFY_STOP)
		return;

	if (regs->psw.mask & PSW_MASK_PSTATE) {
		struct task_struct *tsk = current;

		tsk->thread.trap_no = interruption_code & 0xffff;
		force_sig_info(signr, info, tsk);
		report_user_fault(interruption_code, regs);
	} else {
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
		if (fixup)
			/* Resume at the registered fixup instead of oopsing. */
			regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
		else {
			enum bug_trap_type btt;

			btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs);
			/* WARN_ON() style bugs continue after reporting. */
			if (btt == BUG_TRAP_TYPE_WARN)
				return;
			die(str, regs, interruption_code);
		}
	}
}
  340. static inline void __user *get_check_address(struct pt_regs *regs)
  341. {
  342. return (void __user *)((regs->psw.addr-S390_lowcore.pgm_ilc) & PSW_ADDR_INSN);
  343. }
  344. void __kprobes do_single_step(struct pt_regs *regs)
  345. {
  346. if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0,
  347. SIGTRAP) == NOTIFY_STOP){
  348. return;
  349. }
  350. if ((current->ptrace & PT_PTRACED) != 0)
  351. force_sig(SIGTRAP, current);
  352. }
  353. static void default_trap_handler(struct pt_regs * regs, long interruption_code)
  354. {
  355. if (regs->psw.mask & PSW_MASK_PSTATE) {
  356. local_irq_enable();
  357. do_exit(SIGSEGV);
  358. report_user_fault(interruption_code, regs);
  359. } else
  360. die("Unknown program exception", regs, interruption_code);
  361. }
/*
 * Generate a trivial trap handler that fills a siginfo and forwards to
 * do_trap().  "siaddr" is expanded inside the handler body, so it may
 * reference regs (every user below passes get_check_address(regs)).
 */
#define DO_ERROR_INFO(signr, str, name, sicode, siaddr) \
static void name(struct pt_regs * regs, long interruption_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = siaddr; \
	do_trap(interruption_code, signr, str, regs, &info); \
}

/* Handlers for the architected program interruption codes that need no
 * special treatment beyond signal delivery. */
DO_ERROR_INFO(SIGILL, "addressing exception", addressing_exception,
	      ILL_ILLADR, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "execute exception", execute_exception,
	      ILL_ILLOPN, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "fixpoint divide exception", divide_exception,
	      FPE_INTDIV, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "fixpoint overflow exception", overflow_exception,
	      FPE_INTOVF, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP overflow exception", hfp_overflow_exception,
	      FPE_FLTOVF, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP underflow exception", hfp_underflow_exception,
	      FPE_FLTUND, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP significance exception", hfp_significance_exception,
	      FPE_FLTRES, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP divide exception", hfp_divide_exception,
	      FPE_FLTDIV, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP square root exception", hfp_sqrt_exception,
	      FPE_FLTINV, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "operand exception", operand_exception,
	      ILL_ILLOPN, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "privileged operation", privileged_op,
	      ILL_PRVOPC, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "special operation exception", special_op_exception,
	      ILL_ILLOPN, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "translation exception", translation_exception,
	      ILL_ILLOPN, get_check_address(regs))
  398. static inline void
  399. do_fp_trap(struct pt_regs *regs, void __user *location,
  400. int fpc, long interruption_code)
  401. {
  402. siginfo_t si;
  403. si.si_signo = SIGFPE;
  404. si.si_errno = 0;
  405. si.si_addr = location;
  406. si.si_code = 0;
  407. /* FPC[2] is Data Exception Code */
  408. if ((fpc & 0x00000300) == 0) {
  409. /* bits 6 and 7 of DXC are 0 iff IEEE exception */
  410. if (fpc & 0x8000) /* invalid fp operation */
  411. si.si_code = FPE_FLTINV;
  412. else if (fpc & 0x4000) /* div by 0 */
  413. si.si_code = FPE_FLTDIV;
  414. else if (fpc & 0x2000) /* overflow */
  415. si.si_code = FPE_FLTOVF;
  416. else if (fpc & 0x1000) /* underflow */
  417. si.si_code = FPE_FLTUND;
  418. else if (fpc & 0x0800) /* inexact */
  419. si.si_code = FPE_FLTRES;
  420. }
  421. current->thread.ieee_instruction_pointer = (addr_t) location;
  422. do_trap(interruption_code, SIGFPE,
  423. "floating point exception", regs, &si);
  424. }
/*
 * Handler for the operation exception (illegal opcode).  In user mode
 * this also recognizes the ptrace breakpoint opcode and, with
 * CONFIG_MATHEMU, dispatches floating point instruction emulation.
 */
static void illegal_op(struct pt_regs * regs, long interruption_code)
{
	siginfo_t info;
	__u8 opcode[6];
	__u16 __user *location;
	int signal = 0;

	location = get_check_address(regs);

	/*
	 * We got all needed information from the lowcore and can
	 * now safely switch on interrupts.
	 */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		local_irq_enable();
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* Fetch the first halfword of the faulting instruction. */
		if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
			return;
		if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
			/* Software breakpoint inserted by a debugger. */
			if (current->ptrace & PT_PTRACED)
				force_sig(SIGTRAP, current);
			else
				signal = SIGILL;
#ifdef CONFIG_MATHEMU
		} else if (opcode[0] == 0xb3) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_b3(opcode, regs);
		} else if (opcode[0] == 0xed) {
			/* 6-byte instruction: fetch the remaining 4 bytes. */
			if (get_user(*((__u32 *) (opcode+2)),
				     (__u32 __user *)(location+1)))
				return;
			signal = math_emu_ed(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb299) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_srnm(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb29c) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_stfpc(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb29d) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_lfpc(opcode, regs);
#endif
		} else
			signal = SIGILL;
	} else {
		/*
		 * If we get an illegal op in kernel mode, send it through the
		 * kprobes notifier. If kprobes doesn't pick it up, SIGILL
		 */
		if (notify_die(DIE_BPT, "bpt", regs, interruption_code,
			       3, SIGTRAP) != NOTIFY_STOP)
			signal = SIGILL;
	}

	/* Emulation may request SIGFPE or SIGSEGV instead of SIGILL. */
#ifdef CONFIG_MATHEMU
	if (signal == SIGFPE)
		do_fp_trap(regs, location,
			   current->thread.fp_regs.fpc, interruption_code);
	else if (signal == SIGSEGV) {
		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = SEGV_MAPERR;
		info.si_addr = (void __user *) location;
		do_trap(interruption_code, signal,
			"user address fault", regs, &info);
	} else
#endif
	if (signal) {
		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = ILL_ILLOPC;
		info.si_addr = (void __user *) location;
		do_trap(interruption_code, signal,
			"illegal operation", regs, &info);
	}
}
#ifdef CONFIG_MATHEMU
/*
 * Handler for the specification exception.  With CONFIG_MATHEMU some
 * floating point load/store instructions trap here for user space and
 * are emulated; everything else gets SIGILL.
 */
asmlinkage void
specification_exception(struct pt_regs * regs, long interruption_code)
{
	__u8 opcode[6];
	__u16 __user *location = NULL;
	int signal = 0;

	location = (__u16 __user *) get_check_address(regs);

	/*
	 * We got all needed information from the lowcore and can
	 * now safely switch on interrupts.
	 */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		local_irq_enable();
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* NOTE(review): get_user() results are unchecked here, unlike
		 * in illegal_op(); a fault leaves opcode partly undefined. */
		get_user(*((__u16 *) opcode), location);
		switch (opcode[0]) {
		case 0x28: /* LDR Rx,Ry */
			signal = math_emu_ldr(opcode);
			break;
		case 0x38: /* LER Rx,Ry */
			signal = math_emu_ler(opcode);
			break;
		case 0x60: /* STD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_std(opcode, regs);
			break;
		case 0x68: /* LD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ld(opcode, regs);
			break;
		case 0x70: /* STE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ste(opcode, regs);
			break;
		case 0x78: /* LE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_le(opcode, regs);
			break;
		default:
			signal = SIGILL;
			break;
		}
	} else
		signal = SIGILL;
	if (signal == SIGFPE)
		do_fp_trap(regs, location,
			   current->thread.fp_regs.fpc, interruption_code);
	else if (signal) {
		siginfo_t info;
		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = ILL_ILLOPN;
		info.si_addr = location;
		do_trap(interruption_code, signal,
			"specification exception", regs, &info);
	}
}
#else
/* Without math emulation a specification exception is plain SIGILL. */
DO_ERROR_INFO(SIGILL, "specification exception", specification_exception,
	      ILL_ILLOPN, get_check_address(regs));
#endif
/*
 * Handler for the data exception.  With IEEE hardware the FPC register
 * is captured for signal decoding; otherwise CONFIG_MATHEMU emulates
 * the trapping floating point instruction for user space.
 */
static void data_exception(struct pt_regs * regs, long interruption_code)
{
	__u16 __user *location;
	int signal = 0;

	location = get_check_address(regs);

	/*
	 * We got all needed information from the lowcore and can
	 * now safely switch on interrupts.
	 */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		local_irq_enable();
	if (MACHINE_HAS_IEEE)
		/* Capture the FPC so the DXC bits can be inspected below. */
		asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
#ifdef CONFIG_MATHEMU
	else if (regs->psw.mask & PSW_MASK_PSTATE) {
		__u8 opcode[6];
		/* NOTE(review): get_user() results are unchecked throughout
		 * this emulation dispatch — see illegal_op() for contrast. */
		get_user(*((__u16 *) opcode), location);
		switch (opcode[0]) {
		case 0x28: /* LDR Rx,Ry */
			signal = math_emu_ldr(opcode);
			break;
		case 0x38: /* LER Rx,Ry */
			signal = math_emu_ler(opcode);
			break;
		case 0x60: /* STD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_std(opcode, regs);
			break;
		case 0x68: /* LD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ld(opcode, regs);
			break;
		case 0x70: /* STE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ste(opcode, regs);
			break;
		case 0x78: /* LE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_le(opcode, regs);
			break;
		case 0xb3:
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_b3(opcode, regs);
			break;
		case 0xed:
			get_user(*((__u32 *) (opcode+2)),
				 (__u32 __user *)(location+1));
			signal = math_emu_ed(opcode, regs);
			break;
		case 0xb2:
			if (opcode[1] == 0x99) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_srnm(opcode, regs);
			} else if (opcode[1] == 0x9c) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_stfpc(opcode, regs);
			} else if (opcode[1] == 0x9d) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_lfpc(opcode, regs);
			} else
				signal = SIGILL;
			break;
		default:
			signal = SIGILL;
			break;
		}
	}
#endif
	/* NOTE(review): this unconditionally overwrites any signal chosen by
	 * the emulation above; the final signal depends only on the DXC bits
	 * in the fpc — confirm this is intended. */
	if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
		signal = SIGFPE;
	else
		signal = SIGILL;
	if (signal == SIGFPE)
		do_fp_trap(regs, location,
			   current->thread.fp_regs.fpc, interruption_code);
	else if (signal) {
		siginfo_t info;
		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = ILL_ILLOPN;
		info.si_addr = location;
		do_trap(interruption_code, signal,
			"data exception", regs, &info);
	}
}
  649. static void space_switch_exception(struct pt_regs * regs, long int_code)
  650. {
  651. siginfo_t info;
  652. /* Set user psw back to home space mode. */
  653. if (regs->psw.mask & PSW_MASK_PSTATE)
  654. regs->psw.mask |= PSW_ASC_HOME;
  655. /* Send SIGILL. */
  656. info.si_signo = SIGILL;
  657. info.si_errno = 0;
  658. info.si_code = ILL_PRVOPC;
  659. info.si_addr = get_check_address(regs);
  660. do_trap(int_code, SIGILL, "space switch event", regs, &info);
  661. }
/*
 * Called from entry code when the kernel stack overflow check fires.
 * The kernel stack is unusable, so dump state and panic.
 */
asmlinkage void kernel_stack_overflow(struct pt_regs * regs)
{
	bust_spinlocks(1);
	printk("Kernel stack overflow.\n");
	show_regs(regs);
	bust_spinlocks(0);
	panic("Corrupt kernel stack, can't continue.");
}
  670. /* init is done in lowcore.S and head.S */
  671. void __init trap_init(void)
  672. {
  673. int i;
  674. for (i = 0; i < 128; i++)
  675. pgm_check_table[i] = &default_trap_handler;
  676. pgm_check_table[1] = &illegal_op;
  677. pgm_check_table[2] = &privileged_op;
  678. pgm_check_table[3] = &execute_exception;
  679. pgm_check_table[4] = &do_protection_exception;
  680. pgm_check_table[5] = &addressing_exception;
  681. pgm_check_table[6] = &specification_exception;
  682. pgm_check_table[7] = &data_exception;
  683. pgm_check_table[8] = &overflow_exception;
  684. pgm_check_table[9] = &divide_exception;
  685. pgm_check_table[0x0A] = &overflow_exception;
  686. pgm_check_table[0x0B] = &divide_exception;
  687. pgm_check_table[0x0C] = &hfp_overflow_exception;
  688. pgm_check_table[0x0D] = &hfp_underflow_exception;
  689. pgm_check_table[0x0E] = &hfp_significance_exception;
  690. pgm_check_table[0x0F] = &hfp_divide_exception;
  691. pgm_check_table[0x10] = &do_dat_exception;
  692. pgm_check_table[0x11] = &do_dat_exception;
  693. pgm_check_table[0x12] = &translation_exception;
  694. pgm_check_table[0x13] = &special_op_exception;
  695. #ifdef CONFIG_64BIT
  696. pgm_check_table[0x38] = &do_asce_exception;
  697. pgm_check_table[0x39] = &do_dat_exception;
  698. pgm_check_table[0x3A] = &do_dat_exception;
  699. pgm_check_table[0x3B] = &do_dat_exception;
  700. #endif /* CONFIG_64BIT */
  701. pgm_check_table[0x15] = &operand_exception;
  702. pgm_check_table[0x1C] = &space_switch_exception;
  703. pgm_check_table[0x1D] = &hfp_sqrt_exception;
  704. pfault_irq_init();
  705. }