/arch/x86_64/kernel/traps.c

https://bitbucket.org/evzijst/gittest · C · 948 lines · 733 code · 106 blank · 109 comment · 114 complexity · c2753cdd302fe2b646fab79f63e99196 MD5 · raw file

  1. /*
  2. * linux/arch/x86-64/traps.c
  3. *
  4. * Copyright (C) 1991, 1992 Linus Torvalds
  5. * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
  6. *
  7. * Pentium III FXSR, SSE support
  8. * Gareth Hughes <gareth@valinux.com>, May 2000
  9. *
  10. * $Id: traps.c,v 1.36 2002/03/24 11:09:10 ak Exp $
  11. */
  12. /*
  13. * 'Traps.c' handles hardware traps and faults after we have saved some
  14. * state in 'entry.S'.
  15. */
  16. #include <linux/config.h>
  17. #include <linux/sched.h>
  18. #include <linux/kernel.h>
  19. #include <linux/string.h>
  20. #include <linux/errno.h>
  21. #include <linux/ptrace.h>
  22. #include <linux/timer.h>
  23. #include <linux/mm.h>
  24. #include <linux/init.h>
  25. #include <linux/delay.h>
  26. #include <linux/spinlock.h>
  27. #include <linux/interrupt.h>
  28. #include <linux/module.h>
  29. #include <linux/moduleparam.h>
  30. #include <asm/system.h>
  31. #include <asm/uaccess.h>
  32. #include <asm/io.h>
  33. #include <asm/atomic.h>
  34. #include <asm/debugreg.h>
  35. #include <asm/desc.h>
  36. #include <asm/i387.h>
  37. #include <asm/kdebug.h>
  38. #include <asm/processor.h>
  39. #include <asm/smp.h>
  40. #include <asm/pgalloc.h>
  41. #include <asm/pda.h>
  42. #include <asm/proto.h>
  43. #include <asm/nmi.h>
  44. #include <linux/irq.h>
/* IDT shared with the low-level entry code; gates installed in trap_init(). */
extern struct gate_struct idt_table[256];

/* Low-level exception entry points (see entry.S, which saves state first). */
asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void double_fault(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void reserved(void);
asmlinkage void alignment_check(void);
asmlinkage void machine_check(void);
asmlinkage void spurious_interrupt_bug(void);
asmlinkage void call_debug(void);

/* Chain of callbacks (debuggers, watchdogs, ...) notified on traps/oopses. */
struct notifier_block *die_chain;
static DEFINE_SPINLOCK(die_notifier_lock);
  70. int register_die_notifier(struct notifier_block *nb)
  71. {
  72. int err = 0;
  73. unsigned long flags;
  74. spin_lock_irqsave(&die_notifier_lock, flags);
  75. err = notifier_chain_register(&die_chain, nb);
  76. spin_unlock_irqrestore(&die_notifier_lock, flags);
  77. return err;
  78. }
  79. static inline void conditional_sti(struct pt_regs *regs)
  80. {
  81. if (regs->eflags & X86_EFLAGS_IF)
  82. local_irq_enable();
  83. }
/* How many stack words show_stack() prints; tunable via "kstack=" below. */
static int kstack_depth_to_print = 10;

#ifdef CONFIG_KALLSYMS
#include <linux/kallsyms.h>
/*
 * Print a kernel text address symbolically, e.g.
 * "<ffffffff80123456>{module:symbol+0x10}".  Falls back to the raw
 * address when no symbol covers it.  Returns the number of characters
 * printed (printk's return value), used by callers for line wrapping.
 */
int printk_address(unsigned long address)
{
	unsigned long offset = 0, symsize;
	const char *symname;
	char *modname;
	char *delim = ":";
	char namebuf[128];

	symname = kallsyms_lookup(address, &symsize, &offset, &modname, namebuf);
	if (!symname)
		return printk("[<%016lx>]", address);
	if (!modname)
		modname = delim = "";	/* built-in symbol: drop "module:" prefix */
	return printk("<%016lx>{%s%s%s%s%+ld}",
		      address,delim,modname,delim,symname,offset);
}
#else
/* Without kallsyms only the raw address can be printed. */
int printk_address(unsigned long address)
{
	return printk("[<%016lx>]", address);
}
#endif
  108. unsigned long *in_exception_stack(int cpu, unsigned long stack)
  109. {
  110. int k;
  111. for (k = 0; k < N_EXCEPTION_STACKS; k++) {
  112. struct tss_struct *tss = &per_cpu(init_tss, cpu);
  113. unsigned long start = tss->ist[k] - EXCEPTION_STKSZ;
  114. if (stack >= start && stack < tss->ist[k])
  115. return (unsigned long *)tss->ist[k];
  116. }
  117. return NULL;
  118. }
/*
 * x86-64 can have up to three kernel stacks:
 * process stack
 * interrupt stack
 * severe exception (double fault, nmi, stack fault) hardware stack
 * Check and process them in order.
 *
 * Scans each stack for values that look like kernel text addresses and
 * prints them; 'i' tracks the output column so long traces wrap.
 */
void show_trace(unsigned long *stack)
{
	unsigned long addr;
	unsigned long *irqstack, *irqstack_end, *estack_end;
	const int cpu = safe_smp_processor_id();
	int i;

	printk("\nCall Trace:");
	i = 0;

	/* Phase 1: exception (IST) stack, if we are on one. */
	estack_end = in_exception_stack(cpu, (unsigned long)stack);
	if (estack_end) {
		while (stack < estack_end) {
			addr = *stack++;
			if (__kernel_text_address(addr)) {
				i += printk_address(addr);
				i += printk(" ");
				if (i > 50) {
					printk("\n");
					i = 0;
				}
			}
		}
		i += printk(" <EOE> ");	/* End Of Exception stack */
		i += 7;
		/*
		 * Continue from the stack pointer saved near the top of the
		 * exception stack -- presumably stored there by the IST
		 * entry path; NOTE(review): confirm layout against entry.S.
		 */
		stack = (unsigned long *) estack_end[-2];
	}

	/* Phase 2: per-CPU interrupt stack.  +64 skips its canary/header. */
	irqstack_end = (unsigned long *) (cpu_pda[cpu].irqstackptr);
	irqstack = (unsigned long *) (cpu_pda[cpu].irqstackptr - IRQSTACKSIZE + 64);

	if (stack >= irqstack && stack < irqstack_end) {
		printk("<IRQ> ");
		while (stack < irqstack_end) {
			addr = *stack++;
			/*
			 * If the address is either in the text segment of the
			 * kernel, or in the region which contains vmalloc'ed
			 * memory, it *may* be the address of a calling
			 * routine; if so, print it so that someone tracing
			 * down the cause of the crash will be able to figure
			 * out the call path that was taken.
			 */
			if (__kernel_text_address(addr)) {
				i += printk_address(addr);
				i += printk(" ");
				if (i > 50) {
					printk("\n ");
					i = 0;
				}
			}
		}
		/* Word at the top of the irq stack links back to the process stack. */
		stack = (unsigned long *) (irqstack_end[-1]);
		printk(" <EOI> ");	/* End Of Interrupt stack */
		i += 7;
	}

	/* Phase 3: the process (thread) stack, up to its aligned top. */
	while (((long) stack & (THREAD_SIZE-1)) != 0) {
		addr = *stack++;
		if (__kernel_text_address(addr)) {
			i += printk_address(addr);
			i += printk(" ");
			if (i > 50) {
				printk("\n ");
				i = 0;
			}
		}
	}
	printk("\n");
}
/*
 * Print up to kstack_depth_to_print raw stack words starting at @rsp
 * (following the irq-stack back-link if needed), then the call trace.
 * @tsk selects whose saved stack pointer to use when @rsp is NULL.
 */
void show_stack(struct task_struct *tsk, unsigned long * rsp)
{
	unsigned long *stack;
	int i;
	const int cpu = safe_smp_processor_id();
	unsigned long *irqstack_end = (unsigned long *) (cpu_pda[cpu].irqstackptr);
	unsigned long *irqstack = (unsigned long *) (cpu_pda[cpu].irqstackptr - IRQSTACKSIZE);

	// debugging aid: "show_stack(NULL, NULL);" prints the
	// back trace for this cpu.

	if (rsp == NULL) {
		if (tsk)
			rsp = (unsigned long *)tsk->thread.rsp;
		else
			rsp = (unsigned long *)&rsp;	/* our own current stack */
	}

	stack = rsp;
	for(i=0; i < kstack_depth_to_print; i++) {
		if (stack >= irqstack && stack <= irqstack_end) {
			if (stack == irqstack_end) {
				/* End of irq stack: follow link to the process stack. */
				stack = (unsigned long *) (irqstack_end[-1]);
				printk(" <EOI> ");
			}
		} else {
			if (((long) stack & (THREAD_SIZE-1)) == 0)
				break;	/* ran off the top of the thread stack */
		}
		if (i && ((i % 4) == 0))
			printk("\n ");	/* four words per output line */
		printk("%016lx ", *stack++);
	}
	show_trace((unsigned long *)rsp);
}
  223. /*
  224. * The architecture-independent dump_stack generator
  225. */
  226. void dump_stack(void)
  227. {
  228. unsigned long dummy;
  229. show_trace(&dummy);
  230. }
  231. EXPORT_SYMBOL(dump_stack);
/*
 * Dump CPU number, register state and current process info; for faults
 * taken in kernel mode also dump the stack and up to 20 code bytes at
 * the faulting rip.
 */
void show_registers(struct pt_regs *regs)
{
	int i;
	int in_kernel = (regs->cs & 3) == 0;	/* CPL is in the low 2 bits of CS */
	unsigned long rsp;
	const int cpu = safe_smp_processor_id();
	struct task_struct *cur = cpu_pda[cpu].pcurrent;

	rsp = regs->rsp;

	printk("CPU %d ", cpu);
	__show_regs(regs);
	printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
		cur->comm, cur->pid, cur->thread_info, cur);

	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault..
	 */
	if (in_kernel) {
		printk("Stack: ");
		show_stack(NULL, (unsigned long*)rsp);

		printk("\nCode: ");
		if(regs->rip < PAGE_OFFSET)
			goto bad;	/* rip not a kernel address at all */

		for(i=0;i<20;i++)
		{
			unsigned char c;
			/* Read the code bytes with fault handling; the "bad"
			 * label deliberately sits inside the loop so both the
			 * unreadable and the non-kernel rip cases print the
			 * same message. */
			if(__get_user(c, &((unsigned char*)regs->rip)[i])) {
bad:
				printk(" Bad RIP value.");
				break;
			}
			printk("%02x ", c);
		}
	}
	printk("\n");
}
/*
 * Try to decode a BUG() site at the trapping rip.  BUG() compiles to a
 * ud2 instruction (0f 0b) followed by a struct bug_frame holding the
 * file name and line; if the frame checks out, print where the BUG was
 * hit.  Silently returns for user-mode faults or non-BUG traps.
 */
void handle_BUG(struct pt_regs *regs)
{
	struct bug_frame f;
	char tmp;

	if (regs->cs & 3)	/* user mode: not a kernel BUG() */
		return;
	if (__copy_from_user(&f, (struct bug_frame *) regs->rip,
			     sizeof(struct bug_frame)))
		return;
	/* Sanity check: plausible kernel filename pointer and ud2 opcode. */
	if ((unsigned long)f.filename < __PAGE_OFFSET ||
	    f.ud2[0] != 0x0f || f.ud2[1] != 0x0b)
		return;
	if (__get_user(tmp, f.filename))
		f.filename = "unmapped filename";
	printk("----------- [cut here ] --------- [please bite here ] ---------\n");
	printk(KERN_ALERT "Kernel BUG at %.50s:%d\n", f.filename, f.line);
}
/*
 * BUG() behind a real function call -- gives callers that cannot expand
 * the BUG() machinery inline a symbol to call (and to breakpoint on).
 */
void out_of_line_bug(void)
{
	BUG();
}
static DEFINE_SPINLOCK(die_lock);
static int die_owner = -1;	/* CPU currently printing an oops, or -1 */

/*
 * Serialize oops output across CPUs: disable interrupts and take
 * die_lock.  If this CPU already owns the lock (a nested oops), fall
 * through without it rather than deadlocking.
 */
void oops_begin(void)
{
	int cpu = safe_smp_processor_id();
	/* racy, but better than risking deadlock. */
	local_irq_disable();
	if (!spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			spin_lock(&die_lock);
	}
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);	/* make sure the oops reaches the console */
}
/*
 * Counterpart of oops_begin(): drop die_lock and, if the user asked
 * for it ("oops=panic"), panic so the machine reboots instead of
 * limping along in a possibly corrupted state.
 */
void oops_end(void)
{
	die_owner = -1;
	bust_spinlocks(0);
	spin_unlock(&die_lock);
	if (panic_on_oops)
		panic("Oops");
}
/*
 * Emit the full oops report: banner with trap name @str, hardware
 * error code @err and an oops counter, the kernel config flags that
 * matter for debugging, the registers/stack, and finally a one-line
 * RIP/RSP summary in case the rest scrolls off the console.
 */
void __die(const char * str, struct pt_regs * regs, long err)
{
	static int die_counter;	/* distinguishes successive oopses in the log */
	printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff,++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");
	notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
	show_registers(regs);
	/* Executive summary in case the oops scrolled away */
	printk(KERN_ALERT "RIP ");
	printk_address(regs->rip);
	printk(" RSP <%016lx>\n", regs->rsp);
}
/*
 * Standard kernel-mode death: serialize output, decode a possible
 * BUG() at the fault site, dump state, then kill the current task.
 * Does not return.
 */
void die(const char * str, struct pt_regs * regs, long err)
{
	oops_begin();
	handle_BUG(regs);
	__die(str, regs, err);
	oops_end();
	do_exit(SIGSEGV);
}
  342. static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
  343. {
  344. if (!(regs->eflags & VM_MASK) && (regs->cs == __KERNEL_CS))
  345. die(str, regs, err);
  346. }
/*
 * Die from an unrecoverable NMI (typically the NMI watchdog firing).
 * @str is a printk format taking the CPU number.  Panics when the user
 * configured panic-on-timeout/oops, otherwise kills the interrupted
 * task.  Does not return.
 */
void die_nmi(char *str, struct pt_regs *regs)
{
	oops_begin();
	/*
	 * We are in trouble anyway, lets at least try
	 * to get a message out.
	 */
	printk(str, safe_smp_processor_id());
	show_registers(regs);
	if (panic_on_timeout || panic_on_oops)
		panic("nmi watchdog");
	printk("console shuts up ...\n");
	oops_end();
	do_exit(SIGSEGV);
}
/*
 * Common body for most exception handlers.  User-mode faults record
 * the trap in the thread struct and deliver @signr (with optional
 * @info) to the current task; kernel-mode faults first try an
 * exception-table fixup and only die() when none exists.
 */
static void do_trap(int trapnr, int signr, char *str,
		    struct pt_regs * regs, long error_code, siginfo_t *info)
{
	conditional_sti(regs);

#ifdef CONFIG_CHECKING
	{
		/* Debug check: MSR_GS_BASE must point at this CPU's PDA. */
		unsigned long gs;
		struct x8664_pda *pda = cpu_pda + safe_smp_processor_id();
		rdmsrl(MSR_GS_BASE, gs);
		if (gs != (unsigned long)pda) {
			wrmsrl(MSR_GS_BASE, pda);
			printk("%s: wrong gs %lx expected %p rip %lx\n", str, gs, pda,
			       regs->rip);
		}
	}
#endif

	if ((regs->cs & 3) != 0) {
		/* User-mode fault: log if tracing, then post the signal. */
		struct task_struct *tsk = current;
		if (exception_trace && unhandled_signal(tsk, signr))
			printk(KERN_INFO
			       "%s[%d] trap %s rip:%lx rsp:%lx error:%lx\n",
			       tsk->comm, tsk->pid, str,
			       regs->rip,regs->rsp,error_code);

		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = trapnr;
		if (info)
			force_sig_info(signr, info, tsk);
		else
			force_sig(signr, tsk);
		return;
	}

	/* kernel trap */
	{
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->rip);
		if (fixup) {
			regs->rip = fixup->fixup;
		} else
			die(str, regs, error_code);
		return;
	}
}
/*
 * Generate a trivial trap handler: give the die chain first crack,
 * then hand off to do_trap() without extra siginfo.
 */
#define DO_ERROR(trapnr, signr, str, name) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
							== NOTIFY_STOP) \
		return; \
	do_trap(trapnr, signr, str, regs, error_code, NULL); \
}

/*
 * Like DO_ERROR, but also fills in a siginfo with the given si_code
 * and fault address for the delivered signal.
 */
#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = (void __user *)siaddr; \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
							== NOTIFY_STOP) \
		return; \
	do_trap(trapnr, signr, str, regs, error_code, &info); \
}

DO_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->rip)
DO_ERROR( 4, SIGSEGV, "overflow", overflow)
DO_ERROR( 5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO( 6, SIGILL, "invalid operand", invalid_op, ILL_ILLOPN, regs->rip)
DO_ERROR( 7, SIGSEGV, "device not available", device_not_available)
DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
DO_ERROR(18, SIGSEGV, "reserved", reserved)

/*
 * Handler generator for traps delivered on an IST stack (stack fault,
 * double fault).  For user-mode faults the register frame is copied to
 * the canonical top-of-kernel-stack location so the rest of the kernel
 * finds pt_regs where it expects them; the (possibly repointed) frame
 * is returned so the entry code can resume from it.
 */
#define DO_ERROR_STACK(trapnr, signr, str, name) \
asmlinkage void *do_##name(struct pt_regs * regs, long error_code) \
{ \
	struct pt_regs *pr = ((struct pt_regs *)(current->thread.rsp0))-1; \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
							== NOTIFY_STOP) \
		return regs; \
	if (regs->cs & 3) { \
		memcpy(pr, regs, sizeof(struct pt_regs)); \
		regs = pr; \
	} \
	do_trap(trapnr, signr, str, regs, error_code, NULL); \
	return regs; \
}

DO_ERROR_STACK(12, SIGBUS, "stack segment", stack_segment)
DO_ERROR_STACK( 8, SIGSEGV, "double fault", double_fault)
/*
 * General protection fault (#GP, vector 13) handler.  User-mode faults
 * get SIGSEGV; kernel-mode faults try an exception-table fixup, then
 * the die chain, and only die() as a last resort.
 */
asmlinkage void do_general_protection(struct pt_regs * regs, long error_code)
{
	conditional_sti(regs);

#ifdef CONFIG_CHECKING
	{
		/* Debug check: MSR_GS_BASE must point at this CPU's PDA. */
		unsigned long gs;
		struct x8664_pda *pda = cpu_pda + safe_smp_processor_id();
		rdmsrl(MSR_GS_BASE, gs);
		if (gs != (unsigned long)pda) {
			wrmsrl(MSR_GS_BASE, pda);
			oops_in_progress++;
			printk("general protection handler: wrong gs %lx expected %p\n", gs, pda);
			oops_in_progress--;
		}
	}
#endif

	if ((regs->cs & 3)!=0) {
		/* User-mode #GP: record and deliver SIGSEGV. */
		struct task_struct *tsk = current;
		if (exception_trace && unhandled_signal(tsk, SIGSEGV))
			printk(KERN_INFO
			       "%s[%d] general protection rip:%lx rsp:%lx error:%lx\n",
			       tsk->comm, tsk->pid,
			       regs->rip,regs->rsp,error_code);

		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = 13;
		force_sig(SIGSEGV, tsk);
		return;
	}

	/* kernel gp */
	{
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->rip);
		if (fixup) {
			regs->rip = fixup->fixup;
			return;
		}
		if (notify_die(DIE_GPF, "general protection fault", regs,
			       error_code, 13, SIGSEGV) == NOTIFY_STOP)
			return;
		die("general protection fault", regs, error_code);
	}
}
  493. static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
  494. {
  495. printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
  496. printk("You probably have a hardware problem with your RAM chips\n");
  497. /* Clear and disable the memory parity error line. */
  498. reason = (reason & 0xf) | 4;
  499. outb(reason, 0x61);
  500. }
  501. static void io_check_error(unsigned char reason, struct pt_regs * regs)
  502. {
  503. printk("NMI: IOCK error (debug interrupt?)\n");
  504. show_registers(regs);
  505. /* Re-enable the IOCK line, wait for a few seconds */
  506. reason = (reason & 0xf) | 8;
  507. outb(reason, 0x61);
  508. mdelay(2000);
  509. reason &= ~8;
  510. outb(reason, 0x61);
  511. }
  512. static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
  513. { printk("Uhhuh. NMI received for unknown reason %02x.\n", reason);
  514. printk("Dazed and confused, but trying to continue\n");
  515. printk("Do you have a strange power saving mode enabled?\n");
  516. }
/*
 * Central NMI dispatcher.  Reads the NMI reason port (boot CPU only --
 * external NMIs are routed there), lets die-chain users (debuggers,
 * profilers, the watchdog) claim the NMI first, then falls back to the
 * legacy memory-parity / I/O-check handling.
 */
asmlinkage void default_do_nmi(struct pt_regs *regs)
{
	unsigned char reason = 0;

	/* Only the BSP gets external NMIs from the system. */
	if (!smp_processor_id())
		reason = get_nmi_reason();

	if (!(reason & 0xc0)) {	/* neither parity (0x80) nor IOCHK (0x40) */
		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 0, SIGINT)
								== NOTIFY_STOP)
			return;
#ifdef CONFIG_X86_LOCAL_APIC
		/*
		 * Ok, so this is none of the documented NMI sources,
		 * so it must be the NMI watchdog.
		 */
		if (nmi_watchdog > 0) {
			nmi_watchdog_tick(regs,reason);
			return;
		}
#endif
		unknown_nmi_error(reason, regs);
		return;
	}
	if (notify_die(DIE_NMI, "nmi", regs, reason, 0, SIGINT) == NOTIFY_STOP)
		return;

	/* AK: following checks seem to be broken on modern chipsets. FIXME */
	if (reason & 0x80)
		mem_parity_error(reason, regs);
	if (reason & 0x40)
		io_check_error(reason, regs);
}
  548. asmlinkage void do_int3(struct pt_regs * regs, long error_code)
  549. {
  550. if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) {
  551. return;
  552. }
  553. do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
  554. return;
  555. }
/* runs on IST stack. */
/*
 * Debug exception (#DB, vector 1) handler: hardware breakpoints,
 * watchpoints and single-step traps.  Because it arrives on an IST
 * stack, a user-mode trap frame is first copied to the canonical
 * top-of-kernel-stack location; the frame to resume from is returned
 * to the entry code.
 */
asmlinkage void *do_debug(struct pt_regs * regs, unsigned long error_code)
{
	struct pt_regs *pr;
	unsigned long condition;
	struct task_struct *tsk = current;
	siginfo_t info;

	pr = (struct pt_regs *)(current->thread.rsp0)-1;
	if (regs->cs & 3) {
		memcpy(pr, regs, sizeof(struct pt_regs));
		regs = pr;
	}

#ifdef CONFIG_CHECKING
	{
		/* RED-PEN interaction with debugger - could destroy gs */
		unsigned long gs;
		struct x8664_pda *pda = cpu_pda + safe_smp_processor_id();
		rdmsrl(MSR_GS_BASE, gs);
		if (gs != (unsigned long)pda) {
			wrmsrl(MSR_GS_BASE, pda);
			printk("debug handler: wrong gs %lx expected %p\n", gs, pda);
		}
	}
#endif

	/* DR6 holds the cause bits for this debug exception. */
	asm("movq %%db6,%0" : "=r" (condition));

	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
		       SIGTRAP) == NOTIFY_STOP) {
		return regs;
	}
	conditional_sti(regs);

	/* Mask out spurious debug traps due to lazy DR7 setting */
	if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
		if (!tsk->thread.debugreg7) {
			goto clear_dr7;
		}
	}

	/* Remember the cause so the debugger can inspect it later. */
	tsk->thread.debugreg6 = condition;

	/* Mask out spurious TF errors due to lazy TF clearing */
	if ((condition & DR_STEP) &&
	    (notify_die(DIE_DEBUGSTEP, "debugstep", regs, condition,
			1, SIGTRAP) != NOTIFY_STOP)) {
		/*
		 * The TF error should be masked out only if the current
		 * process is not traced and if the TRAP flag has been set
		 * previously by a tracing process (condition detected by
		 * the PT_DTRACE flag); remember that the i386 TRAP flag
		 * can be modified by the process itself in user mode,
		 * allowing programs to debug themselves without the ptrace()
		 * interface.
		 */
		if ((regs->cs & 3) == 0)
			goto clear_TF_reenable;
		if ((tsk->ptrace & (PT_DTRACE|PT_PTRACED)) == PT_DTRACE)
			goto clear_TF;
	}

	/* Ok, finally something we can handle */
	tsk->thread.trap_no = 1;
	tsk->thread.error_code = error_code;
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_BRKPT;
	if ((regs->cs & 3) == 0)
		goto clear_dr7;	/* kernel-mode trap: no signal, just disarm */

	info.si_addr = (void __user *)regs->rip;
	force_sig_info(SIGTRAP, &info, tsk);
clear_dr7:
	/* Disarm all hardware breakpoints. */
	asm volatile("movq %0,%%db7"::"r"(0UL));
	notify_die(DIE_DEBUG, "debug", regs, condition, 1, SIGTRAP);
	return regs;

clear_TF_reenable:
	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);

clear_TF:
	/* RED-PEN could cause spurious errors */
	if (notify_die(DIE_DEBUG, "debug2", regs, condition, 1, SIGTRAP)
								!= NOTIFY_STOP)
		regs->eflags &= ~TF_MASK;
	return regs;
}
/*
 * Handle an FPU/SIMD exception raised in kernel mode.  Returns 1 if an
 * exception-table fixup handled it, 0 otherwise (after notifying the
 * die chain and logging the location).
 */
static int kernel_math_error(struct pt_regs *regs, char *str)
{
	const struct exception_table_entry *fixup;
	fixup = search_exception_tables(regs->rip);
	if (fixup) {
		regs->rip = fixup->fixup;
		return 1;
	}
	notify_die(DIE_GPF, str, regs, 0, 16, SIGFPE);
#if 0
	/* This should be a die, but warn only for now */
	die(str, regs, 0);
#else
	printk(KERN_DEBUG "%s: %s at ", current->comm, str);
	printk_address(regs->rip);
	printk("\n");
#endif
	return 0;
}
/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
/*
 * x87 FPU exception (#MF, vector 16) handler: save the FPU state, map
 * the unmasked exception bits of the status word to a SIGFPE si_code
 * and deliver the signal to the current task.
 */
asmlinkage void do_coprocessor_error(struct pt_regs *regs)
{
	void __user *rip = (void __user *)(regs->rip);
	struct task_struct * task;
	siginfo_t info;
	unsigned short cwd, swd;

	conditional_sti(regs);
	if ((regs->cs & 3) == 0 &&
	    kernel_math_error(regs, "kernel x87 math error"))
		return;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 16;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = rip;
	/*
	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
	 * status.  0x3f is the exception bits in these regs, 0x200 is the
	 * C1 reg you need in case of a stack fault, 0x040 is the stack
	 * fault bit.  We should only be taking one exception at a time,
	 * so if this combination doesn't produce any single exception,
	 * then we have a bad program that isn't synchronizing its FPU usage
	 * and it will suffer the consequences since we won't be able to
	 * fully reproduce the context of the exception
	 */
	cwd = get_fpu_cwd(task);
	swd = get_fpu_swd(task);
	switch (((~cwd) & swd & 0x3f) | (swd & 0x240)) {
	case 0x000:
	default:
		break;
	case 0x001: /* Invalid Op */
	case 0x041: /* Stack Fault */
	case 0x241: /* Stack Fault | Direction */
		info.si_code = FPE_FLTINV;
		break;
	case 0x002: /* Denormalize */
	case 0x010: /* Underflow */
		info.si_code = FPE_FLTUND;
		break;
	case 0x004: /* Zero Divide */
		info.si_code = FPE_FLTDIV;
		break;
	case 0x008: /* Overflow */
		info.si_code = FPE_FLTOVF;
		break;
	case 0x020: /* Precision */
		info.si_code = FPE_FLTRES;
		break;
	}
	force_sig_info(SIGFPE, &info, task);
}
/* Catch-all for interrupt vectors without a registered handler. */
asmlinkage void bad_intr(void)
{
	printk("bad interrupt");
}
/*
 * SIMD/SSE exception (#XF, vector 19) handler: save the FPU state,
 * decode MXCSR into a SIGFPE si_code and deliver the signal to the
 * current task.
 */
asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
{
	void __user *rip = (void __user *)(regs->rip);
	struct task_struct * task;
	siginfo_t info;
	unsigned short mxcsr;

	conditional_sti(regs);
	if ((regs->cs & 3) == 0 &&
	    kernel_math_error(regs, "simd math error"))
		return;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 19;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = rip;
	/*
	 * The SIMD FPU exceptions are handled a little differently, as there
	 * is only a single status/control register.  Thus, to determine which
	 * unmasked exception was caught we must mask the exception mask bits
	 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
	 */
	mxcsr = get_fpu_mxcsr(task);
	switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
	case 0x000:
	default:
		break;
	case 0x001: /* Invalid Op */
		info.si_code = FPE_FLTINV;
		break;
	case 0x002: /* Denormalize */
	case 0x010: /* Underflow */
		info.si_code = FPE_FLTUND;
		break;
	case 0x004: /* Zero Divide */
		info.si_code = FPE_FLTDIV;
		break;
	case 0x008: /* Overflow */
		info.si_code = FPE_FLTOVF;
		break;
	case 0x020: /* Precision */
		info.si_code = FPE_FLTRES;
		break;
	}
	force_sig_info(SIGFPE, &info, task);
}
/* Spurious interrupt bug (vector 15): nothing to do, just ignore it. */
asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs)
{
}

/* Weak stub; replaced when a real thermal interrupt handler is built in. */
asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
{
}
/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Called on the device-not-available trap path to lazily restore the
 * current task's FPU context.
 */
asmlinkage void math_state_restore(void)
{
	struct task_struct *me = current;
	clts();			/* Allow maths ops (or we recurse) */
	if (!used_math())
		init_fpu(me);	/* first FPU use: start from a clean state */
	restore_fpu_checking(&me->thread.i387.fxsave);
	me->thread_info->status |= TS_USEDFPU;	/* mark context live on this CPU */
}
/* Handler for KDB_VECTOR: just hand the event to the die chain. */
void do_call_debug(struct pt_regs *regs)
{
	notify_die(DIE_CALL, "debug call", regs, 0, 255, SIGINT);
}
/*
 * Install all exception handlers in the IDT.  Traps that must work
 * even with a corrupted stack (debug, NMI, double fault, stack fault,
 * machine check) get dedicated IST stacks; int3/overflow/bounds are
 * system gates so user space may raise them directly.
 */
void __init trap_init(void)
{
	set_intr_gate(0,&divide_error);
	set_intr_gate_ist(1,&debug,DEBUG_STACK);
	set_intr_gate_ist(2,&nmi,NMI_STACK);
	set_system_gate(3,&int3);	/* breakpoints usable from CPL 3 */
	set_system_gate(4,&overflow);	/* int4-5 can be called from all */
	set_system_gate(5,&bounds);
	set_intr_gate(6,&invalid_op);
	set_intr_gate(7,&device_not_available);
	set_intr_gate_ist(8,&double_fault, DOUBLEFAULT_STACK);
	set_intr_gate(9,&coprocessor_segment_overrun);
	set_intr_gate(10,&invalid_TSS);
	set_intr_gate(11,&segment_not_present);
	set_intr_gate_ist(12,&stack_segment,STACKFAULT_STACK);
	set_intr_gate(13,&general_protection);
	set_intr_gate(14,&page_fault);
	set_intr_gate(15,&spurious_interrupt_bug);
	set_intr_gate(16,&coprocessor_error);
	set_intr_gate(17,&alignment_check);
#ifdef CONFIG_X86_MCE
	set_intr_gate_ist(18,&machine_check, MCE_STACK);
#endif
	set_intr_gate(19,&simd_coprocessor_error);
#ifdef CONFIG_IA32_EMULATION
	set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
#endif
	set_intr_gate(KDB_VECTOR, call_debug);
	/*
	 * Should be a barrier for any external CPU state.
	 */
	cpu_init();
}
/* Actual parsing is done early in setup.c. */
/* "oops=panic": make every oops fatal so the box reboots cleanly. */
static int __init oops_dummy(char *s)
{
	panic_on_oops = 1;
	/* NOTE(review): returns -1 unlike kstack_setup's 0 - confirm intended */
	return -1;
}
__setup("oops=", oops_dummy);
/* "kstack=N": number of raw stack words show_stack() prints. */
static int __init kstack_setup(char *s)
{
	kstack_depth_to_print = simple_strtoul(s,NULL,0);
	return 0;
}
__setup("kstack=", kstack_setup);