/*
 * arch/ppc64/kernel/kprobes.c
 * (mirrored from https://bitbucket.org/evzijst/gittest)
 */

  1. /*
  2. * Kernel Probes (KProbes)
  3. * arch/ppc64/kernel/kprobes.c
  4. *
  5. * This program is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License as published by
  7. * the Free Software Foundation; either version 2 of the License, or
  8. * (at your option) any later version.
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program; if not, write to the Free Software
  17. * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  18. *
  19. * Copyright (C) IBM Corporation, 2002, 2004
  20. *
  21. * 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
  22. * Probes initial implementation ( includes contributions from
  23. * Rusty Russell).
  24. * 2004-July Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
  25. * interface to access function arguments.
  26. * 2004-Nov Ananth N Mavinakayanahalli <ananth@in.ibm.com> kprobes port
  27. * for PPC64
  28. */
  29. #include <linux/config.h>
  30. #include <linux/kprobes.h>
  31. #include <linux/ptrace.h>
  32. #include <linux/spinlock.h>
  33. #include <linux/preempt.h>
  34. #include <asm/kdebug.h>
  35. #include <asm/sstep.h>
/* kprobe_status settings */
#define KPROBE_HIT_ACTIVE 0x00000001	/* probe hit, pre_handler phase */
#define KPROBE_HIT_SS 0x00000002	/* single-stepping the displaced insn */

/* Probe currently being handled; valid only while the kprobe lock is held. */
static struct kprobe *current_kprobe;
/* Handler phase, and the MSR saved at probe hit (restored after the step). */
static unsigned long kprobe_status, kprobe_saved_msr;
/* Snapshot taken by setjmp_pre_handler(), restored by longjmp_break_handler(). */
static struct pt_regs jprobe_saved_regs;
  42. int arch_prepare_kprobe(struct kprobe *p)
  43. {
  44. kprobe_opcode_t insn = *p->addr;
  45. if (IS_MTMSRD(insn) || IS_RFID(insn))
  46. /* cannot put bp on RFID/MTMSRD */
  47. return 1;
  48. return 0;
  49. }
  50. void arch_copy_kprobe(struct kprobe *p)
  51. {
  52. memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
  53. }
/*
 * Arch-specific teardown when a probe is unregistered.
 * Nothing to do on ppc64: no per-probe resources were allocated.
 */
void arch_remove_kprobe(struct kprobe *p)
{
}
/*
 * Restore the original opcode over the breakpoint and rewind nip so
 * the probed instruction is re-executed natively on return.
 */
static inline void disarm_kprobe(struct kprobe *p, struct pt_regs *regs)
{
	*p->addr = p->opcode;
	regs->nip = (unsigned long)p->addr;
}
  62. static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
  63. {
  64. regs->msr |= MSR_SE;
  65. /*single step inline if it a breakpoint instruction*/
  66. if (p->opcode == BREAKPOINT_INSTRUCTION)
  67. regs->nip = (unsigned long)p->addr;
  68. else
  69. regs->nip = (unsigned long)&p->ainsn.insn;
  70. }
/*
 * Breakpoint trap entry point.  Decides whether the trap at regs->nip
 * belongs to a registered kprobe; if so, runs its pre_handler and sets
 * up single-stepping of the displaced instruction.  Returns 1 when the
 * trap was consumed, 0 to let the kernel handle it.
 */
static inline int kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	unsigned int *addr = (unsigned int *)regs->nip;

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		/* We *are* holding lock here, so this is safe.
		   Disarm the probe we just hit, and ignore it. */
		p = get_kprobe(addr);
		if (p) {
			if (kprobe_status == KPROBE_HIT_SS) {
				/* Hit a second probe while single-stepping:
				   abandon the step, restore the saved MSR,
				   and drop the lock before bailing out. */
				regs->msr &= ~MSR_SE;
				regs->msr |= kprobe_saved_msr;
				unlock_kprobes();
				goto no_kprobe;
			}
			disarm_kprobe(p, regs);
			ret = 1;
		} else {
			/* Not a registered probe at this address: could be
			   the jprobe_return() trap of the active probe. */
			p = current_kprobe;
			if (p->break_handler && p->break_handler(p, regs)) {
				goto ss_probe;
			}
		}
		/* If it's not ours, can't be delete race, (we hold lock). */
		goto no_kprobe;
	}
	lock_kprobes();
	p = get_kprobe(addr);
	if (!p) {
		unlock_kprobes();
		if (*addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * PowerPC has multiple variants of the "trap"
			 * instruction. If the current instruction is a
			 * trap variant, it could belong to someone else
			 */
			kprobe_opcode_t cur_insn = *addr;
			if (IS_TW(cur_insn) || IS_TD(cur_insn) ||
					IS_TWI(cur_insn) || IS_TDI(cur_insn))
				goto no_kprobe;
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it. Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address. In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	/* Record the active probe and the MSR to restore after stepping. */
	kprobe_status = KPROBE_HIT_ACTIVE;
	current_kprobe = p;
	kprobe_saved_msr = regs->msr;
	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
	prepare_singlestep(p, regs);
	kprobe_status = KPROBE_HIT_SS;
	/*
	 * This preempt_disable() matches the preempt_enable_no_resched()
	 * in post_kprobe_handler().
	 */
	preempt_disable();
	return 1;

no_kprobe:
	return ret;
}
  143. /*
  144. * Called after single-stepping. p->addr is the address of the
  145. * instruction whose first byte has been replaced by the "breakpoint"
  146. * instruction. To avoid the SMP problems that can occur when we
  147. * temporarily put back the original opcode to single-step, we
  148. * single-stepped a copy of the instruction. The address of this
  149. * copy is p->ainsn.insn.
  150. */
  151. static void resume_execution(struct kprobe *p, struct pt_regs *regs)
  152. {
  153. int ret;
  154. regs->nip = (unsigned long)p->addr;
  155. ret = emulate_step(regs, p->ainsn.insn[0]);
  156. if (ret == 0)
  157. regs->nip = (unsigned long)p->addr + 4;
  158. regs->msr &= ~MSR_SE;
  159. }
/*
 * Single-step completion: run the post_handler, fix up nip/msr, and
 * release the kprobe lock and the preempt count taken in
 * kprobe_handler().  Returns 1 when the debug trap was ours.
 */
static inline int post_kprobe_handler(struct pt_regs *regs)
{
	if (!kprobe_running())
		return 0;
	if (current_kprobe->post_handler)
		current_kprobe->post_handler(current_kprobe, regs, 0);

	resume_execution(current_kprobe, regs);
	/* Restore the MSR bits saved at probe hit. */
	regs->msr |= kprobe_saved_msr;

	unlock_kprobes();
	preempt_enable_no_resched();

	/*
	 * if somebody else is singlestepping across a probe point, msr
	 * will have SE set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (regs->msr & MSR_SE)
		return 0;
	return 1;
}
/* Interrupts disabled, kprobe_lock held. */
/*
 * A fault occurred while a kprobe handler or single-step was active.
 * Give the probe's fault_handler first refusal; if the fault happened
 * during the single-step, abandon the step and release our state so
 * normal fault handling can proceed.  Returns 1 if the handler
 * resolved the fault.
 */
static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	if (current_kprobe->fault_handler
	    && current_kprobe->fault_handler(current_kprobe, regs, trapnr))
		return 1;

	if (kprobe_status & KPROBE_HIT_SS) {
		resume_execution(current_kprobe, regs);
		regs->msr |= kprobe_saved_msr;
		/* Balance the lock/preempt_disable taken in kprobe_handler(). */
		unlock_kprobes();
		preempt_enable_no_resched();
	}
	return 0;
}
  193. /*
  194. * Wrapper routine to for handling exceptions.
  195. */
  196. int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
  197. void *data)
  198. {
  199. struct die_args *args = (struct die_args *)data;
  200. int ret = NOTIFY_DONE;
  201. /*
  202. * Interrupts are not disabled here. We need to disable
  203. * preemption, because kprobe_running() uses smp_processor_id().
  204. */
  205. preempt_disable();
  206. switch (val) {
  207. case DIE_IABR_MATCH:
  208. case DIE_DABR_MATCH:
  209. case DIE_BPT:
  210. if (kprobe_handler(args->regs))
  211. ret = NOTIFY_STOP;
  212. break;
  213. case DIE_SSTEP:
  214. if (post_kprobe_handler(args->regs))
  215. ret = NOTIFY_STOP;
  216. break;
  217. case DIE_GPF:
  218. case DIE_PAGE_FAULT:
  219. if (kprobe_running() &&
  220. kprobe_fault_handler(args->regs, args->trapnr))
  221. ret = NOTIFY_STOP;
  222. break;
  223. default:
  224. break;
  225. }
  226. preempt_enable();
  227. return ret;
  228. }
  229. int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
  230. {
  231. struct jprobe *jp = container_of(p, struct jprobe, kp);
  232. memcpy(&jprobe_saved_regs, regs, sizeof(struct pt_regs));
  233. /* setup return addr to the jprobe handler routine */
  234. regs->nip = (unsigned long)(((func_descr_t *)jp->entry)->entry);
  235. regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
  236. return 1;
  237. }
/*
 * Ends a jprobe handler: the trap instruction re-enters
 * kprobe_handler(), whose break_handler path leads to
 * longjmp_break_handler() restoring the saved registers.
 * Never returns to its caller normally.
 */
void jprobe_return(void)
{
	asm volatile("trap" ::: "memory");
}
  242. void jprobe_return_end(void)
  243. {
  244. };
  245. int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
  246. {
  247. /*
  248. * FIXME - we should ideally be validating that we got here 'cos
  249. * of the "trap" in jprobe_return() above, before restoring the
  250. * saved regs...
  251. */
  252. memcpy(regs, &jprobe_saved_regs, sizeof(struct pt_regs));
  253. return 1;
  254. }