arch/powerpc/kernel/hw_breakpoint.c

Mirrored from github.com/mirrors/linux · C · 447 lines (247 code, 62 blank, 138 comment) · complexity 54 · MD5 f83cd346f71d9869f9b2c358266afe61 · raw file

  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
  4. * using the CPU's debug registers. Derived from
  5. * "arch/x86/kernel/hw_breakpoint.c"
  6. *
  7. * Copyright 2010 IBM Corporation
  8. * Author: K.Prasad <prasad@linux.vnet.ibm.com>
  9. */
  10. #include <linux/hw_breakpoint.h>
  11. #include <linux/notifier.h>
  12. #include <linux/kprobes.h>
  13. #include <linux/percpu.h>
  14. #include <linux/kernel.h>
  15. #include <linux/sched.h>
  16. #include <linux/smp.h>
  17. #include <linux/debugfs.h>
  18. #include <linux/init.h>
  19. #include <asm/hw_breakpoint.h>
  20. #include <asm/processor.h>
  21. #include <asm/sstep.h>
  22. #include <asm/debug.h>
  23. #include <asm/debugfs.h>
  24. #include <asm/hvcall.h>
  25. #include <linux/uaccess.h>
/*
 * Stores the breakpoint currently in use on the breakpoint address
 * register of every cpu.  A single perf_event pointer per cpu (not an
 * array): this arch tracks one active slot per cpu here.
 */
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg);
  31. /*
  32. * Returns total number of data or instruction breakpoints available.
  33. */
  34. int hw_breakpoint_slots(int type)
  35. {
  36. if (type == TYPE_DATA)
  37. return HBP_NUM;
  38. return 0; /* no instruction breakpoints available */
  39. }
/*
 * Install a perf counter breakpoint.
 *
 * We seek a free debug address register and use it for this
 * breakpoint.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 *
 * Always returns 0: the per-cpu slot is unconditionally claimed.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slot = this_cpu_ptr(&bp_per_reg);

	*slot = bp;

	/*
	 * Do not install DABR values if the instruction must be single-stepped.
	 * If so, DABR will be populated in single_step_dabr_instruction().
	 */
	if (current->thread.last_hit_ubp != bp)
		__set_breakpoint(info);

	return 0;
}
  62. /*
  63. * Uninstall the breakpoint contained in the given counter.
  64. *
  65. * First we search the debug address register it uses and then we disable
  66. * it.
  67. *
  68. * Atomic: we hold the counter->ctx->lock and we only handle variables
  69. * and registers local to this cpu.
  70. */
  71. void arch_uninstall_hw_breakpoint(struct perf_event *bp)
  72. {
  73. struct perf_event **slot = this_cpu_ptr(&bp_per_reg);
  74. if (*slot != bp) {
  75. WARN_ONCE(1, "Can't find the breakpoint");
  76. return;
  77. }
  78. *slot = NULL;
  79. hw_breakpoint_disable();
  80. }
/*
 * Perform cleanup of arch-specific counters during unregistration
 * of the perf-event
 */
void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
	/*
	 * If the breakpoint is unregistered between a hw_breakpoint_handler()
	 * and the single_step_dabr_instruction(), then cleanup the breakpoint
	 * restoration variables to prevent dangling pointers.
	 * FIXME, this should not be using bp->ctx at all! Sayeth peterz.
	 *
	 * NOTE(review): the ((void *)-1L) sentinel presumably matches the perf
	 * core's dead-context task marker (TASK_TOMBSTONE) — confirm against
	 * kernel/events/core.c.
	 */
	if (bp->ctx && bp->ctx->task && bp->ctx->task != ((void *)-1L))
		bp->ctx->task->thread.last_hit_ubp = NULL;
}
  96. /*
  97. * Check for virtual address in kernel space.
  98. */
  99. int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
  100. {
  101. return is_kernel_addr(hw->address);
  102. }
  103. int arch_bp_generic_fields(int type, int *gen_bp_type)
  104. {
  105. *gen_bp_type = 0;
  106. if (type & HW_BRK_TYPE_READ)
  107. *gen_bp_type |= HW_BREAKPOINT_R;
  108. if (type & HW_BRK_TYPE_WRITE)
  109. *gen_bp_type |= HW_BREAKPOINT_W;
  110. if (*gen_bp_type == 0)
  111. return -EINVAL;
  112. return 0;
  113. }
  114. /*
  115. * Watchpoint match range is always doubleword(8 bytes) aligned on
  116. * powerpc. If the given range is crossing doubleword boundary, we
  117. * need to increase the length such that next doubleword also get
  118. * covered. Ex,
  119. *
  120. * address len = 6 bytes
  121. * |=========.
  122. * |------------v--|------v--------|
  123. * | | | | | | | | | | | | | | | | |
  124. * |---------------|---------------|
  125. * <---8 bytes--->
  126. *
  127. * In this case, we should configure hw as:
  128. * start_addr = address & ~HW_BREAKPOINT_ALIGN
  129. * len = 16 bytes
  130. *
  131. * @start_addr and @end_addr are inclusive.
  132. */
  133. static int hw_breakpoint_validate_len(struct arch_hw_breakpoint *hw)
  134. {
  135. u16 max_len = DABR_MAX_LEN;
  136. u16 hw_len;
  137. unsigned long start_addr, end_addr;
  138. start_addr = hw->address & ~HW_BREAKPOINT_ALIGN;
  139. end_addr = (hw->address + hw->len - 1) | HW_BREAKPOINT_ALIGN;
  140. hw_len = end_addr - start_addr + 1;
  141. if (dawr_enabled()) {
  142. max_len = DAWR_MAX_LEN;
  143. /* DAWR region can't cross 512 bytes boundary */
  144. if ((start_addr >> 9) != (end_addr >> 9))
  145. return -EINVAL;
  146. } else if (IS_ENABLED(CONFIG_PPC_8xx)) {
  147. /* 8xx can setup a range without limitation */
  148. max_len = U16_MAX;
  149. }
  150. if (hw_len > max_len)
  151. return -EINVAL;
  152. hw->hw_len = hw_len;
  153. return 0;
  154. }
  155. /*
  156. * Validate the arch-specific HW Breakpoint register settings
  157. */
  158. int hw_breakpoint_arch_parse(struct perf_event *bp,
  159. const struct perf_event_attr *attr,
  160. struct arch_hw_breakpoint *hw)
  161. {
  162. int ret = -EINVAL;
  163. if (!bp || !attr->bp_len)
  164. return ret;
  165. hw->type = HW_BRK_TYPE_TRANSLATE;
  166. if (attr->bp_type & HW_BREAKPOINT_R)
  167. hw->type |= HW_BRK_TYPE_READ;
  168. if (attr->bp_type & HW_BREAKPOINT_W)
  169. hw->type |= HW_BRK_TYPE_WRITE;
  170. if (hw->type == HW_BRK_TYPE_TRANSLATE)
  171. /* must set alteast read or write */
  172. return ret;
  173. if (!attr->exclude_user)
  174. hw->type |= HW_BRK_TYPE_USER;
  175. if (!attr->exclude_kernel)
  176. hw->type |= HW_BRK_TYPE_KERNEL;
  177. if (!attr->exclude_hv)
  178. hw->type |= HW_BRK_TYPE_HYP;
  179. hw->address = attr->bp_addr;
  180. hw->len = attr->bp_len;
  181. if (!ppc_breakpoint_available())
  182. return -ENODEV;
  183. return hw_breakpoint_validate_len(hw);
  184. }
/*
 * Restores the breakpoint on the debug registers.
 * Invoke this function if it is known that the execution context is
 * about to change to cause loss of MSR_SE settings.
 */
void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
{
	struct arch_hw_breakpoint *info;

	/* Nothing pending unless a prior hit deferred delivery via last_hit_ubp. */
	if (likely(!tsk->thread.last_hit_ubp))
		return;

	info = counter_arch_bp(tsk->thread.last_hit_ubp);
	/* Cancel the pending single-step and re-arm the breakpoint now. */
	regs->msr &= ~MSR_SE;
	__set_breakpoint(info);
	tsk->thread.last_hit_ubp = NULL;
}
  200. static bool dar_within_range(unsigned long dar, struct arch_hw_breakpoint *info)
  201. {
  202. return ((info->address <= dar) && (dar - info->address < info->len));
  203. }
  204. static bool
  205. dar_range_overlaps(unsigned long dar, int size, struct arch_hw_breakpoint *info)
  206. {
  207. return ((dar <= info->address + info->len - 1) &&
  208. (dar + size - 1 >= info->address));
  209. }
/*
 * Get past the instruction that triggered the breakpoint: emulate it
 * for kernel mode, or arrange hardware single-stepping for user mode,
 * so the event can be delivered with 'trigger-after-execute' semantics.
 *
 * Returns true when the instruction was emulated and the caller should
 * deliver the event and re-arm the breakpoint now; false when delivery
 * is deferred to single_step_dabr_instruction() (user mode) or the
 * breakpoint had to be disabled.
 */
static bool stepping_handler(struct pt_regs *regs, struct perf_event *bp,
		struct arch_hw_breakpoint *info)
{
	unsigned int instr = 0;
	int ret, type, size;
	struct instruction_op op;
	unsigned long addr = info->address;

	/* Fetch the instruction that hit the breakpoint. */
	if (__get_user_inatomic(instr, (unsigned int *)regs->nip))
		goto fail;

	ret = analyse_instr(&op, regs, instr);
	type = GETTYPE(op.type);
	size = GETSIZE(op.type);

	/* larx/stcx can't be emulated (see message); disable the event. */
	if (!ret && (type == LARX || type == STCX)) {
		printk_ratelimited("Breakpoint hit on instruction that can't be emulated."
				   " Breakpoint at 0x%lx will be disabled.\n", addr);
		goto disable;
	}

	/*
	 * If it's extraneous event, we still need to emulate/single-
	 * step the instruction, but we don't generate an event.
	 */
	if (size && !dar_range_overlaps(regs->dar, size, info))
		info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;

	/* Do not emulate user-space instructions, instead single-step them */
	if (user_mode(regs)) {
		current->thread.last_hit_ubp = bp;
		regs->msr |= MSR_SE;
		return false;
	}

	if (!emulate_step(regs, instr))
		goto fail;

	return true;

fail:
	/*
	 * We've failed in reliably handling the hw-breakpoint. Unregister
	 * it and throw a warning message to let the user know about it.
	 */
	WARN(1, "Unable to handle hardware breakpoint. Breakpoint at "
		"0x%lx will be disabled.", addr);

disable:
	perf_event_disable_inatomic(bp);
	return false;
}
/*
 * Handle debug exception notifications (DABR/DAWR match).
 * Returns NOTIFY_STOP when the exception was consumed here,
 * NOTIFY_DONE when it should propagate (no breakpoint armed, or a
 * ptrace one-shot breakpoint).
 */
int hw_breakpoint_handler(struct die_args *args)
{
	int rc = NOTIFY_STOP;
	struct perf_event *bp;
	struct pt_regs *regs = args->regs;
	struct arch_hw_breakpoint *info;

	/* Disable breakpoints during exception handling */
	hw_breakpoint_disable();

	/*
	 * The counter may be concurrently released but that can only
	 * occur from a call_rcu() path. We can then safely fetch
	 * the breakpoint, use its callback, touch its counter
	 * while we are in an rcu_read_lock() path.
	 */
	rcu_read_lock();

	bp = __this_cpu_read(bp_per_reg);
	if (!bp) {
		rc = NOTIFY_DONE;
		goto out;
	}
	info = counter_arch_bp(bp);

	/*
	 * Return early after invoking user-callback function without restoring
	 * DABR if the breakpoint is from ptrace which always operates in
	 * one-shot mode. The ptrace-ed process will receive the SIGTRAP signal
	 * generated in do_dabr().
	 */
	if (bp->overflow_handler == ptrace_triggered) {
		perf_bp_event(bp, regs);
		rc = NOTIFY_DONE;
		goto out;
	}

	/* Start each hit with the extraneous flag cleared. */
	info->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ;
	if (IS_ENABLED(CONFIG_PPC_8xx)) {
		/* 8xx path: no stepping/emulation, only classify the DAR. */
		if (!dar_within_range(regs->dar, info))
			info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
	} else {
		if (!stepping_handler(regs, bp, info))
			goto out;
	}

	/*
	 * As a policy, the callback is invoked in a 'trigger-after-execute'
	 * fashion
	 */
	if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
		perf_bp_event(bp, regs);

	/* Re-arm the breakpoint that was disabled on entry. */
	__set_breakpoint(info);
out:
	rcu_read_unlock();
	return rc;
}
NOKPROBE_SYMBOL(hw_breakpoint_handler);
/*
 * Handle single-step exceptions following a DABR hit.
 * Delivers the deferred event (set up in stepping_handler() for user
 * mode) and re-arms the breakpoint.
 */
static int single_step_dabr_instruction(struct die_args *args)
{
	struct pt_regs *regs = args->regs;
	struct perf_event *bp = NULL;
	struct arch_hw_breakpoint *info;

	bp = current->thread.last_hit_ubp;
	/*
	 * Check if we are single-stepping as a result of a
	 * previous HW Breakpoint exception
	 */
	if (!bp)
		return NOTIFY_DONE;

	info = counter_arch_bp(bp);

	/*
	 * We shall invoke the user-defined callback function in the single
	 * stepping handler to confirm to 'trigger-after-execute' semantics
	 */
	if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
		perf_bp_event(bp, regs);

	/* Re-arm now that the stepped instruction has completed. */
	__set_breakpoint(info);
	current->thread.last_hit_ubp = NULL;

	/*
	 * If the process was being single-stepped by ptrace, let the
	 * other single-step actions occur (e.g. generate SIGTRAP).
	 */
	if (test_thread_flag(TIF_SINGLESTEP))
		return NOTIFY_DONE;

	return NOTIFY_STOP;
}
NOKPROBE_SYMBOL(single_step_dabr_instruction);
  341. /*
  342. * Handle debug exception notifications.
  343. */
  344. int hw_breakpoint_exceptions_notify(
  345. struct notifier_block *unused, unsigned long val, void *data)
  346. {
  347. int ret = NOTIFY_DONE;
  348. switch (val) {
  349. case DIE_DABR_MATCH:
  350. ret = hw_breakpoint_handler(data);
  351. break;
  352. case DIE_SSTEP:
  353. ret = single_step_dabr_instruction(data);
  354. break;
  355. }
  356. return ret;
  357. }
  358. NOKPROBE_SYMBOL(hw_breakpoint_exceptions_notify);
  359. /*
  360. * Release the user breakpoints used by ptrace
  361. */
  362. void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
  363. {
  364. struct thread_struct *t = &tsk->thread;
  365. unregister_hw_breakpoint(t->ptrace_bps[0]);
  366. t->ptrace_bps[0] = NULL;
  367. }
/*
 * perf PMU read callback for breakpoint events: intentionally empty,
 * nothing to read back on this arch yet.
 */
void hw_breakpoint_pmu_read(struct perf_event *bp)
{
	/* TODO */
}
  372. void ptrace_triggered(struct perf_event *bp,
  373. struct perf_sample_data *data, struct pt_regs *regs)
  374. {
  375. struct perf_event_attr attr;
  376. /*
  377. * Disable the breakpoint request here since ptrace has defined a
  378. * one-shot behaviour for breakpoint exceptions in PPC64.
  379. * The SIGTRAP signal is generated automatically for us in do_dabr().
  380. * We don't have to do anything about that here
  381. */
  382. attr = bp->attr;
  383. attr.disabled = true;
  384. modify_user_hw_breakpoint(bp, &attr);
  385. }