PageRenderTime 38ms CodeModel.GetById 20ms app.highlight 14ms RepoModel.GetById 1ms app.codeStats 0ms

/arch/powerpc/kernel/hw_breakpoint.c

http://github.com/mirrors/linux
C | 447 lines | 247 code | 62 blank | 138 comment | 54 complexity | f83cd346f71d9869f9b2c358266afe61 MD5 | raw file
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
  4 * using the CPU's debug registers. Derived from
  5 * "arch/x86/kernel/hw_breakpoint.c"
  6 *
  7 * Copyright 2010 IBM Corporation
  8 * Author: K.Prasad <prasad@linux.vnet.ibm.com>
  9 */
 10
 11#include <linux/hw_breakpoint.h>
 12#include <linux/notifier.h>
 13#include <linux/kprobes.h>
 14#include <linux/percpu.h>
 15#include <linux/kernel.h>
 16#include <linux/sched.h>
 17#include <linux/smp.h>
 18#include <linux/debugfs.h>
 19#include <linux/init.h>
 20
 21#include <asm/hw_breakpoint.h>
 22#include <asm/processor.h>
 23#include <asm/sstep.h>
 24#include <asm/debug.h>
 25#include <asm/debugfs.h>
 26#include <asm/hvcall.h>
 27#include <linux/uaccess.h>
 28
 29/*
 30 * Stores the breakpoints currently in use on each breakpoint address
 31 * register for every cpu
 32 */
 33static DEFINE_PER_CPU(struct perf_event *, bp_per_reg);
 34
 35/*
 36 * Returns total number of data or instruction breakpoints available.
 37 */
 38int hw_breakpoint_slots(int type)
 39{
 40	if (type == TYPE_DATA)
 41		return HBP_NUM;
 42	return 0;		/* no instruction breakpoints available */
 43}
 44
 45/*
 46 * Install a perf counter breakpoint.
 47 *
 48 * We seek a free debug address register and use it for this
 49 * breakpoint.
 50 *
 51 * Atomic: we hold the counter->ctx->lock and we only handle variables
 52 * and registers local to this cpu.
 53 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slot = this_cpu_ptr(&bp_per_reg);

	/* Record this event as the owner of this CPU's single slot. */
	*slot = bp;

	/*
	 * Do not install DABR values if the instruction must be single-stepped.
	 * If so, DABR will be populated in single_step_dabr_instruction().
	 */
	if (current->thread.last_hit_ubp != bp)
		__set_breakpoint(info);

	/* Always succeeds: the slot is unconditionally claimed above. */
	return 0;
}
 70
 71/*
 72 * Uninstall the breakpoint contained in the given counter.
 73 *
 74 * First we search the debug address register it uses and then we disable
 75 * it.
 76 *
 77 * Atomic: we hold the counter->ctx->lock and we only handle variables
 78 * and registers local to this cpu.
 79 */
 80void arch_uninstall_hw_breakpoint(struct perf_event *bp)
 81{
 82	struct perf_event **slot = this_cpu_ptr(&bp_per_reg);
 83
 84	if (*slot != bp) {
 85		WARN_ONCE(1, "Can't find the breakpoint");
 86		return;
 87	}
 88
 89	*slot = NULL;
 90	hw_breakpoint_disable();
 91}
 92
 93/*
 94 * Perform cleanup of arch-specific counters during unregistration
 95 * of the perf-event
 96 */
void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
	/*
	 * If the breakpoint is unregistered between a hw_breakpoint_handler()
	 * and the single_step_dabr_instruction(), then cleanup the breakpoint
	 * restoration variables to prevent dangling pointers.
	 * FIXME, this should not be using bp->ctx at all! Sayeth peterz.
	 *
	 * NOTE(review): the ((void *)-1L) compare presumably filters a
	 * dead-task sentinel set by the perf core — confirm against the
	 * perf_event context lifetime rules before relying on it.
	 */
	if (bp->ctx && bp->ctx->task && bp->ctx->task != ((void *)-1L))
		bp->ctx->task->thread.last_hit_ubp = NULL;
}
108
109/*
110 * Check for virtual address in kernel space.
111 */
int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
	/* Nonzero iff the watch address lies in the kernel address range. */
	return is_kernel_addr(hw->address);
}
116
117int arch_bp_generic_fields(int type, int *gen_bp_type)
118{
119	*gen_bp_type = 0;
120	if (type & HW_BRK_TYPE_READ)
121		*gen_bp_type |= HW_BREAKPOINT_R;
122	if (type & HW_BRK_TYPE_WRITE)
123		*gen_bp_type |= HW_BREAKPOINT_W;
124	if (*gen_bp_type == 0)
125		return -EINVAL;
126	return 0;
127}
128
129/*
130 * Watchpoint match range is always doubleword(8 bytes) aligned on
131 * powerpc. If the given range is crossing doubleword boundary, we
132 * need to increase the length such that next doubleword also get
133 * covered. Ex,
134 *
135 *          address   len = 6 bytes
136 *                |=========.
137 *   |------------v--|------v--------|
138 *   | | | | | | | | | | | | | | | | |
139 *   |---------------|---------------|
140 *    <---8 bytes--->
141 *
142 * In this case, we should configure hw as:
143 *   start_addr = address & ~HW_BREAKPOINT_ALIGN
144 *   len = 16 bytes
145 *
146 * @start_addr and @end_addr are inclusive.
147 */
148static int hw_breakpoint_validate_len(struct arch_hw_breakpoint *hw)
149{
150	u16 max_len = DABR_MAX_LEN;
151	u16 hw_len;
152	unsigned long start_addr, end_addr;
153
154	start_addr = hw->address & ~HW_BREAKPOINT_ALIGN;
155	end_addr = (hw->address + hw->len - 1) | HW_BREAKPOINT_ALIGN;
156	hw_len = end_addr - start_addr + 1;
157
158	if (dawr_enabled()) {
159		max_len = DAWR_MAX_LEN;
160		/* DAWR region can't cross 512 bytes boundary */
161		if ((start_addr >> 9) != (end_addr >> 9))
162			return -EINVAL;
163	} else if (IS_ENABLED(CONFIG_PPC_8xx)) {
164		/* 8xx can setup a range without limitation */
165		max_len = U16_MAX;
166	}
167
168	if (hw_len > max_len)
169		return -EINVAL;
170
171	hw->hw_len = hw_len;
172	return 0;
173}
174
175/*
176 * Validate the arch-specific HW Breakpoint register settings
177 */
178int hw_breakpoint_arch_parse(struct perf_event *bp,
179			     const struct perf_event_attr *attr,
180			     struct arch_hw_breakpoint *hw)
181{
182	int ret = -EINVAL;
183
184	if (!bp || !attr->bp_len)
185		return ret;
186
187	hw->type = HW_BRK_TYPE_TRANSLATE;
188	if (attr->bp_type & HW_BREAKPOINT_R)
189		hw->type |= HW_BRK_TYPE_READ;
190	if (attr->bp_type & HW_BREAKPOINT_W)
191		hw->type |= HW_BRK_TYPE_WRITE;
192	if (hw->type == HW_BRK_TYPE_TRANSLATE)
193		/* must set alteast read or write */
194		return ret;
195	if (!attr->exclude_user)
196		hw->type |= HW_BRK_TYPE_USER;
197	if (!attr->exclude_kernel)
198		hw->type |= HW_BRK_TYPE_KERNEL;
199	if (!attr->exclude_hv)
200		hw->type |= HW_BRK_TYPE_HYP;
201	hw->address = attr->bp_addr;
202	hw->len = attr->bp_len;
203
204	if (!ppc_breakpoint_available())
205		return -ENODEV;
206
207	return hw_breakpoint_validate_len(hw);
208}
209
210/*
211 * Restores the breakpoint on the debug registers.
212 * Invoke this function if it is known that the execution context is
213 * about to change to cause loss of MSR_SE settings.
214 */
void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
{
	struct arch_hw_breakpoint *info;

	/* Nothing pending: no breakpoint awaiting single-step restore. */
	if (likely(!tsk->thread.last_hit_ubp))
		return;

	/*
	 * A breakpoint hit is mid-flight (waiting on a single-step to
	 * re-arm).  Since the PC is changing, cancel the pending
	 * single-step and re-arm the breakpoint immediately.
	 */
	info = counter_arch_bp(tsk->thread.last_hit_ubp);
	regs->msr &= ~MSR_SE;
	__set_breakpoint(info);
	tsk->thread.last_hit_ubp = NULL;
}
227
228static bool dar_within_range(unsigned long dar, struct arch_hw_breakpoint *info)
229{
230	return ((info->address <= dar) && (dar - info->address < info->len));
231}
232
233static bool
234dar_range_overlaps(unsigned long dar, int size, struct arch_hw_breakpoint *info)
235{
236	return ((dar <= info->address + info->len - 1) &&
237		(dar + size - 1 >= info->address));
238}
239
240/*
241 * Handle debug exception notifications.
242 */
/*
 * Emulate (kernel) or single-step (user) the instruction that tripped the
 * breakpoint, so execution can continue past it with the breakpoint
 * disabled.  Returns true when the caller should re-arm the breakpoint
 * now; false when re-arming is deferred (user single-step) or the
 * breakpoint had to be disabled entirely.
 */
static bool stepping_handler(struct pt_regs *regs, struct perf_event *bp,
			     struct arch_hw_breakpoint *info)
{
	unsigned int instr = 0;
	int ret, type, size;
	struct instruction_op op;
	unsigned long addr = info->address;

	/* Fetch the trapping instruction; failure means we can't recover. */
	if (__get_user_inatomic(instr, (unsigned int *)regs->nip))
		goto fail;

	ret = analyse_instr(&op, regs, instr);
	type = GETTYPE(op.type);
	size = GETSIZE(op.type);

	/*
	 * larx/stcx cannot be emulated (emulation would break the
	 * reservation), so the breakpoint must be permanently disabled.
	 */
	if (!ret && (type == LARX || type == STCX)) {
		printk_ratelimited("Breakpoint hit on instruction that can't be emulated."
				   " Breakpoint at 0x%lx will be disabled.\n", addr);
		goto disable;
	}

	/*
	 * If it's extraneous event, we still need to emulate/single-
	 * step the instruction, but we don't generate an event.
	 */
	if (size && !dar_range_overlaps(regs->dar, size, info))
		info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;

	/* Do not emulate user-space instructions, instead single-step them */
	if (user_mode(regs)) {
		/* Remember bp so single_step_dabr_instruction() can re-arm it. */
		current->thread.last_hit_ubp = bp;
		regs->msr |= MSR_SE;
		return false;
	}

	/* Kernel mode: emulate in place; fall through to fail if we can't. */
	if (!emulate_step(regs, instr))
		goto fail;

	return true;

fail:
	/*
	 * We've failed in reliably handling the hw-breakpoint. Unregister
	 * it and throw a warning message to let the user know about it.
	 */
	WARN(1, "Unable to handle hardware breakpoint. Breakpoint at "
		"0x%lx will be disabled.", addr);

disable:
	perf_event_disable_inatomic(bp);
	return false;
}
295
/*
 * Top-level DABR/DAWR exception handler.  Returns NOTIFY_STOP when the
 * event was fully consumed here, NOTIFY_DONE when the generic code
 * should continue processing (no breakpoint installed, or ptrace
 * one-shot semantics apply).
 */
int hw_breakpoint_handler(struct die_args *args)
{
	int rc = NOTIFY_STOP;
	struct perf_event *bp;
	struct pt_regs *regs = args->regs;
	struct arch_hw_breakpoint *info;

	/* Disable breakpoints during exception handling */
	hw_breakpoint_disable();

	/*
	 * The counter may be concurrently released but that can only
	 * occur from a call_rcu() path. We can then safely fetch
	 * the breakpoint, use its callback, touch its counter
	 * while we are in an rcu_read_lock() path.
	 */
	rcu_read_lock();

	bp = __this_cpu_read(bp_per_reg);
	if (!bp) {
		rc = NOTIFY_DONE;
		goto out;
	}
	info = counter_arch_bp(bp);

	/*
	 * Return early after invoking user-callback function without restoring
	 * DABR if the breakpoint is from ptrace which always operates in
	 * one-shot mode. The ptrace-ed process will receive the SIGTRAP signal
	 * generated in do_dabr().
	 */
	if (bp->overflow_handler == ptrace_triggered) {
		perf_bp_event(bp, regs);
		rc = NOTIFY_DONE;
		goto out;
	}

	/* Decide whether this exception actually hit the watched range. */
	info->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ;
	if (IS_ENABLED(CONFIG_PPC_8xx)) {
		/* 8xx: no emulation needed, just classify the access. */
		if (!dar_within_range(regs->dar, info))
			info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
	} else {
		/* false => re-arming is deferred or bp was disabled. */
		if (!stepping_handler(regs, bp, info))
			goto out;
	}

	/*
	 * As a policy, the callback is invoked in a 'trigger-after-execute'
	 * fashion
	 */
	if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
		perf_bp_event(bp, regs);

	/* Re-arm the breakpoint that was disabled at entry. */
	__set_breakpoint(info);
out:
	rcu_read_unlock();
	return rc;
}
NOKPROBE_SYMBOL(hw_breakpoint_handler);
355
356/*
357 * Handle single-step exceptions following a DABR hit.
358 */
static int single_step_dabr_instruction(struct die_args *args)
{
	struct pt_regs *regs = args->regs;
	struct perf_event *bp = NULL;
	struct arch_hw_breakpoint *info;

	bp = current->thread.last_hit_ubp;
	/*
	 * Check if we are single-stepping as a result of a
	 * previous HW Breakpoint exception
	 */
	if (!bp)
		return NOTIFY_DONE;

	info = counter_arch_bp(bp);

	/*
	 * We shall invoke the user-defined callback function in the single
	 * stepping handler to confirm to 'trigger-after-execute' semantics
	 */
	if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
		perf_bp_event(bp, regs);

	/* Re-arm the breakpoint (skipped by arch_install_hw_breakpoint()
	 * while last_hit_ubp was set) and clear the pending state.
	 */
	__set_breakpoint(info);
	current->thread.last_hit_ubp = NULL;

	/*
	 * If the process was being single-stepped by ptrace, let the
	 * other single-step actions occur (e.g. generate SIGTRAP).
	 */
	if (test_thread_flag(TIF_SINGLESTEP))
		return NOTIFY_DONE;

	return NOTIFY_STOP;
}
NOKPROBE_SYMBOL(single_step_dabr_instruction);
395
396/*
397 * Handle debug exception notifications.
398 */
399int hw_breakpoint_exceptions_notify(
400		struct notifier_block *unused, unsigned long val, void *data)
401{
402	int ret = NOTIFY_DONE;
403
404	switch (val) {
405	case DIE_DABR_MATCH:
406		ret = hw_breakpoint_handler(data);
407		break;
408	case DIE_SSTEP:
409		ret = single_step_dabr_instruction(data);
410		break;
411	}
412
413	return ret;
414}
415NOKPROBE_SYMBOL(hw_breakpoint_exceptions_notify);
416
417/*
418 * Release the user breakpoints used by ptrace
419 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;

	/* Only slot 0 is used here; presumably unregister_hw_breakpoint()
	 * tolerates a NULL event — confirm against kernel/events code.
	 */
	unregister_hw_breakpoint(t->ptrace_bps[0]);
	t->ptrace_bps[0] = NULL;
}
427
/* Required by the perf PMU interface; intentionally a no-op for now. */
void hw_breakpoint_pmu_read(struct perf_event *bp)
{
	/* TODO */
}
432
/*
 * Overflow handler installed for ptrace-owned breakpoints; its identity
 * is also used by hw_breakpoint_handler() to detect the ptrace case.
 */
void ptrace_triggered(struct perf_event *bp,
		      struct perf_sample_data *data, struct pt_regs *regs)
{
	struct perf_event_attr attr;

	/*
	 * Disable the breakpoint request here since ptrace has defined a
	 * one-shot behaviour for breakpoint exceptions in PPC64.
	 * The SIGTRAP signal is generated automatically for us in do_dabr().
	 * We don't have to do anything about that here
	 */
	attr = bp->attr;
	attr.disabled = true;
	modify_user_hw_breakpoint(bp, &attr);
}