PageRenderTime 192ms CodeModel.GetById 34ms app.highlight 139ms RepoModel.GetById 1ms app.codeStats 1ms

/arch/powerpc/kernel/ptrace.c

http://github.com/mirrors/linux
C | 3378 lines | 2347 code | 403 blank | 628 comment | 428 complexity | ded8c5fcfef1f6a76c1bdf310e429be0 MD5 | raw file
   1/*
   2 *  PowerPC version
   3 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
   4 *
   5 *  Derived from "arch/m68k/kernel/ptrace.c"
   6 *  Copyright (C) 1994 by Hamish Macdonald
   7 *  Taken from linux/kernel/ptrace.c and modified for M680x0.
   8 *  linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
   9 *
  10 * Modified by Cort Dougan (cort@hq.fsmlabs.com)
  11 * and Paul Mackerras (paulus@samba.org).
  12 *
  13 * This file is subject to the terms and conditions of the GNU General
  14 * Public License.  See the file README.legal in the main directory of
  15 * this archive for more details.
  16 */
  17
  18#include <linux/kernel.h>
  19#include <linux/sched.h>
  20#include <linux/mm.h>
  21#include <linux/smp.h>
  22#include <linux/errno.h>
  23#include <linux/ptrace.h>
  24#include <linux/regset.h>
  25#include <linux/tracehook.h>
  26#include <linux/elf.h>
  27#include <linux/user.h>
  28#include <linux/security.h>
  29#include <linux/signal.h>
  30#include <linux/seccomp.h>
  31#include <linux/audit.h>
  32#include <trace/syscall.h>
  33#include <linux/hw_breakpoint.h>
  34#include <linux/perf_event.h>
  35#include <linux/context_tracking.h>
  36
  37#include <asm/uaccess.h>
  38#include <asm/page.h>
  39#include <asm/pgtable.h>
  40#include <asm/switch_to.h>
  41
  42#define CREATE_TRACE_POINTS
  43#include <trace/events/syscalls.h>
  44
  45/*
  46 * The parameter save area on the stack is used to store arguments being passed
  47 * to callee function and is located at fixed offset from stack pointer.
  48 */
  49#ifdef CONFIG_PPC32
  50#define PARAMETER_SAVE_AREA_OFFSET	24  /* bytes */
  51#else /* CONFIG_PPC32 */
  52#define PARAMETER_SAVE_AREA_OFFSET	48  /* bytes */
  53#endif
  54
  55struct pt_regs_offset {
  56	const char *name;
  57	int offset;
  58};
  59
  60#define STR(s)	#s			/* convert to string */
  61#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
  62#define GPR_OFFSET_NAME(num)	\
  63	{.name = STR(r##num), .offset = offsetof(struct pt_regs, gpr[num])}, \
  64	{.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
  65#define REG_OFFSET_END {.name = NULL, .offset = 0}
  66
  67#define TVSO(f)	(offsetof(struct thread_vr_state, f))
  68#define TFSO(f)	(offsetof(struct thread_fp_state, f))
  69#define TSO(f)	(offsetof(struct thread_struct, f))
  70
/*
 * Map register names to their byte offsets within struct pt_regs.
 * Each GPR appears twice, under its "rN" and "gprN" aliases (both map
 * to the same offset).  Lookups by offset return the first matching
 * entry, so the ordering here is significant.  The table is terminated
 * by REG_OFFSET_END (NULL name).
 */
static const struct pt_regs_offset regoffset_table[] = {
	GPR_OFFSET_NAME(0),
	GPR_OFFSET_NAME(1),
	GPR_OFFSET_NAME(2),
	GPR_OFFSET_NAME(3),
	GPR_OFFSET_NAME(4),
	GPR_OFFSET_NAME(5),
	GPR_OFFSET_NAME(6),
	GPR_OFFSET_NAME(7),
	GPR_OFFSET_NAME(8),
	GPR_OFFSET_NAME(9),
	GPR_OFFSET_NAME(10),
	GPR_OFFSET_NAME(11),
	GPR_OFFSET_NAME(12),
	GPR_OFFSET_NAME(13),
	GPR_OFFSET_NAME(14),
	GPR_OFFSET_NAME(15),
	GPR_OFFSET_NAME(16),
	GPR_OFFSET_NAME(17),
	GPR_OFFSET_NAME(18),
	GPR_OFFSET_NAME(19),
	GPR_OFFSET_NAME(20),
	GPR_OFFSET_NAME(21),
	GPR_OFFSET_NAME(22),
	GPR_OFFSET_NAME(23),
	GPR_OFFSET_NAME(24),
	GPR_OFFSET_NAME(25),
	GPR_OFFSET_NAME(26),
	GPR_OFFSET_NAME(27),
	GPR_OFFSET_NAME(28),
	GPR_OFFSET_NAME(29),
	GPR_OFFSET_NAME(30),
	GPR_OFFSET_NAME(31),
	REG_OFFSET_NAME(nip),
	REG_OFFSET_NAME(msr),
	REG_OFFSET_NAME(ctr),
	REG_OFFSET_NAME(link),
	REG_OFFSET_NAME(xer),
	REG_OFFSET_NAME(ccr),
#ifdef CONFIG_PPC64
	REG_OFFSET_NAME(softe),
#else
	/* MQ exists only on 32-bit (601-era); 64-bit exposes softe instead */
	REG_OFFSET_NAME(mq),
#endif
	REG_OFFSET_NAME(trap),
	REG_OFFSET_NAME(dar),
	REG_OFFSET_NAME(dsisr),
	REG_OFFSET_END,
};
 120
 121/**
 122 * regs_query_register_offset() - query register offset from its name
 123 * @name:	the name of a register
 124 *
 125 * regs_query_register_offset() returns the offset of a register in struct
 126 * pt_regs from its name. If the name is invalid, this returns -EINVAL;
 127 */
 128int regs_query_register_offset(const char *name)
 129{
 130	const struct pt_regs_offset *roff;
 131	for (roff = regoffset_table; roff->name != NULL; roff++)
 132		if (!strcmp(roff->name, name))
 133			return roff->offset;
 134	return -EINVAL;
 135}
 136
 137/**
 138 * regs_query_register_name() - query register name from its offset
 139 * @offset:	the offset of a register in struct pt_regs.
 140 *
 141 * regs_query_register_name() returns the name of a register from its
 142 * offset in struct pt_regs. If the @offset is invalid, this returns NULL;
 143 */
 144const char *regs_query_register_name(unsigned int offset)
 145{
 146	const struct pt_regs_offset *roff;
 147	for (roff = regoffset_table; roff->name != NULL; roff++)
 148		if (roff->offset == offset)
 149			return roff->name;
 150	return NULL;
 151}
 152
 153/*
 154 * does not yet catch signals sent when the child dies.
 155 * in exit.c or in signal.c.
 156 */
 157
 158/*
 159 * Set of msr bits that gdb can change on behalf of a process.
 160 */
 161#ifdef CONFIG_PPC_ADV_DEBUG_REGS
 162#define MSR_DEBUGCHANGE	0
 163#else
 164#define MSR_DEBUGCHANGE	(MSR_SE | MSR_BE)
 165#endif
 166
 167/*
 168 * Max register writeable via put_reg
 169 */
 170#ifdef CONFIG_PPC32
 171#define PT_MAX_PUT_REG	PT_MQ
 172#else
 173#define PT_MAX_PUT_REG	PT_CCR
 174#endif
 175
/*
 * Return the MSR value to report to the tracer: the task's live MSR
 * with the software-maintained FP exception mode bits
 * (thread.fpexc_mode) OR'ed in.
 */
static unsigned long get_user_msr(struct task_struct *task)
{
	return task->thread.regs->msr | task->thread.fpexc_mode;
}
 180
 181static int set_user_msr(struct task_struct *task, unsigned long msr)
 182{
 183	task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
 184	task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
 185	return 0;
 186}
 187
 188#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Same as get_user_msr(), but for the transaction-checkpointed MSR:
 * report ckpt_regs.msr with the FP exception mode bits folded in.
 */
static unsigned long get_user_ckpt_msr(struct task_struct *task)
{
	return task->thread.ckpt_regs.msr | task->thread.fpexc_mode;
}

/* Checkpointed-MSR write: only the MSR_DEBUGCHANGE bits may be altered */
static int set_user_ckpt_msr(struct task_struct *task, unsigned long msr)
{
	task->thread.ckpt_regs.msr &= ~MSR_DEBUGCHANGE;
	task->thread.ckpt_regs.msr |= msr & MSR_DEBUGCHANGE;
	return 0;
}

/* Checkpointed-trap write: mask off the low bits reserved to the kernel */
static int set_user_ckpt_trap(struct task_struct *task, unsigned long trap)
{
	task->thread.ckpt_regs.trap = trap & 0xfff0;
	return 0;
}
 206#endif
 207
 208#ifdef CONFIG_PPC64
/* Read the task's shadow copy of the Data Stream Control Register */
static int get_user_dscr(struct task_struct *task, unsigned long *data)
{
	*data = task->thread.dscr;
	return 0;
}

/*
 * Record a new DSCR value and flag it as explicitly set
 * (dscr_inherit) — NOTE(review): the inherit semantics are defined by
 * the context-switch code elsewhere; confirm there.
 */
static int set_user_dscr(struct task_struct *task, unsigned long dscr)
{
	task->thread.dscr = dscr;
	task->thread.dscr_inherit = 1;
	return 0;
}
#else
/* DSCR does not exist on 32-bit; reject ptrace accesses to it */
static int get_user_dscr(struct task_struct *task, unsigned long *data)
{
	return -EIO;
}

static int set_user_dscr(struct task_struct *task, unsigned long dscr)
{
	return -EIO;
}
 231#endif
 232
/*
 * Set the task's trap word from a ptrace write.  The low four bits of
 * the trap value are reserved for internal use by the kernel, so they
 * are always masked off; the tracer cannot set them.
 */
static int set_user_trap(struct task_struct *task, unsigned long trap)
{
	task->thread.regs->trap = trap & 0xfff0;
	return 0;
}
 242
 243/*
 244 * Get contents of register REGNO in task TASK.
 245 */
 246int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data)
 247{
 248	if ((task->thread.regs == NULL) || !data)
 249		return -EIO;
 250
 251	if (regno == PT_MSR) {
 252		*data = get_user_msr(task);
 253		return 0;
 254	}
 255
 256	if (regno == PT_DSCR)
 257		return get_user_dscr(task, data);
 258
 259	if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long))) {
 260		*data = ((unsigned long *)task->thread.regs)[regno];
 261		return 0;
 262	}
 263
 264	return -EIO;
 265}
 266
 267/*
 268 * Write contents of register REGNO in task TASK.
 269 */
 270int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
 271{
 272	if (task->thread.regs == NULL)
 273		return -EIO;
 274
 275	if (regno == PT_MSR)
 276		return set_user_msr(task, data);
 277	if (regno == PT_TRAP)
 278		return set_user_trap(task, data);
 279	if (regno == PT_DSCR)
 280		return set_user_dscr(task, data);
 281
 282	if (regno <= PT_MAX_PUT_REG) {
 283		((unsigned long *)task->thread.regs)[regno] = data;
 284		return 0;
 285	}
 286	return -EIO;
 287}
 288
/*
 * Copy out the GPR regset (a struct pt_regs image) for @target.
 * The MSR slot is substituted with get_user_msr() so the tracer sees
 * the FP exception mode bits, and any requested bytes beyond
 * struct pt_regs are zero-filled.
 */
static int gpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int i, ret;

	if (target->thread.regs == NULL)
		return -EIO;

	if (!FULL_REGS(target->thread.regs)) {
		/* We have a partial register set.  Fill 14-31 with bogus values */
		for (i = 14; i < 32; i++)
			target->thread.regs->gpr[i] = NV_REG_POISON;
	}

	/* Everything below the MSR comes straight from pt_regs */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  target->thread.regs,
				  0, offsetof(struct pt_regs, msr));
	if (!ret) {
		/* The MSR slot is filtered through get_user_msr() */
		unsigned long msr = get_user_msr(target);
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
					  offsetof(struct pt_regs, msr),
					  offsetof(struct pt_regs, msr) +
					  sizeof(msr));
	}

	/* The split copyouts above assume orig_gpr3 directly follows msr */
	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct pt_regs, msr) + sizeof(long));

	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &target->thread.regs->orig_gpr3,
					  offsetof(struct pt_regs, orig_gpr3),
					  sizeof(struct pt_regs));
	if (!ret)
		/* Zero-fill anything requested beyond struct pt_regs */
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct pt_regs), -1);

	return ret;
}
 329
/*
 * Update the GPR regset from tracer-supplied data.  Registers below
 * the MSR are copied straight into pt_regs; the MSR and TRAP words are
 * filtered through set_user_msr()/set_user_trap(); slots between
 * PT_MAX_PUT_REG and PT_TRAP, and everything past TRAP, are ignored.
 */
static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	unsigned long reg;
	int ret;

	if (target->thread.regs == NULL)
		return -EIO;

	CHECK_FULL_REGS(target->thread.regs);

	/* Everything below the MSR goes straight into pt_regs */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.regs,
				 0, PT_MSR * sizeof(reg));

	if (!ret && count > 0) {
		/* MSR writes are restricted to the MSR_DEBUGCHANGE bits */
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
					 PT_MSR * sizeof(reg),
					 (PT_MSR + 1) * sizeof(reg));
		if (!ret)
			ret = set_user_msr(target, reg);
	}

	/* The split copyins above/below assume orig_gpr3 follows msr */
	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct pt_regs, msr) + sizeof(long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.regs->orig_gpr3,
					 PT_ORIG_R3 * sizeof(reg),
					 (PT_MAX_PUT_REG + 1) * sizeof(reg));

	if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
		/* Skip the read-only slots between PT_MAX_PUT_REG and TRAP */
		ret = user_regset_copyin_ignore(
			&pos, &count, &kbuf, &ubuf,
			(PT_MAX_PUT_REG + 1) * sizeof(reg),
			PT_TRAP * sizeof(reg));

	if (!ret && count > 0) {
		/* TRAP writes have their reserved low bits masked off */
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
					 PT_TRAP * sizeof(reg),
					 (PT_TRAP + 1) * sizeof(reg));
		if (!ret)
			ret = set_user_trap(target, reg);
	}

	if (!ret)
		/* Discard anything past the TRAP word */
		ret = user_regset_copyin_ignore(
			&pos, &count, &kbuf, &ubuf,
			(PT_TRAP + 1) * sizeof(reg), -1);

	return ret;
}
 384
 385/*
 386 * When the transaction is active, 'transact_fp' holds the current running
 387 * value of all FPR registers and 'fp_state' holds the last checkpointed
 388 * value of all FPR registers for the current transaction. When transaction
 389 * is not active 'fp_state' holds the current running state of all the FPR
 390 * registers. So this function which returns the current running values of
 391 * all the FPR registers, needs to know whether any transaction is active
 392 * or not.
 393 *
 394 * Userspace interface buffer layout:
 395 *
 396 * struct data {
 397 *	u64	fpr[32];
 398 *	u64	fpscr;
 399 * };
 400 *
 401 * There are two config options CONFIG_VSX and CONFIG_PPC_TRANSACTIONAL_MEM
 402 * which determines the final code in this function. All the combinations of
 403 * these two config options are possible except the one below as transactional
 404 * memory config pulls in CONFIG_VSX automatically.
 405 *
 406 *	!defined(CONFIG_VSX) && defined(CONFIG_PPC_TRANSACTIONAL_MEM)
 407 */
static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
#ifdef CONFIG_VSX
	/* Staging buffer: fpr[0..31] followed by fpscr in slot 32 */
	u64 buf[33];
	int i;
#endif
	flush_fp_to_thread(target);

#if defined(CONFIG_VSX) && defined(CONFIG_PPC_TRANSACTIONAL_MEM)
	/* copy to local buffer then write that out */
	if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
		/* Active transaction: the running FPRs live in transact_fp */
		flush_altivec_to_thread(target);
		flush_tmregs_to_thread(target);
		for (i = 0; i < 32 ; i++)
			buf[i] = target->thread.TS_TRANS_FPR(i);
		buf[32] = target->thread.transact_fp.fpscr;
	} else {
		for (i = 0; i < 32 ; i++)
			buf[i] = target->thread.TS_FPR(i);
		buf[32] = target->thread.fp_state.fpscr;
	}
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
#endif

#if defined(CONFIG_VSX) && !defined(CONFIG_PPC_TRANSACTIONAL_MEM)
	/* copy to local buffer then write that out */
	for (i = 0; i < 32 ; i++)
		buf[i] = target->thread.TS_FPR(i);
	buf[32] = target->thread.fp_state.fpscr;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
#endif

#if !defined(CONFIG_VSX) && !defined(CONFIG_PPC_TRANSACTIONAL_MEM)
	/* No VSX: fp_state already matches the userspace layout exactly */
	BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
		     offsetof(struct thread_fp_state, fpr[32]));

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.fp_state, 0, -1);
#endif
}
 450
 451/*
 452 * When the transaction is active, 'transact_fp' holds the current running
 453 * value of all FPR registers and 'fp_state' holds the last checkpointed
 454 * value of all FPR registers for the current transaction. When transaction
 455 * is not active 'fp_state' holds the current running state of all the FPR
 * registers. So this function which sets the current running values of
 457 * all the FPR registers, needs to know whether any transaction is active
 458 * or not.
 459 *
 460 * Userspace interface buffer layout:
 461 *
 462 * struct data {
 463 *	u64	fpr[32];
 464 *	u64	fpscr;
 465 * };
 466 *
 467 * There are two config options CONFIG_VSX and CONFIG_PPC_TRANSACTIONAL_MEM
 468 * which determines the final code in this function. All the combinations of
 469 * these two config options are possible except the one below as transactional
 470 * memory config pulls in CONFIG_VSX automatically.
 471 *
 472 *	!defined(CONFIG_VSX) && defined(CONFIG_PPC_TRANSACTIONAL_MEM)
 473 */
static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
#ifdef CONFIG_VSX
	/* Staging buffer: fpr[0..31] followed by fpscr in slot 32 */
	u64 buf[33];
	int i;
#endif
	flush_fp_to_thread(target);

#if defined(CONFIG_VSX) && defined(CONFIG_PPC_TRANSACTIONAL_MEM)
	/* copy to local buffer then write that out */
	i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
	if (i)
		return i;

	if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
		/* Active transaction: the running FPRs live in transact_fp */
		flush_altivec_to_thread(target);
		flush_tmregs_to_thread(target);
		for (i = 0; i < 32 ; i++)
			target->thread.TS_TRANS_FPR(i) = buf[i];
		target->thread.transact_fp.fpscr = buf[32];
	} else {
		for (i = 0; i < 32 ; i++)
			target->thread.TS_FPR(i) = buf[i];
		target->thread.fp_state.fpscr = buf[32];
	}
	return 0;
#endif

#if defined(CONFIG_VSX) && !defined(CONFIG_PPC_TRANSACTIONAL_MEM)
	/* copy to local buffer then write that out */
	i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
	if (i)
		return i;
	for (i = 0; i < 32 ; i++)
		target->thread.TS_FPR(i) = buf[i];
	target->thread.fp_state.fpscr = buf[32];
	return 0;
#endif

#if !defined(CONFIG_VSX) && !defined(CONFIG_PPC_TRANSACTIONAL_MEM)
	/* No VSX: fp_state matches the userspace layout, copy in directly */
	BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
		     offsetof(struct thread_fp_state, fpr[32]));

	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fp_state, 0, -1);
#endif
}
 523
 524#ifdef CONFIG_ALTIVEC
 525/*
 526 * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
 527 * The transfer totals 34 quadword.  Quadwords 0-31 contain the
 528 * corresponding vector registers.  Quadword 32 contains the vscr as the
 529 * last word (offset 12) within that quadword.  Quadword 33 contains the
 530 * vrsave as the first word (offset 0) within the quadword.
 531 *
 532 * This definition of the VMX state is compatible with the current PPC32
 533 * ptrace interface.  This allows signal handling and ptrace to use the
 534 * same structures.  This also simplifies the implementation of a bi-arch
 * (combined 32- and 64-bit) gdb.
 536 */
 537
/* Report the full VMX regset only if the task has actually used Altivec */
static int vr_active(struct task_struct *target,
		     const struct user_regset *regset)
{
	flush_altivec_to_thread(target);
	return target->thread.used_vr ? regset->n : 0;
}
 544
 545/*
 546 * When the transaction is active, 'transact_vr' holds the current running
 547 * value of all the VMX registers and 'vr_state' holds the last checkpointed
 548 * value of all the VMX registers for the current transaction to fall back
 549 * on in case it aborts. When transaction is not active 'vr_state' holds
 550 * the current running state of all the VMX registers. So this function which
 551 * gets the current running values of all the VMX registers, needs to know
 552 * whether any transaction is active or not.
 553 *
 554 * Userspace interface buffer layout:
 555 *
 556 * struct data {
 557 *	vector128	vr[32];
 558 *	vector128	vscr;
 559 *	vector128	vrsave;
 560 * };
 561 */
static int vr_get(struct task_struct *target, const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  void *kbuf, void __user *ubuf)
{
	struct thread_vr_state *addr;
	int ret;

	flush_altivec_to_thread(target);

	/* vr[0..31] and vscr must be contiguous for the single copyout */
	BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
		     offsetof(struct thread_vr_state, vr[32]));

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* Active transaction: the running VMX state lives in transact_vr */
	if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
		flush_fp_to_thread(target);
		flush_tmregs_to_thread(target);
		addr = &target->thread.transact_vr;
	} else {
		addr = &target->thread.vr_state;
	}
#else
	addr = &target->thread.vr_state;
#endif
	/* Copy out vr0..vr31 plus vscr (33 quadwords) in one go */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  addr, 0,
				  33 * sizeof(vector128));
	if (!ret) {
		/*
		 * Copy out only the low-order word of vrsave.
		 */
		union {
			elf_vrreg_t reg;
			u32 word;
		} vrsave;
		memset(&vrsave, 0, sizeof(vrsave));

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
		if (MSR_TM_ACTIVE(target->thread.regs->msr))
			vrsave.word = target->thread.transact_vrsave;
		else
			vrsave.word = target->thread.vrsave;
#else
		vrsave.word = target->thread.vrsave;
#endif

		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
					  33 * sizeof(vector128), -1);
	}

	return ret;
}
 613
 614/*
 615 * When the transaction is active, 'transact_vr' holds the current running
 616 * value of all the VMX registers and 'vr_state' holds the last checkpointed
 617 * value of all the VMX registers for the current transaction to fall back
 618 * on in case it aborts. When transaction is not active 'vr_state' holds
 619 * the current running state of all the VMX registers. So this function which
 620 * sets the current running values of all the VMX registers, needs to know
 621 * whether any transaction is active or not.
 622 *
 623 * Userspace interface buffer layout:
 624 *
 625 * struct data {
 626 *	vector128	vr[32];
 627 *	vector128	vscr;
 628 *	vector128	vrsave;
 629 * };
 630 */
static int vr_set(struct task_struct *target, const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf)
{
	struct thread_vr_state *addr;
	int ret;

	flush_altivec_to_thread(target);

	/* vr[0..31] and vscr must be contiguous for the single copyin */
	BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
		     offsetof(struct thread_vr_state, vr[32]));

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* Active transaction: the running VMX state lives in transact_vr */
	if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
		flush_fp_to_thread(target);
		flush_tmregs_to_thread(target);
		addr = &target->thread.transact_vr;
	} else {
		addr = &target->thread.vr_state;
	}
#else
	addr = &target->thread.vr_state;
#endif
	/* Copy in vr0..vr31 plus vscr (33 quadwords) in one go */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 addr, 0,
				 33 * sizeof(vector128));
	if (!ret && count > 0) {
		/*
		 * We use only the first word of vrsave.
		 */
		union {
			elf_vrreg_t reg;
			u32 word;
		} vrsave;
		memset(&vrsave, 0, sizeof(vrsave));

		/* Preload the current value so unwritten bytes are kept */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
		if (MSR_TM_ACTIVE(target->thread.regs->msr))
			vrsave.word = target->thread.transact_vrsave;
		else
			vrsave.word = target->thread.vrsave;
#else
		vrsave.word = target->thread.vrsave;
#endif
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
					 33 * sizeof(vector128), -1);
		if (!ret) {

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
			if (MSR_TM_ACTIVE(target->thread.regs->msr))
				target->thread.transact_vrsave = vrsave.word;
			else
				target->thread.vrsave = vrsave.word;
#else
			target->thread.vrsave = vrsave.word;
#endif
		}
	}

	return ret;
}
 692#endif /* CONFIG_ALTIVEC */
 693
 694#ifdef CONFIG_VSX
 695/*
 * Currently to set and get all the vsx state, you need to call
 697 * the fp and VMX calls as well.  This only get/sets the lower 32
 698 * 128bit VSX registers.
 699 */
 700
/* Report the full VSX regset only if the task has actually used VSX */
static int vsr_active(struct task_struct *target,
		      const struct user_regset *regset)
{
	flush_vsx_to_thread(target);
	return target->thread.used_vsr ? regset->n : 0;
}
 707
 708/*
 709 * When the transaction is active, 'transact_fp' holds the current running
 710 * value of all FPR registers and 'fp_state' holds the last checkpointed
 711 * value of all FPR registers for the current transaction. When transaction
 712 * is not active 'fp_state' holds the current running state of all the FPR
 713 * registers. So this function which returns the current running values of
 714 * all the FPR registers, needs to know whether any transaction is active
 715 * or not.
 716 *
 717 * Userspace interface buffer layout:
 718 *
 719 * struct data {
 720 *	u64	vsx[32];
 721 * };
 722 */
static int vsr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	u64 buf[32];
	int ret, i;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);
	flush_tmregs_to_thread(target);
#endif
	flush_vsx_to_thread(target);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
		/* Active transaction: the running state is in transact_fp */
		for (i = 0; i < 32 ; i++)
			buf[i] = target->thread.
				transact_fp.fpr[i][TS_VSRLOWOFFSET];
	} else {
		for (i = 0; i < 32 ; i++)
			buf[i] = target->thread.
				fp_state.fpr[i][TS_VSRLOWOFFSET];
	}
#else
	for (i = 0; i < 32 ; i++)
		buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
#endif
	/* Copy out the low 64 bits of VSR0-31 gathered above */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  buf, 0, 32 * sizeof(double));

	return ret;
}
 756
 757/*
 758 * When the transaction is active, 'transact_fp' holds the current running
 759 * value of all FPR registers and 'fp_state' holds the last checkpointed
 760 * value of all FPR registers for the current transaction. When transaction
 761 * is not active 'fp_state' holds the current running state of all the FPR
 762 * registers. So this function which sets the current running values of all
 763 * the FPR registers, needs to know whether any transaction is active or not.
 764 *
 765 * Userspace interface buffer layout:
 766 *
 767 * struct data {
 768 *	u64	vsx[32];
 769 * };
 770 */
 771static int vsr_set(struct task_struct *target, const struct user_regset *regset,
 772		   unsigned int pos, unsigned int count,
 773		   const void *kbuf, const void __user *ubuf)
 774{
 775	u64 buf[32];
 776	int ret,i;
 777
 778#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 779	flush_fp_to_thread(target);
 780	flush_altivec_to_thread(target);
 781	flush_tmregs_to_thread(target);
 782#endif
 783	flush_vsx_to_thread(target);
 784
 785	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 786				 buf, 0, 32 * sizeof(double));
 787
 788#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 789	if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
 790		for (i = 0; i < 32 ; i++)
 791			target->thread.transact_fp.
 792				fpr[i][TS_VSRLOWOFFSET] = buf[i];
 793	} else {
 794		for (i = 0; i < 32 ; i++)
 795			target->thread.fp_state.
 796				fpr[i][TS_VSRLOWOFFSET] = buf[i];
 797	}
 798#else
 799	for (i = 0; i < 32 ; i++)
 800		target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
 801#endif
 802
 803
 804	return ret;
 805}
 806#endif /* CONFIG_VSX */
 807
 808#ifdef CONFIG_SPE
 809
 810/*
 811 * For get_evrregs/set_evrregs functions 'data' has the following layout:
 812 *
 813 * struct {
 814 *   u32 evr[32];
 815 *   u64 acc;
 816 *   u32 spefscr;
 817 * }
 818 */
 819
/* Report the full SPE regset only if the task has actually used SPE */
static int evr_active(struct task_struct *target,
		      const struct user_regset *regset)
{
	flush_spe_to_thread(target);
	return target->thread.used_spe ? regset->n : 0;
}
 826
static int evr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int ret;

	flush_spe_to_thread(target);

	/* The u32 evr[32] array comes first in the userspace layout */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.evr,
				  0, sizeof(target->thread.evr));

	/* acc and spefscr must be adjacent so one copyout covers both */
	BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
		     offsetof(struct thread_struct, spefscr));

	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &target->thread.acc,
					  sizeof(target->thread.evr), -1);

	return ret;
}
 849
static int evr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;

	flush_spe_to_thread(target);

	/* The u32 evr[32] array comes first in the userspace layout */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.evr,
				 0, sizeof(target->thread.evr));

	/* acc and spefscr must be adjacent so one copyin covers both */
	BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
		     offsetof(struct thread_struct, spefscr));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.acc,
					 sizeof(target->thread.evr), -1);

	return ret;
}
 872#endif /* CONFIG_SPE */
 873
 874#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/**
 * tm_cgpr_active - get active number of registers in CGPR
 * @target:	The target task.
 * @regset:	The user regset structure.
 *
 * This function checks for the active number of available
 * registers in transaction checkpointed GPR category.
 */
static int tm_cgpr_active(struct task_struct *target,
			  const struct user_regset *regset)
{
	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	/* Checkpointed GPRs only exist while a transaction is active */
	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return 0;

	return regset->n;
}
 894
 895/**
 896 * tm_cgpr_get - get CGPR registers
 897 * @target:	The target task.
 898 * @regset:	The user regset structure.
 899 * @pos:	The buffer position.
 900 * @count:	Number of bytes to copy.
 901 * @kbuf:	Kernel buffer to copy from.
 902 * @ubuf:	User buffer to copy into.
 903 *
 904 * This function gets transaction checkpointed GPR registers.
 905 *
 906 * When the transaction is active, 'ckpt_regs' holds all the checkpointed
 907 * GPR register values for the current transaction to fall back on if it
 908 * aborts in between. This function gets those checkpointed GPR registers.
 909 * The userspace interface buffer layout is as follows.
 910 *
 911 * struct data {
 912 *	struct pt_regs ckpt_regs;
 913 * };
 914 */
static int tm_cgpr_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	int ret;

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	/* Checkpointed state only exists while a transaction is active */
	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);
	flush_tmregs_to_thread(target);

	/* Everything below the MSR comes straight from ckpt_regs */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.ckpt_regs,
				  0, offsetof(struct pt_regs, msr));
	if (!ret) {
		/* The MSR slot is filtered through get_user_ckpt_msr() */
		unsigned long msr = get_user_ckpt_msr(target);

		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
					  offsetof(struct pt_regs, msr),
					  offsetof(struct pt_regs, msr) +
					  sizeof(msr));
	}

	/* The split copyouts above assume orig_gpr3 directly follows msr */
	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct pt_regs, msr) + sizeof(long));

	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &target->thread.ckpt_regs.orig_gpr3,
					  offsetof(struct pt_regs, orig_gpr3),
					  sizeof(struct pt_regs));
	if (!ret)
		/* Zero-fill anything requested beyond struct pt_regs */
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct pt_regs), -1);

	return ret;
}
 958
/**
 * tm_cgpr_set - set the CGPR registers
 * @target:	The target task.
 * @regset:	The user regset structure.
 * @pos:	The buffer position.
 * @count:	Number of bytes to copy.
 * @kbuf:	Kernel buffer to copy into.
 * @ubuf:	User buffer to copy from.
 *
 * This function sets in transaction checkpointed GPR registers.
 *
 * When the transaction is active, 'ckpt_regs' holds the checkpointed
 * GPR register values for the current transaction to fall back on if it
 * aborts in between. This function sets those checkpointed GPR registers.
 * The userspace interface buffer layout is as follows.
 *
 * struct data {
 *	struct pt_regs ckpt_regs;
 * };
 */
static int tm_cgpr_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned long reg;
	int ret;

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	/* Checkpointed state only exists while a transaction is active */
	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);
	flush_tmregs_to_thread(target);

	/* Everything below the MSR goes straight into ckpt_regs */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.ckpt_regs,
				 0, PT_MSR * sizeof(reg));

	if (!ret && count > 0) {
		/* MSR writes are restricted to the MSR_DEBUGCHANGE bits */
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
					 PT_MSR * sizeof(reg),
					 (PT_MSR + 1) * sizeof(reg));
		if (!ret)
			ret = set_user_ckpt_msr(target, reg);
	}

	/* The split copyins above/below assume orig_gpr3 follows msr */
	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct pt_regs, msr) + sizeof(long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.ckpt_regs.orig_gpr3,
					 PT_ORIG_R3 * sizeof(reg),
					 (PT_MAX_PUT_REG + 1) * sizeof(reg));

	if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
		/* Skip the read-only slots between PT_MAX_PUT_REG and TRAP */
		ret = user_regset_copyin_ignore(
			&pos, &count, &kbuf, &ubuf,
			(PT_MAX_PUT_REG + 1) * sizeof(reg),
			PT_TRAP * sizeof(reg));

	if (!ret && count > 0) {
		/* TRAP writes have their reserved low bits masked off */
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
					 PT_TRAP * sizeof(reg),
					 (PT_TRAP + 1) * sizeof(reg));
		if (!ret)
			ret = set_user_ckpt_trap(target, reg);
	}

	if (!ret)
		/* Discard anything past the TRAP word */
		ret = user_regset_copyin_ignore(
			&pos, &count, &kbuf, &ubuf,
			(PT_TRAP + 1) * sizeof(reg), -1);

	return ret;
}
1039
1040/**
1041 * tm_cfpr_active - get active number of registers in CFPR
1042 * @target:	The target task.
1043 * @regset:	The user regset structure.
1044 *
1045 * This function checks for the active number of available
1046 * regisers in transaction checkpointed FPR category.
1047 */
1048static int tm_cfpr_active(struct task_struct *target,
1049				const struct user_regset *regset)
1050{
1051	if (!cpu_has_feature(CPU_FTR_TM))
1052		return -ENODEV;
1053
1054	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1055		return 0;
1056
1057	return regset->n;
1058}
1059
1060/**
1061 * tm_cfpr_get - get CFPR registers
1062 * @target:	The target task.
1063 * @regset:	The user regset structure.
1064 * @pos:	The buffer position.
1065 * @count:	Number of bytes to copy.
1066 * @kbuf:	Kernel buffer to copy from.
1067 * @ubuf:	User buffer to copy into.
1068 *
1069 * This function gets in transaction checkpointed FPR registers.
1070 *
1071 * When the transaction is active 'fp_state' holds the checkpointed
1072 * values for the current transaction to fall back on if it aborts
1073 * in between. This function gets those checkpointed FPR registers.
1074 * The userspace interface buffer layout is as follows.
1075 *
1076 * struct data {
1077 *	u64	fpr[32];
1078 *	u64	fpscr;
1079 *};
1080 */
1081static int tm_cfpr_get(struct task_struct *target,
1082			const struct user_regset *regset,
1083			unsigned int pos, unsigned int count,
1084			void *kbuf, void __user *ubuf)
1085{
1086	u64 buf[33];
1087	int i;
1088
1089	if (!cpu_has_feature(CPU_FTR_TM))
1090		return -ENODEV;
1091
1092	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1093		return -ENODATA;
1094
1095	flush_fp_to_thread(target);
1096	flush_altivec_to_thread(target);
1097	flush_tmregs_to_thread(target);
1098
1099	/* copy to local buffer then write that out */
1100	for (i = 0; i < 32 ; i++)
1101		buf[i] = target->thread.TS_FPR(i);
1102	buf[32] = target->thread.fp_state.fpscr;
1103	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
1104}
1105
1106/**
1107 * tm_cfpr_set - set CFPR registers
1108 * @target:	The target task.
1109 * @regset:	The user regset structure.
1110 * @pos:	The buffer position.
1111 * @count:	Number of bytes to copy.
1112 * @kbuf:	Kernel buffer to copy into.
1113 * @ubuf:	User buffer to copy from.
1114 *
1115 * This function sets in transaction checkpointed FPR registers.
1116 *
1117 * When the transaction is active 'fp_state' holds the checkpointed
1118 * FPR register values for the current transaction to fall back on
1119 * if it aborts in between. This function sets these checkpointed
1120 * FPR registers. The userspace interface buffer layout is as follows.
1121 *
1122 * struct data {
1123 *	u64	fpr[32];
1124 *	u64	fpscr;
1125 *};
1126 */
1127static int tm_cfpr_set(struct task_struct *target,
1128			const struct user_regset *regset,
1129			unsigned int pos, unsigned int count,
1130			const void *kbuf, const void __user *ubuf)
1131{
1132	u64 buf[33];
1133	int i;
1134
1135	if (!cpu_has_feature(CPU_FTR_TM))
1136		return -ENODEV;
1137
1138	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1139		return -ENODATA;
1140
1141	flush_fp_to_thread(target);
1142	flush_altivec_to_thread(target);
1143	flush_tmregs_to_thread(target);
1144
1145	/* copy to local buffer then write that out */
1146	i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
1147	if (i)
1148		return i;
1149	for (i = 0; i < 32 ; i++)
1150		target->thread.TS_FPR(i) = buf[i];
1151	target->thread.fp_state.fpscr = buf[32];
1152	return 0;
1153}
1154
1155/**
1156 * tm_cvmx_active - get active number of registers in CVMX
1157 * @target:	The target task.
1158 * @regset:	The user regset structure.
1159 *
1160 * This function checks for the active number of available
1161 * regisers in checkpointed VMX category.
1162 */
1163static int tm_cvmx_active(struct task_struct *target,
1164				const struct user_regset *regset)
1165{
1166	if (!cpu_has_feature(CPU_FTR_TM))
1167		return -ENODEV;
1168
1169	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1170		return 0;
1171
1172	return regset->n;
1173}
1174
1175/**
 * tm_cvmx_get - get CVMX registers
1177 * @target:	The target task.
1178 * @regset:	The user regset structure.
1179 * @pos:	The buffer position.
1180 * @count:	Number of bytes to copy.
1181 * @kbuf:	Kernel buffer to copy from.
1182 * @ubuf:	User buffer to copy into.
1183 *
1184 * This function gets in transaction checkpointed VMX registers.
1185 *
1186 * When the transaction is active 'vr_state' and 'vr_save' hold
1187 * the checkpointed values for the current transaction to fall
1188 * back on if it aborts in between. The userspace interface buffer
1189 * layout is as follows.
1190 *
1191 * struct data {
1192 *	vector128	vr[32];
1193 *	vector128	vscr;
1194 *	vector128	vrsave;
1195 *};
1196 */
static int tm_cvmx_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	int ret;

	/* The single 33-vector copyout below assumes vscr follows vr[31]. */
	BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	/* Flush the state */
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);
	flush_tmregs_to_thread(target);

	/* vr[0..31] plus vscr in one contiguous copy. */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					&target->thread.vr_state, 0,
					33 * sizeof(vector128));
	if (!ret) {
		/*
		 * Copy out only the low-order word of vrsave.
		 */
		union {
			elf_vrreg_t reg;
			u32 word;
		} vrsave;
		/* Zero-fill so the unused bytes of the slot are defined. */
		memset(&vrsave, 0, sizeof(vrsave));
		vrsave.word = target->thread.vrsave;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
						33 * sizeof(vector128), -1);
	}

	return ret;
}
1236
1237/**
 * tm_cvmx_set - set CVMX registers
1239 * @target:	The target task.
1240 * @regset:	The user regset structure.
1241 * @pos:	The buffer position.
1242 * @count:	Number of bytes to copy.
1243 * @kbuf:	Kernel buffer to copy into.
1244 * @ubuf:	User buffer to copy from.
1245 *
1246 * This function sets in transaction checkpointed VMX registers.
1247 *
1248 * When the transaction is active 'vr_state' and 'vr_save' hold
1249 * the checkpointed values for the current transaction to fall
1250 * back on if it aborts in between. The userspace interface buffer
1251 * layout is as follows.
1252 *
1253 * struct data {
1254 *	vector128	vr[32];
1255 *	vector128	vscr;
1256 *	vector128	vrsave;
1257 *};
1258 */
static int tm_cvmx_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	int ret;

	/* The single 33-vector copyin below assumes vscr follows vr[31]. */
	BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);
	flush_tmregs_to_thread(target);

	/* vr[0..31] plus vscr written straight into vr_state. */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					&target->thread.vr_state, 0,
					33 * sizeof(vector128));
	if (!ret && count > 0) {
		/*
		 * We use only the low-order word of vrsave.
		 */
		union {
			elf_vrreg_t reg;
			u32 word;
		} vrsave;
		memset(&vrsave, 0, sizeof(vrsave));
		vrsave.word = target->thread.vrsave;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
						33 * sizeof(vector128), -1);
		if (!ret)
			target->thread.vrsave = vrsave.word;
	}

	return ret;
}
1299
1300/**
1301 * tm_cvsx_active - get active number of registers in CVSX
1302 * @target:	The target task.
1303 * @regset:	The user regset structure.
1304 *
1305 * This function checks for the active number of available
1306 * regisers in transaction checkpointed VSX category.
1307 */
1308static int tm_cvsx_active(struct task_struct *target,
1309				const struct user_regset *regset)
1310{
1311	if (!cpu_has_feature(CPU_FTR_TM))
1312		return -ENODEV;
1313
1314	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1315		return 0;
1316
1317	flush_vsx_to_thread(target);
1318	return target->thread.used_vsr ? regset->n : 0;
1319}
1320
1321/**
1322 * tm_cvsx_get - get CVSX registers
1323 * @target:	The target task.
1324 * @regset:	The user regset structure.
1325 * @pos:	The buffer position.
1326 * @count:	Number of bytes to copy.
1327 * @kbuf:	Kernel buffer to copy from.
1328 * @ubuf:	User buffer to copy into.
1329 *
1330 * This function gets in transaction checkpointed VSX registers.
1331 *
1332 * When the transaction is active 'fp_state' holds the checkpointed
1333 * values for the current transaction to fall back on if it aborts
1334 * in between. This function gets those checkpointed VSX registers.
1335 * The userspace interface buffer layout is as follows.
1336 *
1337 * struct data {
1338 *	u64	vsx[32];
1339 *};
1340 */
1341static int tm_cvsx_get(struct task_struct *target,
1342			const struct user_regset *regset,
1343			unsigned int pos, unsigned int count,
1344			void *kbuf, void __user *ubuf)
1345{
1346	u64 buf[32];
1347	int ret, i;
1348
1349	if (!cpu_has_feature(CPU_FTR_TM))
1350		return -ENODEV;
1351
1352	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1353		return -ENODATA;
1354
1355	/* Flush the state */
1356	flush_fp_to_thread(target);
1357	flush_altivec_to_thread(target);
1358	flush_tmregs_to_thread(target);
1359	flush_vsx_to_thread(target);
1360
1361	for (i = 0; i < 32 ; i++)
1362		buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
1363	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1364				  buf, 0, 32 * sizeof(double));
1365
1366	return ret;
1367}
1368
1369/**
 * tm_cvsx_set - set CVSX registers
1371 * @target:	The target task.
1372 * @regset:	The user regset structure.
1373 * @pos:	The buffer position.
1374 * @count:	Number of bytes to copy.
1375 * @kbuf:	Kernel buffer to copy into.
1376 * @ubuf:	User buffer to copy from.
1377 *
1378 * This function sets in transaction checkpointed VSX registers.
1379 *
1380 * When the transaction is active 'fp_state' holds the checkpointed
1381 * VSX register values for the current transaction to fall back on
1382 * if it aborts in between. This function sets these checkpointed
1383 * FPR registers. The userspace interface buffer layout is as follows.
1384 *
1385 * struct data {
1386 *	u64	vsx[32];
1387 *};
1388 */
1389static int tm_cvsx_set(struct task_struct *target,
1390			const struct user_regset *regset,
1391			unsigned int pos, unsigned int count,
1392			const void *kbuf, const void __user *ubuf)
1393{
1394	u64 buf[32];
1395	int ret, i;
1396
1397	if (!cpu_has_feature(CPU_FTR_TM))
1398		return -ENODEV;
1399
1400	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1401		return -ENODATA;
1402
1403	/* Flush the state */
1404	flush_fp_to_thread(target);
1405	flush_altivec_to_thread(target);
1406	flush_tmregs_to_thread(target);
1407	flush_vsx_to_thread(target);
1408
1409	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1410				 buf, 0, 32 * sizeof(double));
1411	for (i = 0; i < 32 ; i++)
1412		target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
1413
1414	return ret;
1415}
1416
1417/**
1418 * tm_spr_active - get active number of registers in TM SPR
1419 * @target:	The target task.
1420 * @regset:	The user regset structure.
1421 *
1422 * This function checks the active number of available
1423 * regisers in the transactional memory SPR category.
1424 */
1425static int tm_spr_active(struct task_struct *target,
1426			 const struct user_regset *regset)
1427{
1428	if (!cpu_has_feature(CPU_FTR_TM))
1429		return -ENODEV;
1430
1431	return regset->n;
1432}
1433
1434/**
1435 * tm_spr_get - get the TM related SPR registers
1436 * @target:	The target task.
1437 * @regset:	The user regset structure.
1438 * @pos:	The buffer position.
1439 * @count:	Number of bytes to copy.
1440 * @kbuf:	Kernel buffer to copy from.
1441 * @ubuf:	User buffer to copy into.
1442 *
1443 * This function gets transactional memory related SPR registers.
1444 * The userspace interface buffer layout is as follows.
1445 *
1446 * struct {
1447 *	u64		tm_tfhar;
1448 *	u64		tm_texasr;
1449 *	u64		tm_tfiar;
1450 * };
1451 */
static int tm_spr_get(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      void *kbuf, void __user *ubuf)
{
	int ret;

	/* Build tests */
	/* The fixed offsets used below assume this thread_struct layout. */
	BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
	BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
	BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	/* Flush the states */
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);
	flush_tmregs_to_thread(target);

	/* TFHAR register */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				&target->thread.tm_tfhar, 0, sizeof(u64));

	/* TEXASR register */
	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				&target->thread.tm_texasr, sizeof(u64),
				2 * sizeof(u64));

	/* TFIAR register */
	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				&target->thread.tm_tfiar,
				2 * sizeof(u64), 3 * sizeof(u64));
	return ret;
}
1489
1490/**
1491 * tm_spr_set - set the TM related SPR registers
1492 * @target:	The target task.
1493 * @regset:	The user regset structure.
1494 * @pos:	The buffer position.
1495 * @count:	Number of bytes to copy.
1496 * @kbuf:	Kernel buffer to copy into.
1497 * @ubuf:	User buffer to copy from.
1498 *
1499 * This function sets transactional memory related SPR registers.
1500 * The userspace interface buffer layout is as follows.
1501 *
1502 * struct {
1503 *	u64		tm_tfhar;
1504 *	u64		tm_texasr;
1505 *	u64		tm_tfiar;
1506 * };
1507 */
static int tm_spr_set(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      const void *kbuf, const void __user *ubuf)
{
	int ret;

	/* Build tests */
	/* The fixed offsets used below assume this thread_struct layout. */
	BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
	BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
	BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	/* Flush the states */
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);
	flush_tmregs_to_thread(target);

	/* TFHAR register */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				&target->thread.tm_tfhar, 0, sizeof(u64));

	/* TEXASR register */
	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				&target->thread.tm_texasr, sizeof(u64),
				2 * sizeof(u64));

	/* TFIAR register */
	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				&target->thread.tm_tfiar,
				 2 * sizeof(u64), 3 * sizeof(u64));
	return ret;
}
1545
1546static int tm_tar_active(struct task_struct *target,
1547			 const struct user_regset *regset)
1548{
1549	if (!cpu_has_feature(CPU_FTR_TM))
1550		return -ENODEV;
1551
1552	if (MSR_TM_ACTIVE(target->thread.regs->msr))
1553		return regset->n;
1554
1555	return 0;
1556}
1557
1558static int tm_tar_get(struct task_struct *target,
1559		      const struct user_regset *regset,
1560		      unsigned int pos, unsigned int count,
1561		      void *kbuf, void __user *ubuf)
1562{
1563	int ret;
1564
1565	if (!cpu_has_feature(CPU_FTR_TM))
1566		return -ENODEV;
1567
1568	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1569		return -ENODATA;
1570
1571	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1572				&target->thread.tm_tar, 0, sizeof(u64));
1573	return ret;
1574}
1575
1576static int tm_tar_set(struct task_struct *target,
1577		      const struct user_regset *regset,
1578		      unsigned int pos, unsigned int count,
1579		      const void *kbuf, const void __user *ubuf)
1580{
1581	int ret;
1582
1583	if (!cpu_has_feature(CPU_FTR_TM))
1584		return -ENODEV;
1585
1586	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1587		return -ENODATA;
1588
1589	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1590				&target->thread.tm_tar, 0, sizeof(u64));
1591	return ret;
1592}
1593
1594static int tm_ppr_active(struct task_struct *target,
1595			 const struct user_regset *regset)
1596{
1597	if (!cpu_has_feature(CPU_FTR_TM))
1598		return -ENODEV;
1599
1600	if (MSR_TM_ACTIVE(target->thread.regs->msr))
1601		return regset->n;
1602
1603	return 0;
1604}
1605
1606
1607static int tm_ppr_get(struct task_struct *target,
1608		      const struct user_regset *regset,
1609		      unsigned int pos, unsigned int count,
1610		      void *kbuf, void __user *ubuf)
1611{
1612	int ret;
1613
1614	if (!cpu_has_feature(CPU_FTR_TM))
1615		return -ENODEV;
1616
1617	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1618		return -ENODATA;
1619
1620	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1621				&target->thread.tm_ppr, 0, sizeof(u64));
1622	return ret;
1623}
1624
1625static int tm_ppr_set(struct task_struct *target,
1626		      const struct user_regset *regset,
1627		      unsigned int pos, unsigned int count,
1628		      const void *kbuf, const void __user *ubuf)
1629{
1630	int ret;
1631
1632	if (!cpu_has_feature(CPU_FTR_TM))
1633		return -ENODEV;
1634
1635	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1636		return -ENODATA;
1637
1638	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1639				&target->thread.tm_ppr, 0, sizeof(u64));
1640	return ret;
1641}
1642
1643static int tm_dscr_active(struct task_struct *target,
1644			 const struct user_regset *regset)
1645{
1646	if (!cpu_has_feature(CPU_FTR_TM))
1647		return -ENODEV;
1648
1649	if (MSR_TM_ACTIVE(target->thread.regs->msr))
1650		return regset->n;
1651
1652	return 0;
1653}
1654
1655static int tm_dscr_get(struct task_struct *target,
1656		      const struct user_regset *regset,
1657		      unsigned int pos, unsigned int count,
1658		      void *kbuf, void __user *ubuf)
1659{
1660	int ret;
1661
1662	if (!cpu_has_feature(CPU_FTR_TM))
1663		return -ENODEV;
1664
1665	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1666		return -ENODATA;
1667
1668	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1669				&target->thread.tm_dscr, 0, sizeof(u64));
1670	return ret;
1671}
1672
1673static int tm_dscr_set(struct task_struct *target,
1674		      const struct user_regset *regset,
1675		      unsigned int pos, unsigned int count,
1676		      const void *kbuf, const void __user *ubuf)
1677{
1678	int ret;
1679
1680	if (!cpu_has_feature(CPU_FTR_TM))
1681		return -ENODEV;
1682
1683	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1684		return -ENODATA;
1685
1686	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1687				&target->thread.tm_dscr, 0, sizeof(u64));
1688	return ret;
1689}
1690#endif	/* CONFIG_PPC_TRANSACTIONAL_MEM */
1691
1692#ifdef CONFIG_PPC64
1693static int ppr_get(struct task_struct *target,
1694		      const struct user_regset *regset,
1695		      unsigned int pos, unsigned int count,
1696		      void *kbuf, void __user *ubuf)
1697{
1698	int ret;
1699
1700	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1701				&target->thread.ppr, 0, sizeof(u64));
1702	return ret;
1703}
1704
1705static int ppr_set(struct task_struct *target,
1706		      const struct user_regset *regset,
1707		      unsigned int pos, unsigned int count,
1708		      const void *kbuf, const void __user *ubuf)
1709{
1710	int ret;
1711
1712	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1713				&target->thread.ppr, 0, sizeof(u64));
1714	return ret;
1715}
1716
1717static int dscr_get(struct task_struct *target,
1718		      const struct user_regset *regset,
1719		      unsigned int pos, unsigned int count,
1720		      void *kbuf, void __user *ubuf)
1721{
1722	int ret;
1723
1724	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1725				&target->thread.dscr, 0, sizeof(u64));
1726	return ret;
1727}
1728static int dscr_set(struct task_struct *target,
1729		      const struct user_regset *regset,
1730		      unsigned int pos, unsigned int count,
1731		      const void *kbuf, const void __user *ubuf)
1732{
1733	int ret;
1734
1735	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1736				&target->thread.dscr, 0, sizeof(u64));
1737	return ret;
1738}
1739#endif
1740#ifdef CONFIG_PPC_BOOK3S_64
1741static int tar_get(struct task_struct *target,
1742		      const struct user_regset *regset,
1743		      unsigned int pos, unsigned int count,
1744		      void *kbuf, void __user *ubuf)
1745{
1746	int ret;
1747
1748	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1749				&target->thread.tar, 0, sizeof(u64));
1750	return ret;
1751}
1752static int tar_set(struct task_struct *target,
1753		      const struct user_regset *regset,
1754		      unsigned int pos, unsigned int count,
1755		      const void *kbuf, const void __user *ubuf)
1756{
1757	int ret;
1758
1759	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1760				&target->thread.tar, 0, sizeof(u64));
1761	return ret;
1762}
1763
1764static int ebb_active(struct task_struct *target,
1765			 const struct user_regset *regset)
1766{
1767	if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1768		return -ENODEV;
1769
1770	if (target->thread.used_ebb)
1771		return regset->n;
1772
1773	return 0;
1774}
1775
1776static int ebb_get(struct task_struct *target,
1777		      const struct user_regset *regset,
1778		      unsigned int pos, unsigned int count,
1779		      void *kbuf, void __user *ubuf)
1780{
1781	/* Build tests */
1782	BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
1783	BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
1784
1785	if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1786		return -ENODEV;
1787
1788	if (!target->thread.used_ebb)
1789		return -ENODATA;
1790
1791	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1792			&target->thread.ebbrr, 0, 3 * sizeof(unsigned long));
1793}
1794
static int ebb_set(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      const void *kbuf, const void __user *ubuf)
{
	int ret = 0;

	/* Build tests */
	BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
	BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));

	if (!cpu_has_feature(CPU_FTR_ARCH_207S))
		return -ENODEV;

	/*
	 * NOTE(review): this condition is the opposite of ebb_get()'s
	 * (!used_ebb there) — as written, writes are refused once the
	 * task has used EBB. Confirm whether that is the intent.
	 */
	if (target->thread.used_ebb)
		return -ENODATA;

	/* ebbrr, ebbhr and bescr are copied in one u64 slot at a time. */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
			&target->thread.ebbrr, 0, sizeof(unsigned long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
			&target->thread.ebbhr, sizeof(unsigned long),
			2 * sizeof(unsigned long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
			&target->thread.bescr,
			2 * sizeof(unsigned long), 3 * sizeof(unsigned long));

	return ret;
}
1827static int pmu_active(struct task_struct *target,
1828			 const struct user_regset *regset)
1829{
1830	if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1831		return -ENODEV;
1832
1833	return regset->n;
1834}
1835
static int pmu_get(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      void *kbuf, void __user *ubuf)
{
	/* Build tests */
	/* The single 5-register copyout assumes this exact field layout. */
	BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
	BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
	BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
	BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));

	if (!cpu_has_feature(CPU_FTR_ARCH_207S))
		return -ENODEV;

	/* siar, sdar, sier, mmcr2, mmcr0 in one contiguous copy. */
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
			&target->thread.siar, 0,
			5 * sizeof(unsigned long));
}
1854
static int pmu_set(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      const void *kbuf, const void __user *ubuf)
{
	int ret = 0;

	/* Build tests */
	/* The fixed slot offsets below assume this exact field layout. */
	BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
	BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
	BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
	BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));

	if (!cpu_has_feature(CPU_FTR_ARCH_207S))
		return -ENODEV;

	/* Copy each SPR in individually: siar, sdar, sier, mmcr2, mmcr0. */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
			&target->thread.siar, 0,
			sizeof(unsigned long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
			&target->thread.sdar, sizeof(unsigned long),
			2 * sizeof(unsigned long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
			&target->thread.sier, 2 * sizeof(unsigned long),
			3 * sizeof(unsigned long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
			&target->thread.mmcr2, 3 * sizeof(unsigned long),
			4 * sizeof(unsigned long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
			&target->thread.mmcr0, 4 * sizeof(unsigned long),
			5 * sizeof(unsigned long));
	return ret;
}
1896#endif
1897/*
1898 * These are our native regset flavors.
1899 */
1900enum powerpc_regset {
1901	REGSET_GPR,
1902	REGSET_FPR,
1903#ifdef CONFIG_ALTIVEC
1904	REGSET_VMX,
1905#endif
1906#ifdef CONFIG_VSX
1907	REGSET_VSX,
1908#endif
1909#ifdef CONFIG_SPE
1910	REGSET_SPE,
1911#endif
1912#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1913	REGSET_TM_CGPR,		/* TM checkpointed GPR registers */
1914	REGSET_TM_CFPR,		/* TM checkpointed FPR registers */
1915	REGSET_TM_CVMX,		/* TM checkpointed VMX registers */
1916	REGSET_TM_CVSX,		/* TM checkpointed VSX registers */
1917	REGSET_TM_SPR,		/* TM specific SPR registers */
1918	REGSET_TM_CTAR,		/* TM checkpointed TAR register */
1919	REGSET_TM_CPPR,		/* TM checkpointed PPR register */
1920	REGSET_TM_CDSCR,	/* TM checkpointed DSCR register */
1921#endif
1922#ifdef CONFIG_PPC64
1923	REGSET_PPR,		/* PPR register */
1924	REGSET_DSCR,		/* DSCR register */
1925#endif
1926#ifdef CONFIG_PPC_BOOK3S_64
1927	REGSET_TAR,		/* TAR register */
1928	REGSET_EBB,		/* EBB registers */
1929	REGSET_PMR,		/* Performance Monitor Registers */
1930#endif
1931};
1932
/*
 * Native regset table; indexed by enum powerpc_regset. Each entry wires
 * a core-dump note type to its size/alignment and get/set (and optional
 * active) callbacks defined above.
 */
static const struct user_regset native_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
		.size = sizeof(long), .align = sizeof(long),
		.get = gpr_get, .set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
		.size = sizeof(double), .align = sizeof(double),
		.get = fpr_get, .set = fpr_set
	},
#ifdef CONFIG_ALTIVEC
	[REGSET_VMX] = {
		.core_note_type = NT_PPC_VMX, .n = 34,
		.size = sizeof(vector128), .align = sizeof(vector128),
		.active = vr_active, .get = vr_get, .set = vr_set
	},
#endif
#ifdef CONFIG_VSX
	[REGSET_VSX] = {
		.core_note_type = NT_PPC_VSX, .n = 32,
		.size = sizeof(double), .align = sizeof(double),
		.active = vsr_active, .get = vsr_get, .set = vsr_set
	},
#endif
#ifdef CONFIG_SPE
	[REGSET_SPE] = {
		.core_note_type = NT_PPC_SPE, .n = 35,
		.size = sizeof(u32), .align = sizeof(u32),
		.active = evr_active, .get = evr_get, .set = evr_set
	},
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	[REGSET_TM_CGPR] = {
		.core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
		.size = sizeof(long), .align = sizeof(long),
		.active = tm_cgpr_active, .get = tm_cgpr_get, .set = tm_cgpr_set
	},
	[REGSET_TM_CFPR] = {
		.core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
		.size = sizeof(double), .align = sizeof(double),
		.active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
	},
	[REGSET_TM_CVMX] = {
		.core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
		.size = sizeof(vector128), .align = sizeof(vector128),
		.active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
	},
	[REGSET_TM_CVSX] = {
		.core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
		.size = sizeof(double), .align = sizeof(double),
		.active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
	},
	[REGSET_TM_SPR] = {
		.core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
	},
	[REGSET_TM_CTAR] = {
		.core_note_type = NT_PPC_TM_CTAR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
	},
	[REGSET_TM_CPPR] = {
		.core_note_type = NT_PPC_TM_CPPR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
	},
	[REGSET_TM_CDSCR] = {
		.core_note_type = NT_PPC_TM_CDSCR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
	},
#endif
#ifdef CONFIG_PPC64
	[REGSET_PPR] = {
		.core_note_type = NT_PPC_PPR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.get = ppr_get, .set = ppr_set
	},
	[REGSET_DSCR] = {
		.core_note_type = NT_PPC_DSCR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.get = dscr_get, .set = dscr_set
	},
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	[REGSET_TAR] = {
		.core_note_type = NT_PPC_TAR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.get = tar_get, .set = tar_set
	},
	[REGSET_EBB] = {
		.core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = ebb_active, .get = ebb_get, .set = ebb_set
	},
	[REGSET_PMR] = {
		.core_note_type = NT_PPC_PMU, .n = ELF_NPMU,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = pmu_active, .get = pmu_get, .set = pmu_set
	},
#endif
};
2037
/* The native-ABI regset view built from the table above. */
static const struct user_regset_view user_ppc_native_view = {
	.name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
	.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
};
2042
2043#ifdef CONFIG_PPC64
2044#include <linux/compat.h>
2045
/*
 * Copy GPRs out in 32-bit (compat) width. When @tm_active is set, the
 * transaction-checkpointed GPRs are read instead of the live ones.
 * Each 64-bit register is truncated to a compat_ulong_t; MSR is read
 * via get_user_msr() rather than from the register array, and bytes
 * past PT_REGS_COUNT are zero-filled.
 */
static int gpr32_get_common(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
			    void *kbuf, void __user *ubuf, bool tm_active)
{
	const unsigned long *regs = &target->thread.regs->gpr[0];
	const unsigned long *ckpt_regs;
	compat_ulong_t *k = kbuf;
	compat_ulong_t __user *u = ubuf;
	compat_ulong_t reg;
	int i;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	ckpt_regs = &target->thread.ckpt_regs.gpr[0];
#endif
	if (tm_active) {
		/*
		 * NOTE(review): ckpt_regs is only initialized under
		 * CONFIG_PPC_TRANSACTIONAL_MEM; callers presumably pass
		 * tm_active only in that configuration — confirm.
		 */
		regs = ckpt_regs;
	} else {
		if (target->thread.regs == NULL)
			return -EIO;

		if (!FULL_REGS(target->thread.regs)) {
			/*
			 * We have a partial register set.
			 * Fill 14-31 with bogus values.
			 */
			for (i = 14; i < 32; i++)
				target->thread.regs->gpr[i] = NV_REG_POISON;
		}
	}

	/* Work in register-sized units from here on. */
	pos /= sizeof(reg);
	count /= sizeof(reg);

	/* Registers before MSR, narrowed to 32 bits. */
	if (kbuf)
		for (; count > 0 && pos < PT_MSR; --count)
			*k++ = regs[pos++];
	else
		for (; count > 0 && pos < PT_MSR; --count)
			if (__put_user((compat_ulong_t) regs[pos++], u++))
				return -EFAULT;

	/* MSR comes from get_user_msr(), not the raw register array. */
	if (count > 0 && pos == PT_MSR) {
		reg = get_user_msr(target);
		if (kbuf)
			*k++ = reg;
		else if (__put_user(reg, u++))
			return -EFAULT;
		++pos;
		--count;
	}

	/* Remaining registers up to PT_REGS_COUNT. */
	if (kbuf)
		for (; count > 0 && pos < PT_REGS_COUNT; --count)
			*k++ = regs[pos++];
	else
		for (; count > 0 && pos < PT_REGS_COUNT; --count)
			if (__put_user((compat_ulong_t) regs[pos++], u++))
				return -EFAULT;

	/* Convert back to byte units and zero-fill the tail. */
	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					PT_REGS_COUNT * sizeof(reg), -1);
}
2113
2114static int gpr32_set_common(struct task_struct *target,
2115		     const struct user_regset *regset,
2116		     unsigned int pos, unsigned int count,
2117		     const void *kbuf, const void __user *ubuf, bool tm_active)
2118{
2119	unsigned long *regs = &target->thread.regs->gpr[0];
2120	unsigned long *ckpt_regs;
2121	const compat_ulong_t *k = kbuf;
2122	const compat_ulong_t __user *u = ubuf;
2123	compat_ulong_t reg;
2124
2125#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2126	ckpt_regs = &target->thread.ckpt_regs.gpr[0];
2127#endif
2128
2129	if (tm_active) {
2130		regs = ckpt_regs;
2131	} else {
2132		regs = &target->thread.regs->gpr[0];
2133
2134		if (target->thread.regs == NULL)
2135			return -EIO;
2136
2137		CHECK_FULL_REGS(target->thread.regs);
2138	}
2139
2140	pos /= sizeof(reg);
2141	count /= sizeof(reg);
2142
2143	if (kbuf)
2144		for (; count > 0 && pos < PT_MSR; --count)
2145			regs[pos++] = *k++;
2146	else
2147		for (; count > 0 && pos < PT_MSR; --count) {
2148			if (__get_user(reg, u++))
2149				return -EFAULT;
2150			regs[pos++] = reg;
2151		}
2152
2153
2154	if (count > 0 && pos == PT_MSR) {
2155		if (kbuf)
2156			reg = *k++;
2157		else if (__get_user(reg, u++))
2158			return -EFAULT;
2159		set_user_msr(target, reg);
2160		++pos;
2161		--count;
2162	}
2163
2164	if (kbuf) {
2165		for (; count > 0 && pos <= PT_MAX_PUT_REG; --count)
2166			regs[pos++] = *k++;
2167		for (; count > 0 && pos < PT_TRAP; --count, ++pos)
2168			++k;
2169	} else {
2170		for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) {
2171			if (__get_user(reg, u++))
2172				return -EFAULT;
2173			regs[pos++] = reg;
2174		}
2175		for (; count > 0 && pos < PT_TRAP; --count, ++pos)
2176			if (__get_user(reg, u++))
2177				return -EFAULT;
2178	}
2179
2180	if (count > 0 && pos == PT_TRAP) {
2181		if (kbuf)
2182			reg = *k++;
2183		else if (__get_user(reg, u++))
2184			return -EFAULT;
2185		set_user_trap(target, reg);
2186		++pos;
2187		--count;
2188	}
2189
2190	kbuf = k;
2191	ubuf = u;
2192	pos *= sizeof(reg);
2193	count *= sizeof(reg);
2194	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
2195					 (PT_TRAP + 1) * sizeof(reg), -1);
2196}
2197
2198#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2199static int tm_cgpr32_get(struct task_struct *target,
2200		     const struct user_regset *regset,
2201		     unsigned int pos, unsigned int count,
2202		     void *kbuf, void __user *ubuf)
2203{
2204	return gpr32_get_common(target, regset, pos, count, kbuf, ubuf, 1);
2205}
2206
2207static int tm_cgpr32_set(struct task_struct *target,
2208		     const struct user_regset *regset,
2209		     unsigned int pos, unsigned int count,
2210		     const void *kbuf, const void __user *ubuf)
2211{
2212	return gpr32_set_common(target, regset, pos, count, kbuf, ubuf, 1);
2213}
2214#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
2215
2216static int gpr32_get(struct task_struct *target,
2217		     const struct user_regset *regset,
2218		     unsigned int pos, unsigned int count,
2219		     void *kbuf, void __user *ubuf)
2220{
2221	return gpr32_get_common(target, regset, pos, count, kbuf, ubuf, 0);
2222}
2223
2224static int gpr32_set(struct task_struct *target,
2225		     const struct user_regset *regset,
2226		     unsigned int pos, unsigned int count,
2227		     const void *kbuf, const void __user *ubuf)
2228{
2229	return gpr32_set_common(target, regset, pos, count, kbuf, ubuf, 0);
2230}
2231
2232/*
2233 * These are the regset flavors matching the CONFIG_PPC32 native set.
2234 */
static const struct user_regset compat_regsets[] = {
	/* Core GPR/FPR sets, always present. */
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
		.size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
		.get = gpr32_get, .set = gpr32_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
		.size = sizeof(double), .align = sizeof(double),
		.get = fpr_get, .set = fpr_set
	},
#ifdef CONFIG_ALTIVEC
	/* AltiVec vector state (32 VRs + VSCR + VRSAVE). */
	[REGSET_VMX] = {
		.core_note_type = NT_PPC_VMX, .n = 34,
		.size = sizeof(vector128), .align = sizeof(vector128),
		.active = vr_active, .get = vr_get, .set = vr_set
	},
#endif
#ifdef CONFIG_SPE
	/* SPE high-word registers, ACC and SPEFSCR. */
	[REGSET_SPE] = {
		.core_note_type = NT_PPC_SPE, .n = 35,
		.size = sizeof(u32), .align = sizeof(u32),
		.active = evr_active, .get = evr_get, .set = evr_set
	},
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* Checkpointed (pre-transaction) state and TM SPRs. */
	[REGSET_TM_CGPR] = {
		.core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
		.size = sizeof(long), .align = sizeof(long),
		.active = tm_cgpr_active,
		.get = tm_cgpr32_get, .set = tm_cgpr32_set
	},
	[REGSET_TM_CFPR] = {
		.core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
		.size = sizeof(double), .align = sizeof(double),
		.active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
	},
	[REGSET_TM_CVMX] = {
		.core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
		.size = sizeof(vector128), .align = sizeof(vector128),
		.active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
	},
	[REGSET_TM_CVSX] = {
		.core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
		.size = sizeof(double), .align = sizeof(double),
		.active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
	},
	[REGSET_TM_SPR] = {
		.core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
	},
	[REGSET_TM_CTAR] = {
		.core_note_type = NT_PPC_TM_CTAR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
	},
	[REGSET_TM_CPPR] = {
		.core_note_type = NT_PPC_TM_CPPR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
	},
	[REGSET_TM_CDSCR] = {
		.core_note_type = NT_PPC_TM_CDSCR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
	},
#endif
#ifdef CONFIG_PPC64
	/* 64-bit-only SPRs (still exposed to 32-bit tracers). */
	[REGSET_PPR] = {
		.core_note_type = NT_PPC_PPR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.get = ppr_get, .set = ppr_set
	},
	[REGSET_DSCR] = {
		.core_note_type = NT_PPC_DSCR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.get = dscr_get, .set = dscr_set
	},
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	/* Book3S-64 extras: TAR and the event-based-branch registers. */
	[REGSET_TAR] = {
		.core_note_type = NT_PPC_TAR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.get = tar_get, .set = tar_set
	},
	[REGSET_EBB] = {
		.core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = ebb_active, .get = ebb_get, .set = ebb_set
	},
#endif
};
2328
/*
 * Regset view served to 32-bit tasks running on a 64-bit kernel
 * (selected in task_user_regset_view() via TIF_32BIT).
 */
static const struct user_regset_view user_ppc_compat_view = {
	.name = "ppc", .e_machine = EM_PPC, .ei_osabi = ELF_OSABI,
	.regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
};
2333#endif	/* CONFIG_PPC64 */
2334
2335const struct user_regset_view *task_user_regset_view(struct task_struct *task)
2336{
2337#ifdef CONFIG_PPC64
2338	if (test_tsk_thread_flag(task, TIF_32BIT))
2339		return &user_ppc_compat_view;
2340#endif
2341	return &user_ppc_native_view;
2342}
2343
2344
2345void user_enable_single_step(struct task_struct *task)
2346{
2347	struct pt_regs *regs = task->thread.regs;
2348
2349	if (regs != NULL) {
2350#ifdef CONFIG_PPC_ADV_DEBUG_REGS
2351		task->thread.debug.dbcr0 &= ~DBCR0_BT;
2352		task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
2353		regs->msr |= MSR_DE;
2354#else
2355		regs->msr &= ~MSR_BE;
2356		regs->msr |= MSR_SE;
2357#endif
2358	}
2359	set_tsk_thread_flag(task, TIF_SINGLESTEP);
2360}
2361
2362void user_enable_block_step(struct task_struct *task)
2363{
2364	struct pt_regs *regs = task->thread.regs;
2365
2366	if (regs != NULL) {
2367#ifdef CONFIG_PPC_ADV_DEBUG_REGS
2368		task->thread.debug.dbcr0 &= ~DBCR0_IC;
2369		task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT;
2370		regs->msr |= MSR_DE;
2371#else
2372		regs->msr &= ~MSR_SE;
2373		regs->msr |= MSR_BE;
2374#endif
2375	}
2376	set_tsk_thread_flag(task, TIF_SINGLESTEP);
2377}
2378
/*
 * Undo user_enable_single_step()/user_enable_block_step(): clear the
 * stepping bits and the TIF_SINGLESTEP flag.  On BookE-style debug
 * hardware, the debug mode itself (IDM/MSR[DE]) is only dropped once no
 * other debug events remain active.
 */
void user_disable_single_step(struct task_struct *task)
{
	struct pt_regs *regs = task->thread.regs;

	if (regs != NULL) {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		/*
		 * The logic to disable single stepping should be as
		 * simple as turning off the Instruction Complete flag.
		 * And, after doing so, if all debug flags are off, turn
		 * off DBCR0(IDM) and MSR(DE) .... Torez
		 */
		task->thread.debug.dbcr0 &= ~(DBCR0_IC|DBCR0_BT);
		/*
		 * Test to see if any of the DBCR_ACTIVE_EVENTS bits are set.
		 */
		if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
					task->thread.debug.dbcr1)) {
			/*
			 * All debug events were off.....
			 */
			task->thread.debug.dbcr0 &= ~DBCR0_IDM;
			regs->msr &= ~MSR_DE;
		}
#else
		regs->msr &= ~(MSR_SE | MSR_BE);
#endif
	}
	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
}
2409
2410#ifdef CONFIG_HAVE_HW_BREAKPOINT
2411void ptrace_triggered(struct perf_event *bp,
2412		      struct perf_sample_data *data, struct pt_regs *regs)
2413{
2414	struct perf_event_attr attr;
2415
2416	/*
2417	 * Disable the breakpoint request here since ptrace has defined a
2418	 * one-shot behaviour for breakpoint exceptions in PPC64.
2419	 * The SIGTRAP signal is generated automatically for us in do_dabr().
2420	 * We don't have to do anything about that here
2421	 */
2422	attr = bp->attr;
2423	attr.disabled = true;
2424	modify_user_hw_breakpoint(bp, &attr);
2425}
2426#endif /* CONFIG_HAVE_HW_BREAKPOINT */
2427
/*
 * Legacy PTRACE_SET_DEBUGREG handler: install (or, with data == 0,
 * clear) the single supported data breakpoint for @task.  @addr must be
 * 0; @data carries the watch address with mode flags in its low bits.
 * Returns 0 on success, -EINVAL/-EIO on bad arguments, or an error from
 * the perf hw-breakpoint layer.
 */
static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
			       unsigned long data)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	int ret;
	struct thread_struct *thread = &(task->thread);
	struct perf_event *bp;
	struct perf_event_attr attr;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#ifndef CONFIG_PPC_ADV_DEBUG_REGS
	struct arch_hw_breakpoint hw_brk;
#endif

	/* For ppc64 we support one DABR and no IABR's at the moment.
	 *  For embedded processors we support one DAC and no IAC's at the
	 *  moment.
	 */
	if (addr > 0)
		return -EINVAL;

	/* The bottom 3 bits in dabr are flags */
	if ((data & ~0x7UL) >= TASK_SIZE)
		return -EIO;

#ifndef CONFIG_PPC_ADV_DEBUG_REGS
	/* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
	 *  It was assumed, on previous implementations, that 3 bits were
	 *  passed together with the data address, fitting the design of the
	 *  DABR register, as follows:
	 *
	 *  bit 0: Read flag
	 *  bit 1: Write flag
	 *  bit 2: Breakpoint translation
	 *
	 *  Thus, we use them here as so.
	 */

	/* Ensure breakpoint translation bit is set */
	if (data && !(data & HW_BRK_TYPE_TRANSLATE))
		return -EIO;
	hw_brk.address = data & (~HW_BRK_TYPE_DABR);
	hw_brk.type = (data & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL;
	hw_brk.len = 8;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	bp = thread->ptrace_bps[0];
	/* data == 0 or no R/W flags: clear any existing breakpoint. */
	if ((!data) || !(hw_brk.type & HW_BRK_TYPE_RDWR)) {
		if (bp) {
			unregister_hw_breakpoint(bp);
			thread->ptrace_bps[0] = NULL;
		}
		return 0;
	}
	/* A breakpoint already exists: update it in place. */
	if (bp) {
		attr = bp->attr;
		attr.bp_addr = hw_brk.address;
		arch_bp_generic_fields(hw_brk.type, &attr.bp_type);

		/* Enable breakpoint */
		attr.disabled = false;

		ret =  modify_user_hw_breakpoint(bp, &attr);
		if (ret) {
			return ret;
		}
		thread->ptrace_bps[0] = bp;
		thread->hw_brk = hw_brk;
		return 0;
	}

	/* Create a new breakpoint request if one doesn't exist already */
	hw_breakpoint_init(&attr);
	attr.bp_addr = hw_brk.address;
	arch_bp_generic_fields(hw_brk.type,
			       &attr.bp_type);

	thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
					       ptrace_triggered, NULL, task);
	if (IS_ERR(bp)) {
		thread->ptrace_bps[0] = NULL;
		return PTR_ERR(bp);
	}

#endif /* CONFIG_HAVE_HW_BREAKPOINT */
	task->thread.hw_brk = hw_brk;
#else /* CONFIG_PPC_ADV_DEBUG_REGS */
	/* As described above, it was assumed 3 bits were passed with the data
	 *  address, but we will assume only the mode bits will be passed
	 *  as to not cause alignment restrictions for DAC-based processors.
	 */

	/* DAC's hold the whole address without any mode flags */
	task->thread.debug.dac1 = data & ~0x3UL;

	if (task->thread.debug.dac1 == 0) {
		/* Clearing the watchpoint; drop debug mode if nothing else
		 * is using it. */
		dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
		if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
					task->thread.debug.dbcr1)) {
			task->thread.regs->msr &= ~MSR_DE;
			task->thread.debug.dbcr0 &= ~DBCR0_IDM;
		}
		return 0;
	}

	/* Read or Write bits must be set */

	if (!(data & 0x3UL))
		return -EINVAL;

	/* Set the Internal Debugging flag (IDM bit 1) for the DBCR0
	   register */
	task->thread.debug.dbcr0 |= DBCR0_IDM;

	/* Check for write and read flags and set DBCR0
	   accordingly */
	dbcr_dac(task) &= ~(DBCR_DAC1R|DBCR_DAC1W);
	if (data & 0x1UL)
		dbcr_dac(task) |= DBCR_DAC1R;
	if (data & 0x2UL)
		dbcr_dac(task) |= DBCR_DAC1W;
	task->thread.regs->msr |= MSR_DE;
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
	return 0;
}
2551
2552/*
2553 * Called by kernel/ptrace.c when detaching..
2554 *
2555 * Make sure single step bits etc are not set.
2556 */
void ptrace_disable(struct task_struct *child)
{
	/* Make sure the single step bit is not set (also clears
	 * TIF_SINGLESTEP and, where applicable, the debug-event bits). */
	user_disable_single_step(child);
}
2562
2563#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Install an instruction breakpoint in a free IAC slot (or slot pair,
 * for range breakpoints).  Returns the 1-based slot number on success,
 * -EIO for a bad address, -ENOSPC when no suitable slot is free.
 */
static long set_instruction_bp(struct task_struct *child,
			      struct ppc_hw_breakpoint *bp_info)
{
	int slot;
	int slot1_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC1) != 0);
	int slot2_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC2) != 0);
	int slot3_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC3) != 0);
	int slot4_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC4) != 0);

	/* A pair operating in range mode occupies both of its slots. */
	if (dbcr_iac_range(child) & DBCR_IAC12MODE)
		slot2_in_use = 1;
	if (dbcr_iac_range(child) & DBCR_IAC34MODE)
		slot4_in_use = 1;

	if (bp_info->addr >= TASK_SIZE)
		return -EIO;

	if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {

		/* Make sure range is valid. */
		if (bp_info->addr2 >= TASK_SIZE)
			return -EIO;

		/* We need a pair of IAC registers */
		if ((!slot1_in_use) && (!slot2_in_use)) {
			slot = 1;
			child->thread.debug.iac1 = bp_info->addr;
			child->thread.debug.iac2 = bp_info->addr2;
			child->thread.debug.dbcr0 |= DBCR0_IAC1;
			if (bp_info->addr_mode ==
					PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
				dbcr_iac_range(child) |= DBCR_IAC12X;
			else
				dbcr_iac_range(child) |= DBCR_IAC12I;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
		} else if ((!slot3_in_use) && (!slot4_in_use)) {
			slot = 3;
			child->thread.debug.iac3 = bp_info->addr;
			child->thread.debug.iac4 = bp_info->addr2;
			child->thread.debug.dbcr0 |= DBCR0_IAC3;
			if (bp_info->addr_mode ==
					PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
				dbcr_iac_range(child) |= DBCR_IAC34X;
			else
				dbcr_iac_range(child) |= DBCR_IAC34I;
#endif
		} else
			return -ENOSPC;
	} else {
		/* We only need one.  If possible leave a pair free in
		 * case a range is needed later
		 */
		if (!slot1_in_use) {
			/*
			 * Don't use iac1 if iac1-iac2 are free and either
			 * iac3 or iac4 (but not both) are free
			 */
			if (slot2_in_use || (slot3_in_use == slot4_in_use)) {
				slot = 1;
				child->thread.debug.iac1 = bp_info->addr;
				child->thread.debug.dbcr0 |= DBCR0_IAC1;
				goto out;
			}
		}
		if (!slot2_in_use) {
			slot = 2;
			child->thread.debug.iac2 = bp_info->addr;
			child->thread.debug.dbcr0 |= DBCR0_IAC2;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
		} else if (!slot3_in_use) {
			slot = 3;
			child->thread.debug.iac3 = bp_info->addr;
			child->thread.debug.dbcr0 |= DBCR0_IAC3;
		} else if (!slot4_in_use) {
			slot = 4;
			child->thread.debug.iac4 = bp_info->addr;
			child->thread.debug.dbcr0 |= DBCR0_IAC4;
#endif
		} else
			return -ENOSPC;
	}
out:
	/* Enable internal debug mode so the event actually fires. */
	child->thread.debug.dbcr0 |= DBCR0_IDM;
	child->thread.regs->msr |= MSR_DE;

	return slot;
}
2651
/*
 * Remove the instruction breakpoint in IAC @slot (1-based).  Deleting
 * the first slot of a range pair clears both; deleting the second slot
 * of an active range is rejected with -EINVAL.  Returns 0 on success,
 * -ENOENT if the slot is not in use.
 */
static int del_instruction_bp(struct task_struct *child, int slot)
{
	switch (slot) {
	case 1:
		if ((child->thread.debug.dbcr0 & DBCR0_IAC1) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC12MODE) {
			/* address range - clear slots 1 & 2 */
			child->thread.debug.iac2 = 0;
			dbcr_iac_range(child) &= ~DBCR_IAC12MODE;
		}
		child->thread.debug.iac1 = 0;
		child->thread.debug.dbcr0 &= ~DBCR0_IAC1;
		break;
	case 2:
		if ((child->thread.debug.dbcr0 & DBCR0_IAC2) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC12MODE)
			/* used in a range */
			return -EINVAL;
		child->thread.debug.iac2 = 0;
		child->thread.debug.dbcr0 &= ~DBCR0_IAC2;
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case 3:
		if ((child->thread.debug.dbcr0 & DBCR0_IAC3) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC34MODE) {
			/* address range - clear slots 3 & 4 */
			child->thread.debug.iac4 = 0;
			dbcr_iac_range(child) &= ~DBCR_IAC34MODE;
		}
		child->thread.debug.iac3 = 0;
		child->thread.debug.dbcr0 &= ~DBCR0_IAC3;
		break;
	case 4:
		if ((child->thread.debug.dbcr0 & DBCR0_IAC4) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC34MODE)
			/* Used in a range */
			return -EINVAL;
		child->thread.debug.iac4 = 0;
		child->thread.debug.dbcr0 &= ~DBCR0_IAC4;
		break;
#endif
	default:
		return -EINVAL;
	}
	return 0;
}
2706
/*
 * Install a data watchpoint in a free DAC slot, optionally with a data
 * value compare (DVC) condition.  Returns slot + 4 on success (5 or 6,
 * keeping the return space disjoint from instruction-breakpoint slots
 * 1-4), -EINVAL for a byte-enable without a condition mode, -EIO for a
 * bad address, -ENOSPC when both slots are busy.
 */
static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
{
	int byte_enable =
		(bp_info->condition_mode >> PPC_BREAKPOINT_CONDITION_BE_SHIFT)
		& 0xf;
	int condition_mode =
		bp_info->condition_mode & PPC_BREAKPOINT_CONDITION_MODE;
	int slot;

	if (byte_enable && (condition_mode == 0))
		return -EINVAL;

	if (bp_info->addr >= TASK_SIZE)
		return -EIO;

	if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) {
		slot = 1;
		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
			dbcr_dac(child) |= DBCR_DAC1R;
		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
			dbcr_dac(child) |= DBCR_DAC1W;
		child->thread.debug.dac1 = (unsigned long)bp_info->addr;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
		if (byte_enable) {
			child->thread.debug.dvc1 =
				(unsigned long)bp_info->condition_value;
			child->thread.debug.dbcr2 |=
				((byte_enable << DBCR2_DVC1BE_SHIFT) |
				 (condition_mode << DBCR2_DVC1M_SHIFT));
		}
#endif
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
	} else if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
		/* Both dac1 and dac2 are part of a range */
		return -ENOSPC;
#endif
	} else if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) {
		slot = 2;
		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
			dbcr_dac(child) |= DBCR_DAC2R;
		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
			dbcr_dac(child) |= DBCR_DAC2W;
		child->thread.debug.dac2 = (unsigned long)bp_info->addr;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
		if (byte_enable) {
			child->thread.debug.dvc2 =
				(unsigned long)bp_info->condition_value;
			child->thread.debug.dbcr2 |=
				((byte_enable << DBCR2_DVC2BE_SHIFT) |
				 (condition_mode << DBCR2_DVC2M_SHIFT));
		}
#endif
	} else
		return -ENOSPC;
	/* Enable internal debug mode so the event actually fires. */
	child->thread.debug.dbcr0 |= DBCR0_IDM;
	child->thread.regs->msr |= MSR_DE;

	return slot + 4;
}
2766
/*
 * Remove the data watchpoint in DAC @slot (1 or 2).  Removing slot 1
 * also tears down an active DAC range pair; removing slot 2 while it is
 * part of a range is rejected with -EINVAL.  Returns 0 on success,
 * -ENOENT if the slot is not in use.
 */
static int del_dac(struct task_struct *child, int slot)
{
	if (slot == 1) {
		if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0)
			return -ENOENT;

		child->thread.debug.dac1 = 0;
		dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
			child->thread.debug.dac2 = 0;
			child->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
		}
		child->thread.debug.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
#endif
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
		child->thread.debug.dvc1 = 0;
#endif
	} else if (slot == 2) {
		if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0)
			return -ENOENT;

#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE)
			/* Part of a range */
			return -EINVAL;
		child->thread.debug.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
#endif
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
		child->thread.debug.dvc2 = 0;
#endif
		child->thread.debug.dac2 = 0;
		dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W);
	} else
		return -EINVAL;

	return 0;
}
2805#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
2806
2807#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
/*
 * Install a DAC range/mask watchpoint occupying both DAC slots.
 * Returns 5 (the pseudo-slot reported for ranges) on success, -EINVAL
 * if a DVC condition was requested, -EIO for a bad address/mask,
 * -ENOSPC when either DAC slot is already in use.
 */
static int set_dac_range(struct task_struct *child,
			 struct ppc_hw_breakpoint *bp_info)
{
	int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK;

	/* We don't allow range watchpoints to be used with DVC */
	if (bp_info->condition_mode)
		return -EINVAL;

	/*
	 * Best effort to verify the address range.  The user/supervisor bits
	 * prevent trapping in kernel space, but let's fail on an obvious bad
	 * range.  The simple test on the mask is not fool-proof, and any
	 * exclusive range will spill over into kernel space.
	 */
	if (bp_info->addr >= TASK_SIZE)
		return -EIO;
	if (mode == PPC_BREAKPOINT_MODE_MASK) {
		/*
		 * dac2 is a bitmask.  Don't allow a mask that makes a
		 * kernel space address from a valid dac1 value
		 */
		if (~((unsigned long)bp_info->addr2) >= TASK_SIZE)
			return -EIO;
	} else {
		/*
		 * For range breakpoints, addr2 must also be a valid address
		 */
		if (bp_info->addr2 >= TASK_SIZE)
			return -EIO;
	}

	/* Both DAC slots are consumed by a range; require both free. */
	if (child->thread.debug.dbcr0 &
	    (DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W))
		return -ENOSPC;

	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
		child->thread.debug.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
		child->thread.debug.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
	child->thread.debug.dac1 = bp_info->addr;
	child->thread.debug.dac2 = bp_info->addr2;
	if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
		child->thread.debug.dbcr2  |= DBCR2_DAC12M;
	else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
		child->thread.debug.dbcr2  |= DBCR2_DAC12MX;
	else	/* PPC_BREAKPOINT_MODE_MASK */
		child->thread.debug.dbcr2  |= DBCR2_DAC12MM;
	child->thread.regs->msr |= MSR_DE;

	return 5;
}
2860#endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */
2861
/*
 * PPC_PTRACE_SETHWDEBUG handler: validate @bp_info and install the
 * requested instruction or data breakpoint via the config-appropriate
 * backend.  Returns a positive slot/handle (used later with
 * ppc_del_hwdebug()) or a negative errno.
 */
static long ppc_set_hwdebug(struct task_struct *child,
		     struct ppc_hw_breakpoint *bp_info)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	int len = 0;
	struct thread_struct *thread = &(child->thread);
	struct perf_event *bp;
	struct perf_event_attr attr;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#ifndef CONFIG_PPC_ADV_DEBUG_REGS
	struct arch_hw_breakpoint brk;
#endif

	if (bp_info->version != 1)
		return -ENOTSUPP;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	/*
	 * Check for invalid flags and combinations
	 */
	if ((bp_info->trigger_type == 0) ||
	    (bp_info->trigger_type & ~(PPC_BREAKPOINT_TRIGGER_EXECUTE |
				       PPC_BREAKPOINT_TRIGGER_RW)) ||
	    (bp_info->addr_mode & ~PPC_BREAKPOINT_MODE_MASK) ||
	    (bp_info->condition_mode &
	     ~(PPC_BREAKPOINT_CONDITION_MODE |
	       PPC_BREAKPOINT_CONDITION_BE_ALL)))
		return -EINVAL;
#if CONFIG_PPC_ADV_DEBUG_DVCS == 0
	if (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
		return -EINVAL;
#endif

	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_EXECUTE) {
		/* Execute triggers cannot be combined with R/W or DVC. */
		if ((bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE) ||
		    (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE))
			return -EINVAL;
		return set_instruction_bp(child, bp_info);
	}
	if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
		return set_dac(child, bp_info);

#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
	return set_dac_range(child, bp_info);
#else
	return -EINVAL;
#endif
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
	/*
	 * We only support one data breakpoint
	 */
	if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 ||
	    (bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 ||
	    bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
		return -EINVAL;

	if ((unsigned long)bp_info->addr >= TASK_SIZE)
		return -EIO;

	brk.address = bp_info->addr & ~7UL;
	brk.type = HW_BRK_TYPE_TRANSLATE;
	brk.len = 8;
	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
		brk.type |= HW_BRK_TYPE_READ;
	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
		brk.type |= HW_BRK_TYPE_WRITE;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	/*
	 * Check if the request is for 'range' breakpoints. We can
	 * support it if range < 8 bytes.
	 */
	if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
		len = bp_info->addr2 - bp_info->addr;
	else if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
		len = 1;
	else
		return -EINVAL;
	bp = thread->ptrace_bps[0];
	if (bp)
		return -ENOSPC;

	/* Create a new breakpoint request if one doesn't exist already */
	hw_breakpoint_init(&attr);
	attr.bp_addr = (unsigned long)bp_info->addr & ~HW_BREAKPOINT_ALIGN;
	attr.bp_len = len;
	arch_bp_generic_fields(brk.type, &attr.bp_type);

	thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
					       ptrace_triggered, NULL, child);
	if (IS_ERR(bp)) {
		thread->ptrace_bps[0] = NULL;
		return PTR_ERR(bp);
	}

	return 1;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

	/* NOTE: the fallback below is only compiled in (and reachable) when
	 * CONFIG_HAVE_HW_BREAKPOINT is not set; otherwise the return above
	 * ends the function. */
	if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT)
		return -EINVAL;

	if (child->thread.hw_brk.address)
		return -ENOSPC;

	child->thread.hw_brk = brk;

	return 1;
#endif /* !CONFIG_PPC_ADV_DEBUG_REGS */
}
2969
/*
 * PPC_PTRACE_DELHWDEBUG handler: remove the breakpoint identified by
 * the slot/handle @data returned earlier by ppc_set_hwdebug().
 * Returns 0 on success, -ENOENT/-EINVAL for a stale or bogus handle.
 */
static long ppc_del_hwdebug(struct task_struct *child, long data)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	int ret = 0;
	struct thread_struct *thread = &(child->thread);
	struct perf_event *bp;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	int rc;

	/* Handles 1-4 are IAC slots, 5+ are DAC slots (see set_dac()). */
	if (data <= 4)
		rc = del_instruction_bp(child, (int)data);
	else
		rc = del_dac(child, (int)data - 4);

	if (!rc) {
		/* Leave debug mode once no debug events remain active. */
		if (!DBCR_ACTIVE_EVENTS(child->thread.debug.dbcr0,
					child->thread.debug.dbcr1)) {
			child->thread.debug.dbcr0 &= ~DBCR0_IDM;
			child->thread.regs->msr &= ~MSR_DE;
		}
	}
	return rc;
#else
	/* Only one data breakpoint is supported, so the handle must be 1. */
	if (data != 1)
		return -EINVAL;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	bp = thread->ptrace_bps[0];
	if (bp) {
		unregister_hw_breakpoint(bp);
		thread->ptrace_bps[0] = NULL;
	} else
		ret = -ENOENT;
	return ret;
#else /* CONFIG_HAVE_HW_BREAKPOINT */
	if (child->thread.hw_brk.address == 0)
		return -ENOENT;

	child->thread.hw_brk.address = 0;
	child->thread.hw_brk.type = 0;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

	return 0;
#endif
}
3016
/*
 * arch_ptrace() - powerpc-specific dispatcher for ptrace requests.
 * @child:   the task being traced
 * @request: ptrace request code
 * @addr:    request-dependent address argument (e.g. USER-area offset)
 * @data:    request-dependent data argument; several requests reinterpret
 *           it as a pointer into the tracer's address space
 *
 * Handles the powerpc-private requests here (USER-area peek/poke,
 * hardware debug registers, and the bulk register-set transfers);
 * anything unrecognised falls through to the generic ptrace_request().
 *
 * Returns 0 on success or a negative errno.
 */
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret = -EPERM;
	/* Most requests treat 'data' as a user-space pointer. */
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = datavp;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long index, tmp;

		ret = -EIO;
		/*
		 * Convert the byte offset to a register index and check
		 * alignment/bounds; the USER area is indexed in native
		 * word units (4 bytes on ppc32, 8 bytes on ppc64).
		 */
#ifdef CONFIG_PPC32
		index = addr >> 2;
		if ((addr & 3) || (index > PT_FPSCR)
		    || (child->thread.regs == NULL))
#else
		index = addr >> 3;
		if ((addr & 7) || (index > PT_FPSCR))
#endif
			break;

		CHECK_FULL_REGS(child->thread.regs);
		if (index < PT_FPR0) {
			/* GPR/special-register slot: use the common accessor. */
			ret = ptrace_get_reg(child, (int) index, &tmp);
			if (ret)
				break;
		} else {
			/*
			 * FP slot: flush the child's live FP state into its
			 * thread_struct first so we read current values.
			 */
			unsigned int fpidx = index - PT_FPR0;

			flush_fp_to_thread(child);
			if (fpidx < (PT_FPSCR - PT_FPR0))
				memcpy(&tmp, &child->thread.TS_FPR(fpidx),
				       sizeof(long));
			else
				tmp = child->thread.fp_state.fpscr;
		}
		ret = put_user(tmp, datalp);
		break;
	}

	/* write the word at location addr in the USER area */
	case PTRACE_POKEUSR: {
		unsigned long index;

		ret = -EIO;
		/* convert to index and check (same scheme as PEEKUSR above) */
#ifdef CONFIG_PPC32
		index = addr >> 2;
		if ((addr & 3) || (index > PT_FPSCR)
		    || (child->thread.regs == NULL))
#else
		index = addr >> 3;
		if ((addr & 7) || (index > PT_FPSCR))
#endif
			break;

		CHECK_FULL_REGS(child->thread.regs);
		if (index < PT_FPR0) {
			ret = ptrace_put_reg(child, index, data);
		} else {
			/* FP slot: flush first so the write isn't clobbered
			 * by stale live register state later. */
			unsigned int fpidx = index - PT_FPR0;

			flush_fp_to_thread(child);
			if (fpidx < (PT_FPSCR - PT_FPR0))
				memcpy(&child->thread.TS_FPR(fpidx), &data,
				       sizeof(long));
			else
				child->thread.fp_state.fpscr = data;
			ret = 0;
		}
		break;
	}

	/*
	 * Report the hardware debug facility capabilities (breakpoint
	 * counts, alignment, supported features) to the tracer.
	 */
	case PPC_PTRACE_GETHWDBGINFO: {
		struct ppc_debug_info dbginfo;

		dbginfo.version = 1;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		/* BookE-style debug: counts come from Kconfig. */
		dbginfo.num_instruction_bps = CONFIG_PPC_ADV_DEBUG_IACS;
		dbginfo.num_data_bps = CONFIG_PPC_ADV_DEBUG_DACS;
		dbginfo.num_condition_regs = CONFIG_PPC_ADV_DEBUG_DVCS;
		dbginfo.data_bp_alignment = 4;
		dbginfo.sizeof_condition = 4;
		dbginfo.features = PPC_DEBUG_FEATURE_INSN_BP_RANGE |
				   PPC_DEBUG_FEATURE_INSN_BP_MASK;
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		dbginfo.features |=
				   PPC_DEBUG_FEATURE_DATA_BP_RANGE |
				   PPC_DEBUG_FEATURE_DATA_BP_MASK;
#endif
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
		/* Classic server-style debug: a single data breakpoint. */
		dbginfo.num_instruction_bps = 0;
		dbginfo.num_data_bps = 1;
		dbginfo.num_condition_regs = 0;
#ifdef CONFIG_PPC64
		dbginfo.data_bp_alignment = 8;
#else
		dbginfo.data_bp_alignment = 4;
#endif
		dbginfo.sizeof_condition = 0;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		dbginfo.features = PPC_DEBUG_FEATURE_DATA_BP_RANGE;
		if (cpu_has_feature(CPU_FTR_DAWR))
			dbginfo.features |= PPC_DEBUG_FEATURE_DATA_BP_DAWR;
#else
		dbginfo.features = 0;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */

		/* access_ok() first, then the unchecked __copy variant. */
		if (!access_ok(VERIFY_WRITE, datavp,
			       sizeof(struct ppc_debug_info)))
			return -EFAULT;
		ret = __copy_to_user(datavp, &dbginfo,
				     sizeof(struct ppc_debug_info)) ?
		      -EFAULT : 0;
		break;
	}

	/* Install a hardware breakpoint/watchpoint described by the tracer. */
	case PPC_PTRACE_SETHWDEBUG: {
		struct ppc_hw_breakpoint bp_info;

		if (!access_ok(VERIFY_READ, datavp,
			       sizeof(struct ppc_hw_breakpoint)))
			return -EFAULT;
		ret = __copy_from_user(&bp_info, datavp,
				       sizeof(struct ppc_hw_breakpoint)) ?
		      -EFAULT : 0;
		if (!ret)
			ret = ppc_set_hwdebug(child, &bp_info);
		break;
	}

	/* Remove the hardware breakpoint with the handle passed in 'data'. */
	case PPC_PTRACE_DELHWDEBUG: {
		ret = ppc_del_hwdebug(child, data);
		break;
	}

	case PTRACE_GET_DEBUGREG: {
#ifndef CONFIG_PPC_ADV_DEBUG_REGS
		unsigned long dabr_fake;
#endif
		ret = -EINVAL;
		/* We only support one DABR and no IABRS at the moment */
		if (addr > 0)
			break;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		ret = put_user(child->thread.debug.dac1, datalp);
#else
		/*
		 * Synthesize a legacy DABR-style value from the generic
		 * hw_brk state so old userspace keeps working.
		 */
		dabr_fake = ((child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) |
			     (child->thread.hw_brk.type & HW_BRK_TYPE_DABR));
		ret = put_user(dabr_fake, datalp);
#endif
		break;
	}

	case PTRACE_SET_DEBUGREG:
		ret = ptrace_set_debugreg(child, addr, data);
		break;

	/*
	 * The remaining requests are bulk transfers via the regset
	 * machinery; each returns directly from the copy helper.
	 */
#ifdef CONFIG_PPC64
	case PTRACE_GETREGS64:
#endif
	case PTRACE_GETREGS:	/* Get all pt_regs from the child. */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_GPR,
					   0, sizeof(struct pt_regs),
					   datavp);

#ifdef CONFIG_PPC64
	case PTRACE_SETREGS64:
#endif
	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_GPR,
					     0, sizeof(struct pt_regs),
					     datavp);

	case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_FPR,
					   0, sizeof(elf_fpregset_t),
					   datavp);

	case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_FPR,
					     0, sizeof(elf_fpregset_t),
					     datavp);

#ifdef CONFIG_ALTIVEC
	/* 32 vector regs + VSCR + VRSAVE: 33 * vector128 + u32. */
	case PTRACE_GETVRREGS:
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_VMX,
					   0, (33 * sizeof(vector128) +
					       sizeof(u32)),
					   datavp);

	case PTRACE_SETVRREGS:
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_VMX,
					     0, (33 * sizeof(vector128) +
						 sizeof(u32)),
					     datavp);
#endif
#ifdef CONFIG_VSX
	case PTRACE_GETVSRREGS:
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_VSX,
					   0, 32 * sizeof(double),
					   datavp);

	case PTRACE_SETVSRREGS:
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_VSX,
					     0, 32 * sizeof(double),
					     datavp);
#endif
#ifdef CONFIG_SPE
	case PTRACE_GETEVRREGS:
		/* Get the child spe register state. */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_SPE, 0, 35 * sizeof(u32),
					   datavp);

	case PTRACE_SETEVRREGS:
		/* Set the child spe register state. */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_SPE, 0, 35 * sizeof(u32),
					     datavp);
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}
	return ret;
}
3257
#ifdef CONFIG_SECCOMP
/*
 * do_seccomp() - run the seccomp check for the current syscall entry.
 * @regs: register state of the current task at syscall entry
 *
 * Returns 0 if the syscall may proceed, -1 if seccomp rejected it; in
 * the rejection case r3 already holds the value userspace will see.
 */
static int do_seccomp(struct pt_regs *regs)
{
	if (!test_thread_flag(TIF_SECCOMP))
		return 0;

	/*
	 * The ABI we present to seccomp tracers is that r3 contains
	 * the syscall return value and orig_gpr3 contains the first
	 * syscall parameter. This is different to the ptrace ABI where
	 * both r3 and orig_gpr3 contain the first syscall parameter.
	 */
	regs->gpr[3] = -ENOSYS;

	/*
	 * We use the __ version here because we have already checked
	 * TIF_SECCOMP. If this fails, there is nothing left to do, we
	 * have already loaded -ENOSYS into r3, or seccomp has put
	 * something else in r3 (via SECCOMP_RET_ERRNO/TRACE).
	 */
	if (__secure_computing(NULL))
		return -1;

	/*
	 * The syscall was allowed by seccomp, restore the register
	 * state to what audit expects.
	 * Note that we use orig_gpr3, which means a seccomp tracer can
	 * modify the first syscall parameter (in orig_gpr3) and also
	 * allow the syscall to proceed.
	 */
	regs->gpr[3] = regs->orig_gpr3;

	return 0;
}
#else
/* No seccomp support configured: always allow the syscall. */
static inline int do_seccomp(struct pt_regs *regs) { return 0; }
#endif /* CONFIG_SECCOMP */
3295
/**
 * do_syscall_trace_enter() - Do syscall tracing on kernel entry.
 * @regs: the pt_regs of the task to trace (current)
 *
 * Performs various types of tracing on syscall entry. This includes seccomp,
 * ptrace, syscall tracepoints and audit.
 *
 * The pt_regs are potentially visible to userspace via ptrace, so their
 * contents is ABI.
 *
 * One or more of the tracers may modify the contents of pt_regs, in particular
 * to modify arguments or even the syscall number itself.
 *
 * It's also possible that a tracer can choose to reject the system call. In
 * that case this function will return an illegal syscall number, and will put
 * an appropriate return value in regs->r3.
 *
 * Return: the (possibly changed) syscall number.
 */
long do_syscall_trace_enter(struct pt_regs *regs)
{
	/* Tell context tracking we have left userspace. */
	user_exit();

	/*
	 * The tracer may decide to abort the syscall, if so tracehook
	 * will return !0. Note that the tracer may also just change
	 * regs->gpr[0] to an invalid syscall number, that is handled
	 * below on the exit path.
	 */
	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs))
		goto skip;

	/* Run seccomp after ptrace; allow it to set gpr[3]. */
	if (do_seccomp(regs))
		return -1;

	/* Avoid trace and audit when syscall is invalid. */
	if (regs->gpr[0] >= NR_syscalls)
		goto skip;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->gpr[0]);

#ifdef CONFIG_PPC64
	if (!is_32bit_task())
		audit_syscall_entry(regs->gpr[0], regs->gpr[3], regs->gpr[4],
				    regs->gpr[5], regs->gpr[6]);
	else
#endif
		/* 32-bit task: only the low 32 bits of each arg are valid. */
		audit_syscall_entry(regs->gpr[0],
				    regs->gpr[3] & 0xffffffff,
				    regs->gpr[4] & 0xffffffff,
				    regs->gpr[5] & 0xffffffff,
				    regs->gpr[6] & 0xffffffff);

	/* Return the possibly modified but valid syscall number */
	return regs->gpr[0];

skip:
	/*
	 * If we are aborting explicitly, or if the syscall number is
	 * now invalid, set the return value to -ENOSYS.
	 */
	regs->gpr[3] = -ENOSYS;
	return -1;
}
3363
3364void do_syscall_trace_leave(struct pt_regs *regs)
3365{
3366	int step;
3367
3368	audit_syscall_exit(regs);
3369
3370	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
3371		trace_sys_exit(regs, regs->result);
3372
3373	step = test_thread_flag(TIF_SINGLESTEP);
3374	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
3375		tracehook_report_syscall_exit(regs, step);
3376
3377	user_enter();
3378}