/arch/sh/kernel/ptrace_32.c

https://gitlab.com/teobaluta/opw · C · 535 lines · 421 code · 71 blank · 43 comment · 55 complexity · 0a3ebd5c9ceb44765f6eba9be4fc29e2 MD5 · raw file

/*
 * SuperH process tracing
 *
 * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
 * Copyright (C) 2002 - 2009 Paul Mundt
 *
 * Audit support by Yuichi Nakamura <ynakam@hitachisoft.jp>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
  13. #include <linux/kernel.h>
  14. #include <linux/sched.h>
  15. #include <linux/mm.h>
  16. #include <linux/smp.h>
  17. #include <linux/errno.h>
  18. #include <linux/ptrace.h>
  19. #include <linux/user.h>
  20. #include <linux/security.h>
  21. #include <linux/signal.h>
  22. #include <linux/io.h>
  23. #include <linux/audit.h>
  24. #include <linux/seccomp.h>
  25. #include <linux/tracehook.h>
  26. #include <linux/elf.h>
  27. #include <linux/regset.h>
  28. #include <linux/hw_breakpoint.h>
  29. #include <asm/uaccess.h>
  30. #include <asm/pgtable.h>
  31. #include <asm/processor.h>
  32. #include <asm/mmu_context.h>
  33. #include <asm/syscalls.h>
  34. #include <asm/fpu.h>
  35. #define CREATE_TRACE_POINTS
  36. #include <trace/events/syscalls.h>
  37. /*
  38. * This routine will get a word off of the process kernel stack.
  39. */
  40. static inline int get_stack_long(struct task_struct *task, int offset)
  41. {
  42. unsigned char *stack;
  43. stack = (unsigned char *)task_pt_regs(task);
  44. stack += offset;
  45. return (*((int *)stack));
  46. }
  47. /*
  48. * This routine will put a word on the process kernel stack.
  49. */
  50. static inline int put_stack_long(struct task_struct *task, int offset,
  51. unsigned long data)
  52. {
  53. unsigned char *stack;
  54. stack = (unsigned char *)task_pt_regs(task);
  55. stack += offset;
  56. *(unsigned long *) stack = data;
  57. return 0;
  58. }
  59. void ptrace_triggered(struct perf_event *bp,
  60. struct perf_sample_data *data, struct pt_regs *regs)
  61. {
  62. struct perf_event_attr attr;
  63. /*
  64. * Disable the breakpoint request here since ptrace has defined a
  65. * one-shot behaviour for breakpoint exceptions.
  66. */
  67. attr = bp->attr;
  68. attr.disabled = true;
  69. modify_user_hw_breakpoint(bp, &attr);
  70. }
/*
 * Arm a one-shot hardware breakpoint at @addr to emulate single-step
 * for @tsk.
 *
 * The per-thread breakpoint event is registered lazily on first use
 * and cached in thread->ptrace_bps[0]; later calls re-target it to the
 * new address and re-enable it (ptrace_triggered() disables it when it
 * fires). Returns 0 on success or a negative errno from the
 * hw_breakpoint core.
 */
static int set_single_step(struct task_struct *tsk, unsigned long addr)
{
	struct thread_struct *thread = &tsk->thread;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = thread->ptrace_bps[0];
	if (!bp) {
		/* First use: create and cache the breakpoint event. */
		ptrace_breakpoint_init(&attr);

		attr.bp_addr = addr;
		attr.bp_len = HW_BREAKPOINT_LEN_2;
		attr.bp_type = HW_BREAKPOINT_R;

		bp = register_user_hw_breakpoint(&attr, ptrace_triggered,
						 NULL, tsk);
		if (IS_ERR(bp))
			return PTR_ERR(bp);

		thread->ptrace_bps[0] = bp;
	} else {
		int err;

		attr = bp->attr;
		attr.bp_addr = addr;

		/* reenable breakpoint */
		attr.disabled = false;
		err = modify_user_hw_breakpoint(bp, &attr);
		if (unlikely(err))
			return err;
	}

	return 0;
}
/*
 * Enable ptrace single-step for @child: set TIF_SINGLESTEP and plant a
 * hardware breakpoint at the child's current PC.
 *
 * NOTE(review): the return value of set_single_step() is discarded, so
 * a failed breakpoint registration is silent here — the void interface
 * gives no way to report it; confirm this matches expected semantics.
 */
void user_enable_single_step(struct task_struct *child)
{
	unsigned long pc = get_stack_long(child, offsetof(struct pt_regs, pc));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);
	set_single_step(child, pc);
}
/*
 * Disable ptrace single-step for @child by clearing TIF_SINGLESTEP.
 * The cached breakpoint event is left in place (it is one-shot and
 * disables itself in ptrace_triggered()).
 */
void user_disable_single_step(struct task_struct *child)
{
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
}
/*
 * regset get: copy the general-register frame out of the child's
 * pt_regs. Layout is r0..r15 first, then the tail of pt_regs starting
 * at pc; anything the regset defines beyond sizeof(struct pt_regs) is
 * zero-filled.
 */
static int genregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	const struct pt_regs *regs = task_pt_regs(target);
	int ret;

	/* R0 --> R15 */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  regs->regs,
				  0, 16 * sizeof(unsigned long));
	if (!ret)
		/* PC, PR, SR, GBR, MACH, MACL, TRA */
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &regs->pc,
					  offsetof(struct pt_regs, pc),
					  sizeof(struct pt_regs));
	if (!ret)
		/* Pad the remainder of the regset with zeroes. */
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct pt_regs), -1);

	return ret;
}
/*
 * regset set: write the general-register frame into the child's
 * pt_regs. Mirrors genregs_get(): r0..r15, then pc..tra; any input
 * beyond sizeof(struct pt_regs) is accepted but ignored.
 */
static int genregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	int ret;

	/* R0 --> R15 */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 regs->regs,
				 0, 16 * sizeof(unsigned long));
	if (!ret && count > 0)
		/* PC, PR, SR, GBR, MACH, MACL, TRA */
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &regs->pc,
					 offsetof(struct pt_regs, pc),
					 sizeof(struct pt_regs));
	if (!ret)
		/* Swallow (don't fail on) trailing input past pt_regs. */
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						sizeof(struct pt_regs), -1);

	return ret;
}
  159. #ifdef CONFIG_SH_FPU
/*
 * regset get: copy the FPU state out of the child's xstate area.
 *
 * init_fpu() ensures the task's xstate buffer is valid before reading
 * (presumably allocating/flushing as needed — see asm/fpu.h). Parts
 * with a hardware FPU read the hardfpu image, soft-float builds the
 * softfpu image. Non-static: referenced outside this file (likely via
 * asm/fpu.h — confirm).
 */
int fpregs_get(struct task_struct *target,
	       const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       void *kbuf, void __user *ubuf)
{
	int ret;

	ret = init_fpu(target);
	if (ret)
		return ret;

	if ((boot_cpu_data.flags & CPU_HAS_FPU))
		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					   &target->thread.xstate->hardfpu, 0, -1);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.xstate->softfpu, 0, -1);
}
/*
 * regset set: write the FPU state into the child's xstate area.
 *
 * Marks the (stopped) child as having used math so the newly written
 * state is treated as live. Writes hardfpu on hardware-FPU parts,
 * softfpu on soft-float builds.
 */
static int fpregs_set(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      const void *kbuf, const void __user *ubuf)
{
	int ret;

	ret = init_fpu(target);
	if (ret)
		return ret;

	set_stopped_child_used_math(target);

	if ((boot_cpu_data.flags & CPU_HAS_FPU))
		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					  &target->thread.xstate->hardfpu, 0, -1);

	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.xstate->softfpu, 0, -1);
}
  191. static int fpregs_active(struct task_struct *target,
  192. const struct user_regset *regset)
  193. {
  194. return tsk_used_math(target) ? regset->n : 0;
  195. }
  196. #endif
  197. #ifdef CONFIG_SH_DSP
/*
 * regset get: copy the DSP register block out of the thread's saved
 * dsp_status. Anything the regset defines beyond struct pt_dspregs is
 * zero-filled.
 */
static int dspregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	const struct pt_dspregs *regs =
		(struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
	int ret;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, regs,
				  0, sizeof(struct pt_dspregs));
	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct pt_dspregs), -1);

	return ret;
}
/*
 * regset set: write the DSP register block into the thread's saved
 * dsp_status. Trailing input past struct pt_dspregs is accepted but
 * ignored.
 */
static int dspregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	struct pt_dspregs *regs =
		(struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs,
				 0, sizeof(struct pt_dspregs));
	if (!ret)
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						sizeof(struct pt_dspregs), -1);

	return ret;
}
  228. static int dspregs_active(struct task_struct *target,
  229. const struct user_regset *regset)
  230. {
  231. struct pt_regs *regs = task_pt_regs(target);
  232. return regs->sr & SR_DSP ? regset->n : 0;
  233. }
  234. #endif
/*
 * Name -> pt_regs byte-offset table ("r0".."r15", "pc", "pr", ...),
 * terminated by REG_OFFSET_END. Presumably consumed by the generic
 * regs_query_register_offset()/name() helpers — confirm in asm/ptrace.h.
 */
const struct pt_regs_offset regoffset_table[] = {
	REGS_OFFSET_NAME(0),
	REGS_OFFSET_NAME(1),
	REGS_OFFSET_NAME(2),
	REGS_OFFSET_NAME(3),
	REGS_OFFSET_NAME(4),
	REGS_OFFSET_NAME(5),
	REGS_OFFSET_NAME(6),
	REGS_OFFSET_NAME(7),
	REGS_OFFSET_NAME(8),
	REGS_OFFSET_NAME(9),
	REGS_OFFSET_NAME(10),
	REGS_OFFSET_NAME(11),
	REGS_OFFSET_NAME(12),
	REGS_OFFSET_NAME(13),
	REGS_OFFSET_NAME(14),
	REGS_OFFSET_NAME(15),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(pr),
	REG_OFFSET_NAME(sr),
	REG_OFFSET_NAME(gbr),
	REG_OFFSET_NAME(mach),
	REG_OFFSET_NAME(macl),
	REG_OFFSET_NAME(tra),
	REG_OFFSET_END,
};
/*
 * These are our native regset flavours. The enumerators index the
 * sh_regsets[] array below.
 */
enum sh_regset {
	REGSET_GENERAL,		/* r0-r15 + pc/pr/sr/gbr/mach/macl/tra */
#ifdef CONFIG_SH_FPU
	REGSET_FPU,		/* hardware or software FPU state */
#endif
#ifdef CONFIG_SH_DSP
	REGSET_DSP,		/* DSP register block */
#endif
};
/*
 * Native regset descriptors, indexed by enum sh_regset.
 *
 * NOTE(review): the DSP regset sets no .core_note_type, so it would
 * not appear in core dumps — confirm this is intentional.
 */
static const struct user_regset sh_regsets[] = {
	/*
	 * Format is:
	 *	R0 --> R15
	 *	PC, PR, SR, GBR, MACH, MACL, TRA
	 */
	[REGSET_GENERAL] = {
		.core_note_type	= NT_PRSTATUS,
		.n		= ELF_NGREG,
		.size		= sizeof(long),
		.align		= sizeof(long),
		.get		= genregs_get,
		.set		= genregs_set,
	},

#ifdef CONFIG_SH_FPU
	[REGSET_FPU] = {
		.core_note_type	= NT_PRFPREG,
		.n		= sizeof(struct user_fpu_struct) / sizeof(long),
		.size		= sizeof(long),
		.align		= sizeof(long),
		.get		= fpregs_get,
		.set		= fpregs_set,
		.active		= fpregs_active,
	},
#endif

#ifdef CONFIG_SH_DSP
	[REGSET_DSP] = {
		.n		= sizeof(struct pt_dspregs) / sizeof(long),
		.size		= sizeof(long),
		.align		= sizeof(long),
		.get		= dspregs_get,
		.set		= dspregs_set,
		.active		= dspregs_active,
	},
#endif
};
/* The single native regset view for 32-bit SH. */
static const struct user_regset_view user_sh_native_view = {
	.name		= "sh",
	.e_machine	= EM_SH,
	.regsets	= sh_regsets,
	.n		= ARRAY_SIZE(sh_regsets),
};
/*
 * Return the regset view for @task. SH-32 has no compat/biarch
 * variants, so every task gets the native view.
 */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_sh_native_view;
}
/*
 * Architecture-specific ptrace requests: PEEKUSR/POKEUSR into the
 * struct user layout (pt_regs frame, FPU image, text/data addresses),
 * plus the bulk regset transfers. Everything else is handed to the
 * generic ptrace_request().
 */
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	unsigned long __user *datap = (unsigned long __user *)data;
	int ret;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		/*
		 * addr must be word-aligned and inside struct user.
		 * NOTE(review): "addr < 0" is always false because addr
		 * is unsigned long; only the alignment and upper-bound
		 * checks actually take effect.
		 */
		if ((addr & 3) || addr < 0 ||
		    addr > sizeof(struct user) - 3)
			break;

		if (addr < sizeof(struct pt_regs))
			/* General-register frame. */
			tmp = get_stack_long(child, addr);
		else if (addr >= offsetof(struct user, fpu) &&
			 addr < offsetof(struct user, u_fpvalid)) {
			if (!tsk_used_math(child)) {
				/* FPU never used: synthesize default state. */
				if (addr == offsetof(struct user, fpu.fpscr))
					tmp = FPSCR_INIT;
				else
					tmp = 0;
			} else {
				unsigned long index;
				ret = init_fpu(child);
				if (ret)
					break;
				/* Word index into the saved xstate image. */
				index = addr - offsetof(struct user, fpu);
				tmp = ((unsigned long *)child->thread.xstate)
					[index >> 2];
			}
		} else if (addr == offsetof(struct user, u_fpvalid))
			tmp = !!tsk_used_math(child);
		else if (addr == PT_TEXT_ADDR)
			tmp = child->mm->start_code;
		else if (addr == PT_DATA_ADDR)
			tmp = child->mm->start_data;
		else if (addr == PT_TEXT_END_ADDR)
			tmp = child->mm->end_code;
		else if (addr == PT_TEXT_LEN)
			tmp = child->mm->end_code - child->mm->start_code;
		else
			tmp = 0;

		ret = put_user(tmp, datap);
		break;
	}

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		/* Same bounds check as PEEKUSR (see note there). */
		if ((addr & 3) || addr < 0 ||
		    addr > sizeof(struct user) - 3)
			break;

		if (addr < sizeof(struct pt_regs))
			ret = put_stack_long(child, addr, data);
		else if (addr >= offsetof(struct user, fpu) &&
			 addr < offsetof(struct user, u_fpvalid)) {
			unsigned long index;
			ret = init_fpu(child);
			if (ret)
				break;
			index = addr - offsetof(struct user, fpu);
			/* Writing FPU state marks the child as having used math. */
			set_stopped_child_used_math(child);
			((unsigned long *)child->thread.xstate)
				[index >> 2] = data;
			ret = 0;
		} else if (addr == offsetof(struct user, u_fpvalid)) {
			conditional_stopped_child_used_math(data, child);
			ret = 0;
		}
		break;

	case PTRACE_GETREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_GENERAL,
					   0, sizeof(struct pt_regs),
					   datap);
	case PTRACE_SETREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_GENERAL,
					     0, sizeof(struct pt_regs),
					     datap);
#ifdef CONFIG_SH_FPU
	case PTRACE_GETFPREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_FPU,
					   0, sizeof(struct user_fpu_struct),
					   datap);
	case PTRACE_SETFPREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_FPU,
					     0, sizeof(struct user_fpu_struct),
					     datap);
#endif
#ifdef CONFIG_SH_DSP
	case PTRACE_GETDSPREGS:
		return copy_regset_to_user(child, &user_sh_native_view,
					   REGSET_DSP,
					   0, sizeof(struct pt_dspregs),
					   datap);
	case PTRACE_SETDSPREGS:
		return copy_regset_from_user(child, &user_sh_native_view,
					     REGSET_DSP,
					     0, sizeof(struct pt_dspregs),
					     datap);
#endif
	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
  428. static inline int audit_arch(void)
  429. {
  430. int arch = EM_SH;
  431. #ifdef CONFIG_CPU_LITTLE_ENDIAN
  432. arch |= __AUDIT_ARCH_LE;
  433. #endif
  434. return arch;
  435. }
/*
 * Syscall-entry tracing hook, called from the syscall entry path.
 *
 * Runs seccomp first, then the ptrace/tracehook report, the
 * sys_enter tracepoint, and finally syscall auditing. regs->regs[0]
 * appears to carry the syscall number here, with regs[3..7] as the
 * audited arguments — confirm against the SH syscall ABI in entry
 * code. Returns the syscall number to execute, or -1 to force ENOSYS
 * when the tracer aborts the call.
 */
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	secure_computing_strict(regs->regs[0]);

	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs))
		/*
		 * Tracing decided this syscall should not happen.
		 * We'll return a bogus call number to get an ENOSYS
		 * error, but leave the original number in regs->regs[0].
		 */
		ret = -1L;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->regs[0]);

	audit_syscall_entry(audit_arch(), regs->regs[3],
			    regs->regs[4], regs->regs[5],
			    regs->regs[6], regs->regs[7]);

	return ret ?: regs->regs[0];
}
  455. asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
  456. {
  457. int step;
  458. audit_syscall_exit(regs);
  459. if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
  460. trace_sys_exit(regs, regs->regs[0]);
  461. step = test_thread_flag(TIF_SINGLESTEP);
  462. if (step || test_thread_flag(TIF_SYSCALL_TRACE))
  463. tracehook_report_syscall_exit(regs, step);
  464. }