
/gdb-linaro-dev/gdb/gdbserver/linux-x86-low.c

https://bitbucket.org/codefirex/toolchain_gdb
C | 3217 lines | 2567 code | 398 blank | 252 comment | 216 complexity
Possible License(s): BSD-3-Clause, GPL-2.0, LGPL-2.0, GPL-3.0, LGPL-2.1

Large files are truncated in this view; the listing below ends partway through the file.

/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2013 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include <stddef.h>
#include <signal.h>
#include <limits.h>
#include <inttypes.h>
#include "server.h"
#include "linux-low.h"
#include "i387-fp.h"
#include "i386-low.h"
#include "i386-xstate.h"
#include "elf/common.h"

#include "gdb_proc_service.h"
#include "agent.h"

/* Defined in auto-generated file i386-linux.c.  */
void init_registers_i386_linux (void);
/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
/* Defined in auto-generated file i386-avx-linux.c.  */
void init_registers_i386_avx_linux (void);
/* Defined in auto-generated file amd64-avx-linux.c.  */
void init_registers_amd64_avx_linux (void);
/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);
/* Defined in auto-generated file x32-linux.c.  */
void init_registers_x32_linux (void);
/* Defined in auto-generated file x32-avx-linux.c.  */
void init_registers_x32_avx_linux (void);

static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
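/* For illustration (not part of the original file): these two buffers
   are templates for the x86 relative-jump encodings patched in by the
   fast-tracepoint code below.  0xe9 is "jmp rel32" (5 bytes total) and
   0x66 0xe9 is "jmp rel16" with an operand-size prefix (4 bytes total).
   The zeroed bytes receive the displacement, which is relative to the
   address of the *next* instruction, e.g.:

     int32_t disp = target - (insn_addr + 5);  // rel32 displacement
     memcpy (jump_insn + 1, &disp, 4);         // patch into the template
*/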
/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/ptrace.h>
#include <sys/uio.h>

#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET 0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET 0x4205
#endif

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct i386_debug_reg_state debug_reg_state;
};

/* Per-thread arch-specific data we want to keep.  */

struct arch_lwp_info
{
  /* Non-zero if our copy differs from what's recorded in the thread.  */
  int debug_registers_changed;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#endif

/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      switch (idx)
        {
        case FS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
            return PS_OK;
          break;
        case GS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
            return PS_OK;
          break;
        default:
          return PS_BADADDR;
        }
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
                (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    *(int *) base = desc[1];
    return PS_OK;
  }
}

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
        {
          *addr = (CORE_ADDR) (uintptr_t) base;
          return 0;
        }

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct regcache *regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
                lwpid_of (lwp),
                (void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
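/* For illustration (not part of the original file): on 32-bit Linux the
   %gs value read above is a segment selector, not an address.  Its low
   three bits hold the requested privilege level and table-indicator
   flag, so shifting right by reg_thread_area (3) yields the GDT entry
   number that PTRACE_GET_THREAD_AREA expects.  The kernel fills DESC
   with a struct user_desc-like layout whose second word (desc[1]) is
   the segment base address -- the per-thread address returned here.
   Sketch of the selector decoding:

     unsigned int selector = 0x33;            // example %gs value
     unsigned int rpl = selector & 0x3;       // privilege level (3)
     unsigned int ti  = (selector >> 2) & 1;  // 0 = GDT, 1 = LDT
     unsigned int idx = selector >> 3;        // GDT entry number (6)
*/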
static int
i386_cannot_store_register (int regno)
{
  return regno >= I386_NUM_REGS;
}

static int
i386_cannot_fetch_register (int regno)
{
  return regno >= I386_NUM_REGS;
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
                            ((char *) buf) + ORIG_EAX * 4);
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
                           ((char *) buf) + ORIG_EAX * 4);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  It would be nice to avoid the duplication in the case
   where it does work.  Maybe the arch_setup routine could check whether
   it works and update target_regsets accordingly, perhaps by moving
   target_regsets to linux_target_ops and setting the right one there,
   rather than having to modify the target_regsets global.  */
struct regset_info target_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  { 0, 0, 0, -1, -1, NULL, NULL }
};

static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      unsigned long pc;
      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      unsigned int pc;
      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      unsigned long newpc = pc;
      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      unsigned int newpc = pc;
      supply_register_by_name (regcache, "eip", &newpc);
    }
}

static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  (*the_target->read_memory) (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}

/* Support for debug registers.  */

static unsigned long
x86_linux_dr_get (ptid_t ptid, int regnum)
{
  int tid;
  unsigned long value;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  value = ptrace (PTRACE_PEEKUSER, tid,
                  offsetof (struct user, u_debugreg[regnum]), 0);
  if (errno != 0)
    error ("Couldn't read debug register");

  return value;
}

static void
x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
{
  int tid;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  ptrace (PTRACE_POKEUSER, tid,
          offsetof (struct user, u_debugreg[regnum]), value);
  if (errno != 0)
    error ("Couldn't write debug register");
}

static int
update_debug_registers_callback (struct inferior_list_entry *entry,
                                 void *pid_p)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  int pid = *(int *) pid_p;

  /* Only update the threads of this process.  */
  if (pid_of (lwp) == pid)
    {
      /* The actual update is done later just before resuming the lwp,
         we just mark that the registers need updating.  */
      lwp->arch_private->debug_registers_changed = 1;

      /* If the lwp isn't stopped, force it to momentarily pause, so
         we can update its debug registers.  */
      if (!lwp->stopped)
        linux_stop_lwp (lwp);
    }

  return 0;
}

/* Update the inferior's debug register REGNUM from STATE.  */

void
i386_dr_low_set_addr (const struct i386_debug_reg_state *state, int regnum)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (get_thread_lwp (current_inferior));

  if (! (regnum >= 0 && regnum <= DR_LASTADDR - DR_FIRSTADDR))
    fatal ("Invalid debug register %d", regnum);

  find_inferior (&all_lwps, update_debug_registers_callback, &pid);
}

/* Return the inferior's debug register REGNUM.  */

CORE_ADDR
i386_dr_low_get_addr (int regnum)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);
  /* DR6 and DR7 are retrieved in some other way.  */
  gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);

  return x86_linux_dr_get (ptid, regnum);
}

/* Update the inferior's DR7 debug control register from STATE.  */

void
i386_dr_low_set_control (const struct i386_debug_reg_state *state)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (get_thread_lwp (current_inferior));

  find_inferior (&all_lwps, update_debug_registers_callback, &pid);
}

/* Return the inferior's DR7 debug control register.  */

unsigned
i386_dr_low_get_control (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  return x86_linux_dr_get (ptid, DR_CONTROL);
}

/* Get the value of the DR6 debug status register from the inferior
   and record it in STATE.  */

unsigned
i386_dr_low_get_status (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  return x86_linux_dr_get (ptid, DR_STATUS);
}

/* Breakpoint/Watchpoint support.  */

static int
x86_insert_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();
  switch (type)
    {
    case '0': /* software-breakpoint */
      {
        int ret;

        ret = prepare_to_access_memory ();
        if (ret)
          return -1;
        ret = set_gdb_breakpoint_at (addr);
        done_accessing_memory ();
        return ret;
      }
    case '1': /* hardware-breakpoint */
    case '2': /* write watchpoint */
    case '3': /* read watchpoint */
    case '4': /* access watchpoint */
      return i386_low_insert_watchpoint (&proc->private->arch_private->debug_reg_state,
                                         type, addr, len);

    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_remove_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();
  switch (type)
    {
    case '0': /* software-breakpoint */
      {
        int ret;

        ret = prepare_to_access_memory ();
        if (ret)
          return -1;
        ret = delete_gdb_breakpoint_at (addr);
        done_accessing_memory ();
        return ret;
      }
    case '1': /* hardware-breakpoint */
    case '2': /* write watchpoint */
    case '3': /* read watchpoint */
    case '4': /* access watchpoint */
      return i386_low_remove_watchpoint (&proc->private->arch_private->debug_reg_state,
                                         type, addr, len);
    default:
      /* Unsupported.  */
      return 1;
    }
}
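/* For illustration (not part of the original file): the TYPE characters
   handled by x86_insert_point and x86_remove_point mirror the GDB
   remote protocol's Z/z packets, e.g. "Z0,<addr>,<len>" requests a
   software breakpoint:

     Z0/z0 -> '0'  software breakpoint (int3 written to memory)
     Z1/z1 -> '1'  hardware breakpoint (debug registers)
     Z2/z2 -> '2'  write watchpoint
     Z3/z3 -> '3'  read watchpoint
     Z4/z4 -> '4'  access watchpoint
*/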
static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return i386_low_stopped_by_watchpoint (&proc->private->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (i386_low_stopped_data_address (&proc->private->arch_private->debug_reg_state,
                                     &addr))
    return addr;
  return 0;
}

/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = xcalloc (1, sizeof (*info));

  i386_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a new thread is detected.  */

static struct arch_lwp_info *
x86_linux_new_thread (void)
{
  struct arch_lwp_info *info = xcalloc (1, sizeof (*info));

  info->debug_registers_changed = 1;

  return info;
}

/* Called when resuming a thread.
   If the debug regs have changed, update the thread's copies.  */

static void
x86_linux_prepare_to_resume (struct lwp_info *lwp)
{
  ptid_t ptid = ptid_of (lwp);
  int clear_status = 0;

  if (lwp->arch_private->debug_registers_changed)
    {
      int i;
      int pid = ptid_get_pid (ptid);
      struct process_info *proc = find_process_pid (pid);
      struct i386_debug_reg_state *state
        = &proc->private->arch_private->debug_reg_state;

      for (i = DR_FIRSTADDR; i <= DR_LASTADDR; i++)
        if (state->dr_ref_count[i] > 0)
          {
            x86_linux_dr_set (ptid, i, state->dr_mirror[i]);

            /* If we're setting a watchpoint, any change the inferior
               had done itself to the debug registers needs to be
               discarded, otherwise, i386_low_stopped_data_address can
               get confused.  */
            clear_status = 1;
          }

      x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);

      lwp->arch_private->debug_registers_changed = 0;
    }

  if (clear_status || lwp->stopped_by_watchpoint)
    x86_linux_dr_set (ptid, DR_STATUS, 0);
}
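/* For illustration (not part of the original file): x86 provides four
   address registers DR0-DR3, a control register DR7 that enables each
   slot and encodes its length and read/write/execute type, and a
   status register DR6 that records which slot triggered.  That is why
   x86_linux_prepare_to_resume writes the mirrored addresses plus
   DR_CONTROL, and clears DR_STATUS whenever a stale status could
   confuse i386_low_stopped_data_address.  A rough sketch of enabling
   slot N in DR7, per the commonly documented bit layout:

     dr7 |= 1UL << (N * 2);                  // local-enable bit
     dr7 |= type_len_bits << (16 + N * 4);   // R/W and LEN fields
*/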
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* These types below (compat_*) define a siginfo type that is layout
   compatible with the siginfo type exported by the 32-bit userspace
   support.  */

#ifdef __x86_64__

typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;

struct compat_timeval
{
  compat_time_t tv_sec;
  int tv_usec;
};

typedef union compat_sigval
{
  compat_int_t sival_int;
  compat_uptr_t sival_ptr;
} compat_sigval_t;

typedef struct compat_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_clock_t _utime;
      compat_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_siginfo_t;

/* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes.  */
typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t;

typedef struct compat_x32_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_x32_clock_t _utime;
      compat_x32_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_x32_siginfo_t __attribute__ ((__aligned__ (8)));

#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd
/* glibc, at least up to 2.3.2, doesn't have si_timerid or si_overrun;
   in their place are si_timer1 and si_timer2.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif
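/* For illustration (not part of the original file): the cpt_si_* macros
   above flatten the compat union so the conversion routines below read
   naturally.  For example, with TO of type compat_siginfo_t:

     to->cpt_si_pid = from->si_pid;
     // expands to:
     to->_sifields._kill._pid = from->si_pid;
*/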
static void
compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_status = from->si_status;
          to->cpt_si_utime = from->si_utime;
          to->cpt_si_stime = from->si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->cpt_si_addr = (intptr_t) from->si_addr;
          break;
        case SIGPOLL:
          to->cpt_si_band = from->si_band;
          to->cpt_si_fd = from->si_fd;
          break;
        default:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_ptr = (intptr_t) from->si_ptr;
          break;
        }
    }
}

static void
siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_status = from->cpt_si_status;
          to->si_utime = from->cpt_si_utime;
          to->si_stime = from->cpt_si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
          break;
        case SIGPOLL:
          to->si_band = from->cpt_si_band;
          to->si_fd = from->cpt_si_fd;
          break;
        default:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
          break;
        }
    }
}

static void
compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t *to,
                                 siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_status = from->si_status;
          to->cpt_si_utime = from->si_utime;
          to->cpt_si_stime = from->si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->cpt_si_addr = (intptr_t) from->si_addr;
          break;
        case SIGPOLL:
          to->cpt_si_band = from->si_band;
          to->cpt_si_fd = from->si_fd;
          break;
        default:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_ptr = (intptr_t) from->si_ptr;
          break;
        }
    }
}

static void
siginfo_from_compat_x32_siginfo (siginfo_t *to,
                                 compat_x32_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_status = from->cpt_si_status;
          to->si_utime = from->cpt_si_utime;
          to->si_stime = from->cpt_si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
          break;
        case SIGPOLL:
          to->si_band = from->cpt_si_band;
          to->si_fd = from->cpt_si_fd;
          break;
        default:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
          break;
        }
    }
}

/* Is this process 64-bit?  */
static int linux_is_elf64;

#endif /* __x86_64__ */

/* Convert a native/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to NATIVE.  If DIRECTION is 0, copy from NATIVE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *native, void *inf, int direction)
{
#ifdef __x86_64__
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (register_size (0) == 4)
    {
      if (sizeof (siginfo_t) != sizeof (compat_siginfo_t))
        fatal ("unexpected difference in siginfo");

      if (direction == 0)
        compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
      else
        siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);

      return 1;
    }
  /* No fixup for native x32 GDB.  */
  else if (!linux_is_elf64 && sizeof (void *) == 8)
    {
      if (sizeof (siginfo_t) != sizeof (compat_x32_siginfo_t))
        fatal ("unexpected difference in siginfo");

      if (direction == 0)
        compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo *) inf,
                                         native);
      else
        siginfo_from_compat_x32_siginfo (native,
                                         (struct compat_x32_siginfo *) inf);

      return 1;
    }
#endif

  return 0;
}

static int use_xml;

/* Update gdbserver_xmltarget.  */

static void
x86_linux_update_xmltarget (void)
{
  int pid;
  struct regset_info *regset;
  static unsigned long long xcr0;
  static int have_ptrace_getregset = -1;
#if !defined(__x86_64__) && defined(HAVE_PTRACE_GETFPXREGS)
  static int have_ptrace_getfpxregs = -1;
#endif

  if (!current_inferior)
    return;

  /* Before changing the register cache internal layout or the target
     regsets, flush the contents of the current valid caches back to
     the threads.  */
  regcache_invalidate ();

  pid = pid_of (get_thread_lwp (current_inferior));
#ifdef __x86_64__
  if (num_xmm_registers == 8)
    init_registers_i386_linux ();
  else if (linux_is_elf64)
    init_registers_amd64_linux ();
  else
    init_registers_x32_linux ();
#else
  {
# ifdef HAVE_PTRACE_GETFPXREGS
    if (have_ptrace_getfpxregs == -1)
      {
        elf_fpxregset_t fpxregs;

        if (ptrace (PTRACE_GETFPXREGS, pid, 0, (int) &fpxregs) < 0)
          {
            have_ptrace_getfpxregs = 0;
            x86_xcr0 = I386_XSTATE_X87_MASK;

            /* Disable PTRACE_GETFPXREGS.  */
            for (regset = target_regsets;
                 regset->fill_function != NULL; regset++)
              if (regset->get_request == PTRACE_GETFPXREGS)
                {
                  regset->size = 0;
                  break;
                }
          }
        else
          have_ptrace_getfpxregs = 1;
      }

    if (!have_ptrace_getfpxregs)
      {
        init_registers_i386_mmx_linux ();
        return;
      }
# endif
    init_registers_i386_linux ();
  }
#endif

  if (!use_xml)
    {
      /* Don't use XML.  */
#ifdef __x86_64__
      if (num_xmm_registers == 8)
        gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
      else
        gdbserver_xmltarget = xmltarget_amd64_linux_no_xml;
#else
      gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
#endif

      x86_xcr0 = I386_XSTATE_SSE_MASK;

      return;
    }

  /* Check if XSAVE extended state is supported.  */
  if (have_ptrace_getregset == -1)
    {
      unsigned long long xstateregs[I386_XSTATE_SSE_SIZE / sizeof (long long)];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, pid, (unsigned int) NT_X86_XSTATE,
                  &iov) < 0)
        {
          have_ptrace_getregset = 0;
          return;
        }
      else
        have_ptrace_getregset = 1;

      /* Get XCR0 from XSAVE extended state at byte 464.  */
      xcr0 = xstateregs[464 / sizeof (long long)];

      /* Use PTRACE_GETREGSET if it is available.  */
      for (regset = target_regsets;
           regset->fill_function != NULL; regset++)
        if (regset->get_request == PTRACE_GETREGSET)
          regset->size = I386_XSTATE_SIZE (xcr0);
        else if (regset->type != GENERAL_REGS)
          regset->size = 0;
    }

  if (have_ptrace_getregset)
    {
      /* AVX is the highest feature we support.  */
      if ((xcr0 & I386_XSTATE_AVX_MASK) == I386_XSTATE_AVX_MASK)
        {
          x86_xcr0 = xcr0;

#ifdef __x86_64__
          /* I386 has 8 xmm regs.  */
          if (num_xmm_registers == 8)
            init_registers_i386_avx_linux ();
          else if (linux_is_elf64)
            init_registers_amd64_avx_linux ();
          else
            init_registers_x32_avx_linux ();
#else
          init_registers_i386_avx_linux ();
#endif
        }
    }
}
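/* For illustration (not part of the original file): XCR0 is a bit mask
   of enabled x86 extended-state features; in i386-xstate.h of this era
   the x87 FPU state is bit 0, SSE (%xmm) bit 1 and AVX (%ymm) bit 2,
   so the masks used above are cumulative:

     I386_XSTATE_X87_MASK == 0x1   // x87 only
     I386_XSTATE_SSE_MASK == 0x3   // x87 + SSE
     I386_XSTATE_AVX_MASK == 0x7   // x87 + SSE + AVX

   The kernel stashes a copy of XCR0 in the software-reserved bytes of
   the XSAVE buffer (hence the read "at byte 464" above), which lets
   gdbserver discover the feature set without executing XGETBV in the
   inferior.  */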
/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (const char *query)
{
  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  if (query != NULL && strncmp (query, "xmlRegisters=", 13) == 0)
    {
      char *copy = xstrdup (query + 13);
      char *p;

      for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
        {
          if (strcmp (p, "i386") == 0)
            {
              use_xml = 1;
              break;
            }
        }

      free (copy);
    }

  x86_linux_update_xmltarget ();
}

/* Initialize gdbserver for the architecture of the inferior.  */

static void
x86_arch_setup (void)
{
  int pid = pid_of (get_thread_lwp (current_inferior));
  unsigned int machine;
  int is_elf64 = linux_pid_exe_is_elf_64_file (pid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
        error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
        error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#ifdef __x86_64__
  if (is_elf64 < 0)
    {
      /* This can only happen if /proc/<pid>/exe is unreadable,
         but "that can't happen" if we've gotten this far.
         Fall through and assume this is a 32-bit program.  */
    }
  else if (machine == EM_X86_64)
    {
      /* Amd64 doesn't have HAVE_LINUX_USRREGS.  */
      the_low_target.num_regs = -1;
      the_low_target.regmap = NULL;
      the_low_target.cannot_fetch_register = NULL;
      the_low_target.cannot_store_register = NULL;

      /* Amd64 has 16 xmm regs.  */
      num_xmm_registers = 16;

      linux_is_elf64 = is_elf64;
      x86_linux_update_xmltarget ();
      return;
    }

  linux_is_elf64 = 0;
#endif

  /* Ok we have a 32-bit inferior.  */

  the_low_target.num_regs = I386_NUM_REGS;
  the_low_target.regmap = i386_regmap;
  the_low_target.cannot_fetch_register = i386_cannot_fetch_register;
  the_low_target.cannot_store_register = i386_cannot_store_register;

  /* I386 has 8 xmm regs.  */
  num_xmm_registers = 8;

  x86_linux_update_xmltarget ();
}

static int
x86_supports_tracepoints (void)
{
  return 1;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
        break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
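/* For illustration (not part of the original file): push_opcode parses
   a string of hexadecimal byte values and appends them to BUF,
   returning the number of bytes written.  It lets the jump-pad
   builders below write instruction encodings legibly, e.g.:

     unsigned char buf[16];
     int n = push_opcode (buf, "48 89 e6");   // mov %rsp,%rsi
     // n == 3, buf[0..2] == { 0x48, 0x89, 0xe6 }
*/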
#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad into
   JJUMP_PAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                        CORE_ADDR collector,
                                        CORE_ADDR lockaddr,
                                        ULONGEST orig_size,
                                        CORE_ADDR *jump_entry,
                                        CORE_ADDR *trampoline,
                                        ULONGEST *trampoline_size,
                                        unsigned char *jjump_pad_insn,
                                        ULONGEST *jjump_pad_insn_size,
                                        CORE_ADDR *adjusted_insn_addr,
                                        CORE_ADDR *adjusted_insn_addr_end,
                                        char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movl <addr>,%rdi */
  buf[i++] = 0xbf;
  *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
  i += sizeof (unsigned long);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
                    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be"); /* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf"); /* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);
  /* The collector function lives in the shared library, so it may be
     more than 31 bits away from the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */
  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
               "E.Jump back from jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
               "E.Jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad into
   JJUMP_PAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */
static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                       CORE_ADDR collector,
                                       CORE_ADDR lockaddr,
                                       ULONGEST orig_size,
                                       CORE_ADDR *jump_entry,
                                       CORE_ADDR *trampoline,
                                       ULONGEST *trampoline_size,
                                       unsigned char *jjump_pad_insn,
                                       ULONGEST *jjump_pad_insn_size,
                                       CORE_ADDR *adjusted_insn_addr,
                                       CORE_ADDR *adjusted_insn_addr_end,
                                       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);
  /* spin-lock.  Note this uses cmpxchg, which the original i386
     (pre-486) lacks.  If we cared about that, this could use xchg
     instead.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
                                                %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);

  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
        {
          /* No trampoline space available.  */
          strcpy (err,
                  "E.Cannot allocate trampoline space needed for fast "
                  "tracepoints on 4-byte instructions.");
          return 1;
        }

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      write_inferior_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                      CORE_ADDR collector,
                                      CORE_ADDR lockaddr,
                                      ULONGEST orig_size,
                                      CORE_ADDR *jump_entry,
                                      CORE_ADDR *trampoline,
                                      ULONGEST *trampoline_size,
                                      unsigned char *jjump_pad_insn,
                                      ULONGEST *jjump_pad_insn_size,
                                      CORE_ADDR *adjusted_insn_addr,
                                      CORE_ADDR *adjusted_insn_addr_end,
                                      char *err)
{
#ifdef __x86_64__
  if (register_size (0) == 8)
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                   collector, lockaddr,
                                                   orig_size, jump_entry,
                                                   trampoline, trampoline_size,
                                                   jjump_pad_insn,
                                                   jjump_pad_insn_size,
                                                   adjusted_insn_addr,
                                                   adjusted_insn_addr_end,
                                                   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                collector, lockaddr,
                                                orig_size, jump_entry,
                                                trampoline, trampoline_size,
                                                jjump_pad_insn,
                                                jjump_pad_insn_size,
                                                adjusted_insn_addr,
                                                adjusted_insn_addr_end,
                                                err);
}

/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (register_size (0) == 8)
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
         with a 2-byte offset may be used, otherwise 5-byte jump instructions
         with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
        return 4;
      else
        {
          /* GDB has no channel to explain to user why a shorter fast
             tracepoint is not possible, but at least make GDBserver
             mention that something has gone awry.  */
          if (!warned_about_fast_tracepoints)
            {
              warning ("4-byte fast tracepoints not available; %s\n", errbuf);
              warned_about_fast_tracepoints = 1;
            }
          return 5;
        }
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
         has not loaded yet.  */
      return 0;
    }
}

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    fprintf (stderr, "Adding %d bytes of insn at %s\n",
             len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
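/* For illustration (not part of the original file): EMIT_ASM works by
   assembling INSNS into the gdbserver binary itself, bracketed by two
   labels, then copying the bytes between the labels into the inferior
   at current_insn_ptr.  Conceptually:

     EMIT_ASM (incr, "incl (%rdi)");
     // expands to roughly:
     //   extern unsigned char start_incr, end_incr;
     //   add_insns (&start_incr, &end_incr - &start_incr);
     //   __asm__ ("jmp end_incr\n"
     //            "start_incr:\t" "incl (%rdi)" "\n"
     //            "end_incr:");

   The leading jmp keeps the emitted template from being executed when
   the surrounding gdbserver function itself runs.  */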
#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
            "pushq %rbp\n\t"
            "movq %rsp,%rbp\n\t"
            "sub $0x20,%rsp\n\t"
            "movq %rdi,-8(%rbp)\n\t"
            "movq %rsi,-16(%rbp)");
}

static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
            "…
