/thirdparty/breakpad/client/mac/handler/minidump_generator.cc

http://github.com/tomahawk-player/tomahawk · C++ · 1432 lines · 1093 code · 228 blank · 111 comment · 143 complexity · 9e695af4ab35fef4731abc0fd7c44f67 MD5 · raw file

  1. // Copyright (c) 2006, Google Inc.
  2. // All rights reserved.
  3. //
  4. // Redistribution and use in source and binary forms, with or without
  5. // modification, are permitted provided that the following conditions are
  6. // met:
  7. //
  8. // * Redistributions of source code must retain the above copyright
  9. // notice, this list of conditions and the following disclaimer.
  10. // * Redistributions in binary form must reproduce the above
  11. // copyright notice, this list of conditions and the following disclaimer
  12. // in the documentation and/or other materials provided with the
  13. // distribution.
  14. // * Neither the name of Google Inc. nor the names of its
  15. // contributors may be used to endorse or promote products derived from
  16. // this software without specific prior written permission.
  17. //
  18. // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  19. // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  20. // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  21. // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  22. // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  23. // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  24. // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  25. // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  26. // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  27. // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  28. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  29. #include <algorithm>
  30. #include <cstdio>
  31. #include <mach/host_info.h>
  32. #include <mach/vm_statistics.h>
  33. #include <mach-o/dyld.h>
  34. #include <mach-o/loader.h>
  35. #include <sys/sysctl.h>
  36. #include <sys/resource.h>
  37. #include <CoreFoundation/CoreFoundation.h>
  38. #include "client/mac/handler/minidump_generator.h"
  39. #ifdef HAS_ARM_SUPPORT
  40. #include <mach/arm/thread_status.h>
  41. #endif
  42. #ifdef HAS_PPC_SUPPORT
  43. #include <mach/ppc/thread_status.h>
  44. #endif
  45. #ifdef HAS_X86_SUPPORT
  46. #include <mach/i386/thread_status.h>
  47. #endif
  48. #include "client/minidump_file_writer-inl.h"
  49. #include "common/mac/file_id.h"
  50. #include "common/mac/macho_id.h"
  51. #include "common/mac/string_utilities.h"
  52. using MacStringUtils::ConvertToString;
  53. using MacStringUtils::IntegerValueAtIndex;
  54. namespace google_breakpad {
  55. #if __LP64__
  56. #define LC_SEGMENT_ARCH LC_SEGMENT_64
  57. #else
  58. #define LC_SEGMENT_ARCH LC_SEGMENT
  59. #endif
// Constructor used when generating a dump from within the crashed
// process itself: the target task and handler thread default to the
// current task/thread, the CPU type is the native one, and no
// DynamicImages helper is created (module/stack data is read in-process).
MinidumpGenerator::MinidumpGenerator()
    : writer_(),
      exception_type_(0),
      exception_code_(0),
      exception_subcode_(0),
      exception_thread_(0),
      crashing_task_(mach_task_self()),
      handler_thread_(mach_thread_self()),
      cpu_type_(DynamicImages::GetNativeCPUType()),
      dynamic_images_(NULL),
      memory_blocks_(&allocator_) {
  // Cache the OS version/build info once, up front (static, idempotent).
  GatherSystemInformation();
}
  74. // constructor when generating from a different process than the
  75. // crashed process
  76. MinidumpGenerator::MinidumpGenerator(mach_port_t crashing_task,
  77. mach_port_t handler_thread)
  78. : writer_(),
  79. exception_type_(0),
  80. exception_code_(0),
  81. exception_subcode_(0),
  82. exception_thread_(0),
  83. crashing_task_(crashing_task),
  84. handler_thread_(handler_thread),
  85. cpu_type_(DynamicImages::GetNativeCPUType()),
  86. dynamic_images_(NULL),
  87. memory_blocks_(&allocator_) {
  88. if (crashing_task != mach_task_self()) {
  89. dynamic_images_ = new DynamicImages(crashing_task_);
  90. cpu_type_ = dynamic_images_->GetCPUType();
  91. } else {
  92. dynamic_images_ = NULL;
  93. cpu_type_ = DynamicImages::GetNativeCPUType();
  94. }
  95. GatherSystemInformation();
  96. }
// Releases the out-of-process image list.  dynamic_images_ is NULL when
// dumping the current task, in which case this is a no-op.
MinidumpGenerator::~MinidumpGenerator() {
  delete dynamic_images_;
}
// Static storage for the OS version/build information gathered once by
// GatherSystemInformation() and shared by all generator instances.
char MinidumpGenerator::build_string_[16];
int MinidumpGenerator::os_major_version_ = 0;
int MinidumpGenerator::os_minor_version_ = 0;
int MinidumpGenerator::os_build_number_ = 0;
// static
// Reads /System/Library/CoreServices/SystemVersion.plist and caches the
// OS build string ("ProductBuildVersion") and the dotted version number
// ("ProductVersion", e.g. "10.4.8") into the static members above.
// Safe to call repeatedly; returns immediately once populated.  Every CF
// object obtained here follows the Create rule and is CFRelease'd on
// every exit path.
void MinidumpGenerator::GatherSystemInformation() {
  // If this is non-zero, then we've already gathered the information
  if (os_major_version_)
    return;

  // This code extracts the version and build information from the OS
  CFStringRef vers_path =
      CFSTR("/System/Library/CoreServices/SystemVersion.plist");
  CFURLRef sys_vers =
      CFURLCreateWithFileSystemPath(NULL,
                                    vers_path,
                                    kCFURLPOSIXPathStyle,
                                    false);
  CFDataRef data;
  SInt32 error;
  CFURLCreateDataAndPropertiesFromResource(NULL, sys_vers, &data, NULL, NULL,
                                           &error);

  if (!data) {
    CFRelease(sys_vers);
    return;
  }

  // Parse the raw plist bytes into an immutable dictionary.
  CFDictionaryRef list = static_cast<CFDictionaryRef>
    (CFPropertyListCreateFromXMLData(NULL, data, kCFPropertyListImmutable,
                                     NULL));
  if (!list) {
    CFRelease(sys_vers);
    CFRelease(data);
    return;
  }

  // Get rule: these values are owned by |list| and must not be released.
  CFStringRef build_version = static_cast<CFStringRef>
    (CFDictionaryGetValue(list, CFSTR("ProductBuildVersion")));
  CFStringRef product_version = static_cast<CFStringRef>
    (CFDictionaryGetValue(list, CFSTR("ProductVersion")));
  string build_str = ConvertToString(build_version);
  string product_str = ConvertToString(product_version);

  CFRelease(list);
  CFRelease(sys_vers);
  CFRelease(data);

  // build_string_ is a fixed 16-byte buffer; strlcpy truncates safely.
  strlcpy(build_string_, build_str.c_str(), sizeof(build_string_));

  // Parse the string that looks like "10.4.8"
  os_major_version_ = IntegerValueAtIndex(product_str, 0);
  os_minor_version_ = IntegerValueAtIndex(product_str, 1);
  os_build_number_ = IntegerValueAtIndex(product_str, 2);
}
  148. string MinidumpGenerator::UniqueNameInDirectory(const string &dir,
  149. string *unique_name) {
  150. CFUUIDRef uuid = CFUUIDCreate(NULL);
  151. CFStringRef uuid_cfstr = CFUUIDCreateString(NULL, uuid);
  152. CFRelease(uuid);
  153. string file_name(ConvertToString(uuid_cfstr));
  154. CFRelease(uuid_cfstr);
  155. string path(dir);
  156. // Ensure that the directory (if non-empty) has a trailing slash so that
  157. // we can append the file name and have a valid pathname.
  158. if (!dir.empty()) {
  159. if (dir.at(dir.size() - 1) != '/')
  160. path.append(1, '/');
  161. }
  162. path.append(file_name);
  163. path.append(".dmp");
  164. if (unique_name)
  165. *unique_name = file_name;
  166. return path;
  167. }
  168. bool MinidumpGenerator::Write(const char *path) {
  169. WriteStreamFN writers[] = {
  170. &MinidumpGenerator::WriteThreadListStream,
  171. &MinidumpGenerator::WriteMemoryListStream,
  172. &MinidumpGenerator::WriteSystemInfoStream,
  173. &MinidumpGenerator::WriteModuleListStream,
  174. &MinidumpGenerator::WriteMiscInfoStream,
  175. &MinidumpGenerator::WriteBreakpadInfoStream,
  176. // Exception stream needs to be the last entry in this array as it may
  177. // be omitted in the case where the minidump is written without an
  178. // exception.
  179. &MinidumpGenerator::WriteExceptionStream,
  180. };
  181. bool result = false;
  182. // If opening was successful, create the header, directory, and call each
  183. // writer. The destructor for the TypedMDRVAs will cause the data to be
  184. // flushed. The destructor for the MinidumpFileWriter will close the file.
  185. if (writer_.Open(path)) {
  186. TypedMDRVA<MDRawHeader> header(&writer_);
  187. TypedMDRVA<MDRawDirectory> dir(&writer_);
  188. if (!header.Allocate())
  189. return false;
  190. int writer_count = static_cast<int>(sizeof(writers) / sizeof(writers[0]));
  191. // If we don't have exception information, don't write out the
  192. // exception stream
  193. if (!exception_thread_ && !exception_type_)
  194. --writer_count;
  195. // Add space for all writers
  196. if (!dir.AllocateArray(writer_count))
  197. return false;
  198. MDRawHeader *header_ptr = header.get();
  199. header_ptr->signature = MD_HEADER_SIGNATURE;
  200. header_ptr->version = MD_HEADER_VERSION;
  201. time(reinterpret_cast<time_t *>(&(header_ptr->time_date_stamp)));
  202. header_ptr->stream_count = writer_count;
  203. header_ptr->stream_directory_rva = dir.position();
  204. MDRawDirectory local_dir;
  205. result = true;
  206. for (int i = 0; (result) && (i < writer_count); ++i) {
  207. result = (this->*writers[i])(&local_dir);
  208. if (result)
  209. dir.CopyIndex(i, &local_dir);
  210. }
  211. }
  212. return result;
  213. }
// Computes how many bytes of stack lie at or above |start_addr| (the
// thread's stack pointer) within the enclosing VM region(s) of the
// crashing task.  Returns 0 for a NULL stack pointer, for a kernel
// lookup failure, or when the stack pointer lies below the region the
// kernel found (a sign of stack corruption).
size_t MinidumpGenerator::CalculateStackSize(mach_vm_address_t start_addr) {
  mach_vm_address_t stack_region_base = start_addr;
  mach_vm_size_t stack_region_size;
  natural_t nesting_level = 0;
  vm_region_submap_info_64 submap_info;
  mach_msg_type_number_t info_count = VM_REGION_SUBMAP_INFO_COUNT_64;
  vm_region_recurse_info_t region_info;
  region_info = reinterpret_cast<vm_region_recurse_info_t>(&submap_info);

  if (start_addr == 0) {
    return 0;
  }

  // mach_vm_region_recurse rounds stack_region_base up to the start of
  // the containing (or next higher) region and returns its size.
  kern_return_t result =
    mach_vm_region_recurse(crashing_task_, &stack_region_base,
                           &stack_region_size, &nesting_level,
                           region_info, &info_count);

  if (result != KERN_SUCCESS || start_addr < stack_region_base) {
    // Failure or stack corruption, since mach_vm_region had to go
    // higher in the process address space to find a valid region.
    return 0;
  }

  unsigned int tag = submap_info.user_tag;

  // If the user tag is VM_MEMORY_STACK, look for more readable regions with
  // the same tag placed immediately above the computed stack region. Under
  // some circumstances, the stack for thread 0 winds up broken up into
  // multiple distinct abutting regions. This can happen for several reasons,
  // including user code that calls setrlimit(RLIMIT_STACK, ...) or changes
  // the access on stack pages by calling mprotect.
  if (tag == VM_MEMORY_STACK) {
    while (true) {
      mach_vm_address_t next_region_base = stack_region_base +
                                           stack_region_size;
      mach_vm_address_t proposed_next_region_base = next_region_base;
      mach_vm_size_t next_region_size;
      nesting_level = 0;
      // Fresh in/out count for each query; the kernel updates it in place.
      mach_msg_type_number_t info_count = VM_REGION_SUBMAP_INFO_COUNT_64;
      result = mach_vm_region_recurse(crashing_task_, &next_region_base,
                                      &next_region_size, &nesting_level,
                                      region_info, &info_count);
      // Stop coalescing when the next region is not exactly adjacent,
      // is not stack memory, or is not readable.
      if (result != KERN_SUCCESS ||
          next_region_base != proposed_next_region_base ||
          submap_info.user_tag != tag ||
          (submap_info.protection & VM_PROT_READ) == 0) {
        break;
      }

      stack_region_size += next_region_size;
    }
  }

  return stack_region_base + stack_region_size - start_addr;
}
// Copies the thread's stack, from |start_addr| to the top of its VM
// region, into the dump and fills in |stack_location| with where it
// landed.  If the stack size cannot be determined (NULL or corrupt
// stack pointer), a recognizable 16-byte 0xDEADBEEF placeholder is
// written instead so the thread still appears in the dump.
bool MinidumpGenerator::WriteStackFromStartAddress(
    mach_vm_address_t start_addr,
    MDMemoryDescriptor *stack_location) {
  UntypedMDRVA memory(&writer_);

  bool result = false;
  size_t size = CalculateStackSize(start_addr);

  if (size == 0) {
    // In some situations the stack address for the thread can come back 0.
    // In these cases we skip over the threads in question and stuff the
    // stack with a clearly borked value.
    start_addr = 0xDEADBEEF;
    size = 16;
    if (!memory.Allocate(size))
      return false;

    unsigned long long dummy_stack[2];  // Fill dummy stack with 16 bytes of
                                        // junk.
    dummy_stack[0] = 0xDEADBEEF;
    dummy_stack[1] = 0xDEADBEEF;

    result = memory.Copy(dummy_stack, size);
  } else {
    if (!memory.Allocate(size))
      return false;

    if (dynamic_images_) {
      // Out-of-process: pull the stack bytes over from the remote task
      // before copying them into the dump.
      vector<uint8_t> stack_memory;
      if (ReadTaskMemory(crashing_task_,
                         start_addr,
                         size,
                         stack_memory) != KERN_SUCCESS) {
        return false;
      }

      result = memory.Copy(&stack_memory[0], size);
    } else {
      // In-process: the stack is directly addressable.
      result = memory.Copy(reinterpret_cast<const void *>(start_addr), size);
    }
  }

  stack_location->start_of_memory_range = start_addr;
  stack_location->memory = memory.location();

  return result;
}
// Dispatches stack capture to the register-layout-specific helper for
// the target CPU type.  |state| is the raw buffer previously filled by
// GetThreadState(); on success |stack_location| describes the stack
// bytes written into the dump.  Returns false for CPU types that were
// compiled out of this build.
bool MinidumpGenerator::WriteStack(breakpad_thread_state_data_t state,
                                   MDMemoryDescriptor *stack_location) {
  switch (cpu_type_) {
#ifdef HAS_ARM_SUPPORT
    case CPU_TYPE_ARM:
      return WriteStackARM(state, stack_location);
#endif
#ifdef HAS_PPC_SUPPORT
    case CPU_TYPE_POWERPC:
      return WriteStackPPC(state, stack_location);
    case CPU_TYPE_POWERPC64:
      return WriteStackPPC64(state, stack_location);
#endif
#ifdef HAS_X86_SUPPORT
    case CPU_TYPE_I386:
      return WriteStackX86(state, stack_location);
    case CPU_TYPE_X86_64:
      return WriteStackX86_64(state, stack_location);
#endif
    default:
      return false;
  }
}
// Dispatches CPU-context writing to the architecture-specific helper.
// On success |register_location| points at the MDRawContext* record
// written into the dump.  Returns false for CPU types that were
// compiled out of this build.
bool MinidumpGenerator::WriteContext(breakpad_thread_state_data_t state,
                                     MDLocationDescriptor *register_location) {
  switch (cpu_type_) {
#ifdef HAS_ARM_SUPPORT
    case CPU_TYPE_ARM:
      return WriteContextARM(state, register_location);
#endif
#ifdef HAS_PPC_SUPPORT
    case CPU_TYPE_POWERPC:
      return WriteContextPPC(state, register_location);
    case CPU_TYPE_POWERPC64:
      return WriteContextPPC64(state, register_location);
#endif
#ifdef HAS_X86_SUPPORT
    case CPU_TYPE_I386:
      return WriteContextX86(state, register_location);
    case CPU_TYPE_X86_64:
      return WriteContextX86_64(state, register_location);
#endif
    default:
      return false;
  }
}
  348. u_int64_t MinidumpGenerator::CurrentPCForStack(
  349. breakpad_thread_state_data_t state) {
  350. switch (cpu_type_) {
  351. #ifdef HAS_ARM_SUPPORT
  352. case CPU_TYPE_ARM:
  353. return CurrentPCForStackARM(state);
  354. #endif
  355. #ifdef HAS_PPC_SUPPORT
  356. case CPU_TYPE_POWERPC:
  357. return CurrentPCForStackPPC(state);
  358. case CPU_TYPE_POWERPC64:
  359. return CurrentPCForStackPPC64(state);
  360. #endif
  361. #ifdef HAS_X86_SUPPORT
  362. case CPU_TYPE_I386:
  363. return CurrentPCForStackX86(state);
  364. case CPU_TYPE_X86_64:
  365. return CurrentPCForStackX86_64(state);
  366. #endif
  367. default:
  368. assert("Unknown CPU type!");
  369. return 0;
  370. }
  371. }
  372. #ifdef HAS_ARM_SUPPORT
  373. bool MinidumpGenerator::WriteStackARM(breakpad_thread_state_data_t state,
  374. MDMemoryDescriptor *stack_location) {
  375. arm_thread_state_t *machine_state =
  376. reinterpret_cast<arm_thread_state_t *>(state);
  377. mach_vm_address_t start_addr = REGISTER_FROM_THREADSTATE(machine_state, sp);
  378. return WriteStackFromStartAddress(start_addr, stack_location);
  379. }
  380. u_int64_t
  381. MinidumpGenerator::CurrentPCForStackARM(breakpad_thread_state_data_t state) {
  382. arm_thread_state_t *machine_state =
  383. reinterpret_cast<arm_thread_state_t *>(state);
  384. return REGISTER_FROM_THREADSTATE(machine_state, pc);
  385. }
// Writes an MDRawContextARM record for the thread.  General-purpose
// registers r0-r12 are copied via the AddGPR macro; sp/lr/pc map to
// iregs[13..15] per the layout MDRawContextARM expects, and cpsr is
// copied alongside.
bool MinidumpGenerator::WriteContextARM(breakpad_thread_state_data_t state,
                                        MDLocationDescriptor *register_location)
{
  TypedMDRVA<MDRawContextARM> context(&writer_);
  arm_thread_state_t *machine_state =
      reinterpret_cast<arm_thread_state_t *>(state);

  if (!context.Allocate())
    return false;
  *register_location = context.location();
  MDRawContextARM *context_ptr = context.get();
  context_ptr->context_flags = MD_CONTEXT_ARM_FULL;

// Copies GPR |a| from the thread state's r[] array into iregs[a].
#define AddGPR(a) context_ptr->iregs[a] = REGISTER_FROM_THREADSTATE(machine_state, r[a])

  context_ptr->iregs[13] = REGISTER_FROM_THREADSTATE(machine_state, sp);
  context_ptr->iregs[14] = REGISTER_FROM_THREADSTATE(machine_state, lr);
  context_ptr->iregs[15] = REGISTER_FROM_THREADSTATE(machine_state, pc);
  context_ptr->cpsr = REGISTER_FROM_THREADSTATE(machine_state, cpsr);

  AddGPR(0);
  AddGPR(1);
  AddGPR(2);
  AddGPR(3);
  AddGPR(4);
  AddGPR(5);
  AddGPR(6);
  AddGPR(7);
  AddGPR(8);
  AddGPR(9);
  AddGPR(10);
  AddGPR(11);
  AddGPR(12);
#undef AddReg  // harmless: AddReg is never defined in this function
#undef AddGPR
  return true;
}
  419. #endif
  420. #ifdef HAS_PCC_SUPPORT
  421. bool MinidumpGenerator::WriteStackPPC(breakpad_thread_state_data_t state,
  422. MDMemoryDescriptor *stack_location) {
  423. ppc_thread_state_t *machine_state =
  424. reinterpret_cast<ppc_thread_state_t *>(state);
  425. mach_vm_address_t start_addr = REGISTER_FROM_THREADSTATE(machine_state, r1);
  426. return WriteStackFromStartAddress(start_addr, stack_location);
  427. }
  428. bool MinidumpGenerator::WriteStackPPC64(breakpad_thread_state_data_t state,
  429. MDMemoryDescriptor *stack_location) {
  430. ppc_thread_state64_t *machine_state =
  431. reinterpret_cast<ppc_thread_state64_t *>(state);
  432. mach_vm_address_t start_addr = REGISTER_FROM_THREADSTATE(machine_state, r1);
  433. return WriteStackFromStartAddress(start_addr, stack_location);
  434. }
  435. u_int64_t
  436. MinidumpGenerator::CurrentPCForStackPPC(breakpad_thread_state_data_t state) {
  437. ppc_thread_state_t *machine_state =
  438. reinterpret_cast<ppc_thread_state_t *>(state);
  439. return REGISTER_FROM_THREADSTATE(machine_state, srr0);
  440. }
  441. u_int64_t
  442. MinidumpGenerator::CurrentPCForStackPPC64(breakpad_thread_state_data_t state) {
  443. ppc_thread_state64_t *machine_state =
  444. reinterpret_cast<ppc_thread_state64_t *>(state);
  445. return REGISTER_FROM_THREADSTATE(machine_state, srr0);
  446. }
// Writes an MDRawContextPPC for a 32-bit PowerPC thread.  AddReg copies
// a same-named special register; AddGPR token-pastes the index onto "r"
// to copy general-purpose register rN into gpr[N].
bool MinidumpGenerator::WriteContextPPC(breakpad_thread_state_data_t state,
                                        MDLocationDescriptor *register_location)
{
  TypedMDRVA<MDRawContextPPC> context(&writer_);
  ppc_thread_state_t *machine_state =
      reinterpret_cast<ppc_thread_state_t *>(state);

  if (!context.Allocate())
    return false;
  *register_location = context.location();
  MDRawContextPPC *context_ptr = context.get();
  context_ptr->context_flags = MD_CONTEXT_PPC_BASE;

#define AddReg(a) context_ptr->a = REGISTER_FROM_THREADSTATE(machine_state, a)
#define AddGPR(a) context_ptr->gpr[a] = REGISTER_FROM_THREADSTATE(machine_state, r ## a)

  AddReg(srr0);
  AddReg(cr);
  AddReg(xer);
  AddReg(ctr);
  AddReg(lr);
  AddReg(vrsave);

  AddGPR(0);
  AddGPR(1);
  AddGPR(2);
  AddGPR(3);
  AddGPR(4);
  AddGPR(5);
  AddGPR(6);
  AddGPR(7);
  AddGPR(8);
  AddGPR(9);
  AddGPR(10);
  AddGPR(11);
  AddGPR(12);
  AddGPR(13);
  AddGPR(14);
  AddGPR(15);
  AddGPR(16);
  AddGPR(17);
  AddGPR(18);
  AddGPR(19);
  AddGPR(20);
  AddGPR(21);
  AddGPR(22);
  AddGPR(23);
  AddGPR(24);
  AddGPR(25);
  AddGPR(26);
  AddGPR(27);
  AddGPR(28);
  AddGPR(29);
  AddGPR(30);
  AddGPR(31);
  // mq exists only on the 32-bit PPC state (601-era register).
  AddReg(mq);
#undef AddReg
#undef AddGPR
  return true;
}
// Writes an MDRawContextPPC64 for a 64-bit PowerPC thread.  Same macro
// scheme as WriteContextPPC; the 64-bit state has no mq register.
bool MinidumpGenerator::WriteContextPPC64(
    breakpad_thread_state_data_t state,
    MDLocationDescriptor *register_location) {
  TypedMDRVA<MDRawContextPPC64> context(&writer_);
  ppc_thread_state64_t *machine_state =
      reinterpret_cast<ppc_thread_state64_t *>(state);

  if (!context.Allocate())
    return false;
  *register_location = context.location();
  MDRawContextPPC64 *context_ptr = context.get();
  context_ptr->context_flags = MD_CONTEXT_PPC_BASE;

#define AddReg(a) context_ptr->a = REGISTER_FROM_THREADSTATE(machine_state, a)
#define AddGPR(a) context_ptr->gpr[a] = REGISTER_FROM_THREADSTATE(machine_state, r ## a)

  AddReg(srr0);
  AddReg(cr);
  AddReg(xer);
  AddReg(ctr);
  AddReg(lr);
  AddReg(vrsave);

  AddGPR(0);
  AddGPR(1);
  AddGPR(2);
  AddGPR(3);
  AddGPR(4);
  AddGPR(5);
  AddGPR(6);
  AddGPR(7);
  AddGPR(8);
  AddGPR(9);
  AddGPR(10);
  AddGPR(11);
  AddGPR(12);
  AddGPR(13);
  AddGPR(14);
  AddGPR(15);
  AddGPR(16);
  AddGPR(17);
  AddGPR(18);
  AddGPR(19);
  AddGPR(20);
  AddGPR(21);
  AddGPR(22);
  AddGPR(23);
  AddGPR(24);
  AddGPR(25);
  AddGPR(26);
  AddGPR(27);
  AddGPR(28);
  AddGPR(29);
  AddGPR(30);
  AddGPR(31);
#undef AddReg
#undef AddGPR
  return true;
}
  558. #endif
  559. #ifdef HAS_X86_SUPPORT
  560. bool MinidumpGenerator::WriteStackX86(breakpad_thread_state_data_t state,
  561. MDMemoryDescriptor *stack_location) {
  562. i386_thread_state_t *machine_state =
  563. reinterpret_cast<i386_thread_state_t *>(state);
  564. mach_vm_address_t start_addr = REGISTER_FROM_THREADSTATE(machine_state, esp);
  565. return WriteStackFromStartAddress(start_addr, stack_location);
  566. }
  567. bool MinidumpGenerator::WriteStackX86_64(breakpad_thread_state_data_t state,
  568. MDMemoryDescriptor *stack_location) {
  569. x86_thread_state64_t *machine_state =
  570. reinterpret_cast<x86_thread_state64_t *>(state);
  571. mach_vm_address_t start_addr = REGISTER_FROM_THREADSTATE(machine_state, rsp);
  572. return WriteStackFromStartAddress(start_addr, stack_location);
  573. }
  574. u_int64_t
  575. MinidumpGenerator::CurrentPCForStackX86(breakpad_thread_state_data_t state) {
  576. i386_thread_state_t *machine_state =
  577. reinterpret_cast<i386_thread_state_t *>(state);
  578. return REGISTER_FROM_THREADSTATE(machine_state, eip);
  579. }
  580. u_int64_t
  581. MinidumpGenerator::CurrentPCForStackX86_64(breakpad_thread_state_data_t state) {
  582. x86_thread_state64_t *machine_state =
  583. reinterpret_cast<x86_thread_state64_t *>(state);
  584. return REGISTER_FROM_THREADSTATE(machine_state, rip);
  585. }
// Writes an MDRawContextX86 record for a 32-bit x86 thread.  AddReg
// copies each same-named register from the mach thread state into the
// minidump context.
bool MinidumpGenerator::WriteContextX86(breakpad_thread_state_data_t state,
                                        MDLocationDescriptor *register_location)
{
  TypedMDRVA<MDRawContextX86> context(&writer_);
  i386_thread_state_t *machine_state =
      reinterpret_cast<i386_thread_state_t *>(state);

  if (!context.Allocate())
    return false;
  *register_location = context.location();
  MDRawContextX86 *context_ptr = context.get();

#define AddReg(a) context_ptr->a = REGISTER_FROM_THREADSTATE(machine_state, a)

  context_ptr->context_flags = MD_CONTEXT_X86;
  AddReg(eax);
  AddReg(ebx);
  AddReg(ecx);
  AddReg(edx);
  AddReg(esi);
  AddReg(edi);
  AddReg(ebp);
  AddReg(esp);

  // Segment registers.
  AddReg(cs);
  AddReg(ds);
  AddReg(ss);
  AddReg(es);
  AddReg(fs);
  AddReg(gs);

  AddReg(eflags);
  AddReg(eip);
#undef AddReg
  return true;
}
// Writes an MDRawContextAMD64 record for an x86-64 thread.  AddReg
// copies each same-named register; rflags is truncated to the context's
// 32-bit eflags field (see comment below).
// NOTE(review): only cs/fs/gs segment registers are copied here
// (ds/es/ss are not fields of x86_thread_state64_t) — confirm the
// remaining MDRawContextAMD64 segment fields are meant to stay zero.
bool MinidumpGenerator::WriteContextX86_64(
    breakpad_thread_state_data_t state,
    MDLocationDescriptor *register_location) {
  TypedMDRVA<MDRawContextAMD64> context(&writer_);
  x86_thread_state64_t *machine_state =
      reinterpret_cast<x86_thread_state64_t *>(state);

  if (!context.Allocate())
    return false;
  *register_location = context.location();
  MDRawContextAMD64 *context_ptr = context.get();

#define AddReg(a) context_ptr->a = REGISTER_FROM_THREADSTATE(machine_state, a)

  context_ptr->context_flags = MD_CONTEXT_AMD64;
  AddReg(rax);
  AddReg(rbx);
  AddReg(rcx);
  AddReg(rdx);
  AddReg(rdi);
  AddReg(rsi);
  AddReg(rbp);
  AddReg(rsp);
  AddReg(r8);
  AddReg(r9);
  AddReg(r10);
  AddReg(r11);
  AddReg(r12);
  AddReg(r13);
  AddReg(r14);
  AddReg(r15);
  AddReg(rip);
  // according to AMD's software developer guide, bits above 18 are
  // not used in the flags register. Since the minidump format
  // specifies 32 bits for the flags register, we can truncate safely
  // with no loss.
  context_ptr->eflags = static_cast<u_int32_t>(REGISTER_FROM_THREADSTATE(machine_state, rflags));
  AddReg(cs);
  AddReg(fs);
  AddReg(gs);
#undef AddReg
  return true;
}
  657. #endif
  658. bool MinidumpGenerator::GetThreadState(thread_act_t target_thread,
  659. thread_state_t state,
  660. mach_msg_type_number_t *count) {
  661. thread_state_flavor_t flavor;
  662. switch (cpu_type_) {
  663. #ifdef HAS_ARM_SUPPORT
  664. case CPU_TYPE_ARM:
  665. flavor = ARM_THREAD_STATE;
  666. break;
  667. #endif
  668. #ifdef HAS_PPC_SUPPORT
  669. case CPU_TYPE_POWERPC:
  670. flavor = PPC_THREAD_STATE;
  671. break;
  672. case CPU_TYPE_POWERPC64:
  673. flavor = PPC_THREAD_STATE64;
  674. break;
  675. #endif
  676. #ifdef HAS_X86_SUPPORT
  677. case CPU_TYPE_I386:
  678. flavor = i386_THREAD_STATE;
  679. break;
  680. case CPU_TYPE_X86_64:
  681. flavor = x86_THREAD_STATE64;
  682. break;
  683. #endif
  684. default:
  685. return false;
  686. }
  687. return thread_get_state(target_thread, flavor,
  688. state, count) == KERN_SUCCESS;
  689. }
  690. bool MinidumpGenerator::WriteThreadStream(mach_port_t thread_id,
  691. MDRawThread *thread) {
  692. breakpad_thread_state_data_t state;
  693. mach_msg_type_number_t state_count
  694. = static_cast<mach_msg_type_number_t>(sizeof(state));
  695. if (GetThreadState(thread_id, state, &state_count)) {
  696. if (!WriteStack(state, &thread->stack))
  697. return false;
  698. memory_blocks_.push_back(thread->stack);
  699. if (!WriteContext(state, &thread->thread_context))
  700. return false;
  701. thread->thread_id = thread_id;
  702. } else {
  703. return false;
  704. }
  705. return true;
  706. }
// Writes the MD_THREAD_LIST_STREAM: one MDRawThread (stack + context)
// for every thread in the crashing task, skipping the handler/generator
// thread so the dump doesn't include the dumper itself.
// NOTE(review): assumes that when handler_thread_ is set it appears in
// the task_threads() result exactly once — confirm against callers.
bool MinidumpGenerator::WriteThreadListStream(
    MDRawDirectory *thread_list_stream) {
  TypedMDRVA<MDRawThreadList> list(&writer_);
  thread_act_port_array_t threads_for_task;
  mach_msg_type_number_t thread_count;
  int non_generator_thread_count;

  if (task_threads(crashing_task_, &threads_for_task, &thread_count))
    return false;

  // Don't include the generator thread
  if (handler_thread_ != MACH_PORT_NULL)
    non_generator_thread_count = thread_count - 1;
  else
    non_generator_thread_count = thread_count;
  if (!list.AllocateObjectAndArray(non_generator_thread_count,
                                   sizeof(MDRawThread)))
    return false;

  thread_list_stream->stream_type = MD_THREAD_LIST_STREAM;
  thread_list_stream->location = list.location();

  list.get()->number_of_threads = non_generator_thread_count;

  MDRawThread thread;
  int thread_idx = 0;

  for (unsigned int i = 0; i < thread_count; ++i) {
    memset(&thread, 0, sizeof(MDRawThread));

    if (threads_for_task[i] != handler_thread_) {
      if (!WriteThreadStream(threads_for_task[i], &thread))
        return false;

      list.CopyIndexAfterObject(thread_idx++, &thread, sizeof(MDRawThread));
    }
  }

  return true;
}
// Writes the MD_MEMORY_LIST_STREAM.  The list holds every block queued
// in memory_blocks_ (thread stacks, added by WriteThreadStream) plus,
// when the dump has an exception, up to 256 bytes of code surrounding
// the faulting instruction pointer.  The IP block's descriptor is
// appended first with an empty data location, the whole list is
// written, and then the descriptor is rewritten once the bytes have
// been copied — a two-pass scheme so the list stays contiguous.
// Must run after WriteThreadListStream, which populates memory_blocks_.
bool MinidumpGenerator::WriteMemoryListStream(
    MDRawDirectory *memory_list_stream) {
  TypedMDRVA<MDRawMemoryList> list(&writer_);

  // If the dump has an exception, include some memory around the
  // instruction pointer.
  const size_t kIPMemorySize = 256;  // bytes
  bool have_ip_memory = false;
  MDMemoryDescriptor ip_memory_d;
  if (exception_thread_ && exception_type_) {
    breakpad_thread_state_data_t state;
    mach_msg_type_number_t stateCount
      = static_cast<mach_msg_type_number_t>(sizeof(state));

    if (thread_get_state(exception_thread_,
                         BREAKPAD_MACHINE_THREAD_STATE,
                         state,
                         &stateCount) == KERN_SUCCESS) {
      u_int64_t ip = CurrentPCForStack(state);
      // Bound it to the upper and lower bounds of the region
      // it's contained within. If it's not in a known memory region,
      // don't bother trying to write it.
      mach_vm_address_t addr = ip;
      mach_vm_size_t size;
      natural_t nesting_level = 0;
      vm_region_submap_info_64 info;
      mach_msg_type_number_t info_count = VM_REGION_SUBMAP_INFO_COUNT_64;

      kern_return_t ret =
        mach_vm_region_recurse(crashing_task_,
                               &addr,
                               &size,
                               &nesting_level,
                               (vm_region_recurse_info_t)&info,
                               &info_count);
      if (ret == KERN_SUCCESS && ip >= addr && ip < (addr + size)) {
        // Try to get 128 bytes before and after the IP, but
        // settle for whatever's available.
        ip_memory_d.start_of_memory_range =
          std::max(uintptr_t(addr),
                   uintptr_t(ip - (kIPMemorySize / 2)));
        uintptr_t end_of_range =
          std::min(uintptr_t(ip + (kIPMemorySize / 2)),
                   uintptr_t(addr + size));
        ip_memory_d.memory.data_size =
          end_of_range - ip_memory_d.start_of_memory_range;
        have_ip_memory = true;
        // This needs to get appended to the list even though
        // the memory bytes aren't filled in yet so the entire
        // list can be written first. The memory bytes will get filled
        // in after the memory list is written.
        memory_blocks_.push_back(ip_memory_d);
      }
    }
  }

  // Now fill in the memory list and write it.
  unsigned memory_count = memory_blocks_.size();
  if (!list.AllocateObjectAndArray(memory_count,
                                   sizeof(MDMemoryDescriptor)))
    return false;

  memory_list_stream->stream_type = MD_MEMORY_LIST_STREAM;
  memory_list_stream->location = list.location();

  list.get()->number_of_memory_ranges = memory_count;

  // First pass: copy every descriptor.  The IP block (if any) is last
  // and still has an unset data location at this point.
  unsigned int i;
  for (i = 0; i < memory_count; ++i) {
    list.CopyIndexAfterObject(i, &memory_blocks_[i],
                              sizeof(MDMemoryDescriptor));
  }

  if (have_ip_memory) {
    // Now read the memory around the instruction pointer.
    UntypedMDRVA ip_memory(&writer_);
    if (!ip_memory.Allocate(ip_memory_d.memory.data_size))
      return false;

    if (dynamic_images_) {
      // Out-of-process.
      vector<uint8_t> memory;
      if (ReadTaskMemory(crashing_task_,
                         ip_memory_d.start_of_memory_range,
                         ip_memory_d.memory.data_size,
                         memory) != KERN_SUCCESS) {
        return false;
      }
      ip_memory.Copy(&memory[0], ip_memory_d.memory.data_size);
    } else {
      // In-process, just copy from local memory.
      ip_memory.Copy(
        reinterpret_cast<const void *>(ip_memory_d.start_of_memory_range),
        ip_memory_d.memory.data_size);
    }

    ip_memory_d.memory = ip_memory.location();
    // Write this again now that the data location is filled in.
    list.CopyIndexAfterObject(i - 1, &ip_memory_d,
                              sizeof(MDMemoryDescriptor));
  }

  return true;
}
  831. bool
  832. MinidumpGenerator::WriteExceptionStream(MDRawDirectory *exception_stream) {
  833. TypedMDRVA<MDRawExceptionStream> exception(&writer_);
  834. if (!exception.Allocate())
  835. return false;
  836. exception_stream->stream_type = MD_EXCEPTION_STREAM;
  837. exception_stream->location = exception.location();
  838. MDRawExceptionStream *exception_ptr = exception.get();
  839. exception_ptr->thread_id = exception_thread_;
  840. // This naming is confusing, but it is the proper translation from
  841. // mach naming to minidump naming.
  842. exception_ptr->exception_record.exception_code = exception_type_;
  843. exception_ptr->exception_record.exception_flags = exception_code_;
  844. breakpad_thread_state_data_t state;
  845. mach_msg_type_number_t state_count
  846. = static_cast<mach_msg_type_number_t>(sizeof(state));
  847. if (!GetThreadState(exception_thread_, state, &state_count))
  848. return false;
  849. if (!WriteContext(state, &exception_ptr->thread_context))
  850. return false;
  851. if (exception_type_ == EXC_BAD_ACCESS)
  852. exception_ptr->exception_record.exception_address = exception_subcode_;
  853. else
  854. exception_ptr->exception_record.exception_address = CurrentPCForStack(state);
  855. return true;
  856. }
// Writes the MD_SYSTEM_INFO_STREAM: CPU architecture/model details,
// processor count, OS platform id, and OS version/build strings.
bool MinidumpGenerator::WriteSystemInfoStream(
    MDRawDirectory *system_info_stream) {
  TypedMDRVA<MDRawSystemInfo> info(&writer_);

  if (!info.Allocate())
    return false;

  system_info_stream->stream_type = MD_SYSTEM_INFO_STREAM;
  system_info_stream->location = info.location();

  // CPU Information
  // NOTE(review): the sysctlbyname() result is not checked; if the query
  // failed, |number_of_processors| would be read uninitialized below.
  uint32_t number_of_processors;
  size_t len = sizeof(number_of_processors);
  sysctlbyname("hw.ncpu", &number_of_processors, &len, NULL, 0);
  MDRawSystemInfo *info_ptr = info.get();

  // Map the Mach cpu_type_ captured at dump time to the minidump
  // architecture enumeration.
  switch (cpu_type_) {
#ifdef HAS_ARM_SUPPORT
    case CPU_TYPE_ARM:
      info_ptr->processor_architecture = MD_CPU_ARCHITECTURE_ARM;
      break;
#endif
#ifdef HAS_PPC_SUPPORT
    case CPU_TYPE_POWERPC:
    case CPU_TYPE_POWERPC64:
      info_ptr->processor_architecture = MD_CPU_ARCHITECTURE_PPC;
      break;
#endif
#ifdef HAS_X86_SUPPORT
    case CPU_TYPE_I386:
    case CPU_TYPE_X86_64:
      if (cpu_type_ == CPU_TYPE_I386)
        info_ptr->processor_architecture = MD_CPU_ARCHITECTURE_X86;
      else
        info_ptr->processor_architecture = MD_CPU_ARCHITECTURE_AMD64;
#ifdef __i386__
      // ebx is used for PIC code, so we need
      // to preserve it.
#define cpuid(op,eax,ebx,ecx,edx) \
  asm ("pushl %%ebx \n\t" \
       "cpuid \n\t" \
       "movl %%ebx,%1 \n\t" \
       "popl %%ebx" \
       : "=a" (eax), \
         "=g" (ebx), \
         "=c" (ecx), \
         "=d" (edx) \
       : "0" (op))
#elif defined(__x86_64__)
#define cpuid(op,eax,ebx,ecx,edx) \
  asm ("cpuid \n\t" \
       : "=a" (eax), \
         "=b" (ebx), \
         "=c" (ecx), \
         "=d" (edx) \
       : "0" (op))
#endif
#if defined(__i386__) || defined(__x86_64__)
      int unused, unused2;
      // get vendor id
      // Note the argument order: the vendor string comes back in
      // ebx, edx, ecx, so vendor_id[0], [2], [1] are passed into the
      // ebx/ecx/edx slots to store the registers in string order.
      cpuid(0, unused, info_ptr->cpu.x86_cpu_info.vendor_id[0],
            info_ptr->cpu.x86_cpu_info.vendor_id[2],
            info_ptr->cpu.x86_cpu_info.vendor_id[1]);
      // get version and feature info
      cpuid(1, info_ptr->cpu.x86_cpu_info.version_information, unused, unused2,
            info_ptr->cpu.x86_cpu_info.feature_information);

      // family
      info_ptr->processor_level =
        (info_ptr->cpu.x86_cpu_info.version_information & 0xF00) >> 8;
      // 0xMMSS (Model, Stepping)
      info_ptr->processor_revision =
        (info_ptr->cpu.x86_cpu_info.version_information & 0xF) |
        ((info_ptr->cpu.x86_cpu_info.version_information & 0xF0) << 4);

      // decode extended model info
      // (presumably following the CPUID extended model/family scheme for
      // family 0x6/0xF parts — verify against the Intel CPUID reference)
      if (info_ptr->processor_level == 0xF ||
          info_ptr->processor_level == 0x6) {
        info_ptr->processor_revision |=
          ((info_ptr->cpu.x86_cpu_info.version_information & 0xF0000) >> 4);
      }

      // decode extended family info
      if (info_ptr->processor_level == 0xF) {
        info_ptr->processor_level +=
          ((info_ptr->cpu.x86_cpu_info.version_information & 0xFF00000) >> 20);
      }
#endif  // __i386__ || __x86_64_
      break;
#endif  // HAS_X86_SUPPORT
    default:
      info_ptr->processor_architecture = MD_CPU_ARCHITECTURE_UNKNOWN;
      break;
  }

  info_ptr->number_of_processors = number_of_processors;
#if TARGET_OS_IPHONE
  info_ptr->platform_id = MD_OS_IOS;
#else
  info_ptr->platform_id = MD_OS_MAC_OS_X;
#endif  // TARGET_OS_IPHONE

  MDLocationDescriptor build_string_loc;

  // OS build string (e.g. from sw_vers) is stored as the CSD version.
  if (!writer_.WriteString(build_string_, 0,
                           &build_string_loc))
    return false;

  info_ptr->csd_version_rva = build_string_loc.rva;
  info_ptr->major_version = os_major_version_;
  info_ptr->minor_version = os_minor_version_;
  info_ptr->build_number = os_build_number_;

  return true;
}
// Writes one MDRawModule describing the loaded image at |index|.
// Two paths: out-of-process (dynamic_images_ holds the image list read
// from the crashed task) and in-process (walk our own dyld image list).
bool MinidumpGenerator::WriteModuleStream(unsigned int index,
                                          MDRawModule *module) {
  if (dynamic_images_) {
    // we're in a different process than the crashed process
    DynamicImage *image = dynamic_images_->GetImage(index);

    if (!image)
      return false;

    memset(module, 0, sizeof(MDRawModule));

    MDLocationDescriptor string_location;

    string name = image->GetFilePath();
    if (!writer_.WriteString(name.c_str(), 0, &string_location))
      return false;

    // The effective load address includes the ASLR slide.
    module->base_of_image = image->GetVMAddr() + image->GetVMAddrSlide();
    module->size_of_image = static_cast<u_int32_t>(image->GetVMSize());
    module->module_name_rva = string_location.rva;

    // We'll skip the executable module, because they don't have
    // LC_ID_DYLIB load commands, and the crash processing server gets
    // version information from the Plist file, anyway.
    if (index != (uint32_t)FindExecutableModule()) {
      module->version_info.signature = MD_VSFIXEDFILEINFO_SIGNATURE;
      module->version_info.struct_version |= MD_VSFIXEDFILEINFO_VERSION;
      // Convert MAC dylib version format, which is a 32 bit number, to the
      // format used by minidump.  The mac format is <16 bits>.<8 bits>.<8 bits>
      // so it fits nicely into the windows version with some massaging
      // The mapping is:
      //   1) upper 16 bits of MAC version go to lower 16 bits of product HI
      //   2) Next most significant 8 bits go to upper 16 bits of product LO
      //   3) Least significant 8 bits go to lower 16 bits of product LO
      uint32_t modVersion = image->GetVersion();
      // NOTE(review): the first assignment is immediately overwritten and
      // is redundant (module was memset to zero above).
      module->version_info.file_version_hi = 0;
      module->version_info.file_version_hi = modVersion >> 16;
      module->version_info.file_version_lo |= (modVersion & 0xff00) << 8;
      module->version_info.file_version_lo |= (modVersion & 0xff);
    }

    if (!WriteCVRecord(module, image->GetCPUType(), name.c_str(), false)) {
      return false;
    }
  } else {
    // Getting module info in the crashed process
    const breakpad_mach_header *header;
    header = (breakpad_mach_header*)_dyld_get_image_header(index);
    if (!header)
      return false;

    // Reject images whose Mach-O magic doesn't match this build's width.
#ifdef __LP64__
    assert(header->magic == MH_MAGIC_64);

    if(header->magic != MH_MAGIC_64)
      return false;
#else
    assert(header->magic == MH_MAGIC);

    if(header->magic != MH_MAGIC)
      return false;
#endif

    int cpu_type = header->cputype;
    unsigned long slide = _dyld_get_image_vmaddr_slide(index);
    const char* name = _dyld_get_image_name(index);
    // Load commands immediately follow the mach header.
    const struct load_command *cmd =
        reinterpret_cast<const struct load_command *>(header + 1);

    memset(module, 0, sizeof(MDRawModule));

    // Scan the load commands for the __TEXT segment, which gives the
    // image's load address and size.
    for (unsigned int i = 0; cmd && (i < header->ncmds); i++) {
      if (cmd->cmd == LC_SEGMENT_ARCH) {

        const breakpad_mach_segment_command *seg =
            reinterpret_cast<const breakpad_mach_segment_command *>(cmd);

        if (!strcmp(seg->segname, "__TEXT")) {
          MDLocationDescriptor string_location;

          if (!writer_.WriteString(name, 0, &string_location))
            return false;

          module->base_of_image = seg->vmaddr + slide;
          module->size_of_image = static_cast<u_int32_t>(seg->vmsize);
          module->module_name_rva = string_location.rva;

          bool in_memory = false;
#if TARGET_OS_IPHONE
          // On iOS the identifier is computed from the in-memory image
          // rather than re-reading the binary from disk.
          in_memory = true;
#endif
          if (!WriteCVRecord(module, cpu_type, name, in_memory))
            return false;

          return true;
        }
      }

      cmd = reinterpret_cast<struct load_command*>((char *)cmd + cmd->cmdsize);
    }
    // NOTE(review): if no __TEXT segment is found, this falls through and
    // reports success with |module| left zeroed.
  }

  return true;
}
  1043. int MinidumpGenerator::FindExecutableModule() {
  1044. if (dynamic_images_) {
  1045. int index = dynamic_images_->GetExecutableImageIndex();
  1046. if (index >= 0) {
  1047. return index;
  1048. }
  1049. } else {
  1050. int image_count = _dyld_image_count();
  1051. const struct mach_header *header;
  1052. for (int index = 0; index < image_count; ++index) {
  1053. header = _dyld_get_image_header(index);
  1054. if (header->filetype == MH_EXECUTE)
  1055. return index;
  1056. }
  1057. }
  1058. // failed - just use the first image
  1059. return 0;
  1060. }
// Writes a CodeView (MDCVInfoPDB70) record for |module|: the module's
// basename plus its debug identifier (Mach-O UUID, or an MD5 fallback).
// Returns false only on a minidump allocation/copy failure; if no
// identifier can be computed the record is still written with a zeroed
// signature and the function reports success.
bool MinidumpGenerator::WriteCVRecord(MDRawModule *module, int cpu_type,
                                      const char *module_path, bool in_memory) {
  TypedMDRVA<MDCVInfoPDB70> cv(&writer_);

  // Only return the last path component of the full module path
  const char *module_name = strrchr(module_path, '/');

  // Increment past the slash
  if (module_name)
    ++module_name;
  else
    module_name = "<Unknown>";

  size_t module_name_length = strlen(module_name);

  // +1 leaves room for a trailing NUL after the copied name bytes.
  if (!cv.AllocateObjectAndArray(module_name_length + 1, sizeof(u_int8_t)))
    return false;

  if (!cv.CopyIndexAfterObject(0, module_name, module_name_length))
    return false;

  module->cv_record = cv.location();
  MDCVInfoPDB70 *cv_ptr = cv.get();
  cv_ptr->cv_signature = MD_CVINFOPDB70_SIGNATURE;
  cv_ptr->age = 0;

  // Get the module identifier
  unsigned char identifier[16];
  bool result = false;
  if (in_memory) {
    // Identify from the mapped image (used when the on-disk binary may be
    // inaccessible, e.g. on iOS): try the LC_UUID command, then MD5.
    MacFileUtilities::MachoID macho(module_path,
        reinterpret_cast<void *>(module->base_of_image),
        static_cast<size_t>(module->size_of_image));
    result = macho.UUIDCommand(cpu_type, identifier);
    if (!result)
      result = macho.MD5(cpu_type, identifier);
  }

  if (!result) {
    // Fall back to identifying the module file on disk.
    FileID file_id(module_path);
    result = file_id.MachoIdentifier(cpu_type, identifier);
  }

  if (result) {
    // Pack the 16 identifier bytes into the GUID fields, most significant
    // byte first within each field.
    cv_ptr->signature.data1 = (uint32_t)identifier[0] << 24 |
      (uint32_t)identifier[1] << 16 | (uint32_t)identifier[2] << 8 |
      (uint32_t)identifier[3];
    cv_ptr->signature.data2 = (uint32_t)identifier[4] << 8 | identifier[5];
    cv_ptr->signature.data3 = (uint32_t)identifier[6] << 8 | identifier[7];
    cv_ptr->signature.data4[0] = identifier[8];
    cv_ptr->signature.data4[1] = identifier[9];
    cv_ptr->signature.data4[2] = identifier[10];
    cv_ptr->signature.data4[3] = identifier[11];
    cv_ptr->signature.data4[4] = identifier[12];
    cv_ptr->signature.data4[5] = identifier[13];
    cv_ptr->signature.data4[6] = identifier[14];
    cv_ptr->signature.data4[7] = identifier[15];
  }

  return true;
}
  1112. bool MinidumpGenerator::WriteModuleListStream(
  1113. MDRawDirectory *module_list_stream) {
  1114. TypedMDRVA<MDRawModuleList> list(&writer_);
  1115. size_t image_count = dynamic_images_ ?
  1116. static_cast<size_t>(dynamic_images_->GetImageCount()) :
  1117. _dyld_image_count();
  1118. if (!list.AllocateObjectAndArray(image_count, MD_MODULE_SIZE))
  1119. return false;
  1120. module_list_stream->stream_type = MD_MODULE_LIST_STREAM;
  1121. module_list_stream->location = list.location();
  1122. list.get()->number_of_modules = image_count;
  1123. // Write out the executable module as the first one
  1124. MDRawModule module;
  1125. size_t executableIndex = FindExecutableModule();
  1126. if (!WriteModuleStream(executableIndex, &module)) {
  1127. return false;
  1128. }
  1129. list.CopyIndexAfterObject(0, &module, MD_MODULE_SIZE);
  1130. int destinationIndex = 1; // Write all other modules after this one
  1131. for (size_t i = 0; i < image_count; ++i) {
  1132. if (i != executableIndex) {
  1133. if (!WriteModuleStream(i, &module)) {
  1134. return false;
  1135. }
  1136. list.CopyIndexAfterObject(destinationIndex++, &module, MD_MODULE_SIZE);
  1137. }
  1138. }
  1139. return true;
  1140. }
  1141. bool MinidumpGenerator::WriteMiscInfoStream(MDRawDirectory *misc_info_stream) {
  1142. TypedMDRVA<MDRawMiscInfo> info(&writer_);
  1143. if (!info.Allocate())
  1144. return false;
  1145. misc_info_stream->stream_type = MD_MISC_INFO_STREAM;
  1146. misc_info_stream->location = info.location();
  1147. MDRawMiscInfo *info_ptr = info.get();
  1148. info_ptr->size_of_info = static_cast<u_int32_t>(sizeof(MDRawMiscInfo));
  1149. info_ptr->flags1 = MD_MISCINFO_FLAGS1_PROCESS_ID |
  1150. MD_MISCINFO_FLAGS1_PROCESS_TIMES |
  1151. MD_MISCINFO_FLAGS1_PROCESSOR_POWER_INFO;
  1152. // Process ID
  1153. info_ptr->process_id = getpid();
  1154. // Times
  1155. struct rusage usage;
  1156. if (getrusage(RUSAGE_SELF, &usage) != -1) {
  1157. // Omit the fractional time since the MDRawMiscInfo only wants seconds
  1158. info_ptr->process_user_time =
  1159. static_cast<u_int32_t>(usage.ru_utime.tv_sec);
  1160. info_ptr->process_kernel_time =
  1161. static_cast<u_int32_t>(usage.ru_stime.tv_sec);
  1162. }
  1163. int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID,
  1164. static_cast<int>(info_ptr->process_id) };
  1165. u_int mibsize = static_cast<u_int>(sizeof(mib) / sizeof(mib[0]));
  1166. struct kinfo_proc proc;
  1167. size_t size = sizeof(proc);
  1168. if (sysctl(mib, mibsize, &proc, &size, NULL, 0) == 0) {
  1169. info_ptr->process_create_time =
  1170. static_cast<u_int32_t>(proc.kp_proc.p_starttime.tv_sec);
  1171. }
  1172. // Speed
  1173. uint64_t speed;
  1174. const uint64_t kOneMillion = 1000 * 1000;
  1175. size = sizeof(speed);
  1176. sysctlbyname("hw.cpufrequency_max", &speed, &size, NULL, 0);
  1177. info_ptr->processor_max_mhz = static_cast<u_int32_t>(speed / kOneMillion);
  1178. info_ptr->processor_mhz_limit = static_cast<u_int32_t>(speed / kOneMillion);
  1179. size = sizeof(speed);
  1180. sysctlbyname("hw.cpufrequency", &speed, &size, NULL, 0);
  1181. info_ptr->processor_current_mhz = static_cast<u_int32_t>(speed / kOneMillion);
  1182. return true;
  1183. }
  1184. bool MinidumpGenerator::WriteBreakpadInfoStream(
  1185. MDRawDirectory *breakpad_info_stream) {
  1186. TypedMDRVA<MDRawBreakpadInfo> info(&writer_);
  1187. if (!info.Allocate())
  1188. return false;
  1189. breakpad_info_stream->stream_type = MD_BREAKPAD_INFO_STREAM;
  1190. breakpad_info_stream->location = info.location();
  1191. MDRawBreakpadInfo *info_ptr = info.get();
  1192. if (exception_thread_ && exception_type_) {
  1193. info_ptr->validity = MD_BREAKPAD_INFO_VALID_DUMP_THREAD_ID |
  1194. MD_BREAKPAD_INFO_VALID_REQUESTING_THREAD_ID;
  1195. info_ptr->dump_thread_id = handler_thread_;
  1196. info_ptr->requesting_thread_id = exception_thread_;
  1197. } else {
  1198. info_ptr->validity = MD_BREAKPAD_INFO_VALID_DUMP_THREAD_ID;
  1199. info_ptr->dump_thread_id = handler_thread_;
  1200. info_ptr->requesting_thread_id = 0;
  1201. }
  1202. return true;
  1203. }
  1204. } // namespace google_breakpad