
/tools/perf/util/header.c

https://github.com/Mengqi/linux-2.6
#define _FILE_OFFSET_BITS 64

#include <sys/types.h>
#include <byteswap.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/list.h>
#include <linux/kernel.h>

#include "evlist.h"
#include "evsel.h"
#include "util.h"
#include "header.h"
#include "../perf.h"
#include "trace-event.h"
#include "session.h"
#include "symbol.h"
#include "debug.h"

static bool no_buildid_cache = false;

static int event_count;
static struct perf_trace_event_type *events;
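
/*
 * In-memory table of trace event types (struct perf_trace_event_type).
 * perf_header__push_event() grows the table one entry at a time and
 * perf_header__find_event() does a linear lookup by event id.
 */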
int perf_header__push_event(u64 id, const char *name)
{
        if (strlen(name) > MAX_EVENT_NAME)
                pr_warning("Event %s will be truncated\n", name);

        if (!events) {
                events = malloc(sizeof(struct perf_trace_event_type));
                if (events == NULL)
                        return -ENOMEM;
        } else {
                struct perf_trace_event_type *nevents;

                nevents = realloc(events, (event_count + 1) * sizeof(*events));
                if (nevents == NULL)
                        return -ENOMEM;
                events = nevents;
        }
        memset(&events[event_count], 0, sizeof(struct perf_trace_event_type));
        events[event_count].event_id = id;
        strncpy(events[event_count].name, name, MAX_EVENT_NAME - 1);
        event_count++;
        return 0;
}

char *perf_header__find_event(u64 id)
{
        int i;

        for (i = 0 ; i < event_count; i++) {
                if (events[i].event_id == id)
                        return events[i].name;
        }
        return NULL;
}

static const char *__perf_magic = "PERFFILE";

#define PERF_MAGIC (*(u64 *)__perf_magic)

struct perf_file_attr {
        struct perf_event_attr   attr;
        struct perf_file_section ids;
};

void perf_header__set_feat(struct perf_header *header, int feat)
{
        set_bit(feat, header->adds_features);
}

void perf_header__clear_feat(struct perf_header *header, int feat)
{
        clear_bit(feat, header->adds_features);
}

bool perf_header__has_feat(const struct perf_header *header, int feat)
{
        return test_bit(feat, header->adds_features);
}
static int do_write(int fd, const void *buf, size_t size)
{
        while (size) {
                int ret = write(fd, buf, size);

                if (ret < 0)
                        return -errno;

                size -= ret;
                buf += ret;
        }

        return 0;
}

#define NAME_ALIGN 64

static int write_padded(int fd, const void *bf, size_t count,
                        size_t count_aligned)
{
        static const char zero_buf[NAME_ALIGN];
        int err = do_write(fd, bf, count);

        if (!err)
                err = do_write(fd, zero_buf, count_aligned - count);

        return err;
}
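
/*
 * Iterate only over DSOs that actually carry a build-id.  The build-id table
 * written below is one build_id_event per hit DSO, each record followed by
 * the DSO long name padded to NAME_ALIGN bytes.
 */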
#define dsos__for_each_with_build_id(pos, head) \
        list_for_each_entry(pos, head, node)     \
                if (!pos->has_build_id)          \
                        continue;                \
                else

static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
                                       u16 misc, int fd)
{
        struct dso *pos;

        dsos__for_each_with_build_id(pos, head) {
                int err;
                struct build_id_event b;
                size_t len;

                if (!pos->hit)
                        continue;
                len = pos->long_name_len + 1;
                len = ALIGN(len, NAME_ALIGN);
                memset(&b, 0, sizeof(b));
                memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id));
                b.pid = pid;
                b.header.misc = misc;
                b.header.size = sizeof(b) + len;
                err = do_write(fd, &b, sizeof(b));
                if (err < 0)
                        return err;
                err = write_padded(fd, pos->long_name,
                                   pos->long_name_len + 1, len);
                if (err < 0)
                        return err;
        }

        return 0;
}

static int machine__write_buildid_table(struct machine *machine, int fd)
{
        int err;
        u16 kmisc = PERF_RECORD_MISC_KERNEL,
            umisc = PERF_RECORD_MISC_USER;

        if (!machine__is_host(machine)) {
                kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
                umisc = PERF_RECORD_MISC_GUEST_USER;
        }

        err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid,
                                          kmisc, fd);
        if (err == 0)
                err = __dsos__write_buildid_table(&machine->user_dsos,
                                                  machine->pid, umisc, fd);
        return err;
}

static int dsos__write_buildid_table(struct perf_header *header, int fd)
{
        struct perf_session *session = container_of(header,
                        struct perf_session, header);
        struct rb_node *nd;
        int err = machine__write_buildid_table(&session->host_machine, fd);

        if (err)
                return err;

        for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
                struct machine *pos = rb_entry(nd, struct machine, rb_node);
                err = machine__write_buildid_table(pos, fd);
                if (err)
                        break;
        }
        return err;
}
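
/*
 * Build-id cache layout under debugdir (normally ~/.debug):
 *
 *   <debugdir>/<object path>/<build-id>            regular file or hardlink
 *   <debugdir>/.build-id/<xx>/<remaining build-id> relative symlink to it
 *
 * where <xx> are the first two hex characters of the build-id.
 */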
int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
                          const char *name, bool is_kallsyms)
{
        const size_t size = PATH_MAX;
        char *realname, *filename = zalloc(size),
             *linkname = zalloc(size), *targetname;
        int len, err = -1;

        if (is_kallsyms) {
                if (symbol_conf.kptr_restrict) {
                        pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n");
                        return 0;
                }
                realname = (char *)name;
        } else
                realname = realpath(name, NULL);

        if (realname == NULL || filename == NULL || linkname == NULL)
                goto out_free;

        len = snprintf(filename, size, "%s%s%s",
                       debugdir, is_kallsyms ? "/" : "", realname);
        if (mkdir_p(filename, 0755))
                goto out_free;

        snprintf(filename + len, size - len, "/%s", sbuild_id);

        if (access(filename, F_OK)) {
                if (is_kallsyms) {
                        if (copyfile("/proc/kallsyms", filename))
                                goto out_free;
                } else if (link(realname, filename) && copyfile(name, filename))
                        goto out_free;
        }

        len = snprintf(linkname, size, "%s/.build-id/%.2s",
                       debugdir, sbuild_id);

        if (access(linkname, X_OK) && mkdir_p(linkname, 0755))
                goto out_free;

        snprintf(linkname + len, size - len, "/%s", sbuild_id + 2);
        targetname = filename + strlen(debugdir) - 5;
        memcpy(targetname, "../..", 5);

        if (symlink(targetname, linkname) == 0)
                err = 0;
out_free:
        if (!is_kallsyms)
                free(realname);
        free(filename);
        free(linkname);
        return err;
}

static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size,
                                 const char *name, const char *debugdir,
                                 bool is_kallsyms)
{
        char sbuild_id[BUILD_ID_SIZE * 2 + 1];

        build_id__sprintf(build_id, build_id_size, sbuild_id);

        return build_id_cache__add_s(sbuild_id, debugdir, name, is_kallsyms);
}

int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir)
{
        const size_t size = PATH_MAX;
        char *filename = zalloc(size),
             *linkname = zalloc(size);
        int err = -1;

        if (filename == NULL || linkname == NULL)
                goto out_free;

        snprintf(linkname, size, "%s/.build-id/%.2s/%s",
                 debugdir, sbuild_id, sbuild_id + 2);

        if (access(linkname, F_OK))
                goto out_free;

        if (readlink(linkname, filename, size) < 0)
                goto out_free;

        if (unlink(linkname))
                goto out_free;

        /*
         * Since the link is relative, we must make it absolute:
         */
        snprintf(linkname, size, "%s/.build-id/%.2s/%s",
                 debugdir, sbuild_id, filename);

        if (unlink(linkname))
                goto out_free;

        err = 0;
out_free:
        free(filename);
        free(linkname);
        return err;
}

static int dso__cache_build_id(struct dso *dso, const char *debugdir)
{
        bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';

        return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id),
                                     dso->long_name, debugdir, is_kallsyms);
}

static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
{
        struct dso *pos;
        int err = 0;

        dsos__for_each_with_build_id(pos, head)
                if (dso__cache_build_id(pos, debugdir))
                        err = -1;

        return err;
}

static int machine__cache_build_ids(struct machine *machine, const char *debugdir)
{
        int ret = __dsos__cache_build_ids(&machine->kernel_dsos, debugdir);
        ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir);
        return ret;
}

static int perf_session__cache_build_ids(struct perf_session *session)
{
        struct rb_node *nd;
        int ret;
        char debugdir[PATH_MAX];

        snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir);

        if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
                return -1;

        ret = machine__cache_build_ids(&session->host_machine, debugdir);

        for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
                struct machine *pos = rb_entry(nd, struct machine, rb_node);
                ret |= machine__cache_build_ids(pos, debugdir);
        }
        return ret ? -1 : 0;
}

static bool machine__read_build_ids(struct machine *machine, bool with_hits)
{
        bool ret = __dsos__read_build_ids(&machine->kernel_dsos, with_hits);
        ret |= __dsos__read_build_ids(&machine->user_dsos, with_hits);
        return ret;
}

static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
{
        struct rb_node *nd;
        bool ret = machine__read_build_ids(&session->host_machine, with_hits);

        for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
                struct machine *pos = rb_entry(nd, struct machine, rb_node);
                ret |= machine__read_build_ids(pos, with_hits);
        }

        return ret;
}
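
/*
 * Write the optional feature sections (HEADER_TRACE_INFO, HEADER_BUILD_ID,
 * ...) after the sample data: space for a table of perf_file_section entries
 * is reserved first, each section payload is written behind it, and the
 * table is then filled in with the resulting offsets and sizes.
 */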
static int perf_header__adds_write(struct perf_header *header,
                                   struct perf_evlist *evlist, int fd)
{
        int nr_sections;
        struct perf_session *session;
        struct perf_file_section *feat_sec;
        int sec_size;
        u64 sec_start;
        int idx = 0, err;

        session = container_of(header, struct perf_session, header);

        if (perf_header__has_feat(header, HEADER_BUILD_ID) &&
            !perf_session__read_build_ids(session, true))
                perf_header__clear_feat(header, HEADER_BUILD_ID);

        nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
        if (!nr_sections)
                return 0;

        feat_sec = calloc(sizeof(*feat_sec), nr_sections);
        if (feat_sec == NULL)
                return -ENOMEM;

        sec_size = sizeof(*feat_sec) * nr_sections;

        sec_start = header->data_offset + header->data_size;
        lseek(fd, sec_start + sec_size, SEEK_SET);

        if (perf_header__has_feat(header, HEADER_TRACE_INFO)) {
                struct perf_file_section *trace_sec;

                trace_sec = &feat_sec[idx++];

                /* Write trace info */
                trace_sec->offset = lseek(fd, 0, SEEK_CUR);
                read_tracing_data(fd, &evlist->entries);
                trace_sec->size = lseek(fd, 0, SEEK_CUR) - trace_sec->offset;
        }

        if (perf_header__has_feat(header, HEADER_BUILD_ID)) {
                struct perf_file_section *buildid_sec;

                buildid_sec = &feat_sec[idx++];

                /* Write build-ids */
                buildid_sec->offset = lseek(fd, 0, SEEK_CUR);
                err = dsos__write_buildid_table(header, fd);
                if (err < 0) {
                        pr_debug("failed to write buildid table\n");
                        goto out_free;
                }
                buildid_sec->size = lseek(fd, 0, SEEK_CUR) -
                                    buildid_sec->offset;
                if (!no_buildid_cache)
                        perf_session__cache_build_ids(session);
        }

        lseek(fd, sec_start, SEEK_SET);
        err = do_write(fd, feat_sec, sec_size);
        if (err < 0)
                pr_debug("failed to write feature section\n");
out_free:
        free(feat_sec);
        return err;
}

int perf_header__write_pipe(int fd)
{
        struct perf_pipe_file_header f_header;
        int err;

        f_header = (struct perf_pipe_file_header){
                .magic = PERF_MAGIC,
                .size  = sizeof(f_header),
        };

        err = do_write(fd, &f_header, sizeof(f_header));
        if (err < 0) {
                pr_debug("failed to write perf pipe header\n");
                return err;
        }

        return 0;
}
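
/*
 * On-disk layout written here: the perf_file_header at offset 0, then the
 * per-event id arrays, the perf_file_attr table, the trace event type table,
 * the sample data and, when at_exit is set, the feature sections.  The file
 * header itself is (re)written last, once all offsets are known.
 */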
int perf_session__write_header(struct perf_session *session,
                               struct perf_evlist *evlist,
                               int fd, bool at_exit)
{
        struct perf_file_header f_header;
        struct perf_file_attr   f_attr;
        struct perf_header *header = &session->header;
        struct perf_evsel *attr, *pair = NULL;
        int err;

        lseek(fd, sizeof(f_header), SEEK_SET);

        if (session->evlist != evlist)
                pair = list_entry(session->evlist->entries.next, struct perf_evsel, node);

        list_for_each_entry(attr, &evlist->entries, node) {
                attr->id_offset = lseek(fd, 0, SEEK_CUR);
                err = do_write(fd, attr->id, attr->ids * sizeof(u64));
                if (err < 0) {
out_err_write:
                        pr_debug("failed to write perf header\n");
                        return err;
                }
                if (session->evlist != evlist) {
                        err = do_write(fd, pair->id, pair->ids * sizeof(u64));
                        if (err < 0)
                                goto out_err_write;
                        attr->ids += pair->ids;
                        pair = list_entry(pair->node.next, struct perf_evsel, node);
                }
        }

        header->attr_offset = lseek(fd, 0, SEEK_CUR);

        list_for_each_entry(attr, &evlist->entries, node) {
                f_attr = (struct perf_file_attr){
                        .attr = attr->attr,
                        .ids  = {
                                .offset = attr->id_offset,
                                .size   = attr->ids * sizeof(u64),
                        }
                };
                err = do_write(fd, &f_attr, sizeof(f_attr));
                if (err < 0) {
                        pr_debug("failed to write perf header attribute\n");
                        return err;
                }
        }

        header->event_offset = lseek(fd, 0, SEEK_CUR);
        header->event_size = event_count * sizeof(struct perf_trace_event_type);
        if (events) {
                err = do_write(fd, events, header->event_size);
                if (err < 0) {
                        pr_debug("failed to write perf header events\n");
                        return err;
                }
        }

        header->data_offset = lseek(fd, 0, SEEK_CUR);

        if (at_exit) {
                err = perf_header__adds_write(header, evlist, fd);
                if (err < 0)
                        return err;
        }

        f_header = (struct perf_file_header){
                .magic     = PERF_MAGIC,
                .size      = sizeof(f_header),
                .attr_size = sizeof(f_attr),
                .attrs = {
                        .offset = header->attr_offset,
                        .size   = evlist->nr_entries * sizeof(f_attr),
                },
                .data = {
                        .offset = header->data_offset,
                        .size   = header->data_size,
                },
                .event_types = {
                        .offset = header->event_offset,
                        .size   = header->event_size,
                },
        };

        memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));

        lseek(fd, 0, SEEK_SET);
        err = do_write(fd, &f_header, sizeof(f_header));
        if (err < 0) {
                pr_debug("failed to write perf header\n");
                return err;
        }
        lseek(fd, header->data_offset + header->data_size, SEEK_SET);

        header->frozen = 1;
        return 0;
}

static int perf_header__getbuffer64(struct perf_header *header,
                                    int fd, void *buf, size_t size)
{
        if (readn(fd, buf, size) <= 0)
                return -1;

        if (header->needs_swap)
                mem_bswap_64(buf, size);

        return 0;
}
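
/*
 * Walk the feature bitmap and hand each present section to the caller's
 * callback.  Sections appear in the file in feature-bit order, so the index
 * into feat_sec only advances for features that are actually set.
 */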
int perf_header__process_sections(struct perf_header *header, int fd,
                                  int (*process)(struct perf_file_section *section,
                                                 struct perf_header *ph,
                                                 int feat, int fd))
{
        struct perf_file_section *feat_sec;
        int nr_sections;
        int sec_size;
        int idx = 0;
        int err = -1, feat = 1;

        nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
        if (!nr_sections)
                return 0;

        feat_sec = calloc(sizeof(*feat_sec), nr_sections);
        if (!feat_sec)
                return -1;

        sec_size = sizeof(*feat_sec) * nr_sections;

        lseek(fd, header->data_offset + header->data_size, SEEK_SET);

        if (perf_header__getbuffer64(header, fd, feat_sec, sec_size))
                goto out_free;

        err = 0;
        while (idx < nr_sections && feat < HEADER_LAST_FEATURE) {
                if (perf_header__has_feat(header, feat)) {
                        struct perf_file_section *sec = &feat_sec[idx++];

                        err = process(sec, header, feat, fd);
                        if (err < 0)
                                break;
                }
                ++feat;
        }
out_free:
        free(feat_sec);
        return err;
}
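
/*
 * Read and sanity-check the file header.  Endianness is detected by checking
 * whether attr_size matches sizeof(struct perf_file_attr); if it only
 * matches after byte-swapping, the file was recorded on a machine with the
 * opposite endianness and needs_swap is set for all subsequent reads.
 */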
int perf_file_header__read(struct perf_file_header *header,
                           struct perf_header *ph, int fd)
{
        lseek(fd, 0, SEEK_SET);

        if (readn(fd, header, sizeof(*header)) <= 0 ||
            memcmp(&header->magic, __perf_magic, sizeof(header->magic)))
                return -1;

        if (header->attr_size != sizeof(struct perf_file_attr)) {
                u64 attr_size = bswap_64(header->attr_size);

                if (attr_size != sizeof(struct perf_file_attr))
                        return -1;

                mem_bswap_64(header, offsetof(struct perf_file_header,
                                              adds_features));
                ph->needs_swap = true;
        }

        if (header->size != sizeof(*header)) {
                /* Support the previous format */
                if (header->size == offsetof(typeof(*header), adds_features))
                        bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
                else
                        return -1;
        }

        memcpy(&ph->adds_features, &header->adds_features,
               sizeof(ph->adds_features));
        /*
         * FIXME: hack that assumes that if we need swap the perf.data file
         * may be coming from an arch with a different word-size, ergo different
         * DEFINE_BITMAP format, investigate more later, but for now its mostly
         * safe to assume that we have a build-id section. Trace files probably
         * have several other issues in this realm anyway...
         */
        if (ph->needs_swap) {
                memset(&ph->adds_features, 0, sizeof(ph->adds_features));
                perf_header__set_feat(ph, HEADER_BUILD_ID);
        }

        ph->event_offset = header->event_types.offset;
        ph->event_size   = header->event_types.size;
        ph->data_offset  = header->data.offset;
        ph->data_size    = header->data.size;
        return 0;
}
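
/*
 * Turn one build_id_event back into dso state: find (or create) the machine
 * for the event's pid, pick the kernel or user DSO list from header.misc,
 * and attach the build-id to the matching dso.
 */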
static int __event_process_build_id(struct build_id_event *bev,
                                    char *filename,
                                    struct perf_session *session)
{
        int err = -1;
        struct list_head *head;
        struct machine *machine;
        u16 misc;
        struct dso *dso;
        enum dso_kernel_type dso_type;

        machine = perf_session__findnew_machine(session, bev->pid);
        if (!machine)
                goto out;

        misc = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

        switch (misc) {
        case PERF_RECORD_MISC_KERNEL:
                dso_type = DSO_TYPE_KERNEL;
                head = &machine->kernel_dsos;
                break;
        case PERF_RECORD_MISC_GUEST_KERNEL:
                dso_type = DSO_TYPE_GUEST_KERNEL;
                head = &machine->kernel_dsos;
                break;
        case PERF_RECORD_MISC_USER:
        case PERF_RECORD_MISC_GUEST_USER:
                dso_type = DSO_TYPE_USER;
                head = &machine->user_dsos;
                break;
        default:
                goto out;
        }

        dso = __dsos__findnew(head, filename);
        if (dso != NULL) {
                char sbuild_id[BUILD_ID_SIZE * 2 + 1];

                dso__set_build_id(dso, &bev->build_id);

                if (filename[0] == '[')
                        dso->kernel = dso_type;

                build_id__sprintf(dso->build_id, sizeof(dso->build_id),
                                  sbuild_id);
                pr_debug("build id event received for %s: %s\n",
                         dso->long_name, sbuild_id);
        }

        err = 0;
out:
        return err;
}
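
/*
 * Reader for the pre-a1645ce1 build-id table layout, whose records had no
 * pid field.  The pid is reconstructed from header.misc: host records get
 * HOST_KERNEL_ID, guest records get DEFAULT_GUEST_KERNEL_ID.
 */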
static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
                                                 int input, u64 offset, u64 size)
{
        struct perf_session *session = container_of(header, struct perf_session, header);
        struct {
                struct perf_event_header header;
                u8                       build_id[ALIGN(BUILD_ID_SIZE, sizeof(u64))];
                char                     filename[0];
        } old_bev;
        struct build_id_event bev;
        char filename[PATH_MAX];
        u64 limit = offset + size;

        while (offset < limit) {
                ssize_t len;

                if (read(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
                        return -1;

                if (header->needs_swap)
                        perf_event_header__bswap(&old_bev.header);

                len = old_bev.header.size - sizeof(old_bev);
                if (read(input, filename, len) != len)
                        return -1;

                bev.header = old_bev.header;

                /*
                 * As the pid is the missing value, we need to fill
                 * it properly. The header.misc value give us nice hint.
                 */
                bev.pid = HOST_KERNEL_ID;
                if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
                    bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
                        bev.pid = DEFAULT_GUEST_KERNEL_ID;

                memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));

                __event_process_build_id(&bev, filename, session);

                offset += bev.header.size;
        }

        return 0;
}

static int perf_header__read_build_ids(struct perf_header *header,
                                       int input, u64 offset, u64 size)
{
        struct perf_session *session = container_of(header, struct perf_session, header);
        struct build_id_event bev;
        char filename[PATH_MAX];
        u64 limit = offset + size, orig_offset = offset;
        int err = -1;

        while (offset < limit) {
                ssize_t len;

                if (read(input, &bev, sizeof(bev)) != sizeof(bev))
                        goto out;

                if (header->needs_swap)
                        perf_event_header__bswap(&bev.header);

                len = bev.header.size - sizeof(bev);
                if (read(input, filename, len) != len)
                        goto out;

                /*
                 * The a1645ce1 changeset:
                 *
                 * "perf: 'perf kvm' tool for monitoring guest performance from host"
                 *
                 * Added a field to struct build_id_event that broke the file
                 * format.
                 *
                 * Since the kernel build-id is the first entry, process the
                 * table using the old format if the well known
                 * '[kernel.kallsyms]' string for the kernel build-id has the
                 * first 4 characters chopped off (where the pid_t sits).
                 */
                if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
                        if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
                                return -1;
                        return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
                }

                __event_process_build_id(&bev, filename, session);

                offset += bev.header.size;
        }
        err = 0;
out:
        return err;
}

static int perf_file_section__process(struct perf_file_section *section,
                                      struct perf_header *ph,
                                      int feat, int fd)
{
        if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
                pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
                         "%d, continuing...\n", section->offset, feat);
                return 0;
        }

        switch (feat) {
        case HEADER_TRACE_INFO:
                trace_report(fd, false);
                break;

        case HEADER_BUILD_ID:
                if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
                        pr_debug("Failed to read buildids, continuing...\n");
                break;
        default:
                pr_debug("unknown feature %d, continuing...\n", feat);
        }

        return 0;
}

static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
                                       struct perf_header *ph, int fd,
                                       bool repipe)
{
        if (readn(fd, header, sizeof(*header)) <= 0 ||
            memcmp(&header->magic, __perf_magic, sizeof(header->magic)))
                return -1;

        if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0)
                return -1;

        if (header->size != sizeof(*header)) {
                u64 size = bswap_64(header->size);

                if (size != sizeof(*header))
                        return -1;

                ph->needs_swap = true;
        }

        return 0;
}

static int perf_header__read_pipe(struct perf_session *session, int fd)
{
        struct perf_header *header = &session->header;
        struct perf_pipe_file_header f_header;

        if (perf_file_header__read_pipe(&f_header, header, fd,
                                        session->repipe) < 0) {
                pr_debug("incompatible file format\n");
                return -EINVAL;
        }

        session->fd = fd;

        return 0;
}
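
/*
 * Read a perf.data header: recreate the evlist from the on-disk attr table
 * and per-event id arrays, load the trace event type table, then process the
 * optional feature sections.  Pipe-mode files take the short path above.
 */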
int perf_session__read_header(struct perf_session *session, int fd)
{
        struct perf_header *header = &session->header;
        struct perf_file_header f_header;
        struct perf_file_attr   f_attr;
        u64 f_id;
        int nr_attrs, nr_ids, i, j;

        session->evlist = perf_evlist__new(NULL, NULL);
        if (session->evlist == NULL)
                return -ENOMEM;

        if (session->fd_pipe)
                return perf_header__read_pipe(session, fd);

        if (perf_file_header__read(&f_header, header, fd) < 0) {
                pr_debug("incompatible file format\n");
                return -EINVAL;
        }

        nr_attrs = f_header.attrs.size / sizeof(f_attr);
        lseek(fd, f_header.attrs.offset, SEEK_SET);

        for (i = 0; i < nr_attrs; i++) {
                struct perf_evsel *evsel;
                off_t tmp;

                if (readn(fd, &f_attr, sizeof(f_attr)) <= 0)
                        goto out_errno;

                if (header->needs_swap)
                        perf_event__attr_swap(&f_attr.attr);

                tmp = lseek(fd, 0, SEEK_CUR);
                evsel = perf_evsel__new(&f_attr.attr, i);

                if (evsel == NULL)
                        goto out_delete_evlist;
                /*
                 * Do it before so that if perf_evsel__alloc_id fails, this
                 * entry gets purged too at perf_evlist__delete().
                 */
                perf_evlist__add(session->evlist, evsel);

                nr_ids = f_attr.ids.size / sizeof(u64);
                /*
                 * We don't have the cpu and thread maps on the header, so
                 * for allocating the perf_sample_id table we fake 1 cpu and
                 * hattr->ids threads.
                 */
                if (perf_evsel__alloc_id(evsel, 1, nr_ids))
                        goto out_delete_evlist;

                lseek(fd, f_attr.ids.offset, SEEK_SET);

                for (j = 0; j < nr_ids; j++) {
                        if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
                                goto out_errno;

                        perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
                }

                lseek(fd, tmp, SEEK_SET);
        }

        if (f_header.event_types.size) {
                lseek(fd, f_header.event_types.offset, SEEK_SET);
                events = malloc(f_header.event_types.size);
                if (events == NULL)
                        return -ENOMEM;
                if (perf_header__getbuffer64(header, fd, events,
                                             f_header.event_types.size))
                        goto out_errno;
                event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type);
        }

        perf_header__process_sections(header, fd, perf_file_section__process);

        lseek(fd, header->data_offset, SEEK_SET);

        header->frozen = 1;
        return 0;
out_errno:
        return -errno;

out_delete_evlist:
        perf_evlist__delete(session->evlist);
        session->evlist = NULL;
        return -ENOMEM;
}
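
/*
 * In pipe mode there is no file header to seek back to, so attributes, event
 * types and tracing data are instead synthesized as in-stream records
 * (PERF_RECORD_HEADER_ATTR and friends) and re-materialized on the read side
 * by the matching perf_event__process_*() handlers below.
 */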
int perf_event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
                                perf_event__handler_t process,
                                struct perf_session *session)
{
        union perf_event *ev;
        size_t size;
        int err;

        size = sizeof(struct perf_event_attr);
        size = ALIGN(size, sizeof(u64));
        size += sizeof(struct perf_event_header);
        size += ids * sizeof(u64);

        ev = malloc(size);

        if (ev == NULL)
                return -ENOMEM;

        ev->attr.attr = *attr;
        memcpy(ev->attr.id, id, ids * sizeof(u64));

        ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
        ev->attr.header.size = size;

        err = process(ev, NULL, session);

        free(ev);

        return err;
}

int perf_session__synthesize_attrs(struct perf_session *session,
                                   perf_event__handler_t process)
{
        struct perf_evsel *attr;
        int err = 0;

        list_for_each_entry(attr, &session->evlist->entries, node) {
                err = perf_event__synthesize_attr(&attr->attr, attr->ids,
                                                  attr->id, process, session);
                if (err) {
                        pr_debug("failed to create perf header attribute\n");
                        return err;
                }
        }

        return err;
}

int perf_event__process_attr(union perf_event *event,
                             struct perf_session *session)
{
        unsigned int i, ids, n_ids;
        struct perf_evsel *evsel;

        if (session->evlist == NULL) {
                session->evlist = perf_evlist__new(NULL, NULL);
                if (session->evlist == NULL)
                        return -ENOMEM;
        }

        evsel = perf_evsel__new(&event->attr.attr,
                                session->evlist->nr_entries);
        if (evsel == NULL)
                return -ENOMEM;

        perf_evlist__add(session->evlist, evsel);

        ids = event->header.size;
        ids -= (void *)&event->attr.id - (void *)event;
        n_ids = ids / sizeof(u64);
        /*
         * We don't have the cpu and thread maps on the header, so
         * for allocating the perf_sample_id table we fake 1 cpu and
         * hattr->ids threads.
         */
        if (perf_evsel__alloc_id(evsel, 1, n_ids))
                return -ENOMEM;

        for (i = 0; i < n_ids; i++) {
                perf_evlist__id_add(session->evlist, evsel, 0, i,
                                    event->attr.id[i]);
        }

        perf_session__update_sample_type(session);

        return 0;
}

int perf_event__synthesize_event_type(u64 event_id, char *name,
                                      perf_event__handler_t process,
                                      struct perf_session *session)
{
        union perf_event ev;
        size_t size = 0;
        int err = 0;

        memset(&ev, 0, sizeof(ev));

        ev.event_type.event_type.event_id = event_id;
        memset(ev.event_type.event_type.name, 0, MAX_EVENT_NAME);
        strncpy(ev.event_type.event_type.name, name, MAX_EVENT_NAME - 1);

        ev.event_type.header.type = PERF_RECORD_HEADER_EVENT_TYPE;
        size = strlen(name);
        size = ALIGN(size, sizeof(u64));
        ev.event_type.header.size = sizeof(ev.event_type) -
                (sizeof(ev.event_type.event_type.name) - size);

        err = process(&ev, NULL, session);

        return err;
}

int perf_event__synthesize_event_types(perf_event__handler_t process,
                                       struct perf_session *session)
{
        struct perf_trace_event_type *type;
        int i, err = 0;

        for (i = 0; i < event_count; i++) {
                type = &events[i];

                err = perf_event__synthesize_event_type(type->event_id,
                                                        type->name, process,
                                                        session);
                if (err) {
                        pr_debug("failed to create perf header event type\n");
                        return err;
                }
        }

        return err;
}

int perf_event__process_event_type(union perf_event *event,
                                   struct perf_session *session __unused)
{
        if (perf_header__push_event(event->event_type.event_type.event_id,
                                    event->event_type.event_type.name) < 0)
                return -ENOMEM;

        return 0;
}
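
/*
 * Tracing data is too large to inline in a single record: a
 * PERF_RECORD_HEADER_TRACING_DATA event announces its (u64-aligned) size and
 * the raw tracing data is streamed right behind it, so the reader below
 * consumes it directly from the session file descriptor.
 */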
int perf_event__synthesize_tracing_data(int fd, struct perf_evlist *evlist,
                                        perf_event__handler_t process,
                                        struct perf_session *session __unused)
{
        union perf_event ev;
        ssize_t size = 0, aligned_size = 0, padding;
        int err __used = 0;

        memset(&ev, 0, sizeof(ev));

        ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
        size = read_tracing_data_size(fd, &evlist->entries);
        if (size <= 0)
                return size;
        aligned_size = ALIGN(size, sizeof(u64));
        padding = aligned_size - size;
        ev.tracing_data.header.size = sizeof(ev.tracing_data);
        ev.tracing_data.size = aligned_size;

        process(&ev, NULL, session);

        err = read_tracing_data(fd, &evlist->entries);
        write_padded(fd, NULL, 0, padding);

        return aligned_size;
}

int perf_event__process_tracing_data(union perf_event *event,
                                     struct perf_session *session)
{
        ssize_t size_read, padding, size = event->tracing_data.size;
        off_t offset = lseek(session->fd, 0, SEEK_CUR);
        char buf[BUFSIZ];

        /* setup for reading amidst mmap */
        lseek(session->fd, offset + sizeof(struct tracing_data_event),
              SEEK_SET);

        size_read = trace_report(session->fd, session->repipe);

        padding = ALIGN(size_read, sizeof(u64)) - size_read;

        if (read(session->fd, buf, padding) < 0)
                die("reading input file");
        if (session->repipe) {
                int retw = write(STDOUT_FILENO, buf, padding);
                if (retw <= 0 || retw != padding)
                        die("repiping tracing data padding");
        }

        if (size_read + padding != size)
                die("tracing data size mismatch");

        return size_read + padding;
}

int perf_event__synthesize_build_id(struct dso *pos, u16 misc,
                                    perf_event__handler_t process,
                                    struct machine *machine,
                                    struct perf_session *session)
{
        union perf_event ev;
        size_t len;
        int err = 0;

        if (!pos->hit)
                return err;

        memset(&ev, 0, sizeof(ev));

        len = pos->long_name_len + 1;
        len = ALIGN(len, NAME_ALIGN);
        memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
        ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
        ev.build_id.header.misc = misc;
        ev.build_id.pid = machine->pid;
        ev.build_id.header.size = sizeof(ev.build_id) + len;
        memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);

        err = process(&ev, NULL, session);

        return err;
}

int perf_event__process_build_id(union perf_event *event,
                                 struct perf_session *session)
{
        __event_process_build_id(&event->build_id,
                                 event->build_id.filename,
                                 session);
        return 0;
}

void disable_buildid_cache(void)
{
        no_buildid_cache = true;
}