PageRenderTime 70ms CodeModel.GetById 23ms RepoModel.GetById 1ms app.codeStats 0ms

/tools/perf/util/sort.c

http://github.com/torvalds/linux
C | 3193 lines | 2462 code | 627 blank | 104 comment | 474 complexity | e2cb9929f5409fe7a2af22113602ebb8 MD5 | raw file
Possible License(s): LGPL-2.0, AGPL-1.0, GPL-2.0
  1. // SPDX-License-Identifier: GPL-2.0
  2. #include <errno.h>
  3. #include <inttypes.h>
  4. #include <regex.h>
  5. #include <stdlib.h>
  6. #include <linux/mman.h>
  7. #include <linux/time64.h>
  8. #include "debug.h"
  9. #include "dso.h"
  10. #include "sort.h"
  11. #include "hist.h"
  12. #include "cacheline.h"
  13. #include "comm.h"
  14. #include "map.h"
  15. #include "maps.h"
  16. #include "symbol.h"
  17. #include "map_symbol.h"
  18. #include "branch.h"
  19. #include "thread.h"
  20. #include "evsel.h"
  21. #include "evlist.h"
  22. #include "srcline.h"
  23. #include "strlist.h"
  24. #include "strbuf.h"
  25. #include <traceevent/event-parse.h>
  26. #include "mem-events.h"
  27. #include "annotate.h"
  28. #include "time-utils.h"
  29. #include "cgroup.h"
  30. #include "machine.h"
  31. #include <linux/kernel.h>
  32. #include <linux/string.h>
/* Compiled form of parent_pattern, used to match "parent" symbols (-p/--parent). */
regex_t		parent_regex;
const char	default_parent_pattern[] = "^sys_|^do_page_fault";
const char	*parent_pattern = default_parent_pattern;
/* Default sort keys per report mode; overridden by -s/--sort. */
const char	*default_sort_order = "comm,dso,symbol";
const char	default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
const char	default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
const char	default_top_sort_order[] = "dso,symbol";
const char	default_diff_sort_order[] = "dso,symbol";
const char	default_tracepoint_sort_order[] = "trace";
const char	*sort_order;
const char	*field_order;
/* Callees matching this regex are collapsed into their caller (--ignore-callees). */
regex_t		ignore_callees_regex;
int		have_ignore_callees = 0;
/* Active sort mode (normal/branch/memory), selected from the perf sub-command. */
enum sort_mode	sort__mode = SORT_MODE__NORMAL;
  47. /*
  48. * Replaces all occurrences of a char used with the:
  49. *
  50. * -t, --field-separator
  51. *
  52. * option, that uses a special separator character and don't pad with spaces,
  53. * replacing all occurrences of this separator in symbol names (and other
  54. * output) with a '.' character, that thus it's the only non valid separator.
  55. */
  56. static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
  57. {
  58. int n;
  59. va_list ap;
  60. va_start(ap, fmt);
  61. n = vsnprintf(bf, size, fmt, ap);
  62. if (symbol_conf.field_sep && n > 0) {
  63. char *sep = bf;
  64. while (1) {
  65. sep = strchr(sep, *symbol_conf.field_sep);
  66. if (sep == NULL)
  67. break;
  68. *sep = '.';
  69. }
  70. }
  71. va_end(ap);
  72. if (n >= (int)size)
  73. return size - 1;
  74. return n;
  75. }
  76. static int64_t cmp_null(const void *l, const void *r)
  77. {
  78. if (!l && !r)
  79. return 0;
  80. else if (!l)
  81. return -1;
  82. else
  83. return 1;
  84. }
/* --sort pid */

static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
	/* Compare by thread id; right - left gives descending tid order. */
	return right->thread->tid - left->thread->tid;
}

static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	const char *comm = thread__comm_str(he->thread);

	/*
	 * Reserve 8 columns for the "%7d:" tid prefix.
	 * NOTE(review): if width <= 7, "max(7U, width) - 8" wraps as
	 * unsigned arithmetic -- presumably callers always pass a width
	 * large enough for the HISTC_THREAD column; confirm.
	 */
	width = max(7U, width) - 8;
	return repsep_snprintf(bf, size, "%7d:%-*.*s", he->thread->tid,
			       width, width, comm ?: "");
}

static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
{
	const struct thread *th = arg;

	if (type != HIST_FILTER__THREAD)
		return -1;

	/* Non-zero (filter out) when a thread filter is set and doesn't match. */
	return th && he->thread != th;
}

struct sort_entry sort_thread = {
	.se_header	= " Pid:Command",
	.se_cmp		= sort__thread_cmp,
	.se_snprintf	= hist_entry__thread_snprintf,
	.se_filter	= hist_entry__thread_filter,
	.se_width_idx	= HISTC_THREAD,
};
  113. /* --sort comm */
  114. /*
  115. * We can't use pointer comparison in functions below,
  116. * because it gives different results based on pointer
  117. * values, which could break some sorting assumptions.
  118. */
  119. static int64_t
  120. sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
  121. {
  122. return strcmp(comm__str(right->comm), comm__str(left->comm));
  123. }
  124. static int64_t
  125. sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
  126. {
  127. return strcmp(comm__str(right->comm), comm__str(left->comm));
  128. }
  129. static int64_t
  130. sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
  131. {
  132. return strcmp(comm__str(right->comm), comm__str(left->comm));
  133. }
  134. static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
  135. size_t size, unsigned int width)
  136. {
  137. return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
  138. }
  139. struct sort_entry sort_comm = {
  140. .se_header = "Command",
  141. .se_cmp = sort__comm_cmp,
  142. .se_collapse = sort__comm_collapse,
  143. .se_sort = sort__comm_sort,
  144. .se_snprintf = hist_entry__comm_snprintf,
  145. .se_filter = hist_entry__thread_filter,
  146. .se_width_idx = HISTC_COMM,
  147. };
  148. /* --sort dso */
  149. static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
  150. {
  151. struct dso *dso_l = map_l ? map_l->dso : NULL;
  152. struct dso *dso_r = map_r ? map_r->dso : NULL;
  153. const char *dso_name_l, *dso_name_r;
  154. if (!dso_l || !dso_r)
  155. return cmp_null(dso_r, dso_l);
  156. if (verbose > 0) {
  157. dso_name_l = dso_l->long_name;
  158. dso_name_r = dso_r->long_name;
  159. } else {
  160. dso_name_l = dso_l->short_name;
  161. dso_name_r = dso_r->short_name;
  162. }
  163. return strcmp(dso_name_l, dso_name_r);
  164. }
  165. static int64_t
  166. sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
  167. {
  168. return _sort__dso_cmp(right->ms.map, left->ms.map);
  169. }
  170. static int _hist_entry__dso_snprintf(struct map *map, char *bf,
  171. size_t size, unsigned int width)
  172. {
  173. if (map && map->dso) {
  174. const char *dso_name = verbose > 0 ? map->dso->long_name :
  175. map->dso->short_name;
  176. return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
  177. }
  178. return repsep_snprintf(bf, size, "%-*.*s", width, width, "[unknown]");
  179. }
  180. static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
  181. size_t size, unsigned int width)
  182. {
  183. return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
  184. }
  185. static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
  186. {
  187. const struct dso *dso = arg;
  188. if (type != HIST_FILTER__DSO)
  189. return -1;
  190. return dso && (!he->ms.map || he->ms.map->dso != dso);
  191. }
  192. struct sort_entry sort_dso = {
  193. .se_header = "Shared Object",
  194. .se_cmp = sort__dso_cmp,
  195. .se_snprintf = hist_entry__dso_snprintf,
  196. .se_filter = hist_entry__dso_filter,
  197. .se_width_idx = HISTC_DSO,
  198. };
  199. /* --sort symbol */
  200. static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
  201. {
  202. return (int64_t)(right_ip - left_ip);
  203. }
  204. static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
  205. {
  206. if (!sym_l || !sym_r)
  207. return cmp_null(sym_l, sym_r);
  208. if (sym_l == sym_r)
  209. return 0;
  210. if (sym_l->inlined || sym_r->inlined) {
  211. int ret = strcmp(sym_l->name, sym_r->name);
  212. if (ret)
  213. return ret;
  214. if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start))
  215. return 0;
  216. }
  217. if (sym_l->start != sym_r->start)
  218. return (int64_t)(sym_r->start - sym_l->start);
  219. return (int64_t)(sym_r->end - sym_l->end);
  220. }
  221. static int64_t
  222. sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
  223. {
  224. int64_t ret;
  225. if (!left->ms.sym && !right->ms.sym)
  226. return _sort__addr_cmp(left->ip, right->ip);
  227. /*
  228. * comparing symbol address alone is not enough since it's a
  229. * relative address within a dso.
  230. */
  231. if (!hists__has(left->hists, dso) || hists__has(right->hists, dso)) {
  232. ret = sort__dso_cmp(left, right);
  233. if (ret != 0)
  234. return ret;
  235. }
  236. return _sort__sym_cmp(left->ms.sym, right->ms.sym);
  237. }
static int64_t
sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
{
	/* Entries without a resolved symbol order by NULL-ness only. */
	if (!left->ms.sym || !right->ms.sym)
		return cmp_null(left->ms.sym, right->ms.sym);

	/* Output-resort stage orders by symbol name, not address. */
	return strcmp(right->ms.sym->name, left->ms.sym->name);
}

/*
 * Format a symbol column: optional "addr origin" prefix in verbose
 * mode, then "[level] name" (plus "+0xoff" for data objects and an
 * "(inlined)" tag), falling back to the raw address when unresolved.
 * Returns the number of characters written.
 */
static int _hist_entry__sym_snprintf(struct map_symbol *ms,
				     u64 ip, char level, char *bf, size_t size,
				     unsigned int width)
{
	struct symbol *sym = ms->sym;
	struct map *map = ms->map;
	size_t ret = 0;

	if (verbose > 0) {
		/* '!' marks a missing map, otherwise the dso symtab origin char. */
		char o = map ? dso__symtab_origin(map->dso) : '!';

		/* Pad the address to the platform pointer width (hex digits + "0x"). */
		ret += repsep_snprintf(bf, size, "%-#*llx %c ",
				       BITS_PER_LONG / 4 + 2, ip, o);
	}

	/*
	 * NOTE(review): the "size - ret" arithmetic below assumes ret < size;
	 * repsep_snprintf() clamps its return to size - 1 on truncation, which
	 * presumably preserves that invariant -- confirm.
	 */
	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
	if (sym && map) {
		if (sym->type == STT_OBJECT) {
			/* Data objects: name plus byte offset inside the object. */
			ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
			ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
					       ip - map->unmap_ip(map, sym->start));
		} else {
			ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
					       width - ret,
					       sym->name);
			if (sym->inlined)
				ret += repsep_snprintf(bf + ret, size - ret,
						       " (inlined)");
		}
	} else {
		size_t len = BITS_PER_LONG / 4;

		/* No symbol: print just the raw address. */
		ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
				       len, ip);
	}

	return ret;
}

int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width)
{
	return _hist_entry__sym_snprintf(&he->ms, he->ip,
					 he->level, bf, size, width);
}

static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
{
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)
		return -1;

	/* Filter out entries whose symbol name doesn't contain the substring. */
	return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
}

struct sort_entry sort_sym = {
	.se_header	= "Symbol",
	.se_cmp		= sort__sym_cmp,
	.se_sort	= sort__sym_sort,
	.se_snprintf	= hist_entry__sym_snprintf,
	.se_filter	= hist_entry__sym_filter,
	.se_width_idx	= HISTC_SYMBOL,
};
  298. /* --sort srcline */
  299. char *hist_entry__srcline(struct hist_entry *he)
  300. {
  301. return map__srcline(he->ms.map, he->ip, he->ms.sym);
  302. }
  303. static int64_t
  304. sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
  305. {
  306. if (!left->srcline)
  307. left->srcline = hist_entry__srcline(left);
  308. if (!right->srcline)
  309. right->srcline = hist_entry__srcline(right);
  310. return strcmp(right->srcline, left->srcline);
  311. }
  312. static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
  313. size_t size, unsigned int width)
  314. {
  315. if (!he->srcline)
  316. he->srcline = hist_entry__srcline(he);
  317. return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
  318. }
  319. struct sort_entry sort_srcline = {
  320. .se_header = "Source:Line",
  321. .se_cmp = sort__srcline_cmp,
  322. .se_snprintf = hist_entry__srcline_snprintf,
  323. .se_width_idx = HISTC_SRCLINE,
  324. };
  325. /* --sort srcline_from */
  326. static char *addr_map_symbol__srcline(struct addr_map_symbol *ams)
  327. {
  328. return map__srcline(ams->ms.map, ams->al_addr, ams->ms.sym);
  329. }
  330. static int64_t
  331. sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
  332. {
  333. if (!left->branch_info->srcline_from)
  334. left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from);
  335. if (!right->branch_info->srcline_from)
  336. right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from);
  337. return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
  338. }
  339. static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
  340. size_t size, unsigned int width)
  341. {
  342. return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
  343. }
  344. struct sort_entry sort_srcline_from = {
  345. .se_header = "From Source:Line",
  346. .se_cmp = sort__srcline_from_cmp,
  347. .se_snprintf = hist_entry__srcline_from_snprintf,
  348. .se_width_idx = HISTC_SRCLINE_FROM,
  349. };
  350. /* --sort srcline_to */
  351. static int64_t
  352. sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
  353. {
  354. if (!left->branch_info->srcline_to)
  355. left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to);
  356. if (!right->branch_info->srcline_to)
  357. right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to);
  358. return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
  359. }
  360. static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
  361. size_t size, unsigned int width)
  362. {
  363. return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
  364. }
  365. struct sort_entry sort_srcline_to = {
  366. .se_header = "To Source:Line",
  367. .se_cmp = sort__srcline_to_cmp,
  368. .se_snprintf = hist_entry__srcline_to_snprintf,
  369. .se_width_idx = HISTC_SRCLINE_TO,
  370. };
  371. static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
  372. size_t size, unsigned int width)
  373. {
  374. struct symbol *sym = he->ms.sym;
  375. struct annotation *notes;
  376. double ipc = 0.0, coverage = 0.0;
  377. char tmp[64];
  378. if (!sym)
  379. return repsep_snprintf(bf, size, "%-*s", width, "-");
  380. notes = symbol__annotation(sym);
  381. if (notes->hit_cycles)
  382. ipc = notes->hit_insn / ((double)notes->hit_cycles);
  383. if (notes->total_insn) {
  384. coverage = notes->cover_insn * 100.0 /
  385. ((double)notes->total_insn);
  386. }
  387. snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage);
  388. return repsep_snprintf(bf, size, "%-*s", width, tmp);
  389. }
  390. struct sort_entry sort_sym_ipc = {
  391. .se_header = "IPC [IPC Coverage]",
  392. .se_cmp = sort__sym_cmp,
  393. .se_snprintf = hist_entry__sym_ipc_snprintf,
  394. .se_width_idx = HISTC_SYMBOL_IPC,
  395. };
  396. static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he
  397. __maybe_unused,
  398. char *bf, size_t size,
  399. unsigned int width)
  400. {
  401. char tmp[64];
  402. snprintf(tmp, sizeof(tmp), "%-5s %2s", "-", "-");
  403. return repsep_snprintf(bf, size, "%-*s", width, tmp);
  404. }
  405. struct sort_entry sort_sym_ipc_null = {
  406. .se_header = "IPC [IPC Coverage]",
  407. .se_cmp = sort__sym_cmp,
  408. .se_snprintf = hist_entry__sym_ipc_null_snprintf,
  409. .se_width_idx = HISTC_SYMBOL_IPC,
  410. };
/* --sort srcfile */

/* Shared sentinel for "no source file"; never freed, compared by pointer. */
static char no_srcfile[1];

static char *hist_entry__get_srcfile(struct hist_entry *e)
{
	char *sf, *p;
	struct map *map = e->ms.map;

	if (!map)
		return no_srcfile;

	sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
			 e->ms.sym, false, true, true, e->ip);
	/*
	 * NOTE(review): sf is not freed here -- presumably __get_srcline()
	 * returns the SRCLINE_UNKNOWN literal itself on failure, so freeing
	 * would be wrong; confirm against srcline.c.
	 */
	if (!strcmp(sf, SRCLINE_UNKNOWN))
		return no_srcfile;
	/* Chop the ":line" suffix, keeping only the file name. */
	p = strchr(sf, ':');
	if (p && *sf) {
		*p = 0;
		return sf;
	}
	/* Empty or malformed srcline: discard and report no source file. */
	free(sf);
	return no_srcfile;
}

static int64_t
sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
{
	/* Resolve lazily and cache; the srcline lookup is expensive. */
	if (!left->srcfile)
		left->srcfile = hist_entry__get_srcfile(left);
	if (!right->srcfile)
		right->srcfile = hist_entry__get_srcfile(right);

	return strcmp(right->srcfile, left->srcfile);
}

static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
{
	if (!he->srcfile)
		he->srcfile = hist_entry__get_srcfile(he);

	return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
}

struct sort_entry sort_srcfile = {
	.se_header	= "Source File",
	.se_cmp		= sort__srcfile_cmp,
	.se_snprintf	= hist_entry__srcfile_snprintf,
	.se_width_idx	= HISTC_SRCFILE,
};
  453. /* --sort parent */
  454. static int64_t
  455. sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
  456. {
  457. struct symbol *sym_l = left->parent;
  458. struct symbol *sym_r = right->parent;
  459. if (!sym_l || !sym_r)
  460. return cmp_null(sym_l, sym_r);
  461. return strcmp(sym_r->name, sym_l->name);
  462. }
  463. static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
  464. size_t size, unsigned int width)
  465. {
  466. return repsep_snprintf(bf, size, "%-*.*s", width, width,
  467. he->parent ? he->parent->name : "[other]");
  468. }
  469. struct sort_entry sort_parent = {
  470. .se_header = "Parent symbol",
  471. .se_cmp = sort__parent_cmp,
  472. .se_snprintf = hist_entry__parent_snprintf,
  473. .se_width_idx = HISTC_PARENT,
  474. };
  475. /* --sort cpu */
  476. static int64_t
  477. sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
  478. {
  479. return right->cpu - left->cpu;
  480. }
  481. static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
  482. size_t size, unsigned int width)
  483. {
  484. return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
  485. }
  486. struct sort_entry sort_cpu = {
  487. .se_header = "CPU",
  488. .se_cmp = sort__cpu_cmp,
  489. .se_snprintf = hist_entry__cpu_snprintf,
  490. .se_width_idx = HISTC_CPU,
  491. };
  492. /* --sort cgroup_id */
  493. static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev)
  494. {
  495. return (int64_t)(right_dev - left_dev);
  496. }
  497. static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino)
  498. {
  499. return (int64_t)(right_ino - left_ino);
  500. }
  501. static int64_t
  502. sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right)
  503. {
  504. int64_t ret;
  505. ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev);
  506. if (ret != 0)
  507. return ret;
  508. return _sort__cgroup_inode_cmp(right->cgroup_id.ino,
  509. left->cgroup_id.ino);
  510. }
  511. static int hist_entry__cgroup_id_snprintf(struct hist_entry *he,
  512. char *bf, size_t size,
  513. unsigned int width __maybe_unused)
  514. {
  515. return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev,
  516. he->cgroup_id.ino);
  517. }
  518. struct sort_entry sort_cgroup_id = {
  519. .se_header = "cgroup id (dev/inode)",
  520. .se_cmp = sort__cgroup_id_cmp,
  521. .se_snprintf = hist_entry__cgroup_id_snprintf,
  522. .se_width_idx = HISTC_CGROUP_ID,
  523. };
  524. /* --sort cgroup */
  525. static int64_t
  526. sort__cgroup_cmp(struct hist_entry *left, struct hist_entry *right)
  527. {
  528. return right->cgroup - left->cgroup;
  529. }
  530. static int hist_entry__cgroup_snprintf(struct hist_entry *he,
  531. char *bf, size_t size,
  532. unsigned int width __maybe_unused)
  533. {
  534. const char *cgrp_name = "N/A";
  535. if (he->cgroup) {
  536. struct cgroup *cgrp = cgroup__find(he->ms.maps->machine->env,
  537. he->cgroup);
  538. if (cgrp != NULL)
  539. cgrp_name = cgrp->name;
  540. else
  541. cgrp_name = "unknown";
  542. }
  543. return repsep_snprintf(bf, size, "%s", cgrp_name);
  544. }
  545. struct sort_entry sort_cgroup = {
  546. .se_header = "Cgroup",
  547. .se_cmp = sort__cgroup_cmp,
  548. .se_snprintf = hist_entry__cgroup_snprintf,
  549. .se_width_idx = HISTC_CGROUP,
  550. };
  551. /* --sort socket */
  552. static int64_t
  553. sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
  554. {
  555. return right->socket - left->socket;
  556. }
  557. static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
  558. size_t size, unsigned int width)
  559. {
  560. return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
  561. }
  562. static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
  563. {
  564. int sk = *(const int *)arg;
  565. if (type != HIST_FILTER__SOCKET)
  566. return -1;
  567. return sk >= 0 && he->socket != sk;
  568. }
  569. struct sort_entry sort_socket = {
  570. .se_header = "Socket",
  571. .se_cmp = sort__socket_cmp,
  572. .se_snprintf = hist_entry__socket_snprintf,
  573. .se_filter = hist_entry__socket_filter,
  574. .se_width_idx = HISTC_SOCKET,
  575. };
  576. /* --sort time */
  577. static int64_t
  578. sort__time_cmp(struct hist_entry *left, struct hist_entry *right)
  579. {
  580. return right->time - left->time;
  581. }
  582. static int hist_entry__time_snprintf(struct hist_entry *he, char *bf,
  583. size_t size, unsigned int width)
  584. {
  585. char he_time[32];
  586. if (symbol_conf.nanosecs)
  587. timestamp__scnprintf_nsec(he->time, he_time,
  588. sizeof(he_time));
  589. else
  590. timestamp__scnprintf_usec(he->time, he_time,
  591. sizeof(he_time));
  592. return repsep_snprintf(bf, size, "%-.*s", width, he_time);
  593. }
  594. struct sort_entry sort_time = {
  595. .se_header = "Time",
  596. .se_cmp = sort__time_cmp,
  597. .se_snprintf = hist_entry__time_snprintf,
  598. .se_width_idx = HISTC_TIME,
  599. };
/* --sort trace */

/*
 * Render the tracepoint payload of @he into a freshly allocated string:
 * raw field dump with --raw-trace, libtraceevent pretty-printing
 * otherwise.  The returned buffer is owned by the caller (cached in
 * trace_output); NULL on allocation failure.
 */
static char *get_trace_output(struct hist_entry *he)
{
	struct trace_seq seq;
	struct evsel *evsel;
	struct tep_record rec = {
		.data = he->raw_data,
		.size = he->raw_size,
	};

	evsel = hists_to_evsel(he->hists);

	trace_seq_init(&seq);
	if (symbol_conf.raw_trace) {
		tep_print_fields(&seq, he->raw_data, he->raw_size,
				 evsel->tp_format);
	} else {
		tep_print_event(evsel->tp_format->tep,
				&seq, &rec, "%s", TEP_PRINT_INFO);
	}
	/*
	 * Trim the buffer, it starts at 4KB and we're not going to
	 * add anything more to this buffer.
	 */
	return realloc(seq.buffer, seq.len + 1);
}

static int64_t
sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct evsel *evsel;

	/* Only tracepoint events carry a printable payload. */
	evsel = hists_to_evsel(left->hists);
	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
		return 0;

	/* Render lazily and cache on the entry. */
	if (left->trace_output == NULL)
		left->trace_output = get_trace_output(left);
	if (right->trace_output == NULL)
		right->trace_output = get_trace_output(right);

	return strcmp(right->trace_output, left->trace_output);
}

static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
{
	struct evsel *evsel;

	evsel = hists_to_evsel(he->hists);
	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
		return scnprintf(bf, size, "%-.*s", width, "N/A");

	if (he->trace_output == NULL)
		he->trace_output = get_trace_output(he);
	return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
}

struct sort_entry sort_trace = {
	.se_header      = "Trace output",
	.se_cmp	        = sort__trace_cmp,
	.se_snprintf    = hist_entry__trace_snprintf,
	.se_width_idx   = HISTC_TRACE,
};
  654. /* sort keys for branch stacks */
  655. static int64_t
  656. sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
  657. {
  658. if (!left->branch_info || !right->branch_info)
  659. return cmp_null(left->branch_info, right->branch_info);
  660. return _sort__dso_cmp(left->branch_info->from.ms.map,
  661. right->branch_info->from.ms.map);
  662. }
  663. static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
  664. size_t size, unsigned int width)
  665. {
  666. if (he->branch_info)
  667. return _hist_entry__dso_snprintf(he->branch_info->from.ms.map,
  668. bf, size, width);
  669. else
  670. return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
  671. }
  672. static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
  673. const void *arg)
  674. {
  675. const struct dso *dso = arg;
  676. if (type != HIST_FILTER__DSO)
  677. return -1;
  678. return dso && (!he->branch_info || !he->branch_info->from.ms.map ||
  679. he->branch_info->from.ms.map->dso != dso);
  680. }
  681. static int64_t
  682. sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
  683. {
  684. if (!left->branch_info || !right->branch_info)
  685. return cmp_null(left->branch_info, right->branch_info);
  686. return _sort__dso_cmp(left->branch_info->to.ms.map,
  687. right->branch_info->to.ms.map);
  688. }
  689. static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
  690. size_t size, unsigned int width)
  691. {
  692. if (he->branch_info)
  693. return _hist_entry__dso_snprintf(he->branch_info->to.ms.map,
  694. bf, size, width);
  695. else
  696. return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
  697. }
  698. static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
  699. const void *arg)
  700. {
  701. const struct dso *dso = arg;
  702. if (type != HIST_FILTER__DSO)
  703. return -1;
  704. return dso && (!he->branch_info || !he->branch_info->to.ms.map ||
  705. he->branch_info->to.ms.map->dso != dso);
  706. }
  707. static int64_t
  708. sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
  709. {
  710. struct addr_map_symbol *from_l = &left->branch_info->from;
  711. struct addr_map_symbol *from_r = &right->branch_info->from;
  712. if (!left->branch_info || !right->branch_info)
  713. return cmp_null(left->branch_info, right->branch_info);
  714. from_l = &left->branch_info->from;
  715. from_r = &right->branch_info->from;
  716. if (!from_l->ms.sym && !from_r->ms.sym)
  717. return _sort__addr_cmp(from_l->addr, from_r->addr);
  718. return _sort__sym_cmp(from_l->ms.sym, from_r->ms.sym);
  719. }
  720. static int64_t
  721. sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
  722. {
  723. struct addr_map_symbol *to_l, *to_r;
  724. if (!left->branch_info || !right->branch_info)
  725. return cmp_null(left->branch_info, right->branch_info);
  726. to_l = &left->branch_info->to;
  727. to_r = &right->branch_info->to;
  728. if (!to_l->ms.sym && !to_r->ms.sym)
  729. return _sort__addr_cmp(to_l->addr, to_r->addr);
  730. return _sort__sym_cmp(to_l->ms.sym, to_r->ms.sym);
  731. }
  732. static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
  733. size_t size, unsigned int width)
  734. {
  735. if (he->branch_info) {
  736. struct addr_map_symbol *from = &he->branch_info->from;
  737. return _hist_entry__sym_snprintf(&from->ms, from->al_addr,
  738. he->level, bf, size, width);
  739. }
  740. return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
  741. }
  742. static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
  743. size_t size, unsigned int width)
  744. {
  745. if (he->branch_info) {
  746. struct addr_map_symbol *to = &he->branch_info->to;
  747. return _hist_entry__sym_snprintf(&to->ms, to->al_addr,
  748. he->level, bf, size, width);
  749. }
  750. return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
  751. }
  752. static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
  753. const void *arg)
  754. {
  755. const char *sym = arg;
  756. if (type != HIST_FILTER__SYMBOL)
  757. return -1;
  758. return sym && !(he->branch_info && he->branch_info->from.ms.sym &&
  759. strstr(he->branch_info->from.ms.sym->name, sym));
  760. }
  761. static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
  762. const void *arg)
  763. {
  764. const char *sym = arg;
  765. if (type != HIST_FILTER__SYMBOL)
  766. return -1;
  767. return sym && !(he->branch_info && he->branch_info->to.ms.sym &&
  768. strstr(he->branch_info->to.ms.sym->name, sym));
  769. }
/* --sort dso_from: DSO containing the branch source address. */
struct sort_entry sort_dso_from = {
	.se_header	= "Source Shared Object",
	.se_cmp		= sort__dso_from_cmp,
	.se_snprintf	= hist_entry__dso_from_snprintf,
	.se_filter	= hist_entry__dso_from_filter,
	.se_width_idx	= HISTC_DSO_FROM,
};

/* --sort dso_to: DSO containing the branch target address. */
struct sort_entry sort_dso_to = {
	.se_header	= "Target Shared Object",
	.se_cmp		= sort__dso_to_cmp,
	.se_snprintf	= hist_entry__dso_to_snprintf,
	.se_filter	= hist_entry__dso_to_filter,
	.se_width_idx	= HISTC_DSO_TO,
};

/* --sort symbol_from: symbol at the branch source address. */
struct sort_entry sort_sym_from = {
	.se_header	= "Source Symbol",
	.se_cmp		= sort__sym_from_cmp,
	.se_snprintf	= hist_entry__sym_from_snprintf,
	.se_filter	= hist_entry__sym_from_filter,
	.se_width_idx	= HISTC_SYMBOL_FROM,
};

/* --sort symbol_to: symbol at the branch target address. */
struct sort_entry sort_sym_to = {
	.se_header	= "Target Symbol",
	.se_cmp		= sort__sym_to_cmp,
	.se_snprintf	= hist_entry__sym_to_snprintf,
	.se_filter	= hist_entry__sym_to_filter,
	.se_width_idx	= HISTC_SYMBOL_TO,
};
  798. static int64_t
  799. sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
  800. {
  801. unsigned char mp, p;
  802. if (!left->branch_info || !right->branch_info)
  803. return cmp_null(left->branch_info, right->branch_info);
  804. mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
  805. p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
  806. return mp || p;
  807. }
  808. static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
  809. size_t size, unsigned int width){
  810. static const char *out = "N/A";
  811. if (he->branch_info) {
  812. if (he->branch_info->flags.predicted)
  813. out = "N";
  814. else if (he->branch_info->flags.mispred)
  815. out = "Y";
  816. }
  817. return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
  818. }
  819. static int64_t
  820. sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
  821. {
  822. if (!left->branch_info || !right->branch_info)
  823. return cmp_null(left->branch_info, right->branch_info);
  824. return left->branch_info->flags.cycles -
  825. right->branch_info->flags.cycles;
  826. }
  827. static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
  828. size_t size, unsigned int width)
  829. {
  830. if (!he->branch_info)
  831. return scnprintf(bf, size, "%-.*s", width, "N/A");
  832. if (he->branch_info->flags.cycles == 0)
  833. return repsep_snprintf(bf, size, "%-*s", width, "-");
  834. return repsep_snprintf(bf, size, "%-*hd", width,
  835. he->branch_info->flags.cycles);
  836. }
  837. struct sort_entry sort_cycles = {
  838. .se_header = "Basic Block Cycles",
  839. .se_cmp = sort__cycles_cmp,
  840. .se_snprintf = hist_entry__cycles_snprintf,
  841. .se_width_idx = HISTC_CYCLES,
  842. };
  843. /* --sort daddr_sym */
  844. int64_t
  845. sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
  846. {
  847. uint64_t l = 0, r = 0;
  848. if (left->mem_info)
  849. l = left->mem_info->daddr.addr;
  850. if (right->mem_info)
  851. r = right->mem_info->daddr.addr;
  852. return (int64_t)(r - l);
  853. }
  854. static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
  855. size_t size, unsigned int width)
  856. {
  857. uint64_t addr = 0;
  858. struct map_symbol *ms = NULL;
  859. if (he->mem_info) {
  860. addr = he->mem_info->daddr.addr;
  861. ms = &he->mem_info->daddr.ms;
  862. }
  863. return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
  864. }
  865. int64_t
  866. sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
  867. {
  868. uint64_t l = 0, r = 0;
  869. if (left->mem_info)
  870. l = left->mem_info->iaddr.addr;
  871. if (right->mem_info)
  872. r = right->mem_info->iaddr.addr;
  873. return (int64_t)(r - l);
  874. }
  875. static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
  876. size_t size, unsigned int width)
  877. {
  878. uint64_t addr = 0;
  879. struct map_symbol *ms = NULL;
  880. if (he->mem_info) {
  881. addr = he->mem_info->iaddr.addr;
  882. ms = &he->mem_info->iaddr.ms;
  883. }
  884. return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
  885. }
  886. static int64_t
  887. sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
  888. {
  889. struct map *map_l = NULL;
  890. struct map *map_r = NULL;
  891. if (left->mem_info)
  892. map_l = left->mem_info->daddr.ms.map;
  893. if (right->mem_info)
  894. map_r = right->mem_info->daddr.ms.map;
  895. return _sort__dso_cmp(map_l, map_r);
  896. }
  897. static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
  898. size_t size, unsigned int width)
  899. {
  900. struct map *map = NULL;
  901. if (he->mem_info)
  902. map = he->mem_info->daddr.ms.map;
  903. return _hist_entry__dso_snprintf(map, bf, size, width);
  904. }
/* Order by the lock attribute of the access; entries without memory
 * info compare as PERF_MEM_LOCK_NA. */
static int64_t
sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lock = PERF_MEM_LOCK_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lock = PERF_MEM_LOCK_NA;

	/* Only mem_lock is read below, so the partial init is safe. */
	return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
}

static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
{
	/* NOTE(review): out[10] assumes perf_mem__lck_scnprintf() never
	 * produces more -- confirm against mem-events.  Also note this
	 * uses "%.*s" (precision) where the sibling printers use
	 * "%-*s" (field width). */
	char out[10];

	perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
	return repsep_snprintf(bf, size, "%.*s", width, out);
}
/* Order by the TLB-access attribute; entries without memory info
 * compare as PERF_MEM_TLB_NA. */
static int64_t
sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_dtlb = PERF_MEM_TLB_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_dtlb = PERF_MEM_TLB_NA;

	/* Only mem_dtlb is read below, so the partial init is safe. */
	return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
}
  942. static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
  943. size_t size, unsigned int width)
  944. {
  945. char out[64];
  946. perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
  947. return repsep_snprintf(bf, size, "%-*s", width, out);
  948. }
/* Order by the memory-hierarchy level of the access; entries without
 * memory info compare as PERF_MEM_LVL_NA. */
static int64_t
sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_lvl = PERF_MEM_LVL_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_lvl = PERF_MEM_LVL_NA;

	/* Only mem_lvl is read below, so the partial init is safe. */
	return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
}
  964. static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
  965. size_t size, unsigned int width)
  966. {
  967. char out[64];
  968. perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
  969. return repsep_snprintf(bf, size, "%-*s", width, out);
  970. }
/* Order by the snoop attribute of the access; entries without memory
 * info compare as PERF_MEM_SNOOP_NA. */
static int64_t
sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
{
	union perf_mem_data_src data_src_l;
	union perf_mem_data_src data_src_r;

	if (left->mem_info)
		data_src_l = left->mem_info->data_src;
	else
		data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;

	if (right->mem_info)
		data_src_r = right->mem_info->data_src;
	else
		data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;

	/* Only mem_snoop is read below, so the partial init is safe. */
	return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
}
  986. static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
  987. size_t size, unsigned int width)
  988. {
  989. char out[64];
  990. perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
  991. return repsep_snprintf(bf, size, "%-*s", width, out);
  992. }
/*
 * Group samples touching the same data cache line.  Compare, in order:
 * presence of mem_info, cpumode, owning DSO identity, pid (for
 * userspace anonymous mappings only), and finally the cache-line
 * aligned address.
 */
int64_t
sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
{
	u64 l, r;
	struct map *l_map, *r_map;
	int rc;

	/* Entries without memory info sort to one end. */
	if (!left->mem_info)  return -1;
	if (!right->mem_info) return 1;

	/* group event types together */
	if (left->cpumode > right->cpumode) return -1;
	if (left->cpumode < right->cpumode) return 1;

	l_map = left->mem_info->daddr.ms.map;
	r_map = right->mem_info->daddr.ms.map;

	/* if both are NULL, jump to sort on al_addr instead */
	if (!l_map && !r_map)
		goto addr;

	if (!l_map) return -1;
	if (!r_map) return 1;

	rc = dso__cmp_id(l_map->dso, r_map->dso);
	if (rc)
		return rc;
	/*
	 * Addresses with no major/minor numbers are assumed to be
	 * anonymous in userspace.  Sort those on pid then address.
	 *
	 * The kernel and non-zero major/minor mapped areas are
	 * assumed to be unity mapped.  Sort those on address.
	 */
	if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
	    (!(l_map->flags & MAP_SHARED)) &&
	    !l_map->dso->id.maj && !l_map->dso->id.min &&
	    !l_map->dso->id.ino && !l_map->dso->id.ino_generation) {
		/* userspace anonymous */
		if (left->thread->pid_ > right->thread->pid_) return -1;
		if (left->thread->pid_ < right->thread->pid_) return 1;
	}

addr:
	/* al_addr does all the right addr - start + offset calculations */
	l = cl_address(left->mem_info->daddr.al_addr);
	r = cl_address(right->mem_info->daddr.al_addr);

	if (l > r) return -1;
	if (l < r) return 1;

	return 0;
}
/*
 * Print the cache-line aligned data address.  The level char becomes
 * 's' for shared, file-backed data mappings and 'X' when the address
 * could not be resolved to a map.
 */
static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
{
	uint64_t addr = 0;
	struct map_symbol *ms = NULL;
	char level = he->level;

	if (he->mem_info) {
		struct map *map = he->mem_info->daddr.ms.map;

		addr = cl_address(he->mem_info->daddr.al_addr);
		ms = &he->mem_info->daddr.ms;

		/* print [s] for shared data mmaps */
		if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
		    map && !(map->prot & PROT_EXEC) &&
		    (map->flags & MAP_SHARED) &&
		    (map->dso->id.maj || map->dso->id.min ||
		     map->dso->id.ino || map->dso->id.ino_generation))
			level = 's';
		else if (!map)
			level = 'X';
	}
	return _hist_entry__sym_snprintf(ms, addr, level, bf, size, width);
}
/* --sort mispredict: branch prediction outcome. */
struct sort_entry sort_mispredict = {
	.se_header	= "Branch Mispredicted",
	.se_cmp		= sort__mispredict_cmp,
	.se_snprintf	= hist_entry__mispredict_snprintf,
	.se_width_idx	= HISTC_MISPREDICT,
};
/* Mean sample weight for this entry; 0 when it has no events. */
static u64 he_weight(struct hist_entry *he)
{
	return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
}

/* Order by average ("local") weight per event. */
static int64_t
sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return he_weight(left) - he_weight(right);
}

static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
}
/* --sort local_weight: per-event average sample weight. */
struct sort_entry sort_local_weight = {
	.se_header	= "Local Weight",
	.se_cmp		= sort__local_weight_cmp,
	.se_snprintf	= hist_entry__local_weight_snprintf,
	.se_width_idx	= HISTC_LOCAL_WEIGHT,
};
/* Order by the entry's accumulated (global) sample weight. */
static int64_t
sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->stat.weight - right->stat.weight;
}

static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
					      size_t size, unsigned int width)
{
	return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
}

/* --sort weight: total accumulated sample weight. */
struct sort_entry sort_global_weight = {
	.se_header	= "Weight",
	.se_cmp		= sort__global_weight_cmp,
	.se_snprintf	= hist_entry__global_weight_snprintf,
	.se_width_idx	= HISTC_GLOBAL_WEIGHT,
};
/*
 * sort_entry tables for the memory-access sort keys (see
 * memory_sort_dimensions[] below for the key-name mapping).
 */
struct sort_entry sort_mem_daddr_sym = {
	.se_header	= "Data Symbol",
	.se_cmp		= sort__daddr_cmp,
	.se_snprintf	= hist_entry__daddr_snprintf,
	.se_width_idx	= HISTC_MEM_DADDR_SYMBOL,
};

struct sort_entry sort_mem_iaddr_sym = {
	.se_header	= "Code Symbol",
	.se_cmp		= sort__iaddr_cmp,
	.se_snprintf	= hist_entry__iaddr_snprintf,
	.se_width_idx	= HISTC_MEM_IADDR_SYMBOL,
};

struct sort_entry sort_mem_daddr_dso = {
	.se_header	= "Data Object",
	.se_cmp		= sort__dso_daddr_cmp,
	.se_snprintf	= hist_entry__dso_daddr_snprintf,
	.se_width_idx	= HISTC_MEM_DADDR_DSO,
};

struct sort_entry sort_mem_locked = {
	.se_header	= "Locked",
	.se_cmp		= sort__locked_cmp,
	.se_snprintf	= hist_entry__locked_snprintf,
	.se_width_idx	= HISTC_MEM_LOCKED,
};

struct sort_entry sort_mem_tlb = {
	.se_header	= "TLB access",
	.se_cmp		= sort__tlb_cmp,
	.se_snprintf	= hist_entry__tlb_snprintf,
	.se_width_idx	= HISTC_MEM_TLB,
};

struct sort_entry sort_mem_lvl = {
	.se_header	= "Memory access",
	.se_cmp		= sort__lvl_cmp,
	.se_snprintf	= hist_entry__lvl_snprintf,
	.se_width_idx	= HISTC_MEM_LVL,
};

struct sort_entry sort_mem_snoop = {
	.se_header	= "Snoop",
	.se_cmp		= sort__snoop_cmp,
	.se_snprintf	= hist_entry__snoop_snprintf,
	.se_width_idx	= HISTC_MEM_SNOOP,
};

struct sort_entry sort_mem_dcacheline = {
	.se_header	= "Data Cacheline",
	.se_cmp		= sort__dcacheline_cmp,
	.se_snprintf	= hist_entry__dcacheline_snprintf,
	.se_width_idx	= HISTC_MEM_DCACHELINE,
};
  1149. static int64_t
  1150. sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
  1151. {
  1152. uint64_t l = 0, r = 0;
  1153. if (left->mem_info)
  1154. l = left->mem_info->daddr.phys_addr;
  1155. if (right->mem_info)
  1156. r = right->mem_info->daddr.phys_addr;
  1157. return (int64_t)(r - l);
  1158. }
  1159. static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf,
  1160. size_t size, unsigned int width)
  1161. {
  1162. uint64_t addr = 0;
  1163. size_t ret = 0;
  1164. size_t len = BITS_PER_LONG / 4;
  1165. addr = he->mem_info->daddr.phys_addr;
  1166. ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level);
  1167. ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr);
  1168. ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, "");
  1169. if (ret > width)
  1170. bf[width] = '\0';
  1171. return width;
  1172. }
/* --sort phys_daddr: physical address of the data access. */
struct sort_entry sort_mem_phys_daddr = {
	.se_header	= "Data Physical Address",
	.se_cmp		= sort__phys_daddr_cmp,
	.se_snprintf	= hist_entry__phys_daddr_snprintf,
	.se_width_idx	= HISTC_MEM_PHYS_DADDR,
};
/* Group entries by the transaction-abort flag. */
static int64_t
sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	/* NOTE(review): "!=" yields only 0 or 1, so unequal entries
	 * compare the same in either argument order.  Fine for
	 * grouping equal keys, but not a strict ordering -- confirm
	 * callers only rely on zero/non-zero here. */
	return left->branch_info->flags.abort !=
		right->branch_info->flags.abort;
}
  1187. static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
  1188. size_t size, unsigned int width)
  1189. {
  1190. static const char *out = "N/A";
  1191. if (he->branch_info) {
  1192. if (he->branch_info->flags.abort)
  1193. out = "A";
  1194. else
  1195. out = ".";
  1196. }
  1197. return repsep_snprintf(bf, size, "%-*s", width, out);
  1198. }
/* --sort abort: whether the branch was in an aborted transaction. */
struct sort_entry sort_abort = {
	.se_header	= "Transaction abort",
	.se_cmp		= sort__abort_cmp,
	.se_snprintf	= hist_entry__abort_snprintf,
	.se_width_idx	= HISTC_ABORT,
};
/* Group entries by the in-transaction flag. */
static int64_t
sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
{
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	/* NOTE(review): boolean "!=" result -- see sort__abort_cmp. */
	return left->branch_info->flags.in_tx !=
		right->branch_info->flags.in_tx;
}
  1213. static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
  1214. size_t size, unsigned int width)
  1215. {
  1216. static const char *out = "N/A";
  1217. if (he->branch_info) {
  1218. if (he->branch_info->flags.in_tx)
  1219. out = "T";
  1220. else
  1221. out = ".";
  1222. }
  1223. return repsep_snprintf(bf, size, "%-*s", width, out);
  1224. }
/* --sort in_tx: whether the branch executed inside a transaction. */
struct sort_entry sort_in_tx = {
	.se_header	= "Branch in transaction",
	.se_cmp		= sort__in_tx_cmp,
	.se_snprintf	= hist_entry__in_tx_snprintf,
	.se_width_idx	= HISTC_IN_TX,
};
/* Order entries by the raw transaction flag word. */
static int64_t
sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return left->transaction - right->transaction;
}
  1236. static inline char *add_str(char *p, const char *str)
  1237. {
  1238. strcpy(p, str);
  1239. return p + strlen(str);
  1240. }
/*
 * Transaction flag -> display name table.  skip_for_len marks names
 * excluded from the column-width calculation in
 * hist_entry__transaction_len().  The list is NULL-name terminated.
 */
static struct txbit {
	unsigned flag;
	const char *name;
	int skip_for_len;
} txbits[] = {
	{ PERF_TXN_ELISION,        "EL ",        0 },
	{ PERF_TXN_TRANSACTION,    "TX ",        1 },
	{ PERF_TXN_SYNC,           "SYNC ",      1 },
	{ PERF_TXN_ASYNC,          "ASYNC ",     0 },
	{ PERF_TXN_RETRY,          "RETRY ",     0 },
	{ PERF_TXN_CONFLICT,       "CON ",       0 },
	{ PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
	{ PERF_TXN_CAPACITY_READ,  "CAP-READ ",  0 },
	{ 0, NULL, 0 }
};
  1256. int hist_entry__transaction_len(void)
  1257. {
  1258. int i;
  1259. int len = 0;
  1260. for (i = 0; txbits[i].name; i++) {
  1261. if (!txbits[i].skip_for_len)
  1262. len += strlen(txbits[i].name);
  1263. }
  1264. len += 4; /* :XX<space> */
  1265. return len;
  1266. }
/*
 * Build the human-readable transaction string: one name per set flag,
 * "NEITHER " for an odd flag word with neither SYNC nor ASYNC, then
 * the abort code as ":XX".  The txbits[] names plus the abort suffix
 * stay well under sizeof(buf).
 */
static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
					    size_t size, unsigned int width)
{
	u64 t = he->transaction;
	char buf[128];
	char *p = buf;
	int i;

	buf[0] = 0;
	for (i = 0; txbits[i].name; i++)
		if (txbits[i].flag & t)
			p = add_str(p, txbits[i].name);
	if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
		p = add_str(p, "NEITHER ");
	if (t & PERF_TXN_ABORT_MASK) {
		sprintf(p, ":%" PRIx64,
			(t & PERF_TXN_ABORT_MASK) >>
			PERF_TXN_ABORT_SHIFT);
		p += strlen(p);
	}

	return repsep_snprintf(bf, size, "%-*s", width, buf);
}
/* --sort transaction: decoded transaction flags. */
struct sort_entry sort_transaction = {
	.se_header	= "Transaction                ",
	.se_cmp		= sort__transaction_cmp,
	.se_snprintf	= hist_entry__transaction_snprintf,
	.se_width_idx	= HISTC_TRANSACTION,
};
  1294. /* --sort symbol_size */
  1295. static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r)
  1296. {
  1297. int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0;
  1298. int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0;
  1299. return size_l < size_r ? -1 :
  1300. size_l == size_r ? 0 : 1;
  1301. }
/* Arguments are swapped so larger symbols sort first. */
static int64_t
sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__sym_size_cmp(right->ms.sym, left->ms.sym);
}
/* Print the symbol's size, or "unknown" when no symbol is attached. */
static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf,
					  size_t bf_size, unsigned int width)
{
	if (sym)
		/* NOTE(review): "%*d" takes an int -- confirm
		 * symbol__size() values always fit. */
		return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym));

	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
}

static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width);
}
/* --sort symbol_size: size of the sampled symbol. */
struct sort_entry sort_sym_size = {
	.se_header	= "Symbol size",
	.se_cmp		= sort__sym_size_cmp,
	.se_snprintf	= hist_entry__sym_size_snprintf,
	.se_width_idx	= HISTC_SYM_SIZE,
};
  1325. /* --sort dso_size */
  1326. static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r)
  1327. {
  1328. int64_t size_l = map_l != NULL ? map__size(map_l) : 0;
  1329. int64_t size_r = map_r != NULL ? map__size(map_r) : 0;
  1330. return size_l < size_r ? -1 :
  1331. size_l == size_r ? 0 : 1;
  1332. }
/* Arguments are swapped so larger DSOs sort first. */
static int64_t
sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return _sort__dso_size_cmp(right->ms.map, left->ms.map);
}

/* Print the mapped size, or "unknown" without a map/DSO. */
static int _hist_entry__dso_size_snprintf(struct map *map, char *bf,
					  size_t bf_size, unsigned int width)
{
	if (map && map->dso)
		/* NOTE(review): "%*d" takes an int -- confirm
		 * map__size() values always fit. */
		return repsep_snprintf(bf, bf_size, "%*d", width,
				       map__size(map));

	return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
}

static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
{
	return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width);
}
/* --sort dso_size: mapped size of the sampled DSO. */
struct sort_entry sort_dso_size = {
	.se_header	= "DSO size",
	.se_cmp		= sort__dso_size_cmp,
	.se_snprintf	= hist_entry__dso_size_snprintf,
	.se_width_idx	= HISTC_DSO_SIZE,
};
/* Maps a user-visible sort-key name to its sort_entry; "taken" flags
 * a dimension already consumed while parsing the sort order. */
struct sort_dimension {
	const char		*name;
	struct sort_entry	*entry;
	int			taken;
};
/* Sort keys available in every sort mode, indexed by SORT_* id. */
#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }

static struct sort_dimension common_sort_dimensions[] = {
	DIM(SORT_PID, "pid", sort_thread),
	DIM(SORT_COMM, "comm", sort_comm),
	DIM(SORT_DSO, "dso", sort_dso),
	DIM(SORT_SYM, "symbol", sort_sym),
	DIM(SORT_PARENT, "parent", sort_parent),
	DIM(SORT_CPU, "cpu", sort_cpu),
	DIM(SORT_SOCKET, "socket", sort_socket),
	DIM(SORT_SRCLINE, "srcline", sort_srcline),
	DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
	DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
	DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
	DIM(SORT_TRANSACTION, "transaction", sort_transaction),
	DIM(SORT_TRACE, "trace", sort_trace),
	DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
	DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
	DIM(SORT_CGROUP, "cgroup", sort_cgroup),
	DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
	DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null),
	DIM(SORT_TIME, "time", sort_time),
};

#undef DIM
/* Branch-stack-mode sort keys; indices rebased by __SORT_BRANCH_STACK. */
#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }

static struct sort_dimension bstack_sort_dimensions[] = {
	DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
	DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
	DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
	DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
	DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
	DIM(SORT_ABORT, "abort", sort_abort),
	DIM(SORT_CYCLES, "cycles", sort_cycles),
	DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
	DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
	DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc),
};

#undef DIM
/* Memory-mode sort keys; indices rebased by __SORT_MEMORY_MODE. */
#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }

static struct sort_dimension memory_sort_dimensions[] = {
	DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
	DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
	DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
	DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
	DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
	DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
	DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
	DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
	DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr),
};

#undef DIM
/* Maps an output-field name to its perf_hpp_fmt; "taken" flags a
 * dimension already consumed while parsing the field order. */
struct hpp_dimension {
	const char		*name;
	struct perf_hpp_fmt	*fmt;
	int			taken;
};
/* Overhead/period output columns backed by perf_hpp__format[]. */
#define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }

static struct hpp_dimension hpp_sort_dimensions[] = {
	DIM(PERF_HPP__OVERHEAD, "overhead"),
	DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
	DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
	DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
	DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
	DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
	DIM(PERF_HPP__SAMPLES, "sample"),
	DIM(PERF_HPP__PERIOD, "period"),
};

#undef DIM
/* Adapter wrapping a sort_entry in the perf_hpp_fmt interface; the
 * embedded hpp member is handed out and mapped back via container_of. */
struct hpp_sort_entry {
	struct perf_hpp_fmt hpp;
	struct sort_entry *se;
};
/* Reset a sort column's tracked width to its header-name length.
 * Non-sort formats are left untouched. */
void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	struct hpp_sort_entry *hse;

	if (!perf_hpp__is_sort_entry(fmt))
		return;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
}
/* Emit the column header, left-justified to the user-set width or the
 * tracked maximum for this column. */
static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct hists *hists, int line __maybe_unused,
			      int *span __maybe_unused)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(hists, hse->se->se_width_idx);

	return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
}
  1453. static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
  1454. struct perf_hpp *hpp __maybe_unused,
  1455. struct hists *hists)
  1456. {
  1457. struct hpp_sort_entry *hse;
  1458. size_t len = fmt->user_len;
  1459. hse = container_of(fmt, struct hpp_sort_entry, hpp);
  1460. if (!len)
  1461. len = hists__col_len(hists, hse->se->se_width_idx);
  1462. return len;
  1463. }
/* Render one hist entry's value via the wrapped sort_entry printer. */
static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	if (!len)
		len = hists__col_len(he->hists, hse->se->se_width_idx);

	return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
}
/* Dispatch the three perf_hpp_fmt comparison hooks to the wrapped
 * sort_entry's callbacks. */
static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	return hse->se->se_cmp(a, b);
}

static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
				    struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;
	int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	/* "?:" (GNU extension) falls back to se_cmp when the entry has
	 * no dedicated collapse callback. */
	collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
	return collapse_fn(a, b);
}

static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
				struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;
	int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
	return sort_fn(a, b);
}
/* A format is a sort entry iff its header hook is our adapter's;
 * that makes the container_of() casts above safe. */
bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
{
	return format->header == __sort__hpp_header;
}
/* Generate perf_hpp__is_<key>_entry() predicates that test whether a
 * format wraps the given sort_<key> entry. */
#define MK_SORT_ENTRY_CHK(key)					\
bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt)	\
{								\
	struct hpp_sort_entry *hse;				\
								\
	if (!perf_hpp__is_sort_entry(fmt))			\
		return false;					\
								\
	hse = container_of(fmt, struct hpp_sort_entry, hpp);	\
	return hse->se == &sort_ ## key ;			\
}

MK_SORT_ENTRY_CHK(trace)
MK_SORT_ENTRY_CHK(srcline)
MK_SORT_ENTRY_CHK(srcfile)
MK_SORT_ENTRY_CHK(thread)
MK_SORT_ENTRY_CHK(comm)
MK_SORT_ENTRY_CHK(dso)
MK_SORT_ENTRY_CHK(sym)
  1521. static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
  1522. {
  1523. struct hpp_sort_entry *hse_a;
  1524. struct hpp_sort_entry *hse_b;
  1525. if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
  1526. return false;
  1527. hse_a = container_of(a, struct hpp_sort_entry, hpp);
  1528. hse_b = container_of(b, struct hpp_sort_entry, hpp);
  1529. return hse_a->se == hse_b->se;
  1530. }
/* ->free hook: release the hpp_sort_entry that embeds this format. */
static void hse_free(struct perf_hpp_fmt *fmt)
{
	struct hpp_sort_entry *hse;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	free(hse);
}
/*
 * Allocate a perf_hpp_fmt adapter for a sort dimension, wiring all
 * hooks to the __sort__hpp_* dispatchers.  Returns NULL on allocation
 * failure; the caller owns the result (freed via hse_free).
 */
static struct hpp_sort_entry *
__sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
{
	struct hpp_sort_entry *hse;

	hse = malloc(sizeof(*hse));
	if (hse == NULL) {
		pr_err("Memory allocation failed\n");
		return NULL;
	}

	hse->se = sd->entry;
	hse->hpp.name = sd->entry->se_header;
	hse->hpp.header = __sort__hpp_header;
	hse->hpp.width = __sort__hpp_width;
	hse->hpp.entry = __sort__hpp_entry;
	hse->hpp.color = NULL;

	hse->hpp.cmp = __sort__hpp_cmp;
	hse->hpp.collapse = __sort__hpp_collapse;
	hse->hpp.sort = __sort__hpp_sort;
	hse->hpp.equal = __sort__hpp_equal;
	hse->hpp.free = hse_free;

	INIT_LIST_HEAD(&hse->hpp.list);
	INIT_LIST_HEAD(&hse->hpp.sort_list);
	hse->hpp.elide = false;
	hse->hpp.len = 0;
	hse->hpp.user_len = 0;
	hse->hpp.level = level;

	return hse;
}
/* ->free hook for formats duplicated from perf_hpp__format[]. */
static void hpp_free(struct perf_hpp_fmt *fmt)
{
	free(fmt);
}

/* Duplicate the template format for an output dimension so each use
 * gets private list linkage and level.  Returns NULL on OOM. */
static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
						       int level)
{
	struct perf_hpp_fmt *fmt;

	fmt = memdup(hd->fmt, sizeof(*fmt));
	if (fmt) {
		INIT_LIST_HEAD(&fmt->list);
		INIT_LIST_HEAD(&fmt->sort_list);
		fmt->free = hpp_free;
		fmt->level = level;
	}

	return fmt;
}
/*
 * Apply the se_filter callbacks of all sort keys in the entry's hpp
 * list.  Returns -1 when no key handled @type, otherwise the OR of
 * the individual filter results (non-zero means filtered out).
 */
int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;
	int ret = -1;
	int r;

	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_filter == NULL)
			continue;

		/*
		 * hist entry is filtered if any of sort key in the hpp list
		 * is applied.  But it should skip non-matched filter types.
		 */
		r = hse->se->se_filter(he, type, arg);
		if (r >= 0) {
			if (ret < 0)
				ret = 0;
			ret |= r;
		}
	}

	return ret;
}
/* Register a sort dimension as a sort field; returns 0 or -1 on OOM. */
static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
					  struct perf_hpp_list *list,
					  int level)
{
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);

	if (hse == NULL)
		return -1;

	perf_hpp_list__register_sort_field(list, &hse->hpp);
	return 0;
}

/* Register a sort dimension as an output column (level 0); returns 0
 * or -1 on OOM. */
static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
					    struct perf_hpp_list *list)
{
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);

	if (hse == NULL)
		return -1;

	perf_hpp_list__column_register(list, &hse->hpp);
	return 0;
}
/* Format adapter for one tracepoint field of one event; dynamic_len
 * tracks the widest value seen, raw_trace selects raw field output. */
struct hpp_dynamic_entry {
	struct perf_hpp_fmt hpp;
	struct evsel *evsel;
	struct tep_format_field *field;
	unsigned dynamic_len;
	bool raw_trace;
};
/* Lazily compute and cache the column width: the max of the widest
 * value seen, the field-name length, and the field's print size
 * (doubled plus "0x" room for non-string fields printed as hex). */
static int hde_width(struct hpp_dynamic_entry *hde)
{
	if (!hde->hpp.len) {
		int len = hde->dynamic_len;
		int namelen = strlen(hde->field->name);
		int fieldlen = hde->field->size;

		if (namelen > len)
			len = namelen;

		if (!(hde->field->flags & TEP_FIELD_IS_STRING)) {
			/* length for print hex numbers */
			fieldlen = hde->field->size * 2 + 2;
		}

		if (fieldlen > len)
			len = fieldlen;

		hde->hpp.len = len;
	}
	return hde->hpp.len;
}
/*
 * Grow hde->dynamic_len to cover this entry's pretty-printed value.
 * The pretty-print output is scanned as space-separated tokens for a
 * token starting with "<field->name>"; the value is what follows the
 * name (and the separator byte after it) up to the next space.
 */
static void update_dynamic_len(struct hpp_dynamic_entry *hde,
			       struct hist_entry *he)
{
	char *str, *pos;
	struct tep_format_field *field = hde->field;
	size_t namelen;
	bool last = false;

	/* Raw mode prints the field directly; no width tracking needed. */
	if (hde->raw_trace)
		return;

	/* parse pretty print result and update max length */
	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	namelen = strlen(field->name);
	str = he->trace_output;

	while (str) {
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			size_t len;

			/* skip the name and the separator after it */
			str += namelen + 1;
			len = pos - str;

			if (len > hde->dynamic_len)
				hde->dynamic_len = len;
			break;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}
}
/* Emit the dynamic column's header (the tracepoint field name),
 * right-justified to the user width or the computed one. */
static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct hists *hists __maybe_unused,
			      int line __maybe_unused,
			      int *span __maybe_unused)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
}

/* Report the dynamic column's width. */
static int __sort__hde_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct hists *hists __maybe_unused)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	return len;
}
/* A dynamic entry only applies to the event it was created for. */
bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	struct hpp_dynamic_entry *hde;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	return hists_to_evsel(hists) == hde->evsel;
}
/*
 * Print one entry's value for this tracepoint field.  In pretty mode
 * the value is extracted from the cached pretty-print output (same
 * scan as update_dynamic_len()); otherwise, or when the field is not
 * found there, the raw field is printed via libtraceevent.  Either
 * way @str ends up heap-allocated (strndup or the trace_seq buffer)
 * and is freed after formatting.
 */
static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;
	char *str, *pos;
	struct tep_format_field *field;
	size_t namelen;
	bool last = false;
	int ret;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	if (hde->raw_trace)
		goto raw_field;

	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	field = hde->field;
	namelen = strlen(field->name);
	str = he->trace_output;

	while (str) {
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			/* copy just the value token out of trace_output */
			str += namelen + 1;
			str = strndup(str, pos - str);

			if (str == NULL)
				return scnprintf(hpp->buf, hpp->size,
						 "%*.*s", len, len, "ERROR");
			break;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}

	if (str == NULL) {
		struct trace_seq seq;
raw_field:
		trace_seq_init(&seq);
		tep_print_field(&seq, he->raw_data, hde->field);
		str = seq.buffer;
	}

	ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
	free(str);
	return ret;
}
/*
 * Compare two hist entries by the raw bytes of this tracepoint field.
 * Called with b == NULL as an "update only" pass (see update_dynamic_len
 * above) to record the printed width of a's value.
 */
static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_dynamic_entry *hde;
	struct tep_format_field *field;
	unsigned offset, size;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (b == NULL) {
		update_dynamic_len(hde, a);
		return 0;
	}

	field = hde->field;
	if (field->flags & TEP_FIELD_IS_DYNAMIC) {
		unsigned long long dyn;

		/*
		 * A dynamic field stores a descriptor in the record: the low
		 * 16 bits are the payload offset within raw_data, the next
		 * 16 bits its size.
		 */
		tep_read_number_field(field, a->raw_data, &dyn);
		offset = dyn & 0xffff;
		size = (dyn >> 16) & 0xffff;

		/* record max width for output */
		if (size > hde->dynamic_len)
			hde->dynamic_len = size;
	} else {
		offset = field->offset;
		size = field->size;
	}

	/* both entries come from the same tracepoint, so same layout */
	return memcmp(a->raw_data + offset, b->raw_data + offset, size);
}
/* Dynamic entries are identified by their comparison callback. */
bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
{
	return fmt->cmp == __sort__hde_cmp;
}
  1794. static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
  1795. {
  1796. struct hpp_dynamic_entry *hde_a;
  1797. struct hpp_dynamic_entry *hde_b;
  1798. if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
  1799. return false;
  1800. hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
  1801. hde_b = container_of(b, struct hpp_dynamic_entry, hpp);
  1802. return hde_a->field == hde_b->field;
  1803. }
/* perf_hpp_fmt::free callback: release the embedding dynamic entry. */
static void hde_free(struct perf_hpp_fmt *fmt)
{
	struct hpp_dynamic_entry *hde;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
	free(hde);
}
/*
 * Allocate a dynamic hpp entry for one tracepoint format field and wire
 * up all perf_hpp_fmt callbacks.  The caller is responsible for setting
 * ->raw_trace and registering the entry.  Returns NULL on allocation
 * failure.
 */
static struct hpp_dynamic_entry *
__alloc_dynamic_entry(struct evsel *evsel, struct tep_format_field *field,
		      int level)
{
	struct hpp_dynamic_entry *hde;

	hde = malloc(sizeof(*hde));
	if (hde == NULL) {
		pr_debug("Memory allocation failed\n");
		return NULL;
	}

	hde->evsel = evsel;
	hde->field = field;
	hde->dynamic_len = 0;

	hde->hpp.name = field->name;
	hde->hpp.header = __sort__hde_header;
	hde->hpp.width = __sort__hde_width;
	hde->hpp.entry = __sort__hde_entry;
	hde->hpp.color = NULL;

	/* sorting, collapsing and resorting all compare the raw field bytes */
	hde->hpp.cmp = __sort__hde_cmp;
	hde->hpp.collapse = __sort__hde_cmp;
	hde->hpp.sort = __sort__hde_cmp;
	hde->hpp.equal = __sort__hde_equal;
	hde->hpp.free = hde_free;

	INIT_LIST_HEAD(&hde->hpp.list);
	INIT_LIST_HEAD(&hde->hpp.sort_list);
	hde->hpp.elide = false;
	hde->hpp.len = 0;
	hde->hpp.user_len = 0;
	hde->hpp.level = level;

	return hde;
}
  1841. struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
  1842. {
  1843. struct perf_hpp_fmt *new_fmt = NULL;
  1844. if (perf_hpp__is_sort_entry(fmt)) {
  1845. struct hpp_sort_entry *hse, *new_hse;
  1846. hse = container_of(fmt, struct hpp_sort_entry, hpp);
  1847. new_hse = memdup(hse, sizeof(*hse));
  1848. if (new_hse)
  1849. new_fmt = &new_hse->hpp;
  1850. } else if (perf_hpp__is_dynamic_entry(fmt)) {
  1851. struct hpp_dynamic_entry *hde, *new_hde;
  1852. hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
  1853. new_hde = memdup(hde, sizeof(*hde));
  1854. if (new_hde)
  1855. new_fmt = &new_hde->hpp;
  1856. } else {
  1857. new_fmt = memdup(fmt, sizeof(*fmt));
  1858. }
  1859. INIT_LIST_HEAD(&new_fmt->list);
  1860. INIT_LIST_HEAD(&new_fmt->sort_list);
  1861. return new_fmt;
  1862. }
  1863. static int parse_field_name(char *str, char **event, char **field, char **opt)
  1864. {
  1865. char *event_name, *field_name, *opt_name;
  1866. event_name = str;
  1867. field_name = strchr(str, '.');
  1868. if (field_name) {
  1869. *field_name++ = '\0';
  1870. } else {
  1871. event_name = NULL;
  1872. field_name = str;
  1873. }
  1874. opt_name = strchr(field_name, '/');
  1875. if (opt_name)
  1876. *opt_name++ = '\0';
  1877. *event = event_name;
  1878. *field = field_name;
  1879. *opt = opt_name;
  1880. return 0;
  1881. }
  1882. /* find match evsel using a given event name. The event name can be:
  1883. * 1. '%' + event index (e.g. '%1' for first event)
  1884. * 2. full event name (e.g. sched:sched_switch)
  1885. * 3. partial event name (should not contain ':')
  1886. */
  1887. static struct evsel *find_evsel(struct evlist *evlist, char *event_name)
  1888. {
  1889. struct evsel *evsel = NULL;
  1890. struct evsel *pos;
  1891. bool full_name;
  1892. /* case 1 */
  1893. if (event_name[0] == '%') {
  1894. int nr = strtol(event_name+1, NULL, 0);
  1895. if (nr > evlist->core.nr_entries)
  1896. return NULL;
  1897. evsel = evlist__first(evlist);
  1898. while (--nr > 0)
  1899. evsel = perf_evsel__next(evsel);
  1900. return evsel;
  1901. }
  1902. full_name = !!strchr(event_name, ':');
  1903. evlist__for_each_entry(evlist, pos) {
  1904. /* case 2 */
  1905. if (full_name && !strcmp(pos->name, event_name))
  1906. return pos;
  1907. /* case 3 */
  1908. if (!full_name && strstr(pos->name, event_name)) {
  1909. if (evsel) {
  1910. pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
  1911. event_name, evsel->name, pos->name);
  1912. return NULL;
  1913. }
  1914. evsel = pos;
  1915. }
  1916. }
  1917. return evsel;
  1918. }
  1919. static int __dynamic_dimension__add(struct evsel *evsel,
  1920. struct tep_format_field *field,
  1921. bool raw_trace, int level)
  1922. {
  1923. struct hpp_dynamic_entry *hde;
  1924. hde = __alloc_dynamic_entry(evsel, field, level);
  1925. if (hde == NULL)
  1926. return -ENOMEM;
  1927. hde->raw_trace = raw_trace;
  1928. perf_hpp__register_sort_field(&hde->hpp);
  1929. return 0;
  1930. }
  1931. static int add_evsel_fields(struct evsel *evsel, bool raw_trace, int level)
  1932. {
  1933. int ret;
  1934. struct tep_format_field *field;
  1935. field = evsel->tp_format->format.fields;
  1936. while (field) {
  1937. ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
  1938. if (ret < 0)
  1939. return ret;
  1940. field = field->next;
  1941. }
  1942. return 0;
  1943. }
  1944. static int add_all_dynamic_fields(struct evlist *evlist, bool raw_trace,
  1945. int level)
  1946. {
  1947. int ret;
  1948. struct evsel *evsel;
  1949. evlist__for_each_entry(evlist, evsel) {
  1950. if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
  1951. continue;
  1952. ret = add_evsel_fields(evsel, raw_trace, level);
  1953. if (ret < 0)
  1954. return ret;
  1955. }
  1956. return 0;
  1957. }
  1958. static int add_all_matching_fields(struct evlist *evlist,
  1959. char *field_name, bool raw_trace, int level)
  1960. {
  1961. int ret = -ESRCH;
  1962. struct evsel *evsel;
  1963. struct tep_format_field *field;
  1964. evlist__for_each_entry(evlist, evsel) {
  1965. if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
  1966. continue;
  1967. field = tep_find_any_field(evsel->tp_format, field_name);
  1968. if (field == NULL)
  1969. continue;
  1970. ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
  1971. if (ret < 0)
  1972. break;
  1973. }
  1974. return ret;
  1975. }
  1976. static int add_dynamic_entry(struct evlist *evlist, const char *tok,
  1977. int level)
  1978. {
  1979. char *str, *event_name, *field_name, *opt_name;
  1980. struct evsel *evsel;
  1981. struct tep_format_field *field;
  1982. bool raw_trace = symbol_conf.raw_trace;
  1983. int ret = 0;
  1984. if (evlist == NULL)
  1985. return -ENOENT;
  1986. str = strdup(tok);
  1987. if (str == NULL)
  1988. return -ENOMEM;
  1989. if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
  1990. ret = -EINVAL;
  1991. goto out;
  1992. }
  1993. if (opt_name) {
  1994. if (strcmp(opt_name, "raw")) {
  1995. pr_debug("unsupported field option %s\n", opt_name);
  1996. ret = -EINVAL;
  1997. goto out;
  1998. }
  1999. raw_trace = true;
  2000. }
  2001. if (!strcmp(field_name, "trace_fields")) {
  2002. ret = add_all_dynamic_fields(evlist, raw_trace, level);
  2003. goto out;
  2004. }
  2005. if (event_name == NULL) {
  2006. ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
  2007. goto out;
  2008. }
  2009. evsel = find_evsel(evlist, event_name);
  2010. if (evsel == NULL) {
  2011. pr_debug("Cannot find event: %s\n", event_name);
  2012. ret = -ENOENT;
  2013. goto out;
  2014. }
  2015. if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
  2016. pr_debug("%s is not a tracepoint event\n", event_name);
  2017. ret = -EINVAL;
  2018. goto out;
  2019. }
  2020. if (!strcmp(field_name, "*")) {
  2021. ret = add_evsel_fields(evsel, raw_trace, level);
  2022. } else {
  2023. field = tep_find_any_field(evsel->tp_format, field_name);
  2024. if (field == NULL) {
  2025. pr_debug("Cannot find event field for %s.%s\n",
  2026. event_name, field_name);
  2027. return -ENOENT;
  2028. }
  2029. ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
  2030. }
  2031. out:
  2032. free(str);
  2033. return ret;
  2034. }
  2035. static int __sort_dimension__add(struct sort_dimension *sd,
  2036. struct perf_hpp_list *list,
  2037. int level)
  2038. {
  2039. if (sd->taken)
  2040. return 0;
  2041. if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
  2042. return -1;
  2043. if (sd->entry->se_collapse)
  2044. list->need_collapse = 1;
  2045. sd->taken = 1;
  2046. return 0;
  2047. }
  2048. static int __hpp_dimension__add(struct hpp_dimension *hd,
  2049. struct perf_hpp_list *list,
  2050. int level)
  2051. {
  2052. struct perf_hpp_fmt *fmt;
  2053. if (hd->taken)
  2054. return 0;
  2055. fmt = __hpp_dimension__alloc_hpp(hd, level);
  2056. if (!fmt)
  2057. return -1;
  2058. hd->taken = 1;
  2059. perf_hpp_list__register_sort_field(list, fmt);
  2060. return 0;
  2061. }
  2062. static int __sort_dimension__add_output(struct perf_hpp_list *list,
  2063. struct sort_dimension *sd)
  2064. {
  2065. if (sd->taken)
  2066. return 0;
  2067. if (__sort_dimension__add_hpp_output(sd, list) < 0)
  2068. return -1;
  2069. sd->taken = 1;
  2070. return 0;
  2071. }
  2072. static int __hpp_dimension__add_output(struct perf_hpp_list *list,
  2073. struct hpp_dimension *hd)
  2074. {
  2075. struct perf_hpp_fmt *fmt;
  2076. if (hd->taken)
  2077. return 0;
  2078. fmt = __hpp_dimension__alloc_hpp(hd, 0);
  2079. if (!fmt)
  2080. return -1;
  2081. hd->taken = 1;
  2082. perf_hpp_list__column_register(list, fmt);
  2083. return 0;
  2084. }
/*
 * Add the hpp column at index 'col' to the global output field list.
 * 'col' must be a valid PERF_HPP__* index.
 */
int hpp_dimension__add_output(unsigned col)
{
	BUG_ON(col >= PERF_HPP__MAX_INDEX);
	return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
}
/*
 * Resolve a --sort token against the dimension tables (common, hpp,
 * branch-stack, memory — in that priority order) and add the first
 * match.  'tok' may be an abbreviation: matching is a case-insensitive
 * prefix match on the dimension name.  Unknown tokens fall through to
 * dynamic (tracepoint field) entries.  Returns 0, -EINVAL or -ESRCH.
 */
int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
			struct evlist *evlist,
			int level)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		/* some keys need extra setup or flag the list */
		if (sd->entry == &sort_parent) {
			int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
			if (ret) {
				char err[BUFSIZ];

				regerror(ret, &parent_regex, err, sizeof(err));
				pr_err("Invalid regex: %s\n%s", parent_pattern, err);
				return -EINVAL;
			}
			list->parent = 1;
		} else if (sd->entry == &sort_sym) {
			list->sym = 1;
			/*
			 * perf diff displays the performance difference amongst
			 * two or more perf.data files. Those files could come
			 * from different binaries. So we should not compare
			 * their ips, but the name of symbol.
			 */
			if (sort__mode == SORT_MODE__DIFF)
				sd->entry->se_collapse = sort__sym_sort;
		} else if (sd->entry == &sort_dso) {
			list->dso = 1;
		} else if (sd->entry == &sort_socket) {
			list->socket = 1;
		} else if (sd->entry == &sort_thread) {
			list->thread = 1;
		} else if (sd->entry == &sort_comm) {
			list->comm = 1;
		}

		return __sort_dimension__add(sd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add(hd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		/* branch-stack keys only make sense in branch mode */
		if (sort__mode != SORT_MODE__BRANCH)
			return -EINVAL;

		if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		/* memory keys only make sense in memory mode */
		if (sort__mode != SORT_MODE__MEMORY)
			return -EINVAL;

		/* dcacheline sorting needs a known cacheline size */
		if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0)
			return -EINVAL;

		if (sd->entry == &sort_mem_daddr_sym)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	if (!add_dynamic_entry(evlist, tok, level))
		return 0;

	return -ESRCH;
}
/*
 * Parse a comma/space separated sort-key string and add each key.
 * Keys wrapped in braces, e.g. "{a,b}", are grouped at the same
 * hierarchy level; otherwise every key gets its own (increasing) level.
 * 'str' is modified in place.
 */
static int setup_sort_list(struct perf_hpp_list *list, char *str,
			   struct evlist *evlist)
{
	char *tmp, *tok;
	int ret = 0;
	int level = 0;
	int next_level = 1;
	bool in_group = false;

	do {
		tok = str;
		tmp = strpbrk(str, "{}, ");
		if (tmp) {
			/* keys inside a group share the current level */
			if (in_group)
				next_level = level;
			else
				next_level = level + 1;

			if (*tmp == '{')
				in_group = true;
			else if (*tmp == '}')
				in_group = false;

			*tmp = '\0';
			str = tmp + 1;
		}

		/* an empty token arises from consecutive separators */
		if (*tok) {
			ret = sort_dimension__add(list, tok, evlist, level);
			if (ret == -EINVAL) {
				if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok)))
					ui__error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
				else
					ui__error("Invalid --sort key: `%s'", tok);
				break;
			} else if (ret == -ESRCH) {
				ui__error("Unknown --sort key: `%s'", tok);
				break;
			}
		}

		level = next_level;
	} while (tmp);

	return ret;
}
/*
 * Pick the default sort order for the current sort__mode.  As a side
 * effect, when every event in the evlist is a tracepoint, sort__mode is
 * switched to SORT_MODE__TRACEPOINT (and with --raw-trace the special
 * "trace_fields" order is returned).
 */
static const char *get_default_sort_order(struct evlist *evlist)
{
	/* indexed by enum sort_mode — keep in sync with it */
	const char *default_sort_orders[] = {
		default_sort_order,
		default_branch_sort_order,
		default_mem_sort_order,
		default_top_sort_order,
		default_diff_sort_order,
		default_tracepoint_sort_order,
	};
	bool use_trace = true;
	struct evsel *evsel;

	BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));

	if (evlist == NULL || perf_evlist__empty(evlist))
		goto out_no_evlist;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
			use_trace = false;
			break;
		}
	}

	if (use_trace) {
		sort__mode = SORT_MODE__TRACEPOINT;
		if (symbol_conf.raw_trace)
			return "trace_fields";
	}
out_no_evlist:
	return default_sort_orders[sort__mode];
}
/*
 * Handle a '+'-prefixed --sort value: expand it to the default sort
 * order followed by the user's extra keys.  Strict (unprefixed) orders
 * are left untouched.
 */
static int setup_sort_order(struct evlist *evlist)
{
	char *new_sort_order;

	/*
	 * Append '+'-prefixed sort order to the default sort
	 * order string.
	 */
	if (!sort_order || is_strict_order(sort_order))
		return 0;

	if (sort_order[1] == '\0') {
		/* a lone "+" carries no keys to append */
		ui__error("Invalid --sort key: `+'");
		return -EINVAL;
	}

	/*
	 * We allocate new sort_order string, but we never free it,
	 * because it's checked over the rest of the code.
	 */
	if (asprintf(&new_sort_order, "%s,%s",
		     get_default_sort_order(evlist), sort_order + 1) < 0) {
		pr_err("Not enough memory to set up --sort");
		return -ENOMEM;
	}

	sort_order = new_sort_order;
	return 0;
}
/*
 * Adds a 'pre,' prefix to 'str' if 'pre' is
 * not already part of 'str'.
 */
  2261. static char *prefix_if_not_in(const char *pre, char *str)
  2262. {
  2263. char *n;
  2264. if (!str || strstr(str, pre))
  2265. return str;
  2266. if (asprintf(&n, "%s,%s", pre, str) < 0)
  2267. return NULL;
  2268. free(str);
  2269. return n;
  2270. }
/*
 * Prepend the implicit overhead column(s) to the sort keys unless the
 * user already asked for them.  perf diff manages its own columns and
 * is left alone.  May return NULL on allocation failure.
 */
static char *setup_overhead(char *keys)
{
	if (sort__mode == SORT_MODE__DIFF)
		return keys;

	keys = prefix_if_not_in("overhead", keys);

	if (symbol_conf.cumulate_callchain)
		keys = prefix_if_not_in("overhead_children", keys);

	return keys;
}
/*
 * Turn the global 'sort_order' (or the mode's default) into registered
 * sort keys on perf_hpp_list.  Honors a strict --fields order by adding
 * no sort keys at all in that case.
 */
static int __setup_sorting(struct evlist *evlist)
{
	char *str;
	const char *sort_keys;
	int ret = 0;

	ret = setup_sort_order(evlist);
	if (ret)
		return ret;

	sort_keys = sort_order;
	if (sort_keys == NULL) {
		if (is_strict_order(field_order)) {
			/*
			 * If user specified field order but no sort order,
			 * we'll honor it and not add default sort orders.
			 */
			return 0;
		}

		sort_keys = get_default_sort_order(evlist);
	}

	str = strdup(sort_keys);
	if (str == NULL) {
		pr_err("Not enough memory to setup sort keys");
		return -ENOMEM;
	}

	/*
	 * Prepend overhead fields for backward compatibility.
	 */
	if (!is_strict_order(field_order)) {
		str = setup_overhead(str);
		if (str == NULL) {
			pr_err("Not enough memory to setup overhead keys");
			return -ENOMEM;
		}
	}

	ret = setup_sort_list(&perf_hpp_list, str, evlist);

	free(str);
	return ret;
}
  2318. void perf_hpp__set_elide(int idx, bool elide)
  2319. {
  2320. struct perf_hpp_fmt *fmt;
  2321. struct hpp_sort_entry *hse;
  2322. perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
  2323. if (!perf_hpp__is_sort_entry(fmt))
  2324. continue;
  2325. hse = container_of(fmt, struct hpp_sort_entry, hpp);
  2326. if (hse->se->se_width_idx == idx) {
  2327. fmt->elide = elide;
  2328. break;
  2329. }
  2330. }
  2331. }
  2332. static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
  2333. {
  2334. if (list && strlist__nr_entries(list) == 1) {
  2335. if (fp != NULL)
  2336. fprintf(fp, "# %s: %s\n", list_name,
  2337. strlist__entry(list, 0)->s);
  2338. return true;
  2339. }
  2340. return false;
  2341. }
/*
 * Decide whether the column identified by the width index 'idx' should
 * be elided, based on the corresponding symbol_conf filter list.  The
 * from/to variants are only considered in branch mode.
 */
static bool get_elide(int idx, FILE *output)
{
	switch (idx) {
	case HISTC_SYMBOL:
		return __get_elide(symbol_conf.sym_list, "symbol", output);
	case HISTC_DSO:
		return __get_elide(symbol_conf.dso_list, "dso", output);
	case HISTC_COMM:
		return __get_elide(symbol_conf.comm_list, "comm", output);
	default:
		break;
	}

	if (sort__mode != SORT_MODE__BRANCH)
		return false;

	switch (idx) {
	case HISTC_SYMBOL_FROM:
		return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
	case HISTC_SYMBOL_TO:
		return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
	case HISTC_DSO_FROM:
		return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
	case HISTC_DSO_TO:
		return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
	default:
		break;
	}

	return false;
}
/*
 * Compute the elide flag for every sort entry.  If that would elide
 * every column, the flags are reverted so something is still shown.
 */
void sort__setup_elide(FILE *output)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		fmt->elide = get_elide(hse->se->se_width_idx, output);
	}

	/*
	 * It makes no sense to elide all of sort entries.
	 * Just revert them to show up again.
	 */
	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		if (!fmt->elide)
			return;
	}

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		fmt->elide = false;
	}
}
  2396. int output_field_add(struct perf_hpp_list *list, char *tok)
  2397. {
  2398. unsigned int i;
  2399. for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
  2400. struct sort_dimension *sd = &common_sort_dimensions[i];
  2401. if (strncasecmp(tok, sd->name, strlen(tok)))
  2402. continue;
  2403. return __sort_dimension__add_output(list, sd);
  2404. }
  2405. for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
  2406. struct hpp_dimension *hd = &hpp_sort_dimensions[i];
  2407. if (strncasecmp(tok, hd->name, strlen(tok)))
  2408. continue;
  2409. return __hpp_dimension__add_output(list, hd);
  2410. }
  2411. for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
  2412. struct sort_dimension *sd = &bstack_sort_dimensions[i];
  2413. if (strncasecmp(tok, sd->name, strlen(tok)))
  2414. continue;
  2415. if (sort__mode != SORT_MODE__MEMORY)
  2416. return -EINVAL;
  2417. return __sort_dimension__add_output(list, sd);
  2418. }
  2419. for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
  2420. struct sort_dimension *sd = &memory_sort_dimensions[i];
  2421. if (strncasecmp(tok, sd->name, strlen(tok)))
  2422. continue;
  2423. if (sort__mode != SORT_MODE__BRANCH)
  2424. return -EINVAL;
  2425. return __sort_dimension__add_output(list, sd);
  2426. }
  2427. return -ESRCH;
  2428. }
  2429. static int setup_output_list(struct perf_hpp_list *list, char *str)
  2430. {
  2431. char *tmp, *tok;
  2432. int ret = 0;
  2433. for (tok = strtok_r(str, ", ", &tmp);
  2434. tok; tok = strtok_r(NULL, ", ", &tmp)) {
  2435. ret = output_field_add(list, tok);
  2436. if (ret == -EINVAL) {
  2437. ui__error("Invalid --fields key: `%s'", tok);
  2438. break;
  2439. } else if (ret == -ESRCH) {
  2440. ui__error("Unknown --fields key: `%s'", tok);
  2441. break;
  2442. }
  2443. }
  2444. return ret;
  2445. }
/*
 * Clear the 'taken' flag on every dimension so the tables can be
 * re-used for a fresh sort/output setup.
 */
void reset_dimensions(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
		common_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
		hpp_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
		bstack_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
		memory_sort_dimensions[i].taken = 0;
}
  2458. bool is_strict_order(const char *order)
  2459. {
  2460. return order && (*order != '+');
  2461. }
/*
 * Parse the global 'field_order' string into output columns.  A leading
 * '+' (non-strict order) is skipped here; the append semantics are
 * handled by the sort-key setup.
 */
static int __setup_output_field(void)
{
	char *str, *strp;
	int ret = -EINVAL;

	if (field_order == NULL)
		return 0;

	strp = str = strdup(field_order);
	if (str == NULL) {
		pr_err("Not enough memory to setup output fields");
		return -ENOMEM;
	}

	/* skip the leading '+' of a non-strict order */
	if (!is_strict_order(field_order))
		strp++;

	if (!strlen(strp)) {
		ui__error("Invalid --fields key: `+'");
		goto out;
	}

	ret = setup_output_list(&perf_hpp_list, strp);

out:
	free(str);
	return ret;
}
/*
 * Top-level sorting setup: build the sort keys, add the implicit
 * "parent" key when a custom parent pattern is in use, set up the
 * output columns and cross-link sort keys and output fields.
 */
int setup_sorting(struct evlist *evlist)
{
	int err;

	err = __setup_sorting(evlist);
	if (err < 0)
		return err;

	if (parent_pattern != default_parent_pattern) {
		/* a custom parent pattern implies sorting by parent */
		err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
		if (err < 0)
			return err;
	}

	reset_dimensions();

	/*
	 * perf diff doesn't use default hpp output fields.
	 */
	if (sort__mode != SORT_MODE__DIFF)
		perf_hpp__init();

	err = __setup_output_field();
	if (err < 0)
		return err;

	/* copy sort keys to output fields */
	perf_hpp__setup_output_field(&perf_hpp_list);
	/* and then copy output fields to sort keys */
	perf_hpp__append_sort_keys(&perf_hpp_list);

	/* setup hists-specific output fields */
	if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
		return -1;

	return 0;
}
/*
 * Forget all configured sort/output state so setup_sorting() can be
 * run again from scratch.
 */
void reset_output_field(void)
{
	perf_hpp_list.need_collapse = 0;
	perf_hpp_list.parent = 0;
	perf_hpp_list.sym = 0;
	perf_hpp_list.dso = 0;

	field_order = NULL;
	sort_order = NULL;

	reset_dimensions();
	perf_hpp__reset_output_field(&perf_hpp_list);
}
  2524. #define INDENT (3*8 + 1)
/*
 * Append one key name to the help text, wrapping to a fresh indented
 * line once the current line reaches ~75 columns.  '*llen' tracks the
 * current line length across calls.
 */
static void add_key(struct strbuf *sb, const char *str, int *llen)
{
	if (*llen >= 75) {
		strbuf_addstr(sb, "\n\t\t\t ");
		*llen = INDENT;
	}
	strbuf_addf(sb, " %s", str);
	*llen += strlen(str) + 1;
}
/* Append the names of 'n' sort dimensions to the help text. */
static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n,
			    int *llen)
{
	int i;

	for (i = 0; i < n; i++)
		add_key(sb, s[i].name, llen);
}
/* Append the names of 'n' hpp dimensions to the help text. */
static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n,
				int *llen)
{
	int i;

	for (i = 0; i < n; i++)
		add_key(sb, s[i].name, llen);
}
/*
 * Build a help string listing every known sort key, wrapped for
 * terminal output.  The returned heap buffer is owned by the caller
 * (in practice it is kept for the lifetime of the process).
 */
const char *sort_help(const char *prefix)
{
	struct strbuf sb;
	char *s;
	int len = strlen(prefix) + INDENT;

	strbuf_init(&sb, 300);
	strbuf_addstr(&sb, prefix);
	add_hpp_sort_string(&sb, hpp_sort_dimensions,
			    ARRAY_SIZE(hpp_sort_dimensions), &len);
	add_sort_string(&sb, common_sort_dimensions,
			ARRAY_SIZE(common_sort_dimensions), &len);
	add_sort_string(&sb, bstack_sort_dimensions,
			ARRAY_SIZE(bstack_sort_dimensions), &len);
	add_sort_string(&sb, memory_sort_dimensions,
			ARRAY_SIZE(memory_sort_dimensions), &len);
	/* detach hands the buffer to us; release afterwards is a no-op */
	s = strbuf_detach(&sb, NULL);
	strbuf_release(&sb);
	return s;
}