/kern_2.6.32/tools/perf/util/parse-events.c

http://omnia2droid.googlecode.com/ · C · 846 lines

#include "util.h"
#include "../perf.h"
#include "parse-options.h"
#include "parse-events.h"
#include "exec_cmd.h"
#include "string.h"
#include "cache.h"
#include "header.h"

int				nr_counters;

struct perf_event_attr		attrs[MAX_COUNTERS];

struct event_symbol {
	u8		type;
	u64		config;
	const char	*symbol;
	const char	*alias;
};

enum event_result {
	EVT_FAILED,
	EVT_HANDLED,
	EVT_HANDLED_ALL
};

char debugfs_path[MAXPATHLEN];

#define CHW(x) .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_##x
#define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x

static struct event_symbol event_symbols[] = {
  { CHW(CPU_CYCLES),		"cpu-cycles",		"cycles"	},
  { CHW(INSTRUCTIONS),		"instructions",		""		},
  { CHW(CACHE_REFERENCES),	"cache-references",	""		},
  { CHW(CACHE_MISSES),		"cache-misses",		""		},
  { CHW(BRANCH_INSTRUCTIONS),	"branch-instructions",	"branches"	},
  { CHW(BRANCH_MISSES),		"branch-misses",	""		},
  { CHW(BUS_CYCLES),		"bus-cycles",		""		},

  { CSW(CPU_CLOCK),		"cpu-clock",		""		},
  { CSW(TASK_CLOCK),		"task-clock",		""		},
  { CSW(PAGE_FAULTS),		"page-faults",		"faults"	},
  { CSW(PAGE_FAULTS_MIN),	"minor-faults",		""		},
  { CSW(PAGE_FAULTS_MAJ),	"major-faults",		""		},
  { CSW(CONTEXT_SWITCHES),	"context-switches",	"cs"		},
  { CSW(CPU_MIGRATIONS),	"cpu-migrations",	"migrations"	},
};

#define __PERF_EVENT_FIELD(config, name) \
	((config & PERF_EVENT_##name##_MASK) >> PERF_EVENT_##name##_SHIFT)

#define PERF_EVENT_RAW(config)		__PERF_EVENT_FIELD(config, RAW)
#define PERF_EVENT_CONFIG(config)	__PERF_EVENT_FIELD(config, CONFIG)
#define PERF_EVENT_TYPE(config)		__PERF_EVENT_FIELD(config, TYPE)
#define PERF_EVENT_ID(config)		__PERF_EVENT_FIELD(config, EVENT)

static const char *hw_event_names[] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
};

static const char *sw_event_names[] = {
	"cpu-clock-msecs",
	"task-clock-msecs",
	"page-faults",
	"context-switches",
	"CPU-migrations",
	"minor-faults",
	"major-faults",
};

#define MAX_ALIASES 8

static const char *hw_cache[][MAX_ALIASES] = {
 { "L1-dcache",	"l1-d",		"l1d",		"L1-data",		},
 { "L1-icache",	"l1-i",		"l1i",		"L1-instruction",	},
 { "LLC",	"L2",							},
 { "dTLB",	"d-tlb",	"Data-TLB",				},
 { "iTLB",	"i-tlb",	"Instruction-TLB",			},
 { "branch",	"branches",	"bpu",		"btb",	"bpc",		},
};

static const char *hw_cache_op[][MAX_ALIASES] = {
 { "load",	"loads",	"read",					},
 { "store",	"stores",	"write",				},
 { "prefetch",	"prefetches",	"speculative-read", "speculative-load",	},
};

static const char *hw_cache_result[][MAX_ALIASES] = {
 { "refs",	"Reference",	"ops",		"access",		},
 { "misses",	"miss",							},
};

#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)

/*
 * cache operation stat
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long hw_cache_stat[C(MAX)] = {
 [C(L1D)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(L1I)]	= (CACHE_READ | CACHE_PREFETCH),
 [C(LL)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(DTLB)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(ITLB)]	= (CACHE_READ),
 [C(BPU)]	= (CACHE_READ),
};
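
/*
 * Iterate over the subsystem directories under debugfs_path
 * (<debugfs_mount_point>/tracing/events), skipping "." and "..".
 */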
#define for_each_subsystem(sys_dir, sys_dirent, sys_next)		       \
	while (!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next)       \
	if (sys_dirent.d_type == DT_DIR &&				       \
		(strcmp(sys_dirent.d_name, ".")) &&			       \
		(strcmp(sys_dirent.d_name, "..")))
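
/*
 * Return 0 if the event directory contains an "id" file that can be
 * opened, -EINVAL otherwise; used to filter out non-event directories.
 */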
static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir)
{
	char evt_path[MAXPATHLEN];
	int fd;

	snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", debugfs_path,
		 sys_dir->d_name, evt_dir->d_name);
	fd = open(evt_path, O_RDONLY);
	if (fd < 0)
		return -EINVAL;
	close(fd);

	return 0;
}
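
/*
 * Iterate over the event directories of one subsystem, keeping only
 * real events: directories that are not "."/".." and have an id file.
 */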
#define for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next)	       \
	while (!readdir_r(evt_dir, &evt_dirent, &evt_next) && evt_next)       \
	if (evt_dirent.d_type == DT_DIR &&				       \
		(strcmp(evt_dirent.d_name, ".")) &&			       \
		(strcmp(evt_dirent.d_name, "..")) &&			       \
		(!tp_event_has_id(&sys_dirent, &evt_dirent)))

#define MAX_EVENT_LENGTH 512

int valid_debugfs_mount(const char *debugfs)
{
	struct statfs st_fs;

	if (statfs(debugfs, &st_fs) < 0)
		return -ENOENT;
	else if (st_fs.f_type != (long) DEBUGFS_MAGIC)
		return -ENOENT;
	return 0;
}
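
/*
 * Walk the per-event id files under debugfs_path and return a freshly
 * allocated tracepoint_path for the event whose id matches config, or
 * NULL if no such event is found.
 */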
struct tracepoint_path *tracepoint_id_to_path(u64 config)
{
	struct tracepoint_path *path = NULL;
	DIR *sys_dir, *evt_dir;
	struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
	char id_buf[4];
	int fd;
	u64 id;
	char evt_path[MAXPATHLEN];
	char dir_path[MAXPATHLEN];

	if (valid_debugfs_mount(debugfs_path))
		return NULL;

	sys_dir = opendir(debugfs_path);
	if (!sys_dir)
		return NULL;

	for_each_subsystem(sys_dir, sys_dirent, sys_next) {

		snprintf(dir_path, MAXPATHLEN, "%s/%s", debugfs_path,
			 sys_dirent.d_name);
		evt_dir = opendir(dir_path);
		if (!evt_dir)
			continue;

		for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {

			snprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path,
				 evt_dirent.d_name);
			fd = open(evt_path, O_RDONLY);
			if (fd < 0)
				continue;
			if (read(fd, id_buf, sizeof(id_buf)) < 0) {
				close(fd);
				continue;
			}
			close(fd);
			id = atoll(id_buf);
			if (id == config) {
				closedir(evt_dir);
				closedir(sys_dir);
				/* allocate the struct itself, not just a pointer's worth */
				path = calloc(1, sizeof(*path));
				if (!path)
					return NULL;
				path->system = malloc(MAX_EVENT_LENGTH);
				if (!path->system) {
					free(path);
					return NULL;
				}
				path->name = malloc(MAX_EVENT_LENGTH);
				if (!path->name) {
					free(path->system);
					free(path);
					return NULL;
				}
				strncpy(path->system, sys_dirent.d_name,
					MAX_EVENT_LENGTH);
				strncpy(path->name, evt_dirent.d_name,
					MAX_EVENT_LENGTH);
				return path;
			}
		}
		closedir(evt_dir);
	}

	closedir(sys_dir);
	return NULL;
}

#define TP_PATH_LEN (MAX_EVENT_LENGTH * 2 + 1)

static const char *tracepoint_id_to_name(u64 config)
{
	static char buf[TP_PATH_LEN];
	struct tracepoint_path *path;

	path = tracepoint_id_to_path(config);
	if (path) {
		snprintf(buf, TP_PATH_LEN, "%s:%s", path->system, path->name);
		free(path->name);
		free(path->system);
		free(path);
	} else
		snprintf(buf, TP_PATH_LEN, "%s:%s", "unknown", "unknown");

	return buf;
}

static int is_cache_op_valid(u8 cache_type, u8 cache_op)
{
	if (hw_cache_stat[cache_type] & COP(cache_op))
		return 1;	/* valid */
	else
		return 0;	/* invalid */
}

static char *event_cache_name(u8 cache_type, u8 cache_op, u8 cache_result)
{
	static char name[50];

	if (cache_result) {
		sprintf(name, "%s-%s-%s", hw_cache[cache_type][0],
			hw_cache_op[cache_op][0],
			hw_cache_result[cache_result][0]);
	} else {
		sprintf(name, "%s-%s", hw_cache[cache_type][0],
			hw_cache_op[cache_op][1]);
	}

	return name;
}
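
/*
 * Map a counter's (type, config) pair back to a human-readable event
 * name; falls back to "unknown..." strings for out-of-range values.
 */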
const char *event_name(int counter)
{
	u64 config = attrs[counter].config;
	int type = attrs[counter].type;

	return __event_name(type, config);
}

const char *__event_name(int type, u64 config)
{
	static char buf[32];

	if (type == PERF_TYPE_RAW) {
		sprintf(buf, "raw 0x%llx", config);
		return buf;
	}

	switch (type) {
	case PERF_TYPE_HARDWARE:
		if (config < PERF_COUNT_HW_MAX)
			return hw_event_names[config];
		return "unknown-hardware";

	case PERF_TYPE_HW_CACHE: {
		u8 cache_type, cache_op, cache_result;

		cache_type   = (config >>  0) & 0xff;
		if (cache_type > PERF_COUNT_HW_CACHE_MAX)
			return "unknown-ext-hardware-cache-type";

		cache_op     = (config >>  8) & 0xff;
		if (cache_op > PERF_COUNT_HW_CACHE_OP_MAX)
			return "unknown-ext-hardware-cache-op";

		cache_result = (config >> 16) & 0xff;
		if (cache_result > PERF_COUNT_HW_CACHE_RESULT_MAX)
			return "unknown-ext-hardware-cache-result";

		if (!is_cache_op_valid(cache_type, cache_op))
			return "invalid-cache";

		return event_cache_name(cache_type, cache_op, cache_result);
	}

	case PERF_TYPE_SOFTWARE:
		if (config < PERF_COUNT_SW_MAX)
			return sw_event_names[config];
		return "unknown-software";

	case PERF_TYPE_TRACEPOINT:
		return tracepoint_id_to_name(config);

	default:
		break;
	}

	return "unknown";
}
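
/*
 * Match the start of *str against every alias in names[], consume the
 * longest match and return its row index, or -1 if nothing matches.
 */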
static int parse_aliases(const char **str, const char *names[][MAX_ALIASES], int size)
{
	int i, j;
	int n, longest = -1;

	for (i = 0; i < size; i++) {
		for (j = 0; j < MAX_ALIASES && names[i][j]; j++) {
			n = strlen(names[i][j]);
			if (n > longest && !strncasecmp(*str, names[i][j], n))
				longest = n;
		}
		if (longest > 0) {
			*str += longest;
			return i;
		}
	}

	return -1;
}
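
/*
 * Parse generic hardware cache events of the form
 * <cache>[-<op>][-<result>], e.g. "L1-dcache-load-misses"; the op
 * defaults to reads and the result to accesses when omitted.
 */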
static enum event_result
parse_generic_hw_event(const char **str, struct perf_event_attr *attr)
{
	const char *s = *str;
	int cache_type = -1, cache_op = -1, cache_result = -1;

	cache_type = parse_aliases(&s, hw_cache, PERF_COUNT_HW_CACHE_MAX);
	/*
	 * No fallback - if we cannot get a clear cache type
	 * then bail out:
	 */
	if (cache_type == -1)
		return EVT_FAILED;

	while ((cache_op == -1 || cache_result == -1) && *s == '-') {
		++s;

		if (cache_op == -1) {
			cache_op = parse_aliases(&s, hw_cache_op,
						PERF_COUNT_HW_CACHE_OP_MAX);
			if (cache_op >= 0) {
				if (!is_cache_op_valid(cache_type, cache_op))
					return 0;
				continue;
			}
		}

		if (cache_result == -1) {
			cache_result = parse_aliases(&s, hw_cache_result,
						PERF_COUNT_HW_CACHE_RESULT_MAX);
			if (cache_result >= 0)
				continue;
		}

		/*
		 * Can't parse this as a cache op or result, so back up
		 * to the '-'.
		 */
		--s;
		break;
	}

	/*
	 * Fall back to reads:
	 */
	if (cache_op == -1)
		cache_op = PERF_COUNT_HW_CACHE_OP_READ;

	/*
	 * Fall back to accesses:
	 */
	if (cache_result == -1)
		cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;

	attr->config = cache_type | (cache_op << 8) | (cache_result << 16);
	attr->type = PERF_TYPE_HW_CACHE;

	*str = s;
	return EVT_HANDLED;
}
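
/*
 * Resolve one sys:event tracepoint to its numeric id via the debugfs
 * id file; the "record" flag additionally enables raw/time/cpu samples.
 */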
static enum event_result
parse_single_tracepoint_event(char *sys_name,
			      const char *evt_name,
			      unsigned int evt_length,
			      char *flags,
			      struct perf_event_attr *attr,
			      const char **strp)
{
	char evt_path[MAXPATHLEN];
	char id_buf[4];
	u64 id;
	int fd;

	if (flags) {
		if (!strncmp(flags, "record", strlen(flags))) {
			attr->sample_type |= PERF_SAMPLE_RAW;
			attr->sample_type |= PERF_SAMPLE_TIME;
			attr->sample_type |= PERF_SAMPLE_CPU;
		}
	}

	snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", debugfs_path,
		 sys_name, evt_name);

	fd = open(evt_path, O_RDONLY);
	if (fd < 0)
		return EVT_FAILED;

	if (read(fd, id_buf, sizeof(id_buf)) < 0) {
		close(fd);
		return EVT_FAILED;
	}

	close(fd);
	id = atoll(id_buf);
	attr->config = id;
	attr->type = PERF_TYPE_TRACEPOINT;
	*strp = evt_name + evt_length;

	return EVT_HANDLED;
}

/* sys + ':' + event + ':' + flags */
#define MAX_EVOPT_LEN	(MAX_EVENT_LENGTH * 2 + 2 + 128)

static enum event_result
parse_subsystem_tracepoint_event(char *sys_name, char *flags)
{
	char evt_path[MAXPATHLEN];
	struct dirent *evt_ent;
	DIR *evt_dir;

	snprintf(evt_path, MAXPATHLEN, "%s/%s", debugfs_path, sys_name);
	evt_dir = opendir(evt_path);

	if (!evt_dir) {
		perror("Can't open event dir");
		return EVT_FAILED;
	}

	while ((evt_ent = readdir(evt_dir))) {
		char event_opt[MAX_EVOPT_LEN + 1];
		int len;
		unsigned int rem = MAX_EVOPT_LEN;

		if (!strcmp(evt_ent->d_name, ".")
		    || !strcmp(evt_ent->d_name, "..")
		    || !strcmp(evt_ent->d_name, "enable")
		    || !strcmp(evt_ent->d_name, "filter"))
			continue;

		len = snprintf(event_opt, MAX_EVOPT_LEN, "%s:%s", sys_name,
			       evt_ent->d_name);
		if (len < 0)
			return EVT_FAILED;

		rem -= len;
		if (flags) {
			if (rem < strlen(flags) + 1)
				return EVT_FAILED;

			strcat(event_opt, ":");
			strcat(event_opt, flags);
		}

		if (parse_events(NULL, event_opt, 0))
			return EVT_FAILED;
	}

	return EVT_HANDLED_ALL;
}
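
/*
 * Parse "sys:event[:flags]" tracepoint syntax; "sys:*" expands to all
 * events of the subsystem, anything else is handled as a single event.
 */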
static enum event_result parse_tracepoint_event(const char **strp,
				    struct perf_event_attr *attr)
{
	const char *evt_name;
	char *flags;
	char sys_name[MAX_EVENT_LENGTH];
	unsigned int sys_length, evt_length;

	if (valid_debugfs_mount(debugfs_path))
		return 0;

	evt_name = strchr(*strp, ':');
	if (!evt_name)
		return EVT_FAILED;

	sys_length = evt_name - *strp;
	if (sys_length >= MAX_EVENT_LENGTH)
		return 0;

	strncpy(sys_name, *strp, sys_length);
	sys_name[sys_length] = '\0';
	evt_name = evt_name + 1;

	flags = strchr(evt_name, ':');
	if (flags) {
		/* split it out: */
		evt_name = strndup(evt_name, flags - evt_name);
		flags++;
	}

	evt_length = strlen(evt_name);
	if (evt_length >= MAX_EVENT_LENGTH)
		return EVT_FAILED;

	if (!strcmp(evt_name, "*")) {
		*strp = evt_name + evt_length;
		return parse_subsystem_tracepoint_event(sys_name, flags);
	} else
		return parse_single_tracepoint_event(sys_name, evt_name,
						     evt_length, flags,
						     attr, strp);
}

static int check_events(const char *str, unsigned int i)
{
	int n;

	n = strlen(event_symbols[i].symbol);
	if (!strncmp(str, event_symbols[i].symbol, n))
		return n;

	n = strlen(event_symbols[i].alias);
	if (n)
		if (!strncmp(str, event_symbols[i].alias, n))
			return n;
	return 0;
}

static enum event_result
parse_symbolic_event(const char **strp, struct perf_event_attr *attr)
{
	const char *str = *strp;
	unsigned int i;
	int n;

	for (i = 0; i < ARRAY_SIZE(event_symbols); i++) {
		n = check_events(str, i);
		if (n > 0) {
			attr->type = event_symbols[i].type;
			attr->config = event_symbols[i].config;
			*strp = str + n;
			return EVT_HANDLED;
		}
	}
	return EVT_FAILED;
}
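
/*
 * Parse raw events written as "rNNN", where NNN is the raw hardware
 * event descriptor in hexadecimal.
 */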
static enum event_result
parse_raw_event(const char **strp, struct perf_event_attr *attr)
{
	const char *str = *strp;
	u64 config;
	int n;

	if (*str != 'r')
		return EVT_FAILED;
	n = hex2u64(str + 1, &config);
	if (n > 0) {
		*strp = str + n + 1;
		attr->type = PERF_TYPE_RAW;
		attr->config = config;
		return EVT_HANDLED;
	}
	return EVT_FAILED;
}

static enum event_result
parse_numeric_event(const char **strp, struct perf_event_attr *attr)
{
	const char *str = *strp;
	char *endp;
	unsigned long type;
	u64 config;

	type = strtoul(str, &endp, 0);
	if (endp > str && type < PERF_TYPE_MAX && *endp == ':') {
		str = endp + 1;
		config = strtoul(str, &endp, 0);
		if (endp > str) {
			attr->type = type;
			attr->config = config;
			*strp = endp;
			return EVT_HANDLED;
		}
	}
	return EVT_FAILED;
}
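
/*
 * Parse an optional ":ukh" suffix: each letter that is present keeps
 * counting at that privilege level (user/kernel/hypervisor) and the
 * levels not listed are excluded via the attr's exclude_* bits.
 */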
static enum event_result
parse_event_modifier(const char **strp, struct perf_event_attr *attr)
{
	const char *str = *strp;
	int eu = 1, ek = 1, eh = 1;

	if (*str++ != ':')
		return 0;
	while (*str) {
		if (*str == 'u')
			eu = 0;
		else if (*str == 'k')
			ek = 0;
		else if (*str == 'h')
			eh = 0;
		else
			break;
		++str;
	}
	if (str >= *strp + 2) {
		*strp = str;
		attr->exclude_user   = eu;
		attr->exclude_kernel = ek;
		attr->exclude_hv     = eh;
		return 1;
	}
	return 0;
}

/*
 * Each event can have multiple symbolic names.
 * Symbolic names are (almost) exactly matched.
 */
static enum event_result
parse_event_symbols(const char **str, struct perf_event_attr *attr)
{
	enum event_result ret;

	ret = parse_tracepoint_event(str, attr);
	if (ret != EVT_FAILED)
		goto modifier;

	ret = parse_raw_event(str, attr);
	if (ret != EVT_FAILED)
		goto modifier;

	ret = parse_numeric_event(str, attr);
	if (ret != EVT_FAILED)
		goto modifier;

	ret = parse_symbolic_event(str, attr);
	if (ret != EVT_FAILED)
		goto modifier;

	ret = parse_generic_hw_event(str, attr);
	if (ret != EVT_FAILED)
		goto modifier;

	return EVT_FAILED;

modifier:
	parse_event_modifier(str, attr);

	return ret;
}

static void store_event_type(const char *orgname)
{
	char filename[PATH_MAX], *c;
	FILE *file;
	int id;

	sprintf(filename, "%s/", debugfs_path);
	strncat(filename, orgname, strlen(orgname));
	strcat(filename, "/id");

	c = strchr(filename, ':');
	if (c)
		*c = '/';

	file = fopen(filename, "r");
	if (!file)
		return;
	if (fscanf(file, "%i", &id) < 1)
		die("cannot store event ID");
	fclose(file);
	perf_header__push_event(id, orgname);
}
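
/*
 * Option callback for -e: parse a comma/whitespace separated list of
 * event specs and append each parsed attr to the global attrs[] array,
 * failing once MAX_COUNTERS is reached or a spec cannot be parsed.
 */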
int parse_events(const struct option *opt __used, const char *str, int unset __used)
{
	struct perf_event_attr attr;
	enum event_result ret;

	if (strchr(str, ':'))
		store_event_type(str);

	for (;;) {
		if (nr_counters == MAX_COUNTERS)
			return -1;

		memset(&attr, 0, sizeof(attr));
		ret = parse_event_symbols(&str, &attr);
		if (ret == EVT_FAILED)
			return -1;

		if (!(*str == 0 || *str == ',' || isspace(*str)))
			return -1;

		if (ret != EVT_HANDLED_ALL) {
			attrs[nr_counters] = attr;
			nr_counters++;
		}

		if (*str == 0)
			break;
		if (*str == ',')
			++str;
		while (isspace(*str))
			++str;
	}

	return 0;
}

static const char * const event_type_descriptors[] = {
	"",
	"Hardware event",
	"Software event",
	"Tracepoint event",
	"Hardware cache event",
};

/*
 * Print the events from <debugfs_mount_point>/tracing/events
 */
static void print_tracepoint_events(void)
{
	DIR *sys_dir, *evt_dir;
	struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
	char evt_path[MAXPATHLEN];
	char dir_path[MAXPATHLEN];

	if (valid_debugfs_mount(debugfs_path))
		return;

	sys_dir = opendir(debugfs_path);
	if (!sys_dir)
		return;

	for_each_subsystem(sys_dir, sys_dirent, sys_next) {

		snprintf(dir_path, MAXPATHLEN, "%s/%s", debugfs_path,
			 sys_dirent.d_name);
		evt_dir = opendir(dir_path);
		if (!evt_dir)
			continue;

		for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
			snprintf(evt_path, MAXPATHLEN, "%s:%s",
				 sys_dirent.d_name, evt_dirent.d_name);
			fprintf(stderr, " %-42s [%s]\n", evt_path,
				event_type_descriptors[PERF_TYPE_TRACEPOINT+1]);
		}
		closedir(evt_dir);
	}
	closedir(sys_dir);
}

/*
 * Print the help text for the event symbols:
 */
void print_events(void)
{
	struct event_symbol *syms = event_symbols;
	unsigned int i, type, op, prev_type = -1;
	char name[40];

	fprintf(stderr, "\n");
	fprintf(stderr, "List of pre-defined events (to be used in -e):\n");

	for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) {
		type = syms->type + 1;
		if (type >= ARRAY_SIZE(event_type_descriptors))
			type = 0;

		if (type != prev_type)
			fprintf(stderr, "\n");

		if (strlen(syms->alias))
			sprintf(name, "%s OR %s", syms->symbol, syms->alias);
		else
			strcpy(name, syms->symbol);
		fprintf(stderr, " %-42s [%s]\n", name,
			event_type_descriptors[type]);

		prev_type = type;
	}

	fprintf(stderr, "\n");
	for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
		for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
			/* skip invalid cache type */
			if (!is_cache_op_valid(type, op))
				continue;
			for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
				fprintf(stderr, " %-42s [%s]\n",
					event_cache_name(type, op, i),
					event_type_descriptors[4]);
			}
		}
	}

	fprintf(stderr, "\n");
	fprintf(stderr, " %-42s [raw hardware event descriptor]\n",
		"rNNN");
	fprintf(stderr, "\n");

	print_tracepoint_events();

	exit(129);
}