
/kernel/trace/trace_boot.c

https://github.com/kvaneesh/linux
// SPDX-License-Identifier: GPL-2.0
/*
 * trace_boot.c
 * Tracing kernel boot-time
 */
#define pr_fmt(fmt)	"trace_boot: " fmt

#include <linux/bootconfig.h>
#include <linux/cpumask.h>
#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/trace.h>
#include <linux/trace_events.h>

#include "trace.h"

#define MAX_BUF_LEN 256

static void __init
trace_boot_set_instance_options(struct trace_array *tr, struct xbc_node *node)
{
	struct xbc_node *anode;
	const char *p;
	char buf[MAX_BUF_LEN];
	unsigned long v = 0;

	/* Common ftrace options */
	xbc_node_for_each_array_value(node, "options", anode, p) {
		if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf)) {
			pr_err("String is too long: %s\n", p);
			continue;
		}

		if (trace_set_options(tr, buf) < 0)
			pr_err("Failed to set option: %s\n", buf);
	}

	p = xbc_node_find_value(node, "tracing_on", NULL);
	if (p && *p != '\0') {
		if (kstrtoul(p, 10, &v))
			pr_err("Failed to set tracing on: %s\n", p);
		if (v)
			tracer_tracing_on(tr);
		else
			tracer_tracing_off(tr);
	}

	p = xbc_node_find_value(node, "trace_clock", NULL);
	if (p && *p != '\0') {
		if (tracing_set_clock(tr, p) < 0)
			pr_err("Failed to set trace clock: %s\n", p);
	}

	p = xbc_node_find_value(node, "buffer_size", NULL);
	if (p && *p != '\0') {
		v = memparse(p, NULL);
		if (v < PAGE_SIZE)
			pr_err("Buffer size is too small: %s\n", p);
		if (tracing_resize_ring_buffer(tr, v, RING_BUFFER_ALL_CPUS) < 0)
			pr_err("Failed to resize trace buffer to %s\n", p);
	}

	p = xbc_node_find_value(node, "cpumask", NULL);
	if (p && *p != '\0') {
		cpumask_var_t new_mask;

		if (alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
			if (cpumask_parse(p, new_mask) < 0 ||
			    tracing_set_cpumask(tr, new_mask) < 0)
				pr_err("Failed to set new CPU mask %s\n", p);
			free_cpumask_var(new_mask);
		}
	}
}
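/*
 * Illustrative bootconfig snippet for the per-instance options handled
 * above. The key names are exactly the ones this function parses; the
 * concrete values are examples only, following the conventions of
 * Documentation/trace/boottime-trace.rst (cpumask is a hex CPU mask,
 * as accepted by cpumask_parse()):
 *
 *	ftrace {
 *		options = "sym-addr"
 *		tracing_on = 1
 *		trace_clock = global
 *		buffer_size = 1MB
 *		cpumask = "f"
 *	}
 */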
#ifdef CONFIG_EVENT_TRACING
static void __init
trace_boot_enable_events(struct trace_array *tr, struct xbc_node *node)
{
	struct xbc_node *anode;
	char buf[MAX_BUF_LEN];
	const char *p;

	xbc_node_for_each_array_value(node, "events", anode, p) {
		if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf)) {
			pr_err("String is too long: %s\n", p);
			continue;
		}

		if (ftrace_set_clr_event(tr, buf, 1) < 0)
			pr_err("Failed to enable event: %s\n", p);
	}
}
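/*
 * Illustrative "events" list for the loop above; each entry takes the
 * same GROUP:EVENT form as tracefs set_event (the event names here are
 * examples only):
 *
 *	ftrace.events = "sched:sched_switch", "task:task_newtask"
 */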
#ifdef CONFIG_KPROBE_EVENTS
static int __init
trace_boot_add_kprobe_event(struct xbc_node *node, const char *event)
{
	struct dynevent_cmd cmd;
	struct xbc_node *anode;
	char buf[MAX_BUF_LEN];
	const char *val;
	int ret = 0;

	xbc_node_for_each_array_value(node, "probes", anode, val) {
		kprobe_event_cmd_init(&cmd, buf, MAX_BUF_LEN);

		ret = kprobe_event_gen_cmd_start(&cmd, event, val);
		if (ret) {
			pr_err("Failed to generate probe: %s\n", buf);
			break;
		}

		ret = kprobe_event_gen_cmd_end(&cmd);
		if (ret) {
			pr_err("Failed to add probe: %s\n", buf);
			break;
		}
	}

	return ret;
}
#else
static inline int __init
trace_boot_add_kprobe_event(struct xbc_node *node, const char *event)
{
	pr_err("Kprobe event is not supported.\n");
	return -ENOTSUPP;
}
#endif
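/*
 * Illustrative kprobe event definition consumed by the "probes" loop
 * above; the probed symbol and fetch arguments are examples only:
 *
 *	ftrace.event.kprobes.myevent.probes = "vfs_read $arg1 $arg2"
 */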
#ifdef CONFIG_SYNTH_EVENTS
static int __init
trace_boot_add_synth_event(struct xbc_node *node, const char *event)
{
	struct dynevent_cmd cmd;
	struct xbc_node *anode;
	char buf[MAX_BUF_LEN];
	const char *p;
	int ret;

	synth_event_cmd_init(&cmd, buf, MAX_BUF_LEN);

	ret = synth_event_gen_cmd_start(&cmd, event, NULL);
	if (ret)
		return ret;

	xbc_node_for_each_array_value(node, "fields", anode, p) {
		ret = synth_event_add_field_str(&cmd, p);
		if (ret)
			return ret;
	}

	ret = synth_event_gen_cmd_end(&cmd);
	if (ret < 0)
		pr_err("Failed to add synthetic event: %s\n", buf);

	return ret;
}
#else
static inline int __init
trace_boot_add_synth_event(struct xbc_node *node, const char *event)
{
	pr_err("Synthetic event is not supported.\n");
	return -ENOTSUPP;
}
#endif
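/*
 * Illustrative synthetic event definition for the "fields" loop above,
 * after the style of Documentation/trace/boottime-trace.rst (the event
 * and field names are examples only):
 *
 *	ftrace.event.synthetic.initcall_latency.fields =
 *		"unsigned long func", "u64 lat"
 */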
#ifdef CONFIG_HIST_TRIGGERS
static int __init __printf(3, 4)
append_printf(char **bufp, char *end, const char *fmt, ...)
{
	va_list args;
	int ret;

	if (*bufp == end)
		return -ENOSPC;

	va_start(args, fmt);
	ret = vsnprintf(*bufp, end - *bufp, fmt, args);
	if (ret < end - *bufp) {
		*bufp += ret;
	} else {
		*bufp = end;
		ret = -ERANGE;
	}
	va_end(args);

	return ret;
}

static int __init
append_str_nospace(char **bufp, char *end, const char *str)
{
	char *p = *bufp;
	int len;

	while (p < end - 1 && *str != '\0') {
		if (!isspace(*str))
			*(p++) = *str;
		str++;
	}
	*p = '\0';
	if (p == end - 1) {
		*bufp = end;
		return -ENOSPC;
	}
	len = p - *bufp;
	*bufp = p;

	return (int)len;
}
static int __init
trace_boot_hist_add_array(struct xbc_node *hnode, char **bufp,
			  char *end, const char *key)
{
	struct xbc_node *anode;
	const char *p;
	char sep;

	p = xbc_node_find_value(hnode, key, &anode);
	if (p) {
		if (!anode) {
			pr_err("hist.%s requires value(s).\n", key);
			return -EINVAL;
		}

		append_printf(bufp, end, ":%s", key);
		sep = '=';
		xbc_array_for_each_value(anode, p) {
			append_printf(bufp, end, "%c%s", sep, p);
			if (sep == '=')
				sep = ',';
		}
	} else
		return -ENOENT;

	return 0;
}

static int __init
trace_boot_hist_add_one_handler(struct xbc_node *hnode, char **bufp,
				char *end, const char *handler,
				const char *param)
{
	struct xbc_node *knode, *anode;
	const char *p;
	char sep;

	/* Compose 'handler' parameter */
	p = xbc_node_find_value(hnode, param, NULL);
	if (!p) {
		pr_err("hist.%s requires '%s' option.\n",
		       xbc_node_get_data(hnode), param);
		return -EINVAL;
	}
	append_printf(bufp, end, ":%s(%s)", handler, p);

	/* Compose 'action' parameter */
	knode = xbc_node_find_subkey(hnode, "trace");
	if (!knode)
		knode = xbc_node_find_subkey(hnode, "save");
	if (knode) {
		anode = xbc_node_get_child(knode);
		if (!anode || !xbc_node_is_value(anode)) {
			pr_err("hist.%s.%s requires value(s).\n",
			       xbc_node_get_data(hnode),
			       xbc_node_get_data(knode));
			return -EINVAL;
		}

		append_printf(bufp, end, ".%s", xbc_node_get_data(knode));
		sep = '(';
		xbc_array_for_each_value(anode, p) {
			append_printf(bufp, end, "%c%s", sep, p);
			if (sep == '(')
				sep = ',';
		}
		append_printf(bufp, end, ")");
	} else if (xbc_node_find_subkey(hnode, "snapshot")) {
		append_printf(bufp, end, ".snapshot()");
	} else {
		pr_err("hist.%s requires an action.\n",
		       xbc_node_get_data(hnode));
		return -EINVAL;
	}

	return 0;
}
static int __init
trace_boot_hist_add_handlers(struct xbc_node *hnode, char **bufp,
			     char *end, const char *param)
{
	struct xbc_node *node;
	const char *p, *handler;
	int ret = 0;

	handler = xbc_node_get_data(hnode);

	xbc_node_for_each_subkey(hnode, node) {
		p = xbc_node_get_data(node);
		if (!isdigit(p[0]))
			continue;
		/* All digit-started nodes should be instances. */
		ret = trace_boot_hist_add_one_handler(node, bufp, end, handler, param);
		if (ret < 0)
			break;
	}
	if (xbc_node_find_subkey(hnode, param))
		ret = trace_boot_hist_add_one_handler(hnode, bufp, end, handler, param);

	return ret;
}
/*
 * Histogram boottime tracing syntax.
 *
 * ftrace.[instance.INSTANCE.]event.GROUP.EVENT.hist[.N] {
 *	keys = <KEY>[,...]
 *	values = <VAL>[,...]
 *	sort = <SORT-KEY>[,...]
 *	size = <ENTRIES>
 *	name = <HISTNAME>
 *	var { <VAR> = <EXPR> ... }
 *	pause|continue|clear
 *	onmax|onchange[.N] { var = <VAR>; <ACTION> [= <PARAM>] }
 *	onmatch[.N] { event = <EVENT>; <ACTION> [= <PARAM>] }
 *	filter = <FILTER>
 * }
 *
 * Where <ACTION> is one of:
 *
 *	trace = <EVENT>, <ARG1>[, ...]
 *	save = <ARG1>[, ...]
 *	snapshot
 */
static int __init
trace_boot_compose_hist_cmd(struct xbc_node *hnode, char *buf, size_t size)
{
	struct xbc_node *node, *knode;
	char *end = buf + size;
	const char *p;
	int ret = 0;

	append_printf(&buf, end, "hist");

	ret = trace_boot_hist_add_array(hnode, &buf, end, "keys");
	if (ret < 0) {
		if (ret == -ENOENT)
			pr_err("hist requires keys.\n");
		return -EINVAL;
	}

	ret = trace_boot_hist_add_array(hnode, &buf, end, "values");
	if (ret == -EINVAL)
		return ret;
	ret = trace_boot_hist_add_array(hnode, &buf, end, "sort");
	if (ret == -EINVAL)
		return ret;

	p = xbc_node_find_value(hnode, "size", NULL);
	if (p)
		append_printf(&buf, end, ":size=%s", p);

	p = xbc_node_find_value(hnode, "name", NULL);
	if (p)
		append_printf(&buf, end, ":name=%s", p);

	node = xbc_node_find_subkey(hnode, "var");
	if (node) {
		xbc_node_for_each_key_value(node, knode, p) {
			/* Expression must not include spaces. */
			append_printf(&buf, end, ":%s=",
				      xbc_node_get_data(knode));
			append_str_nospace(&buf, end, p);
		}
	}

	/* Histogram control attributes (mutually exclusive) */
	if (xbc_node_find_value(hnode, "pause", NULL))
		append_printf(&buf, end, ":pause");
	else if (xbc_node_find_value(hnode, "continue", NULL))
		append_printf(&buf, end, ":continue");
	else if (xbc_node_find_value(hnode, "clear", NULL))
		append_printf(&buf, end, ":clear");

	/* Histogram handler and actions */
	node = xbc_node_find_subkey(hnode, "onmax");
	if (node && trace_boot_hist_add_handlers(node, &buf, end, "var") < 0)
		return -EINVAL;
	node = xbc_node_find_subkey(hnode, "onchange");
	if (node && trace_boot_hist_add_handlers(node, &buf, end, "var") < 0)
		return -EINVAL;
	node = xbc_node_find_subkey(hnode, "onmatch");
	if (node && trace_boot_hist_add_handlers(node, &buf, end, "event") < 0)
		return -EINVAL;

	p = xbc_node_find_value(hnode, "filter", NULL);
	if (p)
		append_printf(&buf, end, " if %s", p);

	if (buf == end) {
		pr_err("hist exceeds the max command length.\n");
		return -E2BIG;
	}

	return 0;
}
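/*
 * Illustrative input/output for the composer above (the event and key
 * names are examples only). Given the bootconfig:
 *
 *	ftrace.event.sched.sched_process_exec.hist {
 *		keys = "comm"
 *		size = 2048
 *		name = execs
 *	}
 *
 * this function composes the hist trigger string:
 *
 *	hist:keys=comm:size=2048:name=execs
 */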
static void __init
trace_boot_init_histograms(struct trace_event_file *file,
			   struct xbc_node *hnode, char *buf, size_t size)
{
	struct xbc_node *node;
	const char *p;
	char *tmp;

	xbc_node_for_each_subkey(hnode, node) {
		p = xbc_node_get_data(node);
		if (!isdigit(p[0]))
			continue;
		/* All digit-started nodes should be instances. */
		if (trace_boot_compose_hist_cmd(node, buf, size) == 0) {
			tmp = kstrdup(buf, GFP_KERNEL);
			if (trigger_process_regex(file, buf) < 0)
				pr_err("Failed to apply hist trigger: %s\n", tmp);
			kfree(tmp);
		}
	}

	if (xbc_node_find_subkey(hnode, "keys")) {
		if (trace_boot_compose_hist_cmd(hnode, buf, size) == 0) {
			tmp = kstrdup(buf, GFP_KERNEL);
			if (trigger_process_regex(file, buf) < 0)
				pr_err("Failed to apply hist trigger: %s\n", tmp);
			kfree(tmp);
		}
	}
}
#else
static void __init
trace_boot_init_histograms(struct trace_event_file *file,
			   struct xbc_node *hnode, char *buf, size_t size)
{
	/* do nothing */
}
#endif
static void __init
trace_boot_init_one_event(struct trace_array *tr, struct xbc_node *gnode,
			  struct xbc_node *enode)
{
	struct trace_event_file *file;
	struct xbc_node *anode;
	char buf[MAX_BUF_LEN];
	const char *p, *group, *event;

	group = xbc_node_get_data(gnode);
	event = xbc_node_get_data(enode);

	if (!strcmp(group, "kprobes"))
		if (trace_boot_add_kprobe_event(enode, event) < 0)
			return;
	if (!strcmp(group, "synthetic"))
		if (trace_boot_add_synth_event(enode, event) < 0)
			return;

	mutex_lock(&event_mutex);
	file = find_event_file(tr, group, event);
	if (!file) {
		pr_err("Failed to find event: %s:%s\n", group, event);
		goto out;
	}

	p = xbc_node_find_value(enode, "filter", NULL);
	if (p && *p != '\0') {
		if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf))
			pr_err("filter string is too long: %s\n", p);
		else if (apply_event_filter(file, buf) < 0)
			pr_err("Failed to apply filter: %s\n", buf);
	}

	if (IS_ENABLED(CONFIG_HIST_TRIGGERS)) {
		xbc_node_for_each_array_value(enode, "actions", anode, p) {
			if (strlcpy(buf, p, ARRAY_SIZE(buf)) >= ARRAY_SIZE(buf))
				pr_err("action string is too long: %s\n", p);
			else if (trigger_process_regex(file, buf) < 0)
				pr_err("Failed to apply an action: %s\n", p);
		}
		anode = xbc_node_find_subkey(enode, "hist");
		if (anode)
			trace_boot_init_histograms(file, anode, buf,
						   ARRAY_SIZE(buf));
	} else if (xbc_node_find_value(enode, "actions", NULL))
		pr_err("Failed to apply event actions because CONFIG_HIST_TRIGGERS is not set.\n");

	if (xbc_node_find_value(enode, "enable", NULL)) {
		if (trace_event_enable_disable(file, 1, 0) < 0)
			pr_err("Failed to enable event node: %s:%s\n",
			       group, event);
	}
out:
	mutex_unlock(&event_mutex);
}
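/*
 * Illustrative per-event node handled above; filter, actions, and
 * enable are the keys this function parses. The event and filter
 * expression follow the example in
 * Documentation/trace/boottime-trace.rst:
 *
 *	ftrace.event.task.task_newtask {
 *		filter = "pid < 128"
 *		enable
 *	}
 */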
static void __init
trace_boot_init_events(struct trace_array *tr, struct xbc_node *node)
{
	struct xbc_node *gnode, *enode;
	bool enable, enable_all = false;
	const char *data;

	node = xbc_node_find_subkey(node, "event");
	if (!node)
		return;
	/* per-event key starts with "event.GROUP.EVENT" */
	xbc_node_for_each_subkey(node, gnode) {
		data = xbc_node_get_data(gnode);
		if (!strcmp(data, "enable")) {
			enable_all = true;
			continue;
		}
		enable = false;
		xbc_node_for_each_subkey(gnode, enode) {
			data = xbc_node_get_data(enode);
			if (!strcmp(data, "enable")) {
				enable = true;
				continue;
			}
			trace_boot_init_one_event(tr, gnode, enode);
		}
		/* Event enablement must be done after event settings */
		if (enable) {
			data = xbc_node_get_data(gnode);
			trace_array_set_clr_event(tr, data, NULL, true);
		}
	}
	/* Ditto */
	if (enable_all)
		trace_array_set_clr_event(tr, NULL, NULL, true);
}
#else
#define trace_boot_enable_events(tr, node) do {} while (0)
#define trace_boot_init_events(tr, node) do {} while (0)
#endif
#ifdef CONFIG_DYNAMIC_FTRACE
static void __init
trace_boot_set_ftrace_filter(struct trace_array *tr, struct xbc_node *node)
{
	struct xbc_node *anode;
	const char *p;
	char *q;

	xbc_node_for_each_array_value(node, "ftrace.filters", anode, p) {
		q = kstrdup(p, GFP_KERNEL);
		if (!q)
			return;
		if (ftrace_set_filter(tr->ops, q, strlen(q), 0) < 0)
			pr_err("Failed to add %s to ftrace filter\n", p);
		else
			ftrace_filter_param = true;
		kfree(q);
	}
	xbc_node_for_each_array_value(node, "ftrace.notraces", anode, p) {
		q = kstrdup(p, GFP_KERNEL);
		if (!q)
			return;
		if (ftrace_set_notrace(tr->ops, q, strlen(q), 0) < 0)
			pr_err("Failed to add %s to ftrace notrace filter\n", p);
		else
			ftrace_filter_param = true;
		kfree(q);
	}
}
#else
#define trace_boot_set_ftrace_filter(tr, node) do {} while (0)
#endif
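/*
 * Illustrative filter setup for the "ftrace.filters" / "ftrace.notraces"
 * keys parsed above. Note that the lookup is relative to the (instance)
 * node, so the full top-level paths are as below; the function globs are
 * examples only:
 *
 *	ftrace.ftrace.filters = "vfs_*"
 *	ftrace.ftrace.notraces = "vfs_fstat*"
 */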
static void __init
trace_boot_enable_tracer(struct trace_array *tr, struct xbc_node *node)
{
	const char *p;

	trace_boot_set_ftrace_filter(tr, node);

	p = xbc_node_find_value(node, "tracer", NULL);
	if (p && *p != '\0') {
		if (tracing_set_tracer(tr, p) < 0)
			pr_err("Failed to set given tracer: %s\n", p);
	}

	/* Since a tracer can free the snapshot buffer, allocate the snapshot here. */
	if (xbc_node_find_value(node, "alloc_snapshot", NULL)) {
		if (tracing_alloc_snapshot_instance(tr) < 0)
			pr_err("Failed to allocate snapshot buffer\n");
	}
}
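/*
 * Illustrative tracer selection for the keys parsed above; the tracer
 * name is an example, any registered tracer works, and alloc_snapshot
 * is a bare key with no value:
 *
 *	ftrace.tracer = function_graph
 *	ftrace.alloc_snapshot
 */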
static void __init
trace_boot_init_one_instance(struct trace_array *tr, struct xbc_node *node)
{
	trace_boot_set_instance_options(tr, node);
	trace_boot_init_events(tr, node);
	trace_boot_enable_events(tr, node);
	trace_boot_enable_tracer(tr, node);
}

static void __init
trace_boot_init_instances(struct xbc_node *node)
{
	struct xbc_node *inode;
	struct trace_array *tr;
	const char *p;

	node = xbc_node_find_subkey(node, "instance");
	if (!node)
		return;

	xbc_node_for_each_subkey(node, inode) {
		p = xbc_node_get_data(inode);
		if (!p || *p == '\0')
			continue;

		tr = trace_array_get_by_name(p);
		if (!tr) {
			pr_err("Failed to get trace instance %s\n", p);
			continue;
		}
		trace_boot_init_one_instance(tr, inode);
		trace_array_put(tr);
	}
}
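/*
 * Illustrative instance definition walked by the loop above; the
 * instance name and its settings are examples only, and any key shown
 * earlier (tracer, events, ftrace.filters, ...) may appear inside it:
 *
 *	ftrace.instance.foo {
 *		tracer = function
 *		ftrace.filters = "kmem_*"
 *	}
 */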
static int __init trace_boot_init(void)
{
	struct xbc_node *trace_node;
	struct trace_array *tr;

	trace_node = xbc_find_node("ftrace");
	if (!trace_node)
		return 0;

	tr = top_trace_array();
	if (!tr)
		return 0;

	/* Global trace array is also one instance */
	trace_boot_init_one_instance(tr, trace_node);
	trace_boot_init_instances(trace_node);

	disable_tracing_selftest("running boot-time tracing");

	return 0;
}
/*
 * Start tracing at the end of core-initcall, so that it starts tracing
 * from the beginning of postcore_initcall.
 */
core_initcall_sync(trace_boot_init);