PageRenderTime 53ms CodeModel.GetById 24ms RepoModel.GetById 1ms app.codeStats 0ms

/include/linux/ftrace.h

https://bitbucket.org/droidzone/supernova-kernel
C Header | 528 lines | 335 code | 82 blank | 111 comment | 0 complexity | 890331a84210a1b6c85be0279a2aea1a MD5 | raw file
Possible License(s): GPL-2.0, LGPL-2.0, AGPL-1.0
  1. #ifndef _LINUX_FTRACE_H
  2. #define _LINUX_FTRACE_H
  3. #include <linux/trace_clock.h>
  4. #include <linux/kallsyms.h>
  5. #include <linux/linkage.h>
  6. #include <linux/bitops.h>
  7. #include <linux/module.h>
  8. #include <linux/ktime.h>
  9. #include <linux/sched.h>
  10. #include <linux/types.h>
  11. #include <linux/init.h>
  12. #include <linux/fs.h>
  13. #include <asm/ftrace.h>
#ifdef CONFIG_FUNCTION_TRACER

/* Master on/off switch for the function tracer, toggled via sysctl. */
extern int ftrace_enabled;
extern int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp,
		     loff_t *ppos);

/* Signature of a trace callback: traced function ip and its caller. */
typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);

/*
 * One registered function-trace callback.  Registered ops are chained
 * through @next (see register_ftrace_function() below).
 */
struct ftrace_ops {
	ftrace_func_t		func;
	struct ftrace_ops	*next;
};

/* Non-zero stops callbacks from being called out of mcount. */
extern int function_trace_stop;

/*
 * Type of the current tracing.
 */
enum ftrace_tracing_type_t {
	FTRACE_TYPE_ENTER = 0,	/* Hook the call of the function */
	FTRACE_TYPE_RETURN,	/* Hook the return of the function */
};

/* Current tracing type, default is FTRACE_TYPE_ENTER */
extern enum ftrace_tracing_type_t ftrace_tracing_type;
/**
 * ftrace_stop - stop function tracer.
 *
 * A quick way to stop the function tracer.  Note this is an on/off
 * switch, it is not something that is recursive like preempt_disable.
 * This does not disable the calling of mcount, it only stops the
 * calling of functions from mcount.
 */
static inline void ftrace_stop(void)
{
	function_trace_stop = 1;
}
/**
 * ftrace_start - start the function tracer.
 *
 * This function is the inverse of ftrace_stop.  This does not enable
 * the function tracing if the function tracer is disabled.  This only
 * sets the function tracer flag to continue calling the functions
 * from mcount.
 */
static inline void ftrace_start(void)
{
	function_trace_stop = 0;
}
/*
 * The ftrace_ops must be static and should also
 * be read_mostly.  These functions do modify read_mostly variables
 * so use them sparingly.  Never free an ftrace_op or modify the
 * next pointer after it has been registered.  Even after unregistering
 * it, the next pointer may still be used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
void clear_ftrace_function(void);

/* Default no-op callback installed when no tracer is active. */
extern void ftrace_stub(unsigned long a0, unsigned long a1);
#else /* !CONFIG_FUNCTION_TRACER */

/*
 * (un)register_ftrace_function must be a macro since the ops parameter
 * must not be evaluated.
 */
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
static inline void clear_ftrace_function(void) { }
static inline void ftrace_kill(void) { }
static inline void ftrace_stop(void) { }
static inline void ftrace_start(void) { }
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_STACK_TRACER
/* Sysctl knob for the kernel stack-usage tracer. */
extern int stack_tracer_enabled;
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos);
#endif

/*
 * A named command usable in the function filter files; @func parses
 * @params and applies the command to the matched functions.
 */
struct ftrace_func_command {
	struct list_head	list;
	char			*name;
	int			(*func)(char *func, char *cmd,
					char *params, int enable);
};
#ifdef CONFIG_DYNAMIC_FTRACE

/* Arch hooks called before/after live code modification. */
int ftrace_arch_code_modify_prepare(void);
int ftrace_arch_code_modify_post_process(void);

struct seq_file;

/*
 * Callbacks of a function probe attached to selected call sites:
 * @func is invoked when a matched function is hit, @callback/@free
 * manage per-site @data, @print renders the probe in the filter file.
 */
struct ftrace_probe_ops {
	void			(*func)(unsigned long ip,
					unsigned long parent_ip,
					void **data);
	int			(*callback)(unsigned long ip, void **data);
	void			(*free)(void **data);
	int			(*print)(struct seq_file *m,
					 unsigned long ip,
					 struct ftrace_probe_ops *ops,
					 void *data);
};

extern int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
			       void *data);
extern void
unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				 void *data);
extern void
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
extern void unregister_ftrace_function_probe_all(char *glob);

/* Non-zero if [start, end) overlaps any mcount call-site record. */
extern int ftrace_text_reserved(void *start, void *end);

/* State flags kept in dyn_ftrace.flags for each mcount record. */
enum {
	FTRACE_FL_FREE		= (1 << 0),
	FTRACE_FL_FAILED	= (1 << 1),
	FTRACE_FL_FILTER	= (1 << 2),
	FTRACE_FL_ENABLED	= (1 << 3),
	FTRACE_FL_NOTRACE	= (1 << 4),
	FTRACE_FL_CONVERTED	= (1 << 5),
};
/*
 * One record per mcount call site.  The unions overlay free-list /
 * new-list linkage with the live fields, since a record is never on
 * a list while it is in use.
 */
struct dyn_ftrace {
	union {
		unsigned long		ip; /* address of mcount call-site */
		struct dyn_ftrace	*freelist;
	};
	union {
		unsigned long		flags;
		struct dyn_ftrace	*newlist;
	};
	struct dyn_arch_ftrace	arch;
};
int ftrace_force_update(void);
void ftrace_set_filter(unsigned char *buf, int len, int reset);
int register_ftrace_command(struct ftrace_func_command *cmd);
int unregister_ftrace_command(struct ftrace_func_command *cmd);

/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void *data);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_call(void);
extern void mcount_call(void);

/* Address call sites are patched to call; arch headers may override. */
#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
extern int ftrace_disable_ftrace_graph_caller(void);
#else
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif
/**
 * ftrace_make_nop - convert code into nop
 * @mod: module structure if called by module load initialization
 * @rec: the mcount call site record
 * @addr: the address that the call site should be calling
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_nop(struct module *mod,
			   struct dyn_ftrace *rec, unsigned long addr);

/**
 * ftrace_make_call - convert a nop call site into a call to addr
 * @rec: the mcount call site record
 * @addr: the address that the call site should call
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a nop
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);

/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);

extern int skip_trace(unsigned long ip);

extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
  210. #else
  211. static inline int skip_trace(unsigned long ip) { return 0; }
  212. static inline int ftrace_force_update(void) { return 0; }
  213. static inline void ftrace_set_filter(unsigned char *buf, int len, int reset)
  214. {
  215. }
  216. static inline void ftrace_disable_daemon(void) { }
  217. static inline void ftrace_enable_daemon(void) { }
  218. static inline void ftrace_release_mod(struct module *mod) {}
  219. static inline int register_ftrace_command(struct ftrace_func_command *cmd)
  220. {
  221. return -EINVAL;
  222. }
  223. static inline int unregister_ftrace_command(char *cmd_name)
  224. {
  225. return -EINVAL;
  226. }
  227. static inline int ftrace_text_reserved(void *start, void *end)
  228. {
  229. return 0;
  230. }
  231. #endif /* CONFIG_DYNAMIC_FTRACE */
/* totally disable ftrace - can not re-enable after this */
void ftrace_kill(void);

/*
 * Clear ftrace_enabled without any locking; compiles to a no-op when
 * the function tracer is not configured.
 */
static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = 0;
#endif
}

/*
 * Ftrace disable/restore without lock.  Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable/restore.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	int saved_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;
	return saved_ftrace_enabled;
#else
	return 0;
#endif
}

/* Restore the value previously returned by __ftrace_enabled_save(). */
static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = enabled;
#endif
}
/*
 * Return addresses up the call chain.  Without frame pointers only
 * level 0 is reliable, so the deeper levels degrade to 0UL.  An arch
 * may provide its own definitions via HAVE_ARCH_CALLER_ADDR.
 */
#ifndef HAVE_ARCH_CALLER_ADDR
# ifdef CONFIG_FRAME_POINTER
#  define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
#  define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
#  define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
#  define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
#  define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
#  define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
#  define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6))
# else
#  define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
#  define CALLER_ADDR1 0UL
#  define CALLER_ADDR2 0UL
#  define CALLER_ADDR3 0UL
#  define CALLER_ADDR4 0UL
#  define CALLER_ADDR5 0UL
#  define CALLER_ADDR6 0UL
# endif
#endif /* ifndef HAVE_ARCH_CALLER_ADDR */
/* Hooks for the irqs-off latency tracer. */
#ifdef CONFIG_IRQSOFF_TRACER
extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
#else
static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
#endif

/* Hooks for the preempt-off latency tracer. */
#ifdef CONFIG_PREEMPT_TRACER
extern void trace_preempt_on(unsigned long a0, unsigned long a1);
extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
static inline void trace_preempt_on(unsigned long a0, unsigned long a1) { }
static inline void trace_preempt_off(unsigned long a0, unsigned long a1) { }
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
#else
static inline void ftrace_init(void) { }
#endif
/*
 * Structure that defines an entry function trace.
 */
struct ftrace_graph_ent {
	unsigned long func; /* Current function */
	int depth;
};

/*
 * Structure that defines a return function trace.
 */
struct ftrace_graph_ret {
	unsigned long func; /* Current function */
	unsigned long long calltime;
	unsigned long long rettime;
	/* Number of functions that overran the depth limit for current task */
	unsigned long overrun;
	int depth;
};

/* Type of the callback handlers for tracing function graph */
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* for init task */
#define INIT_FTRACE_GRAPH		.ret_stack = NULL,

/*
 * Stack of return addresses for functions
 * of a thread.
 * Used in struct thread_info
 */
struct ftrace_ret_stack {
	unsigned long ret;
	unsigned long func;
	unsigned long long calltime;
	unsigned long long subtime;
	unsigned long fp;
};

/*
 * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
 * Defined in entry_32/64.S
 */
extern void return_to_handler(void);

extern int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer);

/*
 * Sometimes we don't want to trace a function with the function
 * graph tracer but we want it to keep being traced by the usual
 * function tracer if the function graph tracer is not configured.
 */
#define __notrace_funcgraph		notrace

/*
 * We want to know which function is an entrypoint of a hardirq.
 * That will help us to put a signal on output.
 */
#define __irq_entry __attribute__((__section__(".irqentry.text")))

/* Limits of hardirq entrypoints */
extern char __irqentry_text_start[];
extern char __irqentry_text_end[];

#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32
extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
				 trace_func_graph_ent_t entryfunc);

extern void ftrace_graph_stop(void);

/* The current handlers in use */
extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;

extern void unregister_ftrace_graph(void);

extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);

/* Index of the topmost entry on @t's return stack. */
static inline int task_curr_ret_stack(struct task_struct *t)
{
	return t->curr_ret_stack;
}

/* Suspend graph tracing for the current task (nests via counter). */
static inline void pause_graph_tracing(void)
{
	atomic_inc(&current->tracing_graph_pause);
}

/* Undo one pause_graph_tracing() for the current task. */
static inline void unpause_graph_tracing(void)
{
	atomic_dec(&current->tracing_graph_pause);
}
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */

#define __notrace_funcgraph
#define __irq_entry
#define INIT_FTRACE_GRAPH

static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }

/* Registration always fails when the graph tracer is not built in. */
static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
					trace_func_graph_ent_t entryfunc)
{
	return -1;
}
static inline void unregister_ftrace_graph(void) { }

static inline int task_curr_ret_stack(struct task_struct *tsk)
{
	return -1;
}

static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_TRACING

/* flags for current->trace */
enum {
	TSK_TRACE_FL_TRACE_BIT	= 0,
	TSK_TRACE_FL_GRAPH_BIT	= 1,
};
enum {
	TSK_TRACE_FL_TRACE	= 1 << TSK_TRACE_FL_TRACE_BIT,
	TSK_TRACE_FL_GRAPH	= 1 << TSK_TRACE_FL_GRAPH_BIT,
};

/* Mark @tsk as being traced by the function tracer. */
static inline void set_tsk_trace_trace(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

/* Clear the function-trace flag of @tsk. */
static inline void clear_tsk_trace_trace(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

/* Non-zero if @tsk is flagged for function tracing. */
static inline int test_tsk_trace_trace(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_TRACE;
}

/* Mark @tsk as being traced by the function graph tracer. */
static inline void set_tsk_trace_graph(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

/* Clear the graph-trace flag of @tsk. */
static inline void clear_tsk_trace_graph(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

/* Non-zero if @tsk is flagged for graph tracing. */
static inline int test_tsk_trace_graph(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_GRAPH;
}

enum ftrace_dump_mode;
/* What (if anything) to dump from the ring buffer on an oops. */
extern enum ftrace_dump_mode ftrace_dump_on_oops;

#ifdef CONFIG_PREEMPT
#define INIT_TRACE_RECURSION		.trace_recursion = 0,
#endif

#endif /* CONFIG_TRACING */
/* Empty fallback when CONFIG_TRACING/CONFIG_PREEMPT did not define it. */
#ifndef INIT_TRACE_RECURSION
#define INIT_TRACE_RECURSION
#endif

#ifdef CONFIG_FTRACE_SYSCALLS

/* Arch hook: address of the syscall-table entry for syscall @nr. */
unsigned long arch_syscall_addr(int nr);

#endif /* CONFIG_FTRACE_SYSCALLS */

#endif /* _LINUX_FTRACE_H */