#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

#ifdef CONFIG_FTRACE

#include <linux/linkage.h>
extern int ftrace_enabled;
|
|
|
|
extern int
|
|
|
|
ftrace_enable_sysctl(struct ctl_table *table, int write,
|
|
|
|
struct file *filp, void __user *buffer, size_t *lenp,
|
|
|
|
loff_t *ppos);
|
|
|
|
|
/*
 * Tracer callback type: invoked on function entry with the address of
 * the traced function (ip) and of its caller (parent_ip).
 */
typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);

/*
 * One registered tracer.  Registered ops are chained through 'next'
 * into a NULL-terminated singly linked list.
 */
struct ftrace_ops {
	ftrace_func_t	  func;	/* callback to run for each traced call */
	struct ftrace_ops *next;	/* next registered ops, or NULL */
};
/*
 * The ftrace_ops must be a static and should also
 * be read_mostly. These functions do modify read_mostly variables
 * so use them sparely. Never free an ftrace_op or modify the
 * next pointer after it has been registered. Even after unregistering
 * it, the next pointer may still be used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
/* Detach every registered tracer and reinstall the stub callback. */
void clear_ftrace_function(void);

/* No-op callback installed while no tracer is registered. */
extern void ftrace_stub(unsigned long a0, unsigned long a1);
/* Entry point emitted by the compiler's -pg profiling instrumentation. */
extern void mcount(void);
#else /* !CONFIG_FTRACE */
/* With tracing compiled out, the API collapses to no-ops. */
# define register_ftrace_function(ops) do { } while (0)
# define unregister_ftrace_function(ops) do { } while (0)
# define clear_ftrace_function(ops) do { } while (0)
#endif /* CONFIG_FTRACE */
#ifdef CONFIG_DYNAMIC_FTRACE
/* Hash table used to record each mcount call site exactly once. */
# define FTRACE_HASHBITS	10
# define FTRACE_HASHSIZE	(1<<FTRACE_HASHBITS)

/* Status bits for struct dyn_ftrace::flags. */
enum {
	FTRACE_FL_FAILED = (1<<0),	/* patching this call site failed */
};
ftrace: dynamic enabling/disabling of function calls
This patch adds a feature to dynamically replace the ftrace code
with the jmps to allow a kernel with ftrace configured to run
as fast as it can without it configured.
The way this works, is on bootup (if ftrace is enabled), a ftrace
function is registered to record the instruction pointer of all
places that call the function.
Later, if there's still any code to patch, a kthread is awoken
(rate limited to at most once a second) that performs a stop_machine,
and replaces all the code that was called with a jmp over the call
to ftrace. It only replaces what was found the previous time. Typically
the system reaches equilibrium quickly after bootup and there's no code
patching needed at all.
e.g.
call ftrace /* 5 bytes */
is replaced with
jmp 3f /* jmp is 2 bytes and we jump 3 forward */
3:
When we want to enable ftrace for function tracing, the IP recording
is removed, and stop_machine is called again to replace all the locations
of that were recorded back to the call of ftrace. When it is disabled,
we replace the code back to the jmp.
Allocation is done by the kthread. If the ftrace recording function is
called, and we don't have any record slots available, then we simply
skip that call. Once a second a new page (if needed) is allocated for
recording new ftrace function calls. A large batch is allocated at
boot up to get most of the calls there.
Because we do this via stop_machine, we don't have to worry about another
CPU executing a ftrace call as we modify it. But we do need to worry
about NMI's so all functions that might be called via nmi must be
annotated with notrace_nmi. When this code is configured in, the NMI code
will not call notrace.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
2008-05-13 03:20:42 +08:00
|
|
|
struct dyn_ftrace {
|
|
|
|
struct hlist_node node;
|
|
|
|
unsigned long ip;
|
2008-05-13 03:20:43 +08:00
|
|
|
unsigned long flags;
|
ftrace: dynamic enabling/disabling of function calls
This patch adds a feature to dynamically replace the ftrace code
with the jmps to allow a kernel with ftrace configured to run
as fast as it can without it configured.
The way this works, is on bootup (if ftrace is enabled), a ftrace
function is registered to record the instruction pointer of all
places that call the function.
Later, if there's still any code to patch, a kthread is awoken
(rate limited to at most once a second) that performs a stop_machine,
and replaces all the code that was called with a jmp over the call
to ftrace. It only replaces what was found the previous time. Typically
the system reaches equilibrium quickly after bootup and there's no code
patching needed at all.
e.g.
call ftrace /* 5 bytes */
is replaced with
jmp 3f /* jmp is 2 bytes and we jump 3 forward */
3:
When we want to enable ftrace for function tracing, the IP recording
is removed, and stop_machine is called again to replace all the locations
of that were recorded back to the call of ftrace. When it is disabled,
we replace the code back to the jmp.
Allocation is done by the kthread. If the ftrace recording function is
called, and we don't have any record slots available, then we simply
skip that call. Once a second a new page (if needed) is allocated for
recording new ftrace function calls. A large batch is allocated at
boot up to get most of the calls there.
Because we do this via stop_machine, we don't have to worry about another
CPU executing a ftrace call as we modify it. But we do need to worry
about NMI's so all functions that might be called via nmi must be
annotated with notrace_nmi. When this code is configured in, the NMI code
will not call notrace.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
2008-05-13 03:20:42 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
/* defined in arch */
|
2008-05-13 03:20:43 +08:00
|
|
|
extern int ftrace_ip_converted(unsigned long ip);
|
|
|
|
extern unsigned char *ftrace_nop_replace(void);
|
|
|
|
extern unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr);
|
2008-05-13 03:20:43 +08:00
|
|
|
extern int ftrace_dyn_arch_init(void *data);
|
|
|
|
extern int ftrace_mcount_set(unsigned long *data);
|
2008-05-13 03:20:43 +08:00
|
|
|
extern int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
|
|
|
|
unsigned char *new_code);
|
2008-05-13 03:20:43 +08:00
|
|
|
extern int ftrace_update_ftrace_func(ftrace_func_t func);
|
|
|
|
extern void ftrace_caller(void);
|
|
|
|
extern void ftrace_call(void);
|
|
|
|
extern void mcount_call(void);
|
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FRAME_POINTER
/* TODO: need to fix this for ARM */
# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
# define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
# define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
#else
/* Without frame pointers only the immediate caller is recoverable. */
# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
# define CALLER_ADDR1 0UL
# define CALLER_ADDR2 0UL
# define CALLER_ADDR3 0UL
# define CALLER_ADDR4 0UL
# define CALLER_ADDR5 0UL
#endif
/* irqs-off latency tracer hooks, called from the irq-flags tracing code. */
#ifdef CONFIG_IRQSOFF_TRACER
extern void notrace time_hardirqs_on(unsigned long a0, unsigned long a1);
extern void notrace time_hardirqs_off(unsigned long a0, unsigned long a1);
#else
# define time_hardirqs_on(a0, a1) do { } while (0)
# define time_hardirqs_off(a0, a1) do { } while (0)
#endif
/* preempt-off latency tracer hooks, called on preempt disable/enable. */
#ifdef CONFIG_PREEMPT_TRACER
extern void notrace trace_preempt_on(unsigned long a0, unsigned long a1);
extern void notrace trace_preempt_off(unsigned long a0, unsigned long a1);
#else
# define trace_preempt_on(a0, a1) do { } while (0)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif
#endif /* _LINUX_FTRACE_H */