Merge tag 'trace-v5.2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing updates from Steven Rostedt:
 "The major changes in this tracing update includes:

   - Removal of non-DYNAMIC_FTRACE from 32bit x86

   - Removal of mcount support from x86

   - Emulating a call from int3 on x86_64, fixes live kernel patching

   - Consolidated Tracing Error logs file

  Minor updates:

   - Removal of klp_check_compiler_support()

   - kdb ftrace dumping output changes

   - Accessing and creating ftrace instances from inside the kernel

   - Clean up of #define if macro

   - Introduction of TRACE_EVENT_NOP() to disable trace events based on
     config options (see the sketch below)

  And other minor fixes and clean ups"
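
The TRACE_EVENT_NOP() item above is the hook for compiling an event out when
its config option is off. A minimal sketch, assuming TRACE_EVENT_NOP() takes
the same six arguments as TRACE_EVENT() and turns trace_<name>() into an empty
stub; CONFIG_FOO_TRACE and the foo_wakeup event are hypothetical, not part of
this series:

    /*
     * Sketch only, in a hypothetical trace events header.  When
     * CONFIG_FOO_TRACE is off, the alias picks TRACE_EVENT_NOP(), so
     * trace_foo_wakeup() compiles to nothing and the event never shows
     * up in tracefs.
     */
    #ifdef CONFIG_FOO_TRACE
    #define TRACE_EVENT_FOO TRACE_EVENT
    #else
    #define TRACE_EVENT_FOO TRACE_EVENT_NOP
    #endif

    TRACE_EVENT_FOO(foo_wakeup,

        TP_PROTO(int cpu, int prio),

        TP_ARGS(cpu, prio),

        TP_STRUCT__entry(
            __field(int, cpu)
            __field(int, prio)
        ),

        TP_fast_assign(
            __entry->cpu  = cpu;
            __entry->prio = prio;
        ),

        TP_printk("cpu=%d prio=%d", __entry->cpu, __entry->prio)
    );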

* tag 'trace-v5.2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (44 commits)
  x86: Hide the int3_emulate_call/jmp functions from UML
  livepatch: Remove klp_check_compiler_support()
  ftrace/x86: Remove mcount support
  ftrace/x86_32: Remove support for non DYNAMIC_FTRACE
  tracing: Simplify "if" macro code
  tracing: Fix documentation about disabling options using trace_options
  tracing: Replace kzalloc with kcalloc
  tracing: Fix partial reading of trace event's id file
  tracing: Allow RCU to run between postponed startup tests
  tracing: Fix white space issues in parse_pred() function
  tracing: Eliminate const char[] auto variables
  ring-buffer: Fix mispelling of Calculate
  tracing: probeevent: Fix to make the type of $comm string
  tracing: probeevent: Do not accumulate on ret variable
  tracing: uprobes: Re-enable $comm support for uprobe events
  ftrace/x86_64: Emulate call function while updating in breakpoint handler
  x86_64: Allow breakpoints to emulate call instructions
  x86_64: Add gap to int3 to allow for call emulation
  tracing: kdb: Allow ftdump to skip all but the last few entries
  tracing: Add trace_total_entries() / trace_total_entries_cpu()
  ...
torvalds committed May 15, 2019
2 parents 2bbacd1 + 693713c commit d2d8b14
Showing 44 changed files with 1,345 additions and 651 deletions.
31 changes: 31 additions & 0 deletions Documentation/trace/ftrace.rst
@@ -765,6 +765,37 @@ Here is the list of current tracers that may be configured.
tracers from tracing simply echo "nop" into
current_tracer.

Error conditions
----------------

For most ftrace commands, failure modes are obvious and communicated
using standard return codes.

For other more involved commands, extended error information may be
available via the tracing/error_log file. For the commands that
support it, reading the tracing/error_log file after an error will
display more detailed information about what went wrong, if
information is available. The tracing/error_log file is a circular
error log displaying a small number (currently, 8) of ftrace errors
for the last (8) failed commands.

The extended error information and usage takes the form shown in
this example::

# echo xxx > /sys/kernel/debug/tracing/events/sched/sched_wakeup/trigger
echo: write error: Invalid argument

# cat /sys/kernel/debug/tracing/error_log
[ 5348.887237] location: error: Couldn't yyy: zzz
Command: xxx
^
[ 7517.023364] location: error: Bad rrr: sss
Command: ppp qqq
^

To clear the error log, echo the empty string into it::

# echo > /sys/kernel/debug/tracing/error_log

Examples of using the tracer
----------------------------
16 changes: 2 additions & 14 deletions Documentation/trace/histogram.rst
@@ -199,20 +199,8 @@ Extended error information

For some error conditions encountered when invoking a hist trigger
command, extended error information is available via the
corresponding event's 'hist' file. Reading the hist file after an
error will display more detailed information about what went wrong,
if information is available. This extended error information will
be available until the next hist trigger command for that event.

If available for a given error condition, the extended error
information and usage takes the following form::

# echo xxx > /sys/kernel/debug/tracing/events/sched/sched_wakeup/trigger
echo: write error: Invalid argument

# cat /sys/kernel/debug/tracing/events/sched/sched_wakeup/hist
ERROR: Couldn't yyy: zzz
Last command: xxx
tracing/error_log file. See Error Conditions in
:file:`Documentation/trace/ftrace.rst` for details.

6.2 'hist' trigger examples
---------------------------
1 change: 0 additions & 1 deletion arch/nds32/kernel/ftrace.c
@@ -7,7 +7,6 @@
#ifndef CONFIG_DYNAMIC_FTRACE
extern void (*ftrace_trace_function)(unsigned long, unsigned long,
struct ftrace_ops*, struct pt_regs*);
extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);
extern void ftrace_graph_caller(void);

noinline void __naked ftrace_stub(unsigned long ip, unsigned long parent_ip,
1 change: 0 additions & 1 deletion arch/parisc/kernel/ftrace.c
@@ -51,7 +51,6 @@ void notrace __hot ftrace_function_trampoline(unsigned long parent,
unsigned long org_sp_gr3)
{
extern ftrace_func_t ftrace_trace_function; /* depends on CONFIG_DYNAMIC_FTRACE */
extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);

if (ftrace_trace_function != ftrace_stub) {
/* struct ftrace_ops *op, struct pt_regs *regs); */
5 changes: 0 additions & 5 deletions arch/powerpc/include/asm/livepatch.h
@@ -24,11 +24,6 @@
#include <linux/sched/task_stack.h>

#ifdef CONFIG_LIVEPATCH
static inline int klp_check_compiler_support(void)
{
return 0;
}

static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
regs->nip = ip;
5 changes: 0 additions & 5 deletions arch/s390/include/asm/livepatch.h
@@ -13,11 +13,6 @@

#include <asm/ptrace.h>

static inline int klp_check_compiler_support(void)
{
return 0;
}

static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
regs->psw.addr = ip;
11 changes: 11 additions & 0 deletions arch/x86/Kconfig
@@ -31,6 +31,17 @@ config X86_64
select SWIOTLB
select ARCH_HAS_SYSCALL_WRAPPER

config FORCE_DYNAMIC_FTRACE
def_bool y
depends on X86_32
depends on FUNCTION_TRACER
select DYNAMIC_FTRACE
help
We keep the static function tracing (!DYNAMIC_FTRACE) around
in order to test the non static function tracing in the
generic code, as other architectures still use it. But we
only need to keep it around for x86_64. No need to keep it
for x86_32. For x86_32, force DYNAMIC_FTRACE.
#
# Arch settings
#
18 changes: 16 additions & 2 deletions arch/x86/entry/entry_64.S
@@ -878,7 +878,7 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
* @paranoid == 2 is special: the stub will never switch stacks. This is for
* #DF: if the thread stack is somehow unusable, we'll still get a useful OOPS.
*/
.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 ist_offset=0
.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 ist_offset=0 create_gap=0
ENTRY(\sym)
UNWIND_HINT_IRET_REGS offset=\has_error_code*8

@@ -898,6 +898,20 @@ ENTRY(\sym)
jnz .Lfrom_usermode_switch_stack_\@
.endif

.if \create_gap == 1
/*
* If coming from kernel space, create a 6-word gap to allow the
* int3 handler to emulate a call instruction.
*/
testb $3, CS-ORIG_RAX(%rsp)
jnz .Lfrom_usermode_no_gap_\@
.rept 6
pushq 5*8(%rsp)
.endr
UNWIND_HINT_IRET_REGS offset=8
.Lfrom_usermode_no_gap_\@:
.endif

.if \paranoid
call paranoid_entry
.else
@@ -1129,7 +1143,7 @@ apicinterrupt3 HYPERV_STIMER0_VECTOR \
#endif /* CONFIG_HYPERV */

idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=IST_INDEX_DB ist_offset=DB_STACK_OFFSET
idtentry int3 do_int3 has_error_code=0
idtentry int3 do_int3 has_error_code=0 create_gap=1
idtentry stack_segment do_stack_segment has_error_code=1

#ifdef CONFIG_XEN_PV
8 changes: 3 additions & 5 deletions arch/x86/include/asm/ftrace.h
@@ -3,12 +3,10 @@
#define _ASM_X86_FTRACE_H

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CC_USING_FENTRY
# define MCOUNT_ADDR ((unsigned long)(__fentry__))
#else
# define MCOUNT_ADDR ((unsigned long)(mcount))
# define HAVE_FUNCTION_GRAPH_FP_TEST
#ifndef CC_USING_FENTRY
# error Compiler does not support fentry?
#endif
# define MCOUNT_ADDR ((unsigned long)(__fentry__))
#define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */

#ifdef CONFIG_DYNAMIC_FTRACE
8 changes: 0 additions & 8 deletions arch/x86/include/asm/livepatch.h
@@ -24,14 +24,6 @@
#include <asm/setup.h>
#include <linux/ftrace.h>

static inline int klp_check_compiler_support(void)
{
#ifndef CC_USING_FENTRY
return 1;
#endif
return 0;
}

static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
regs->ip = ip;
30 changes: 30 additions & 0 deletions arch/x86/include/asm/text-patching.h
@@ -42,4 +42,34 @@ extern int after_bootmem;
extern __ro_after_init struct mm_struct *poking_mm;
extern __ro_after_init unsigned long poking_addr;

#ifndef CONFIG_UML_X86
static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
{
regs->ip = ip;
}

#define INT3_INSN_SIZE 1
#define CALL_INSN_SIZE 5

#ifdef CONFIG_X86_64
static inline void int3_emulate_push(struct pt_regs *regs, unsigned long val)
{
/*
* The int3 handler in entry_64.S adds a gap between the
* stack where the break point happened, and the saving of
* pt_regs. We can extend the original stack because of
* this gap. See the idtentry macro's create_gap option.
*/
regs->sp -= sizeof(unsigned long);
*(unsigned long *)regs->sp = val;
}

static inline void int3_emulate_call(struct pt_regs *regs, unsigned long func)
{
int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
int3_emulate_jmp(regs, func);
}
#endif /* CONFIG_X86_64 */
#endif /* !CONFIG_UML_X86 */

#endif /* _ASM_X86_TEXT_PATCHING_H */
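
These helpers are what the breakpoint path uses to fake a call on x86_64: push
the return address that the original 5-byte call would have pushed (into the
gap created by the idtentry create_gap option), then resume at the new target.
A minimal sketch of a caller, assuming it runs from the int3 path with the
saved pt_regs; example_patch_site, example_new_target and the ownership check
are hypothetical, and the real user is ftrace_int3_handler() in the next file:

    #include <asm/ptrace.h>
    #include <asm/text-patching.h>

    static unsigned long example_patch_site;  /* address of the patched call site (hypothetical) */
    static unsigned long example_new_target;  /* function the call should now reach (hypothetical) */

    static int example_int3_handler(struct pt_regs *regs)
    {
            /* regs->ip points just past the 1-byte int3; back up to the call site. */
            unsigned long ip = regs->ip - INT3_INSN_SIZE;

            if (ip != example_patch_site)
                    return 0;       /* not our breakpoint: let other handlers look at it */

            /*
             * int3_emulate_call() pushes the address of the instruction
             * following the original 5-byte call and jumps to the new
             * target, mimicking what the rewritten call will do once the
             * text patch is complete.
             */
            int3_emulate_call(regs, example_new_target);
            return 1;
    }
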
32 changes: 27 additions & 5 deletions arch/x86/kernel/ftrace.c
@@ -29,6 +29,7 @@
#include <asm/kprobes.h>
#include <asm/ftrace.h>
#include <asm/nops.h>
#include <asm/text-patching.h>

#ifdef CONFIG_DYNAMIC_FTRACE

@@ -231,6 +232,7 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
}

static unsigned long ftrace_update_func;
static unsigned long ftrace_update_func_call;

static int update_ftrace_func(unsigned long ip, void *new)
{
@@ -259,6 +261,8 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
unsigned char *new;
int ret;

ftrace_update_func_call = (unsigned long)func;

new = ftrace_call_replace(ip, (unsigned long)func);
ret = update_ftrace_func(ip, new);

@@ -294,13 +298,28 @@ int ftrace_int3_handler(struct pt_regs *regs)
if (WARN_ON_ONCE(!regs))
return 0;

ip = regs->ip - 1;
if (!ftrace_location(ip) && !is_ftrace_caller(ip))
return 0;
ip = regs->ip - INT3_INSN_SIZE;

regs->ip += MCOUNT_INSN_SIZE - 1;
#ifdef CONFIG_X86_64
if (ftrace_location(ip)) {
int3_emulate_call(regs, (unsigned long)ftrace_regs_caller);
return 1;
} else if (is_ftrace_caller(ip)) {
if (!ftrace_update_func_call) {
int3_emulate_jmp(regs, ip + CALL_INSN_SIZE);
return 1;
}
int3_emulate_call(regs, ftrace_update_func_call);
return 1;
}
#else
if (ftrace_location(ip) || is_ftrace_caller(ip)) {
int3_emulate_jmp(regs, ip + CALL_INSN_SIZE);
return 1;
}
#endif

return 1;
return 0;
}
NOKPROBE_SYMBOL(ftrace_int3_handler);

@@ -865,6 +884,8 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)

func = ftrace_ops_get_func(ops);

ftrace_update_func_call = (unsigned long)func;

/* Do a safe modify in case the trampoline is executing */
new = ftrace_call_replace(ip, (unsigned long)func);
ret = update_ftrace_func(ip, new);
@@ -966,6 +987,7 @@ static int ftrace_mod_jmp(unsigned long ip, void *func)
{
unsigned char *new;

ftrace_update_func_call = 0UL;
new = ftrace_jmp_replace(ip, (unsigned long)func);

return update_ftrace_func(ip, new);
