diff --git a/init/init_task.c b/init/init_task.c
index 5aebe3be4d7cd65ce1bed995c76b9293fea00b2d..994ffe018120859569b99c58b18f5df7d5550fdf 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -168,7 +168,8 @@ struct task_struct init_task
 	.lockdep_recursion = 0,
 #endif
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	.ret_stack	= NULL,
+	.ret_stack		= NULL,
+	.tracing_graph_pause	= ATOMIC_INIT(0),
 #endif
 #if defined(CONFIG_TRACING) && defined(CONFIG_PREEMPT)
 	.trace_recursion = 0,
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 85fe273cadaea130c83ca75fcd8b6ab256d6ef00..e214ef5003e461555362da423bc81bb1e212577a 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -6867,7 +6867,6 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
 		}
 
 		if (t->ret_stack == NULL) {
-			atomic_set(&t->tracing_graph_pause, 0);
 			atomic_set(&t->trace_overrun, 0);
 			t->curr_ret_stack = -1;
 			t->curr_ret_depth = -1;
@@ -7080,7 +7079,6 @@ static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
 static void
 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
 {
-	atomic_set(&t->tracing_graph_pause, 0);
 	atomic_set(&t->trace_overrun, 0);
 	t->ftrace_timestamp = 0;
 	/* make curr_ret_stack visible before we add the ret_stack */