Skip to content

Commit 250ab25

Browse files
Wesley Atwell authored and Steven Rostedt (Google) committed
tracing: Drain deferred trigger frees if kthread creation fails
Boot-time trigger registration can fail before the trigger-data cleanup kthread exists. Deferring those frees until late init is fine, but the post-boot fallback must still drain the deferred list if kthread creation never succeeds. Otherwise, boot-deferred nodes can accumulate on trigger_data_free_list, later frees fall back to synchronously freeing only the current object, and the older queued entries are leaked forever. To trigger this, add the following to the kernel command line: trace_event=sched_switch trace_trigger=sched_switch.traceon,sched_switch.traceon The second traceon trigger will fail and be freed. This triggers a NULL pointer dereference and crashes the kernel. Keep the deferred boot-time behavior, but when kthread creation fails, drain the whole queued list synchronously. Do the same in the late-init drain path so queued entries are not stranded there either. Cc: [email protected] Link: https://patch.msgid.link/[email protected] Fixes: 61d445a ("tracing: Add bulk garbage collection of freeing event_trigger_data") Signed-off-by: Wesley Atwell <[email protected]> Signed-off-by: Steven Rostedt (Google) <[email protected]>
1 parent 1f98857 commit 250ab25

1 file changed

Lines changed: 66 additions & 13 deletions

File tree

kernel/trace/trace_events_trigger.c

Lines changed: 66 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,39 @@ static struct task_struct *trigger_kthread;
2222
static struct llist_head trigger_data_free_list;
2323
static DEFINE_MUTEX(trigger_data_kthread_mutex);
2424

25+
static int trigger_kthread_fn(void *ignore);
26+
27+
static void trigger_create_kthread_locked(void)
28+
{
29+
lockdep_assert_held(&trigger_data_kthread_mutex);
30+
31+
if (!trigger_kthread) {
32+
struct task_struct *kthread;
33+
34+
kthread = kthread_create(trigger_kthread_fn, NULL,
35+
"trigger_data_free");
36+
if (!IS_ERR(kthread))
37+
WRITE_ONCE(trigger_kthread, kthread);
38+
}
39+
}
40+
41+
static void trigger_data_free_queued_locked(void)
42+
{
43+
struct event_trigger_data *data, *tmp;
44+
struct llist_node *llnodes;
45+
46+
lockdep_assert_held(&trigger_data_kthread_mutex);
47+
48+
llnodes = llist_del_all(&trigger_data_free_list);
49+
if (!llnodes)
50+
return;
51+
52+
tracepoint_synchronize_unregister();
53+
54+
llist_for_each_entry_safe(data, tmp, llnodes, llist)
55+
kfree(data);
56+
}
57+
2558
/* Bulk garbage collection of event_trigger_data elements */
2659
static int trigger_kthread_fn(void *ignore)
2760
{
@@ -56,30 +89,50 @@ void trigger_data_free(struct event_trigger_data *data)
5689
if (data->cmd_ops->set_filter)
5790
data->cmd_ops->set_filter(NULL, data, NULL);
5891

92+
/*
93+
* Boot-time trigger registration can fail before kthread creation
94+
* works. Keep the deferred-free semantics during boot and let late
95+
* init start the kthread to drain the list.
96+
*/
97+
if (system_state == SYSTEM_BOOTING && !trigger_kthread) {
98+
llist_add(&data->llist, &trigger_data_free_list);
99+
return;
100+
}
101+
59102
if (unlikely(!trigger_kthread)) {
60103
guard(mutex)(&trigger_data_kthread_mutex);
104+
105+
trigger_create_kthread_locked();
61106
/* Check again after taking mutex */
62107
if (!trigger_kthread) {
63-
struct task_struct *kthread;
64-
65-
kthread = kthread_create(trigger_kthread_fn, NULL,
66-
"trigger_data_free");
67-
if (!IS_ERR(kthread))
68-
WRITE_ONCE(trigger_kthread, kthread);
108+
llist_add(&data->llist, &trigger_data_free_list);
109+
/* Drain the queued frees synchronously if creation failed. */
110+
trigger_data_free_queued_locked();
111+
return;
69112
}
70113
}
71114

72-
if (!trigger_kthread) {
73-
/* Do it the slow way */
74-
tracepoint_synchronize_unregister();
75-
kfree(data);
76-
return;
77-
}
78-
79115
llist_add(&data->llist, &trigger_data_free_list);
80116
wake_up_process(trigger_kthread);
81117
}
82118

119+
/*
 * Late-init drain of trigger data deferred during boot.  Boot-time
 * trigger registration failures queue their data on
 * trigger_data_free_list before the kthread can exist; start the
 * kthread here to consume them, or — if kthread creation fails —
 * free the backlog synchronously so nothing is stranded.
 */
static int __init trigger_data_free_init(void)
{
	guard(mutex)(&trigger_data_kthread_mutex);

	/* Nothing was deferred during boot; no kthread needed yet. */
	if (llist_empty(&trigger_data_free_list))
		return 0;

	trigger_create_kthread_locked();
	if (!trigger_kthread) {
		/* Creation failed: drain the queued frees right here. */
		trigger_data_free_queued_locked();
		return 0;
	}

	wake_up_process(trigger_kthread);
	return 0;
}
late_initcall(trigger_data_free_init);
135+
83136
static inline void data_ops_trigger(struct event_trigger_data *data,
84137
struct trace_buffer *buffer, void *rec,
85138
struct ring_buffer_event *event)

0 commit comments

Comments
 (0)