Skip to content

Commit cbfffcc

Browse files
committed
Merge tag 'trace-v7.0-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace
Pull tracing fixes from Steven Rostedt: - Fix potential deadlock in osnoise and hotplug The interface_lock can be taken by an osnoise thread and the CPU shutdown logic of osnoise can wait for this thread to finish. But cpus_read_lock() can also be taken while holding the interface_lock. This produces a circular lock dependency and can cause a deadlock. Swap the ordering of cpus_read_lock() and the interface_lock to have interface_lock taken within the cpus_read_lock() context to prevent this circular dependency. - Fix freeing of event triggers in early boot up If the same trigger is added on the kernel command line, the second one will fail to be applied and the trigger created will be freed. This calls into the deferred logic and creates a kernel thread to do the freeing. But the command line logic is called before kernel threads can be created and this leads to a NULL pointer dereference. Delay freeing event triggers until late init. * tag 'trace-v7.0-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace: tracing: Drain deferred trigger frees if kthread creation fails tracing: Fix potential deadlock in cpu hotplug with osnoise
2 parents e522b75 + 250ab25 commit cbfffcc

2 files changed

Lines changed: 71 additions & 18 deletions

File tree

kernel/trace/trace_events_trigger.c

Lines changed: 66 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,39 @@ static struct task_struct *trigger_kthread;
2222
static struct llist_head trigger_data_free_list;
2323
static DEFINE_MUTEX(trigger_data_kthread_mutex);
2424

25+
static int trigger_kthread_fn(void *ignore);
26+
27+
static void trigger_create_kthread_locked(void)
28+
{
29+
lockdep_assert_held(&trigger_data_kthread_mutex);
30+
31+
if (!trigger_kthread) {
32+
struct task_struct *kthread;
33+
34+
kthread = kthread_create(trigger_kthread_fn, NULL,
35+
"trigger_data_free");
36+
if (!IS_ERR(kthread))
37+
WRITE_ONCE(trigger_kthread, kthread);
38+
}
39+
}
40+
41+
static void trigger_data_free_queued_locked(void)
42+
{
43+
struct event_trigger_data *data, *tmp;
44+
struct llist_node *llnodes;
45+
46+
lockdep_assert_held(&trigger_data_kthread_mutex);
47+
48+
llnodes = llist_del_all(&trigger_data_free_list);
49+
if (!llnodes)
50+
return;
51+
52+
tracepoint_synchronize_unregister();
53+
54+
llist_for_each_entry_safe(data, tmp, llnodes, llist)
55+
kfree(data);
56+
}
57+
2558
/* Bulk garbage collection of event_trigger_data elements */
2659
static int trigger_kthread_fn(void *ignore)
2760
{
@@ -56,30 +89,50 @@ void trigger_data_free(struct event_trigger_data *data)
5689
if (data->cmd_ops->set_filter)
5790
data->cmd_ops->set_filter(NULL, data, NULL);
5891

92+
/*
93+
* Boot-time trigger registration can fail before kthread creation
94+
* works. Keep the deferred-free semantics during boot and let late
95+
* init start the kthread to drain the list.
96+
*/
97+
if (system_state == SYSTEM_BOOTING && !trigger_kthread) {
98+
llist_add(&data->llist, &trigger_data_free_list);
99+
return;
100+
}
101+
59102
if (unlikely(!trigger_kthread)) {
60103
guard(mutex)(&trigger_data_kthread_mutex);
104+
105+
trigger_create_kthread_locked();
61106
/* Check again after taking mutex */
62107
if (!trigger_kthread) {
63-
struct task_struct *kthread;
64-
65-
kthread = kthread_create(trigger_kthread_fn, NULL,
66-
"trigger_data_free");
67-
if (!IS_ERR(kthread))
68-
WRITE_ONCE(trigger_kthread, kthread);
108+
llist_add(&data->llist, &trigger_data_free_list);
109+
/* Drain the queued frees synchronously if creation failed. */
110+
trigger_data_free_queued_locked();
111+
return;
69112
}
70113
}
71114

72-
if (!trigger_kthread) {
73-
/* Do it the slow way */
74-
tracepoint_synchronize_unregister();
75-
kfree(data);
76-
return;
77-
}
78-
79115
llist_add(&data->llist, &trigger_data_free_list);
80116
wake_up_process(trigger_kthread);
81117
}
82118

119+
static int __init trigger_data_free_init(void)
120+
{
121+
guard(mutex)(&trigger_data_kthread_mutex);
122+
123+
if (llist_empty(&trigger_data_free_list))
124+
return 0;
125+
126+
trigger_create_kthread_locked();
127+
if (trigger_kthread)
128+
wake_up_process(trigger_kthread);
129+
else
130+
trigger_data_free_queued_locked();
131+
132+
return 0;
133+
}
134+
late_initcall(trigger_data_free_init);
135+
83136
static inline void data_ops_trigger(struct event_trigger_data *data,
84137
struct trace_buffer *buffer, void *rec,
85138
struct ring_buffer_event *event)

kernel/trace/trace_osnoise.c

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -2073,8 +2073,8 @@ static void osnoise_hotplug_workfn(struct work_struct *dummy)
20732073
if (!osnoise_has_registered_instances())
20742074
return;
20752075

2076-
guard(mutex)(&interface_lock);
20772076
guard(cpus_read_lock)();
2077+
guard(mutex)(&interface_lock);
20782078

20792079
if (!cpu_online(cpu))
20802080
return;
@@ -2237,11 +2237,11 @@ static ssize_t osnoise_options_write(struct file *filp, const char __user *ubuf,
22372237
if (running)
22382238
stop_per_cpu_kthreads();
22392239

2240-
mutex_lock(&interface_lock);
22412240
/*
22422241
* avoid CPU hotplug operations that might read options.
22432242
*/
22442243
cpus_read_lock();
2244+
mutex_lock(&interface_lock);
22452245

22462246
retval = cnt;
22472247

@@ -2257,8 +2257,8 @@ static ssize_t osnoise_options_write(struct file *filp, const char __user *ubuf,
22572257
clear_bit(option, &osnoise_options);
22582258
}
22592259

2260-
cpus_read_unlock();
22612260
mutex_unlock(&interface_lock);
2261+
cpus_read_unlock();
22622262

22632263
if (running)
22642264
start_per_cpu_kthreads();
@@ -2345,16 +2345,16 @@ osnoise_cpus_write(struct file *filp, const char __user *ubuf, size_t count,
23452345
if (running)
23462346
stop_per_cpu_kthreads();
23472347

2348-
mutex_lock(&interface_lock);
23492348
/*
23502349
* osnoise_cpumask is read by CPU hotplug operations.
23512350
*/
23522351
cpus_read_lock();
2352+
mutex_lock(&interface_lock);
23532353

23542354
cpumask_copy(&osnoise_cpumask, osnoise_cpumask_new);
23552355

2356-
cpus_read_unlock();
23572356
mutex_unlock(&interface_lock);
2357+
cpus_read_unlock();
23582358

23592359
if (running)
23602360
start_per_cpu_kthreads();

0 commit comments

Comments (0)