Skip to content

Commit 4b9ce67

Browse files
author
Peter Zijlstra
committed
perf: Make sure to use pmu_ctx->pmu for groups
Oliver reported that x86_pmu_del() ended up doing an out-of-bound memory access when group_sched_in() fails and needs to roll back. This *should* be handled by the transaction callbacks, but he found that when the group leader is a software event, the transaction handlers of the wrong PMU are used — despite the move_group case in perf_event_open() and group_sched_in() using pmu_ctx->pmu. Turns out, inherit uses event->pmu to clone the events, effectively undoing the move_group case for all inherited contexts. Fix this by also making inherit use pmu_ctx->pmu, ensuring all inherited counters end up in the same pmu context. Similarly, __perf_event_read() should equally use pmu_ctx->pmu for the group case. Fixes: bd27568 ("perf: Rewrite core context handling") Reported-by: Oliver Rosenberg <[email protected]> Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Reviewed-by: Ian Rogers <[email protected]> Link: https://patch.msgid.link/[email protected]
1 parent f1cac6a commit 4b9ce67

1 file changed

Lines changed: 8 additions & 11 deletions

File tree

kernel/events/core.c

Lines changed: 8 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -4813,7 +4813,7 @@ static void __perf_event_read(void *info)
48134813
struct perf_event *sub, *event = data->event;
48144814
struct perf_event_context *ctx = event->ctx;
48154815
struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
4816-
struct pmu *pmu = event->pmu;
4816+
struct pmu *pmu;
48174817

48184818
/*
48194819
* If this is a task context, we need to check whether it is
@@ -4825,33 +4825,30 @@ static void __perf_event_read(void *info)
48254825
if (ctx->task && cpuctx->task_ctx != ctx)
48264826
return;
48274827

4828-
raw_spin_lock(&ctx->lock);
4828+
guard(raw_spinlock)(&ctx->lock);
48294829
ctx_time_update_event(ctx, event);
48304830

48314831
perf_event_update_time(event);
48324832
if (data->group)
48334833
perf_event_update_sibling_time(event);
48344834

48354835
if (event->state != PERF_EVENT_STATE_ACTIVE)
4836-
goto unlock;
4836+
return;
48374837

48384838
if (!data->group) {
4839-
pmu->read(event);
4839+
perf_pmu_read(event);
48404840
data->ret = 0;
4841-
goto unlock;
4841+
return;
48424842
}
48434843

4844+
pmu = event->pmu_ctx->pmu;
48444845
pmu->start_txn(pmu, PERF_PMU_TXN_READ);
48454846

4846-
pmu->read(event);
4847-
4847+
perf_pmu_read(event);
48484848
for_each_sibling_event(sub, event)
48494849
perf_pmu_read(sub);
48504850

48514851
data->ret = pmu->commit_txn(pmu);
4852-
4853-
unlock:
4854-
raw_spin_unlock(&ctx->lock);
48554852
}
48564853

48574854
static inline u64 perf_event_count(struct perf_event *event, bool self)
@@ -14744,7 +14741,7 @@ inherit_event(struct perf_event *parent_event,
1474414741
get_ctx(child_ctx);
1474514742
child_event->ctx = child_ctx;
1474614743

14747-
pmu_ctx = find_get_pmu_context(child_event->pmu, child_ctx, child_event);
14744+
pmu_ctx = find_get_pmu_context(parent_event->pmu_ctx->pmu, child_ctx, child_event);
1474814745
if (IS_ERR(pmu_ctx)) {
1474914746
free_event(child_event);
1475014747
return ERR_CAST(pmu_ctx);

0 commit comments

Comments
 (0)