Skip to content

Commit efe951d

Browse files
Peter Zijlstra authored and Ingo Molnar committed
perf/x86: Fix perf,x86,cpuhp deadlock
More lockdep gifts, a 5-way lockup race:

    perf_event_create_kernel_counter()
      perf_event_alloc()
        perf_try_init_event()
          x86_pmu_event_init()
            __x86_pmu_event_init()
              x86_reserve_hardware()
    #0          mutex_lock(&pmc_reserve_mutex);
                reserve_ds_buffer()
    #1            get_online_cpus()

    perf_event_release_kernel()
      _free_event()
        hw_perf_event_destroy()
          x86_release_hardware()
    #0        mutex_lock(&pmc_reserve_mutex)
              release_ds_buffer()
    #1          get_online_cpus()

    #1  do_cpu_up()
          perf_event_init_cpu()
    #2      mutex_lock(&pmus_lock)
    #3      mutex_lock(&ctx->mutex)

    sys_perf_event_open()
      mutex_lock_double()
    #3    mutex_lock(ctx->mutex)
    #4    mutex_lock_nested(ctx->mutex, 1);
          perf_try_init_event()
    #4      mutex_lock_nested(ctx->mutex, 1)
              x86_pmu_event_init()
                intel_pmu_hw_config()
                  x86_add_exclusive()
    #0              mutex_lock(&pmc_reserve_mutex)

Fix it by using ordering constructs instead of locking.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
1 parent 0c7296c commit efe951d

File tree

1 file changed

+18
-15
lines changed
  • arch/x86/events/intel

1 file changed

+18
-15
lines changed

arch/x86/events/intel/ds.c

+18-15
Original file line number | Diff line number | Diff line change
@@ -372,10 +372,9 @@ static int alloc_pebs_buffer(int cpu)
372372
static void release_pebs_buffer(int cpu)
373373
{
374374
struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
375-
struct debug_store *ds = hwev->ds;
376375
void *cea;
377376

378-
if (!ds || !x86_pmu.pebs)
377+
if (!x86_pmu.pebs)
379378
return;
380379

381380
kfree(per_cpu(insn_buffer, cpu));
@@ -384,7 +383,6 @@ static void release_pebs_buffer(int cpu)
384383
/* Clear the fixmap */
385384
cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer;
386385
ds_clear_cea(cea, x86_pmu.pebs_buffer_size);
387-
ds->pebs_buffer_base = 0;
388386
dsfree_pages(hwev->ds_pebs_vaddr, x86_pmu.pebs_buffer_size);
389387
hwev->ds_pebs_vaddr = NULL;
390388
}
@@ -419,16 +417,14 @@ static int alloc_bts_buffer(int cpu)
419417
static void release_bts_buffer(int cpu)
420418
{
421419
struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
422-
struct debug_store *ds = hwev->ds;
423420
void *cea;
424421

425-
if (!ds || !x86_pmu.bts)
422+
if (!x86_pmu.bts)
426423
return;
427424

428425
/* Clear the fixmap */
429426
cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer;
430427
ds_clear_cea(cea, BTS_BUFFER_SIZE);
431-
ds->bts_buffer_base = 0;
432428
dsfree_pages(hwev->ds_bts_vaddr, BTS_BUFFER_SIZE);
433429
hwev->ds_bts_vaddr = NULL;
434430
}
@@ -454,16 +450,22 @@ void release_ds_buffers(void)
454450
if (!x86_pmu.bts && !x86_pmu.pebs)
455451
return;
456452

457-
get_online_cpus();
458-
for_each_online_cpu(cpu)
453+
for_each_possible_cpu(cpu)
454+
release_ds_buffer(cpu);
455+
456+
for_each_possible_cpu(cpu) {
457+
/*
458+
* Again, ignore errors from offline CPUs, they will no longer
459+
* observe cpu_hw_events.ds and not program the DS_AREA when
460+
* they come up.
461+
*/
459462
fini_debug_store_on_cpu(cpu);
463+
}
460464

461465
for_each_possible_cpu(cpu) {
462466
release_pebs_buffer(cpu);
463467
release_bts_buffer(cpu);
464-
release_ds_buffer(cpu);
465468
}
466-
put_online_cpus();
467469
}
468470

469471
void reserve_ds_buffers(void)
@@ -483,8 +485,6 @@ void reserve_ds_buffers(void)
483485
if (!x86_pmu.pebs)
484486
pebs_err = 1;
485487

486-
get_online_cpus();
487-
488488
for_each_possible_cpu(cpu) {
489489
if (alloc_ds_buffer(cpu)) {
490490
bts_err = 1;
@@ -521,11 +521,14 @@ void reserve_ds_buffers(void)
521521
if (x86_pmu.pebs && !pebs_err)
522522
x86_pmu.pebs_active = 1;
523523

524-
for_each_online_cpu(cpu)
524+
for_each_possible_cpu(cpu) {
525+
/*
526+
* Ignores wrmsr_on_cpu() errors for offline CPUs they
527+
* will get this call through intel_pmu_cpu_starting().
528+
*/
525529
init_debug_store_on_cpu(cpu);
530+
}
526531
}
527-
528-
put_online_cpus();
529532
}
530533

531534
/*

0 commit comments

Comments
 (0)