@@ -372,10 +372,9 @@ static int alloc_pebs_buffer(int cpu)
 static void release_pebs_buffer(int cpu)
 {
 	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
-	struct debug_store *ds = hwev->ds;
 	void *cea;
 
-	if (!ds || !x86_pmu.pebs)
+	if (!x86_pmu.pebs)
 		return;
 
 	kfree(per_cpu(insn_buffer, cpu));
@@ -384,7 +383,6 @@ static void release_pebs_buffer(int cpu)
 	/* Clear the fixmap */
 	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer;
 	ds_clear_cea(cea, x86_pmu.pebs_buffer_size);
-	ds->pebs_buffer_base = 0;
 	dsfree_pages(hwev->ds_pebs_vaddr, x86_pmu.pebs_buffer_size);
 	hwev->ds_pebs_vaddr = NULL;
 }
@@ -419,16 +417,14 @@ static int alloc_bts_buffer(int cpu)
 static void release_bts_buffer(int cpu)
 {
 	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
-	struct debug_store *ds = hwev->ds;
 	void *cea;
 
-	if (!ds || !x86_pmu.bts)
+	if (!x86_pmu.bts)
 		return;
 
 	/* Clear the fixmap */
 	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer;
 	ds_clear_cea(cea, BTS_BUFFER_SIZE);
-	ds->bts_buffer_base = 0;
 	dsfree_pages(hwev->ds_bts_vaddr, BTS_BUFFER_SIZE);
 	hwev->ds_bts_vaddr = NULL;
 }
@@ -454,16 +450,22 @@ void release_ds_buffers(void)
 	if (!x86_pmu.bts && !x86_pmu.pebs)
 		return;
 
-	get_online_cpus();
-	for_each_online_cpu(cpu)
+	for_each_possible_cpu(cpu)
+		release_ds_buffer(cpu);
+
+	for_each_possible_cpu(cpu) {
+		/*
+		 * Again, ignore errors from offline CPUs, they will no longer
+		 * observe cpu_hw_events.ds and not program the DS_AREA when
+		 * they come up.
+		 */
 		fini_debug_store_on_cpu(cpu);
+	}
 
 	for_each_possible_cpu(cpu) {
 		release_pebs_buffer(cpu);
 		release_bts_buffer(cpu);
-		release_ds_buffer(cpu);
 	}
-	put_online_cpus();
 }
 
 void reserve_ds_buffers(void)
@@ -483,8 +485,6 @@ void reserve_ds_buffers(void)
 	if (!x86_pmu.pebs)
 		pebs_err = 1;
 
-	get_online_cpus();
-
 	for_each_possible_cpu(cpu) {
 		if (alloc_ds_buffer(cpu)) {
 			bts_err = 1;
@@ -521,11 +521,14 @@ void reserve_ds_buffers(void)
 		if (x86_pmu.pebs && !pebs_err)
 			x86_pmu.pebs_active = 1;
 
-		for_each_online_cpu(cpu)
+		for_each_possible_cpu(cpu) {
+			/*
+			 * Ignores wrmsr_on_cpu() errors for offline CPUs they
+			 * will get this call through intel_pmu_cpu_starting().
+			 */
 			init_debug_store_on_cpu(cpu);
+		}
 	}
-
-	put_online_cpus();
 }
 
 /*
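A minimal, self-contained C sketch (user-space, not kernel code) of the pattern this patch moves to: per-CPU buffers are set up for every possible CPU, CPUs that are online get their DS_AREA programmed right away, and offline CPUs program it themselves from their CPU-starting callback once they come up, so the loops no longer need to hold the CPU hotplug lock. Everything below, including the simplified "MSR" and the function bodies, is invented for illustration and is not the kernel's implementation.

#include <stdbool.h>
#include <stdio.h>

#define NR_POSSIBLE_CPUS 4

/* Invented stand-ins: a per-CPU buffer pointer and a per-CPU "DS_AREA MSR". */
static void *ds_buffer[NR_POSSIBLE_CPUS];
static void *ds_area_msr[NR_POSSIBLE_CPUS];
static bool cpu_online[NR_POSSIBLE_CPUS] = { true, true, false, false };

/* Program the MSR only on online CPUs; offline CPUs are simply skipped. */
static void init_debug_store_on_cpu(int cpu)
{
	if (!cpu_online[cpu])
		return;
	ds_area_msr[cpu] = ds_buffer[cpu];
}

/* Hotplug "starting" path: a CPU coming online programs its own MSR. */
static void cpu_starting(int cpu)
{
	ds_area_msr[cpu] = ds_buffer[cpu];
}

int main(void)
{
	static char backing[NR_POSSIBLE_CPUS][64];
	int cpu;

	/* Allocate for every possible CPU, then program only the online ones. */
	for (cpu = 0; cpu < NR_POSSIBLE_CPUS; cpu++)
		ds_buffer[cpu] = backing[cpu];
	for (cpu = 0; cpu < NR_POSSIBLE_CPUS; cpu++)
		init_debug_store_on_cpu(cpu);

	/* Later, CPU 2 is onlined and picks up its buffer by itself. */
	cpu_online[2] = true;
	cpu_starting(2);

	for (cpu = 0; cpu < NR_POSSIBLE_CPUS; cpu++)
		printf("cpu%d DS_AREA=%p\n", cpu, ds_area_msr[cpu]);
	return 0;
}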