|
29 | 29 | #include <linux/kallsyms.h>
|
30 | 30 | #include <linux/random.h>
|
31 | 31 | #include <linux/prctl.h>
|
| 32 | +#include <linux/nmi.h> |
32 | 33 |
|
33 | 34 | #include <asm/asm.h>
|
34 | 35 | #include <asm/bootinfo.h>
|
@@ -655,28 +656,42 @@ unsigned long arch_align_stack(unsigned long sp)
|
655 | 656 | return sp & ALMASK;
|
656 | 657 | }
|
657 | 658 |
|
658 |
| -static void arch_dump_stack(void *info) |
659 |
| -{ |
660 |
| - struct pt_regs *regs; |
/*
 * Per-CPU call_single_data_t used to request a backtrace dump from a
 * remote CPU via an asynchronous IPI (smp_call_function_single_async()).
 */
static DEFINE_PER_CPU(call_single_data_t, backtrace_csd);
/*
 * Bit set for each CPU with a backtrace IPI in flight; cleared by the
 * handler once it runs, so a CPU's csd is never reused while still busy.
 */
static struct cpumask backtrace_csd_busy;
661 | 661 |
|
662 |
| - regs = get_irq_regs(); |
663 |
| - |
664 |
| - if (regs) |
665 |
| - show_regs(regs); |
666 |
| - else |
667 |
| - dump_stack(); |
/*
 * IPI handler: dump a backtrace for the CPU this runs on, then clear this
 * CPU's bit in backtrace_csd_busy to mark its call_single_data_t free for
 * reuse. The clear must come after the backtrace so the csd is not
 * re-sent while this handler is still using it.
 */
static void handle_backtrace(void *info)
{
	nmi_cpu_backtrace(get_irq_regs());
	cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
}
|
669 | 667 |
|
670 |
| -void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self) |
/*
 * Send an asynchronous backtrace-request IPI to every CPU in @mask.
 * A CPU whose previous request is still pending (its bit is already set
 * in backtrace_csd_busy) is skipped with a warning rather than reusing
 * an in-flight call_single_data_t.
 */
static void raise_backtrace(cpumask_t *mask)
{
	call_single_data_t *csd;
	int cpu;

	for_each_cpu(cpu, mask) {
		/*
		 * If we previously sent an IPI to the target CPU & it hasn't
		 * cleared its bit in the busy cpumask then it didn't handle
		 * our previous IPI & it's not safe for us to reuse the
		 * call_single_data_t.
		 */
		if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
			pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
				cpu);
			continue;
		}

		/* Bit won; we own this CPU's csd until handle_backtrace() runs. */
		csd = &per_cpu(backtrace_csd, cpu);
		csd->func = handle_backtrace;
		smp_call_function_single_async(cpu, csd);
	}
}
678 | 691 |
|
679 |
| - put_cpu(); |
/*
 * Arch hook for triggering backtraces on a set of CPUs: delegate to the
 * generic nmi_trigger_cpumask_backtrace() helper, which filters the mask
 * (honouring @exclude_self) and calls raise_backtrace() to deliver the
 * IPIs.
 */
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
}
|
681 | 696 |
|
682 | 697 | int mips_get_process_fp_mode(struct task_struct *task)
|
|
0 commit comments