From: Peter Zijlstra
Subject: Re: [Qemu-ppc] pseries on qemu-system-ppc64le crashes in doorbell_core_ipi()
Date: Mon, 1 Apr 2019 10:38:27 +0200
User-agent: Mutt/1.10.1 (2018-07-13)

+ fweisbec, who did the remote bits

On Sat, Mar 30, 2019 at 01:10:28PM +1000, Nicholas Piggin wrote:
> Something like this?
> 
> kernel/irq_work: Do not raise an IPI when queueing work on the local CPU
> 
> The QEMU powerpc/pseries machine model was not expecting a self-IPI,
> and it may be a bit of a surprising thing to do, so have irq_work_queue_on
> do local queueing when the target is the current CPU.

This seems OK to me.
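
For context, a minimal sketch of the caller-visible change described above
(not part of the patch; my_work stands in for a hypothetical, already
initialised irq_work instance):

	int cpu = smp_processor_id();	/* e.g. inside a preempt-disabled section */

	/*
	 * Before the patch: irq_work_queue_on() always took the remote path,
	 * i.e. a self-IPI via arch_send_call_function_single_ipi(cpu), which
	 * the pseries machine model did not expect.
	 *
	 * After the patch: a target equal to the current CPU falls through to
	 * the same local queueing path that irq_work_queue() uses, so no
	 * self-IPI is sent through the call-function backend.
	 */
	irq_work_queue_on(&my_work, cpu);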

> Suggested-by: Steven Rostedt <address@hidden>
> Signed-off-by: Nicholas Piggin <address@hidden>

Acked-by: Peter Zijlstra (Intel) <address@hidden>

> ---
>  kernel/irq_work.c | 78 ++++++++++++++++++++++++++---------------------
>  1 file changed, 43 insertions(+), 35 deletions(-)
> 
> diff --git a/kernel/irq_work.c b/kernel/irq_work.c
> index 6b7cdf17ccf8..f0e539d0f879 100644
> --- a/kernel/irq_work.c
> +++ b/kernel/irq_work.c
> @@ -56,61 +56,69 @@ void __weak arch_irq_work_raise(void)
>        */
>  }
>  
> -/*
> - * Enqueue the irq_work @work on @cpu unless it's already pending
> - * somewhere.
> - *
> - * Can be re-enqueued while the callback is still in progress.
> - */
> -bool irq_work_queue_on(struct irq_work *work, int cpu)
> +/* Enqueue on current CPU, work must already be claimed and preempt disabled */
> +static void __irq_work_queue(struct irq_work *work)
>  {
> -     /* All work should have been flushed before going offline */
> -     WARN_ON_ONCE(cpu_is_offline(cpu));
> -
> -#ifdef CONFIG_SMP
> -
> -     /* Arch remote IPI send/receive backend aren't NMI safe */
> -     WARN_ON_ONCE(in_nmi());
> +     /* If the work is "lazy", handle it from next tick if any */
> +     if (work->flags & IRQ_WORK_LAZY) {
> +             if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
> +                 tick_nohz_tick_stopped())
> +                     arch_irq_work_raise();
> +     } else {
> +             if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
> +                     arch_irq_work_raise();
> +     }
> +}
>  
> +/* Enqueue the irq work @work on the current CPU */
> +bool irq_work_queue(struct irq_work *work)
> +{
>       /* Only queue if not already pending */
>       if (!irq_work_claim(work))
>               return false;
>  
> -     if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
> -             arch_send_call_function_single_ipi(cpu);
> -
> -#else /* #ifdef CONFIG_SMP */
> -     irq_work_queue(work);
> -#endif /* #else #ifdef CONFIG_SMP */
> +     /* Queue the entry and raise the IPI if needed. */
> +     preempt_disable();
> +     __irq_work_queue(work);
> +     preempt_enable();
>  
>       return true;
>  }
> +EXPORT_SYMBOL_GPL(irq_work_queue);
>  
> -/* Enqueue the irq work @work on the current CPU */
> -bool irq_work_queue(struct irq_work *work)
> +/*
> + * Enqueue the irq_work @work on @cpu unless it's already pending
> + * somewhere.
> + *
> + * Can be re-enqueued while the callback is still in progress.
> + */
> +bool irq_work_queue_on(struct irq_work *work, int cpu)
>  {
> +#ifndef CONFIG_SMP
> +     return irq_work_queue(work);
> +
> +#else /* #ifndef CONFIG_SMP */
> +     /* All work should have been flushed before going offline */
> +     WARN_ON_ONCE(cpu_is_offline(cpu));
> +
>       /* Only queue if not already pending */
>       if (!irq_work_claim(work))
>               return false;
>  
> -     /* Queue the entry and raise the IPI if needed. */
>       preempt_disable();
> -
> -     /* If the work is "lazy", handle it from next tick if any */
> -     if (work->flags & IRQ_WORK_LAZY) {
> -             if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
> -                 tick_nohz_tick_stopped())
> -                     arch_irq_work_raise();
> -     } else {
> -             if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
> -                     arch_irq_work_raise();
> -     }
> -
> +     if (cpu != smp_processor_id()) {
> +             /* Arch remote IPI send/receive backend aren't NMI safe */
> +             WARN_ON_ONCE(in_nmi());
> +             if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
> +                     arch_send_call_function_single_ipi(cpu);
> +     } else
> +             __irq_work_queue(work);
>       preempt_enable();
>  
>       return true;
> +#endif /* #else #ifndef CONFIG_SMP */
>  }
> -EXPORT_SYMBOL_GPL(irq_work_queue);
> +
>  
>  bool irq_work_needs_cpu(void)
>  {
> -- 
> 2.20.1
> 
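
For readers less familiar with the irq_work API, a minimal usage sketch under
the post-patch behaviour (the callback, work item, and CPU number below are
hypothetical, not taken from the thread):

#include <linux/irq_work.h>
#include <linux/printk.h>
#include <linux/smp.h>

static struct irq_work my_work;

static void my_irq_work_fn(struct irq_work *work)
{
	/* Runs in hard interrupt context on the CPU the work was queued on. */
	pr_info("irq_work ran on CPU %d\n", smp_processor_id());
}

static void example(void)
{
	init_irq_work(&my_work, my_irq_work_fn);

	/* Local queueing: add to this CPU's list, then arch_irq_work_raise(). */
	irq_work_queue(&my_work);

	/*
	 * Cross-CPU queueing: arch_send_call_function_single_ipi() is used only
	 * when the target differs from the current CPU; otherwise it now takes
	 * the local path above.
	 */
	irq_work_queue_on(&my_work, 1);
}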


