Date: Mon, 3 Apr 2017 18:49:09 +0300
From: "Michael S. Tsirkin" <mst@redhat.com>
To: Christoph Hellwig
Cc: Mike Galbraith, Thorsten Leemhuis, virtio-dev@lists.oasis-open.org,
    Linux Kernel Mailing List, rjones@redhat.com
Subject: Re: Random guest crashes since 5c34d002dcc7 ("virtio_pci: use shared
    interrupts for virtqueues")
Message-ID: <20170403184557-mutt-send-email-mst@kernel.org>
In-Reply-To: <20170403141823.GA24747@lst.de>

On Mon, Apr 03, 2017 at 04:18:23PM +0200, Christoph Hellwig wrote:
> Mike,
>
> can you try the patch below?

It's really easy to test on qemu, so I will - just add a dummy
virtio-serial-pci device with -device virtio-serial-pci and add
threadirqs to the kernel command line.

However, it doesn't look like this will fix the error recovery for when
request_irq fails - it will just make the error less likely. So we still
need to look into that - on failure we should recover and use the INTx
path; at the moment it causes hibernation to hang.

> ---
> From fe41a30b54878cc631623b7511267125e0da4b15 Mon Sep 17 00:00:00 2001
> From: Christoph Hellwig
> Date: Mon, 3 Apr 2017 14:51:35 +0200
> Subject: virtio_pci: don't use shared irq for virtqueues
>
> Reimplement the shared irq feature manually, as we might have a larger
> number of virtqueues than the core shared interrupt code can handle
> in threaded interrupt mode.
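
For reference, a minimal qemu invocation for the test setup mentioned
above might look roughly like the following - the disk image, kernel
image and the rest of the command line are placeholders, the relevant
bits are -device virtio-serial-pci and threadirqs:

  qemu-system-x86_64 -enable-kvm -m 1024 \
      -drive file=guest.img,if=virtio \
      -device virtio-serial-pci \
      -kernel bzImage \
      -append "root=/dev/vda console=ttyS0 threadirqs"
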
>
> Signed-off-by: Christoph Hellwig
> ---
>  drivers/virtio/virtio_pci_common.c | 142 +++++++++++++++++++++----------------
>  drivers/virtio/virtio_pci_common.h |   1 +
>  2 files changed, 83 insertions(+), 60 deletions(-)
>
> diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
> index 590534910dc6..6dd719543410 100644
> --- a/drivers/virtio/virtio_pci_common.c
> +++ b/drivers/virtio/virtio_pci_common.c
> @@ -137,6 +137,9 @@ void vp_del_vqs(struct virtio_device *vdev)
>                  kfree(vp_dev->msix_vector_map);
>          }
>
> +        /* free the shared virtqueue irq if we don't use per-vq irqs */
> +        if (vp_dev->shared_vq_vec)
> +                free_irq(pci_irq_vector(vp_dev->pci_dev, 1), vp_dev);
>          free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev);
>          pci_free_irq_vectors(vp_dev->pci_dev);
>  }
> @@ -147,10 +150,10 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
>  {
>          struct virtio_pci_device *vp_dev = to_vp_device(vdev);
>          const char *name = dev_name(&vp_dev->vdev.dev);
> -        int i, j, err = -ENOMEM, allocated_vectors, nvectors;
> +        struct pci_dev *pdev = vp_dev->pci_dev;
> +        int i, err = -ENOMEM, nvectors;
>          unsigned flags = PCI_IRQ_MSIX;
> -        bool shared = false;
> -        u16 msix_vec;
> +        u16 msix_vec = 0;
>
>          if (desc) {
>                  flags |= PCI_IRQ_AFFINITY;
> @@ -162,19 +165,18 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
>                  if (callbacks[i])
>                          nvectors++;
>
> -        /* Try one vector per queue first. */
> -        err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
> -                        nvectors, flags, desc);
> +        /* Try one vector for config and one per queue first. */
> +        err = pci_alloc_irq_vectors_affinity(pdev, nvectors, nvectors, flags,
> +                        desc);
>          if (err < 0) {
>                  /* Fallback to one vector for config, one shared for queues. */
> -                shared = true;
> -                err = pci_alloc_irq_vectors(vp_dev->pci_dev, 2, 2,
> +                nvectors = 2;
> +                vp_dev->shared_vq_vec = true;
> +                err = pci_alloc_irq_vectors(pdev, nvectors, nvectors,
>                                  PCI_IRQ_MSIX);
>                  if (err < 0)
>                          return err;
>          }
> -        if (err < 0)
> -                return err;
>
>          vp_dev->msix_vectors = nvectors;
>          vp_dev->msix_names = kmalloc_array(nvectors,
> @@ -194,79 +196,99 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
>          }
>
>          /* Set the vector used for configuration */
> -        snprintf(vp_dev->msix_names[0], sizeof(*vp_dev->msix_names),
> +        snprintf(vp_dev->msix_names[msix_vec], sizeof(*vp_dev->msix_names),
>                   "%s-config", name);
> -        err = request_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_config_changed,
> -                        0, vp_dev->msix_names[0], vp_dev);
> +        err = request_irq(pci_irq_vector(pdev, msix_vec), vp_config_changed, 0,
> +                        vp_dev->msix_names[msix_vec], vp_dev);
>          if (err)
>                  goto out_free_msix_affinity_masks;
>
>          /* Verify we had enough resources to assign the vector */
> -        if (vp_dev->config_vector(vp_dev, 0) == VIRTIO_MSI_NO_VECTOR) {
> +        if (vp_dev->config_vector(vp_dev, msix_vec) == VIRTIO_MSI_NO_VECTOR) {
>                  err = -EBUSY;
>                  goto out_free_config_irq;
>          }
>
> -        vp_dev->msix_vector_map = kmalloc_array(nvqs,
> -                        sizeof(*vp_dev->msix_vector_map), GFP_KERNEL);
> -        if (!vp_dev->msix_vector_map)
> -                goto out_disable_config_irq;
> -
> -        allocated_vectors = j = 1; /* vector 0 is the config interrupt */
> -        for (i = 0; i < nvqs; ++i) {
> -                if (!names[i]) {
> -                        vqs[i] = NULL;
> -                        continue;
> -                }
> -
> -                if (callbacks[i])
> -                        msix_vec = allocated_vectors;
> -                else
> -                        msix_vec = VIRTIO_MSI_NO_VECTOR;
> -
> -                vqs[i] = vp_dev->setup_vq(vp_dev, i, callbacks[i], names[i],
> -                                msix_vec);
> -                if (IS_ERR(vqs[i])) {
> -                        err = PTR_ERR(vqs[i]);
> -                        goto out_remove_vqs;
> +        msix_vec++;
> +
> +        /*
> +         * Use a different vector for each queue if they are available,
> +         * else share the same vector for all VQs.
> +         */
> +        if (vp_dev->shared_vq_vec) {
> +                snprintf(vp_dev->msix_names[msix_vec],
> +                         sizeof(vp_dev->msix_names[msix_vec]),
> +                         "%s-virtqueues", name);
> +                err = request_irq(pci_irq_vector(pdev, msix_vec),
> +                                vp_vring_interrupt, 0,
> +                                vp_dev->msix_names[msix_vec], vp_dev);
> +                if (err)
> +                        goto out_disable_config_irq;
> +
> +                for (i = 0; i < nvqs; ++i) {
> +                        if (!names[i]) {
> +                                vqs[i] = NULL;
> +                                continue;
> +                        }
> +
> +                        vqs[i] = vp_dev->setup_vq(vp_dev, i, callbacks[i],
> +                                        names[i], callbacks[i] ?
> +                                        msix_vec : VIRTIO_MSI_NO_VECTOR);
> +                        if (IS_ERR(vqs[i])) {
> +                                err = PTR_ERR(vqs[i]);
> +                                goto out_remove_vqs;
> +                        }
>                  }
> +        } else {
> +                vp_dev->msix_vector_map = kmalloc_array(nvqs,
> +                                sizeof(*vp_dev->msix_vector_map), GFP_KERNEL);
> +                if (!vp_dev->msix_vector_map)
> +                        goto out_disable_config_irq;
>
> -                if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
> -                        vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR;
> -                        continue;
> -                }
> +                for (i = 0; i < nvqs; ++i) {
> +                        if (!names[i]) {
> +                                vqs[i] = NULL;
> +                                continue;
> +                        }
>
> -                snprintf(vp_dev->msix_names[j],
> -                         sizeof(*vp_dev->msix_names), "%s-%s",
> -                         dev_name(&vp_dev->vdev.dev), names[i]);
> -                err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
> -                                vring_interrupt, IRQF_SHARED,
> -                                vp_dev->msix_names[j], vqs[i]);
> -                if (err) {
>                          /* don't free this irq on error */
>                          vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR;
> -                        goto out_remove_vqs;
> +
> +                        vqs[i] = vp_dev->setup_vq(vp_dev, i, callbacks[i],
> +                                        names[i], callbacks[i] ?
> +                                        msix_vec : VIRTIO_MSI_NO_VECTOR);
> +                        if (IS_ERR(vqs[i])) {
> +                                err = PTR_ERR(vqs[i]);
> +                                goto out_remove_vqs;
> +                        }
> +
> +                        if (!callbacks[i])
> +                                continue;
> +
> +                        snprintf(vp_dev->msix_names[msix_vec],
> +                                 sizeof(*vp_dev->msix_names), "%s-%s",
> +                                 dev_name(&vp_dev->vdev.dev), names[i]);
> +                        err = request_irq(pci_irq_vector(pdev, msix_vec),
> +                                        vring_interrupt, IRQF_SHARED,
> +                                        vp_dev->msix_names[msix_vec],
> +                                        vqs[i]);
> +                        if (err)
> +                                goto out_remove_vqs;
> +                        vp_dev->msix_vector_map[i] = msix_vec++;
>                  }
> -                vp_dev->msix_vector_map[i] = msix_vec;
> -                j++;
> -
> -                /*
> -                 * Use a different vector for each queue if they are available,
> -                 * else share the same vector for all VQs.
> -                 */
> -                if (!shared)
> -                        allocated_vectors++;
>          }
>
>          return 0;
>
>  out_remove_vqs:
>          vp_remove_vqs(vdev);
> +        if (vp_dev->shared_vq_vec)
> +                free_irq(pci_irq_vector(pdev, 1), vp_dev);
>          kfree(vp_dev->msix_vector_map);
>  out_disable_config_irq:
>          vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);
>  out_free_config_irq:
> -        free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev);
> +        free_irq(pci_irq_vector(pdev, 0), vp_dev);
>  out_free_msix_affinity_masks:
>          for (i = 0; i < nvectors; i++) {
>                  if (vp_dev->msix_affinity_masks[i])
> @@ -276,7 +298,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
>  out_free_msix_names:
>          kfree(vp_dev->msix_names);
>  out_free_irq_vectors:
> -        pci_free_irq_vectors(vp_dev->pci_dev);
> +        pci_free_irq_vectors(pdev);
>          return err;
>  }
>
> @@ -346,7 +368,7 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
>          if (!vq->callback)
>                  return -EINVAL;
>
> -        if (vp_dev->pci_dev->msix_enabled) {
> +        if (vp_dev->msix_vector_map) {
>                  int vec = vp_dev->msix_vector_map[vq->index];
>                  struct cpumask *mask = vp_dev->msix_affinity_masks[vec];
>                  unsigned int irq = pci_irq_vector(vp_dev->pci_dev, vec);
> diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
> index ac8c9d788964..d6d7fb99e47f 100644
> --- a/drivers/virtio/virtio_pci_common.h
> +++ b/drivers/virtio/virtio_pci_common.h
> @@ -72,6 +72,7 @@ struct virtio_pci_device {
>          int msix_vectors;
>          /* Map of per-VQ MSI-X vectors, may be NULL */
>          unsigned *msix_vector_map;
> +        bool shared_vq_vec;
>
>          struct virtqueue *(*setup_vq)(struct virtio_pci_device *vp_dev,
>                                        unsigned idx,
> --
> 2.11.0
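
To spell out the fallback scheme the patch sets up: vector 0 stays
dedicated to config-change interrupts and vector 1 is shared by all
virtqueues, so the handler registered on it has to poll every VQ. As a
rough sketch (the exact body depends on the tree this is applied to),
vp_vring_interrupt does something like:

  static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
  {
          struct virtio_pci_device *vp_dev = opaque;
          struct virtqueue *vq;
          irqreturn_t ret = IRQ_NONE;

          /* a shared vector gives no hint which VQ fired - check them all */
          list_for_each_entry(vq, &vp_dev->vdev.vqs, list) {
                  if (vq->callback && vring_interrupt(irq, vq) == IRQ_HANDLED)
                          ret = IRQ_HANDLED;
          }

          return ret;
  }

When per-queue vectors are available, the else branch keeps the existing
one-vector-per-VQ setup and records the mapping in msix_vector_map, which
is also what vp_set_vq_affinity now keys off.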