Date: Wed, 13 May 2015 16:18:53 +1000
From: Gavin Shan
To: Alexey Kardashevskiy
Cc: linuxppc-dev@lists.ozlabs.org, David Gibson,
	Benjamin Herrenschmidt, Paul Mackerras, Alex Williamson,
	Gavin Shan, Wei Yang, linux-kernel@vger.kernel.org
Subject: Re: [PATCH kernel v10 09/34] vfio: powerpc/spapr: Move locked_vm accounting to helpers
Message-ID: <20150513061853.GA7539@gwshan>
Reply-To: Gavin Shan
References: <1431358763-24371-1-git-send-email-aik@ozlabs.ru>
 <1431358763-24371-10-git-send-email-aik@ozlabs.ru>
In-Reply-To: <1431358763-24371-10-git-send-email-aik@ozlabs.ru>

On Tue, May 12, 2015 at 01:38:58AM +1000, Alexey Kardashevskiy wrote:
>This moves locked pages accounting to helpers.
>Later they will be reused for Dynamic DMA windows (DDW).
>
>This reworks debug messages to show the current value and the limit.
>
>This stores the locked pages number in the container so when unlocking
>the iommu table pointer won't be needed. This does not have an effect
>now but it will with multiple tables per container, as then we will
>allow attaching/detaching groups on the fly and we may end up having
>a container with no group attached but with the counter incremented.
>
>While we are here, update the comment explaining why RLIMIT_MEMLOCK
>might be required to be bigger than the guest RAM. This also prints
>the pid of the current process in pr_warn/pr_debug.
>
>Signed-off-by: Alexey Kardashevskiy
>[aw: for the vfio related changes]
>Acked-by: Alex Williamson
>Reviewed-by: David Gibson

Reviewed-by: Gavin Shan

>---
>Changes:
>v4:
>* new helpers do nothing if @npages == 0
>* tce_iommu_disable() now can decrement the counter if the group was
>detached (not possible now but will be in the future)
>---
> drivers/vfio/vfio_iommu_spapr_tce.c | 82 ++++++++++++++++++++++++++++---------
> 1 file changed, 63 insertions(+), 19 deletions(-)
>
>diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
>index 64300cc..40583f9 100644
>--- a/drivers/vfio/vfio_iommu_spapr_tce.c
>+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
>@@ -29,6 +29,51 @@
> static void tce_iommu_detach_group(void *iommu_data,
> 		struct iommu_group *iommu_group);
>
>+static long try_increment_locked_vm(long npages)
>+{
>+	long ret = 0, locked, lock_limit;
>+
>+	if (!current || !current->mm)
>+		return -ESRCH; /* process exited */
>+
>+	if (!npages)
>+		return 0;
>+
>+	down_write(&current->mm->mmap_sem);
>+	locked = current->mm->locked_vm + npages;
>+	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
>+	if (locked > lock_limit && !capable(CAP_IPC_LOCK))
>+		ret = -ENOMEM;
>+	else
>+		current->mm->locked_vm += npages;
>+
>+	pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid,
>+			npages << PAGE_SHIFT,
>+			current->mm->locked_vm << PAGE_SHIFT,
>+			rlimit(RLIMIT_MEMLOCK),
>+			ret ? " - exceeded" : "");
>+

I'm not sure whether printing current->comm together with current->pid
would give a bit more readability or not.
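Just as an illustration (untested, and the exact format string is only a
suggestion, not something the patch needs to adopt), the message could
look like:

	/* illustration only: print the task name next to the pid */
	pr_debug("%s[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n",
			current->comm, current->pid,
			npages << PAGE_SHIFT,
			current->mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK),
			ret ? " - exceeded" : "");

No strong opinion either way, it only affects how the debug output reads.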
" - exceeded" : ""); >+ I'm not sure if current->pid + current->comm can give a bit more readability or not. Thanks, Gavin >+ up_write(¤t->mm->mmap_sem); >+ >+ return ret; >+} >+ >+static void decrement_locked_vm(long npages) >+{ >+ if (!current || !current->mm || !npages) >+ return; /* process exited */ >+ >+ down_write(¤t->mm->mmap_sem); >+ if (npages > current->mm->locked_vm) >+ npages = current->mm->locked_vm; >+ current->mm->locked_vm -= npages; >+ pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid, >+ npages << PAGE_SHIFT, >+ current->mm->locked_vm << PAGE_SHIFT, >+ rlimit(RLIMIT_MEMLOCK)); >+ up_write(¤t->mm->mmap_sem); >+} >+ > /* > * VFIO IOMMU fd for SPAPR_TCE IOMMU implementation > * >@@ -45,6 +90,7 @@ struct tce_container { > struct mutex lock; > struct iommu_table *tbl; > bool enabled; >+ unsigned long locked_pages; > }; > > static bool tce_page_is_contained(struct page *page, unsigned page_shift) >@@ -60,7 +106,7 @@ static bool tce_page_is_contained(struct page *page, unsigned page_shift) > static int tce_iommu_enable(struct tce_container *container) > { > int ret = 0; >- unsigned long locked, lock_limit, npages; >+ unsigned long locked; > struct iommu_table *tbl = container->tbl; > > if (!container->tbl) >@@ -89,21 +135,22 @@ static int tce_iommu_enable(struct tce_container *container) > * Also we don't have a nice way to fail on H_PUT_TCE due to ulimits, > * that would effectively kill the guest at random points, much better > * enforcing the limit based on the max that the guest can map. >+ * >+ * Unfortunately at the moment it counts whole tables, no matter how >+ * much memory the guest has. I.e. for 4GB guest and 4 IOMMU groups >+ * each with 2GB DMA window, 8GB will be counted here. The reason for >+ * this is that we cannot tell here the amount of RAM used by the guest >+ * as this information is only available from KVM and VFIO is >+ * KVM agnostic. > */ >- down_write(¤t->mm->mmap_sem); >- npages = (tbl->it_size << tbl->it_page_shift) >> PAGE_SHIFT; >- locked = current->mm->locked_vm + npages; >- lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; >- if (locked > lock_limit && !capable(CAP_IPC_LOCK)) { >- pr_warn("RLIMIT_MEMLOCK (%ld) exceeded\n", >- rlimit(RLIMIT_MEMLOCK)); >- ret = -ENOMEM; >- } else { >+ locked = (tbl->it_size << tbl->it_page_shift) >> PAGE_SHIFT; >+ ret = try_increment_locked_vm(locked); >+ if (ret) >+ return ret; > >- current->mm->locked_vm += npages; >- container->enabled = true; >- } >- up_write(¤t->mm->mmap_sem); >+ container->locked_pages = locked; >+ >+ container->enabled = true; > > return ret; > } >@@ -115,13 +162,10 @@ static void tce_iommu_disable(struct tce_container *container) > > container->enabled = false; > >- if (!container->tbl || !current->mm) >+ if (!current->mm) > return; > >- down_write(¤t->mm->mmap_sem); >- current->mm->locked_vm -= (container->tbl->it_size << >- container->tbl->it_page_shift) >> PAGE_SHIFT; >- up_write(¤t->mm->mmap_sem); >+ decrement_locked_vm(container->locked_pages); > } > > static void *tce_iommu_open(unsigned long arg) >-- >2.4.0.rc3.8.gfb3e7d5 > -- To unsubscribe from this list: send the line "unsubscribe linux-kernel" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/