Hi Jérôme, Ben and Jason,
below is a series against the hmm tree which fixes up the mmap_sem
locking in nouveau and while at it also removes leftover legacy HMM APIs
only used by nouveau.
Changes since v1:
- don't return the valid state from hmm_range_unregister
- additional nouveau cleanups
These two functions are marked as legacy APIs to get rid of, but they
seem to suit the current nouveau flow. Move them to their only user in
preparation for fixing a locking bug involving caller and callee.
All comments referring to the old API have been removed as this now
is a driver private helper.
Signed-off-by: Christoph Hellwig <[email protected]>
---
drivers/gpu/drm/nouveau/nouveau_svm.c | 52 +++++++++++++++++++++-
include/linux/hmm.h | 63 ---------------------------
2 files changed, 50 insertions(+), 65 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 8c92374afcf2..033a9241a14a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -36,6 +36,13 @@
#include <linux/sort.h>
#include <linux/hmm.h>
+/*
+ * When waiting for mmu notifiers we need some kind of timeout, otherwise we
+ * could potentially wait forever; 1000ms, i.e. 1s, already sounds like a long
+ * time to wait.
+ */
+#define NOUVEAU_RANGE_FAULT_TIMEOUT 1000
+
struct nouveau_svm {
struct nouveau_drm *drm;
struct mutex mutex;
@@ -475,6 +482,47 @@ nouveau_svm_fault_cache(struct nouveau_svm *svm,
fault->inst, fault->addr, fault->access);
}
+static inline bool nouveau_range_done(struct hmm_range *range)
+{
+ bool ret = hmm_range_valid(range);
+
+ hmm_range_unregister(range);
+ return ret;
+}
+
+static int
+nouveau_range_fault(struct hmm_mirror *mirror, struct hmm_range *range,
+ bool block)
+{
+ long ret;
+
+ range->default_flags = 0;
+ range->pfn_flags_mask = -1UL;
+
+ ret = hmm_range_register(range, mirror,
+ range->start, range->end,
+ PAGE_SHIFT);
+ if (ret)
+ return (int)ret;
+
+ if (!hmm_range_wait_until_valid(range, NOUVEAU_RANGE_FAULT_TIMEOUT)) {
+ up_read(&range->vma->vm_mm->mmap_sem);
+ return -EAGAIN;
+ }
+
+ ret = hmm_range_fault(range, block);
+ if (ret <= 0) {
+ if (ret == -EBUSY || !ret) {
+ up_read(&range->vma->vm_mm->mmap_sem);
+ ret = -EBUSY;
+ } else if (ret == -EAGAIN)
+ ret = -EBUSY;
+ hmm_range_unregister(range);
+ return ret;
+ }
+ return 0;
+}
+
static int
nouveau_svm_fault(struct nvif_notify *notify)
{
@@ -649,10 +697,10 @@ nouveau_svm_fault(struct nvif_notify *notify)
range.values = nouveau_svm_pfn_values;
range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT;
again:
- ret = hmm_vma_fault(&svmm->mirror, &range, true);
+ ret = nouveau_range_fault(&svmm->mirror, &range, true);
if (ret == 0) {
mutex_lock(&svmm->mutex);
- if (!hmm_vma_range_done(&range)) {
+ if (!nouveau_range_done(&range)) {
mutex_unlock(&svmm->mutex);
goto again;
}
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index b8a08b2a10ca..fa43a9f53833 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -475,69 +475,6 @@ long hmm_range_dma_unmap(struct hmm_range *range,
dma_addr_t *daddrs,
bool dirty);
-/*
- * HMM_RANGE_DEFAULT_TIMEOUT - default timeout (ms) when waiting for a range
- *
- * When waiting for mmu notifiers we need some kind of time out otherwise we
- * could potentialy wait for ever, 1000ms ie 1s sounds like a long time to
- * wait already.
- */
-#define HMM_RANGE_DEFAULT_TIMEOUT 1000
-
-/* This is a temporary helper to avoid merge conflict between trees. */
-static inline bool hmm_vma_range_done(struct hmm_range *range)
-{
- bool ret = hmm_range_valid(range);
-
- hmm_range_unregister(range);
- return ret;
-}
-
-/* This is a temporary helper to avoid merge conflict between trees. */
-static inline int hmm_vma_fault(struct hmm_mirror *mirror,
- struct hmm_range *range, bool block)
-{
- long ret;
-
- /*
- * With the old API the driver must set each individual entries with
- * the requested flags (valid, write, ...). So here we set the mask to
- * keep intact the entries provided by the driver and zero out the
- * default_flags.
- */
- range->default_flags = 0;
- range->pfn_flags_mask = -1UL;
-
- ret = hmm_range_register(range, mirror,
- range->start, range->end,
- PAGE_SHIFT);
- if (ret)
- return (int)ret;
-
- if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
- /*
- * The mmap_sem was taken by driver we release it here and
- * returns -EAGAIN which correspond to mmap_sem have been
- * drop in the old API.
- */
- up_read(&range->vma->vm_mm->mmap_sem);
- return -EAGAIN;
- }
-
- ret = hmm_range_fault(range, block);
- if (ret <= 0) {
- if (ret == -EBUSY || !ret) {
- /* Same as above, drop mmap_sem to match old API. */
- up_read(&range->vma->vm_mm->mmap_sem);
- ret = -EBUSY;
- } else if (ret == -EAGAIN)
- ret = -EBUSY;
- hmm_range_unregister(range);
- return ret;
- }
- return 0;
-}
-
/* Below are for HMM internal use only! Not to be used by device driver! */
static inline void hmm_mm_init(struct mm_struct *mm)
{
--
2.20.1
The parameter is always true, so remove it as well as the -EAGAIN
handling that can only happen for the non-blocking case.
Signed-off-by: Christoph Hellwig <[email protected]>
---
drivers/gpu/drm/nouveau/nouveau_svm.c | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 033a9241a14a..9a9f71e4be29 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -491,8 +491,7 @@ static inline bool nouveau_range_done(struct hmm_range *range)
}
static int
-nouveau_range_fault(struct hmm_mirror *mirror, struct hmm_range *range,
- bool block)
+nouveau_range_fault(struct hmm_mirror *mirror, struct hmm_range *range)
{
long ret;
@@ -510,7 +509,7 @@ nouveau_range_fault(struct hmm_mirror *mirror, struct hmm_range *range,
return -EAGAIN;
}
- ret = hmm_range_fault(range, block);
+ ret = hmm_range_fault(range, true);
if (ret <= 0) {
if (ret == -EBUSY || !ret) {
up_read(&range->vma->vm_mm->mmap_sem);
@@ -697,7 +696,7 @@ nouveau_svm_fault(struct nvif_notify *notify)
range.values = nouveau_svm_pfn_values;
range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT;
again:
- ret = nouveau_range_fault(&svmm->mirror, &range, true);
+ ret = nouveau_range_fault(&svmm->mirror, &range);
if (ret == 0) {
mutex_lock(&svmm->mutex);
if (!nouveau_range_done(&range)) {
--
2.20.1
On Wed, Jul 03, 2019 at 03:02:08PM -0700, Christoph Hellwig wrote:
> Hi Jérôme, Ben and Jason,
>
> below is a series against the hmm tree which fixes up the mmap_sem
> locking in nouveau and while at it also removes leftover legacy HMM APIs
> only used by nouveau.
>
> Changes since v1:
> - don't return the valid state from hmm_range_unregister
> - additional nouveau cleanups
Ralph, since most of this is nouveau could you contribute a
Tested-by? Thanks
Jason
On Wed, Jul 03, 2019 at 03:02:08PM -0700, Christoph Hellwig wrote:
> Hi Jérôme, Ben and Jason,
>
> below is a series against the hmm tree which fixes up the mmap_sem
> locking in nouveau and while at it also removes leftover legacy HMM APIs
> only used by nouveau.
As much as I like this series, it won't make it to this merge window,
sorry.
Let's revisit it in a few weeks at rc1.
Regards,
Jason
On 7/4/19 9:42 AM, Jason Gunthorpe wrote:
> On Wed, Jul 03, 2019 at 03:02:08PM -0700, Christoph Hellwig wrote:
>> Hi Jérôme, Ben and Jason,
>>
>> below is a series against the hmm tree which fixes up the mmap_sem
>> locking in nouveau and while at it also removes leftover legacy HMM APIs
>> only used by nouveau.
>>
>> Changes since v1:
>> - don't return the valid state from hmm_range_unregister
>> - additional nouveau cleanups
>
> Ralph, since most of this is nouveau could you contribute a
> Tested-by? Thanks
>
> Jason
>
I can test things fairly easily but with all the different patches,
conflicts, and personal git trees, can you specify the git tree
and branch with everything applied that you want me to test?
On Mon, Jul 08, 2019 at 10:30:55AM -0700, Ralph Campbell wrote:
>
> On 7/4/19 9:42 AM, Jason Gunthorpe wrote:
> > On Wed, Jul 03, 2019 at 03:02:08PM -0700, Christoph Hellwig wrote:
> > > Hi Jérôme, Ben and Jason,
> > >
> > > below is a series against the hmm tree which fixes up the mmap_sem
> > > locking in nouveau and while at it also removes leftover legacy HMM APIs
> > > only used by nouveau.
> > >
> > > Changes since v1:
> > > - don't return the valid state from hmm_range_unregister
> > > - additional nouveau cleanups
> >
> > Ralph, since most of this is nouveau could you contribute a
> > Tested-by? Thanks
> >
> > Jason
> >
>
> I can test things fairly easily but with all the different patches,
> conflicts, and personal git trees, can you specify the git tree
> and branch with everything applied that you want me to test?
This series will be pushed to the next cycle, so if you test v5.3-rc1
+ this series you'd get the right coverage.
Thanks,
Jason
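Concretely, once v5.3-rc1 is tagged, building that test tree could look
something like the following sketch (the branch name and mbox file name
are placeholders, not something taken from this thread):

    # start a test branch from the rc1 tag
    git checkout -b hmm-mmap-sem-test v5.3-rc1
    # apply this series, saved locally as an mbox file
    git am nouveau-hmm-mmap-sem-v2.mbox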
On Fri, Jul 05, 2019 at 09:33:36AM -0300, Jason Gunthorpe wrote:
> On Wed, Jul 03, 2019 at 03:02:08PM -0700, Christoph Hellwig wrote:
> > Hi Jérôme, Ben and Jason,
> >
> > below is a series against the hmm tree which fixes up the mmap_sem
> > locking in nouveau and while at it also removes leftover legacy HMM APIs
> > only used by nouveau.
>
> As much as I like this series, it won't make it to this merge window,
> sorry.
Note that patch 4 fixes a pretty severe locking bug, and 1-3 is just
preparation for that.
On Tue, Jul 09, 2019 at 04:30:38PM +0200, Christoph Hellwig wrote:
> On Fri, Jul 05, 2019 at 09:33:36AM -0300, Jason Gunthorpe wrote:
> > On Wed, Jul 03, 2019 at 03:02:08PM -0700, Christoph Hellwig wrote:
> > > Hi Jérôme, Ben and Jason,
> > >
> > > below is a series against the hmm tree which fixes up the mmap_sem
> > > locking in nouveau and while at it also removes leftover legacy HMM APIs
> > > only used by nouveau.
> >
> > As much as I like this series, it won't make it to this merge window,
> > sorry.
>
> Note that patch 4 fixes a pretty severe locking bug, and 1-3 is just
> preparation for that.
Yes, I know, but that code is all marked STAGING last I saw, so I
don't feel an urgency to get severe bug fixes in for it after the
merge window opens.
I'd like to apply it to hmm.git when rc1 comes out, with Ralph's test
result.
Jason