First, when memory allocation for sg_list_unaligned fails, put_page() is
called even though no pages have been pinned at that point.
Second, if get_user_pages_fast() fails we should unpin only the
num_pinned pages that were actually pinned, instead of looping up to
num_pages.
This patch addresses both issues.
As part of these changes, there is also a minor update to the related comments.
Fixes: 6db7199407ca ("drivers/virt: introduce Freescale hypervisor management driver")
Signed-off-by: Souptick Joarder <[email protected]>
Reviewed-by: Dan Carpenter <[email protected]>
Reviewed-by: John Hubbard <[email protected]>
---
v2:
Added review tag.
v3:
Addressed review comment on v2 from John.
Added review tag.
drivers/virt/fsl_hypervisor.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
index 1b0b11b..efb4e7f 100644
--- a/drivers/virt/fsl_hypervisor.c
+++ b/drivers/virt/fsl_hypervisor.c
@@ -157,7 +157,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
unsigned int i;
long ret = 0;
- int num_pinned; /* return value from get_user_pages() */
+ int num_pinned = 0; /* return value from get_user_pages_fast() */
phys_addr_t remote_paddr; /* The next address in the remote buffer */
uint32_t count; /* The number of bytes left to copy */
@@ -174,7 +174,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
return -EINVAL;
/*
- * The array of pages returned by get_user_pages() covers only
+ * The array of pages returned by get_user_pages_fast() covers only
* page-aligned memory. Since the user buffer is probably not
* page-aligned, we need to handle the discrepancy.
*
@@ -224,7 +224,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
/*
* 'pages' is an array of struct page pointers that's initialized by
- * get_user_pages().
+ * get_user_pages_fast().
*/
pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
if (!pages) {
@@ -241,7 +241,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
if (!sg_list_unaligned) {
pr_debug("fsl-hv: could not allocate S/G list\n");
ret = -ENOMEM;
- goto exit;
+ goto free_pages;
}
sg_list = PTR_ALIGN(sg_list_unaligned, sizeof(struct fh_sg_list));
@@ -250,7 +250,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
num_pages, param.source != -1 ? FOLL_WRITE : 0, pages);
if (num_pinned != num_pages) {
- /* get_user_pages() failed */
+ /* get_user_pages_fast() failed */
pr_debug("fsl-hv: could not lock source buffer\n");
ret = (num_pinned < 0) ? num_pinned : -EFAULT;
goto exit;
@@ -293,12 +293,12 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
exit:
if (pages) {
- for (i = 0; i < num_pages; i++)
- if (pages[i])
- put_page(pages[i]);
+ for (i = 0; i < num_pinned; i++)
+ put_page(pages[i]);
}
kfree(sg_list_unaligned);
+free_pages:
kfree(pages);
if (!ret)
--
1.9.1
Hi John,
On Tue, Sep 1, 2020 at 3:38 AM Souptick Joarder <[email protected]> wrote:
>
> First, when memory allocation for sg_list_unaligned failed, there
> is a bug of calling put_pages() as we haven't pinned any pages.
>
> Second, if get_user_pages_fast() failed we should unpinned num_pinned
> pages instead of checking till num_pages.
>
> This will address both.
>
> As part of these changes, minor update in documentation.
>
> Fixes: 6db7199407ca ("drivers/virt: introduce Freescale hypervisor
> management driver")
> Signed-off-by: Souptick Joarder <[email protected]>
> Reviewed-by: Dan Carpenter <[email protected]>
> Reviewed-by: John Hubbard <[email protected]>
A few questions ->
First, there are minor updates from v2 -> v3 beyond addressing your
review comments on v2. Would you like to review it again?
Second, I think that, as per Documentation/core-api/pin_user_pages.rst,
this is case 5.
Shall I make the conversion as part of this series?
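(For reference, and only as a rough, untested sketch: the case-5
conversion here would mean swapping get_user_pages_fast() for
pin_user_pages_fast(), which takes the same arguments, and replacing
the put_page() loop on the exit path with unpin_user_pages(). "start"
below is just a placeholder for the existing first argument to the
gup call.)

	/* pin instead of get: same arguments as the current call */
	num_pinned = pin_user_pages_fast(start, num_pages,
					 param.source != -1 ? FOLL_WRITE : 0,
					 pages);
	...
	/* at "exit:", instead of the per-page put_page() loop */
	if (num_pinned > 0)
		unpin_user_pages(pages, num_pinned);
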
> ---
> v2:
> Added review tag.
>
> v3:
> Address review comment on v2 from John.
> Added review tag.
>
> drivers/virt/fsl_hypervisor.c | 16 ++++++++--------
> 1 file changed, 8 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
> index 1b0b11b..efb4e7f 100644
> --- a/drivers/virt/fsl_hypervisor.c
> +++ b/drivers/virt/fsl_hypervisor.c
> @@ -157,7 +157,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
>
> unsigned int i;
> long ret = 0;
> - int num_pinned; /* return value from get_user_pages() */
> + int num_pinned = 0; /* return value from get_user_pages_fast() */
> phys_addr_t remote_paddr; /* The next address in the remote buffer */
> uint32_t count; /* The number of bytes left to copy */
>
> @@ -174,7 +174,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
> return -EINVAL;
>
> /*
> - * The array of pages returned by get_user_pages() covers only
> + * The array of pages returned by get_user_pages_fast() covers only
> * page-aligned memory. Since the user buffer is probably not
> * page-aligned, we need to handle the discrepancy.
> *
> @@ -224,7 +224,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
>
> /*
> * 'pages' is an array of struct page pointers that's initialized by
> - * get_user_pages().
> + * get_user_pages_fast().
> */
> pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
> if (!pages) {
> @@ -241,7 +241,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
> if (!sg_list_unaligned) {
> pr_debug("fsl-hv: could not allocate S/G list\n");
> ret = -ENOMEM;
> - goto exit;
> + goto free_pages;
> }
> sg_list = PTR_ALIGN(sg_list_unaligned, sizeof(struct fh_sg_list));
>
> @@ -250,7 +250,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
> num_pages, param.source != -1 ? FOLL_WRITE : 0, pages);
>
> if (num_pinned != num_pages) {
> - /* get_user_pages() failed */
> + /* get_user_pages_fast() failed */
> pr_debug("fsl-hv: could not lock source buffer\n");
> ret = (num_pinned < 0) ? num_pinned : -EFAULT;
> goto exit;
> @@ -293,12 +293,12 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
>
> exit:
> if (pages) {
> - for (i = 0; i < num_pages; i++)
> - if (pages[i])
> - put_page(pages[i]);
> + for (i = 0; i < num_pinned; i++)
> + put_page(pages[i]);
> }
>
> kfree(sg_list_unaligned);
> +free_pages:
> kfree(pages);
>
> if (!ret)
> --
> 1.9.1
>
On 8/31/20 3:07 PM, Souptick Joarder wrote:
> First, when memory allocation for sg_list_unaligned failed, there
> is a bug of calling put_pages() as we haven't pinned any pages.
"we should unpin"
...
>
> @@ -250,7 +250,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
> num_pages, param.source != -1 ? FOLL_WRITE : 0, pages);
>
> if (num_pinned != num_pages) {
> - /* get_user_pages() failed */
> + /* get_user_pages_fast() failed */
Let's please just delete that particular comment entirely. It's of
questionable accuracy (partial success is allowed with this API), and it
is echoing the code too closely to be worth the line that it consumes.
More importantly, though, we need to split up the cases of gup_fast
returning a negative value, and a zero or positive value. Either here,
or at "exit:", the negative return case should just skip any attempt to
do any put_page() calls at all. Because it's a maintenance hazard to
leave in a loop that depends on looping from zero, to -ERRNO, and *not*
doing any loops--especially in the signed/unsigned soupy mess around gup
calls.
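Something like this, as an untested sketch that reuses only the
existing local variables, would make the split explicit:

	if (num_pinned < 0) {
		/* gup_fast reported an error: nothing was pinned at all */
		ret = num_pinned;
		num_pinned = 0;	/* keep the "exit:" loop from running */
		goto exit;
	}
	if (num_pinned != num_pages) {
		/* partial pin: "exit:" releases only the first num_pinned pages */
		pr_debug("fsl-hv: could not lock source buffer\n");
		ret = -EFAULT;
		goto exit;
	}
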
> pr_debug("fsl-hv: could not lock source buffer\n");
> ret = (num_pinned < 0) ? num_pinned : -EFAULT;
> goto exit;
> @@ -293,12 +293,12 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
>
> exit:
> if (pages) {
> - for (i = 0; i < num_pages; i++)
> - if (pages[i])
> - put_page(pages[i]);
> + for (i = 0; i < num_pinned; i++)
> + put_page(pages[i]);
Looks correct. I sometimes wonder why more callers don't use
release_pages() in situations like this, but that's beyond the scope of
your work here.
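For illustration only (untested), the cleanup at "exit:" would then
collapse to something like:

	/* batch-release instead of the per-page put_page() loop */
	if (num_pinned > 0)
		release_pages(pages, num_pinned);
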
thanks,
--
John Hubbard
NVIDIA
On 8/31/20 3:10 PM, Souptick Joarder wrote:
> Hi John,
>
> On Tue, Sep 1, 2020 at 3:38 AM Souptick Joarder <[email protected]> wrote:
>>
>> First, when memory allocation for sg_list_unaligned failed, there
>> is a bug of calling put_pages() as we haven't pinned any pages.
>>
>> Second, if get_user_pages_fast() failed we should unpinned num_pinned
>> pages instead of checking till num_pages.
>>
>> This will address both.
>>
>> As part of these changes, minor update in documentation.
>>
>> Fixes: 6db7199407ca ("drivers/virt: introduce Freescale hypervisor
>> management driver")
>> Signed-off-by: Souptick Joarder <[email protected]>
>> Reviewed-by: Dan Carpenter <[email protected]>
>> Reviewed-by: John Hubbard <[email protected]>
>
> Few questions ->
>
> First, there are minor updates from v2 -> v3 other than addressing your
> review comments in v2. Would you like to review it again?
I reviewed it again.
>
> Second, I think, as per Documentation/core-api/pin_user_pages.rst,
> this is case 5.
> Shall I make the conversion as part of this series ?
Not entirely sure what you mean, but if you just want to add words to the
effect that "this is case 5" to the commit description I certainly don't
see why not. (It would be ideal for a domain expert to weigh in on the
cases here, too.)
thanks,
--
John Hubbard
NVIDIA
Hi John,
On Tue, Sep 1, 2020 at 4:28 AM John Hubbard <[email protected]> wrote:
>
> On 8/31/20 3:07 PM, Souptick Joarder wrote:
> > First, when memory allocation for sg_list_unaligned failed, there
> > is a bug of calling put_pages() as we haven't pinned any pages.
>
> "we should unpin"
Will it be "we shouldn't unpin"? Can you please clarify this?
>
> ...
> >
> > @@ -250,7 +250,7 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
> > num_pages, param.source != -1 ? FOLL_WRITE : 0, pages);
> >
> > if (num_pinned != num_pages) {
> > - /* get_user_pages() failed */
> > + /* get_user_pages_fast() failed */
>
> Let's please just delete that particular comment entirely. It's of
> questionable accuracy (partial success is allowed with this API), and it
> is echoing the code too closely to be worth the line that it consumes.
>
> More importantly, though, we need to split up the cases of gup_fast
> returning a negative value, and a zero or positive value. Either here,
> or at "exit:", the negative return case should just skip any attempt to
> do any put_page() calls at all. Because it's a maintenance hazard to
> leave in a loop that depends on looping from zero, to -ERRNO, and *not*
> doing any loops--especially in the signed/unsigned soupy mess around gup
> calls.
>
>
> > pr_debug("fsl-hv: could not lock source buffer\n");
> > ret = (num_pinned < 0) ? num_pinned : -EFAULT;
> > goto exit;
> > @@ -293,12 +293,12 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
> >
> > exit:
> > if (pages) {
> > - for (i = 0; i < num_pages; i++)
> > - if (pages[i])
> > - put_page(pages[i]);
> > + for (i = 0; i < num_pinned; i++)
> > + put_page(pages[i]);
>
> Looks correct. I sometimes wonder why more callers don't use
> release_pages() in situations like this, but that's beyond the scope of
> your work here.
>
>
> thanks,
> --
> John Hubbard
> NVIDIA
On 9/1/20 1:14 PM, Souptick Joarder wrote:
> Hi John,
>
> On Tue, Sep 1, 2020 at 4:28 AM John Hubbard <[email protected]> wrote:
>>
>> On 8/31/20 3:07 PM, Souptick Joarder wrote:
>>> First, when memory allocation for sg_list_unaligned failed, there
>>> is a bug of calling put_pages() as we haven't pinned any pages.
>>
>> "we should unpin"
>
> will it be "we shouldn't unpin" ? can you please clarify this ?
Whoops, I put my comment next to the wrong part of your email. Sorry
about that! It was supposed to be a correction to this sentence:
"
Second, if get_user_pages_fast() failed we should unpinned num_pinned
pages instead of checking till num_pages.
"
...which I wanted to change to:
Second, if get_user_pages_fast() failed we should unpin num_pinned pages
instead of checking till num_pages.
thanks,
--
John Hubbard
NVIDIA