From: Longpeng <[email protected]>
There can be cases when there are more memory regions that need to be
set for an enclave than the maximum supported number of memory regions
per enclave. One example can be when the memory regions are backed by 2
MiB hugepages (the minimum supported hugepage size).
Let's merge the adjacent regions if they are physical contiguous. This
way the final number of memory regions is less than before merging and
could potentially avoid reaching maximum.
Signed-off-by: Longpeng <[email protected]>
---
Changes v2 -> v3:
- update the commit title and commit message. [Andra]
- use 'struct range' to instead of 'struct phys_mem_region'. [Andra, Greg KH]
- add comments before the function definition. [Andra]
- rename several variables, parameters and function. [Andra]
---
drivers/virt/nitro_enclaves/ne_misc_dev.c | 79 +++++++++++++++++++++----------
1 file changed, 55 insertions(+), 24 deletions(-)
diff --git a/drivers/virt/nitro_enclaves/ne_misc_dev.c b/drivers/virt/nitro_enclaves/ne_misc_dev.c
index e21e1e8..eea53e9 100644
--- a/drivers/virt/nitro_enclaves/ne_misc_dev.c
+++ b/drivers/virt/nitro_enclaves/ne_misc_dev.c
@@ -26,6 +26,7 @@
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/types.h>
+#include <linux/range.h>
#include <uapi/linux/vm_sockets.h>
#include "ne_misc_dev.h"
@@ -126,6 +127,16 @@ struct ne_cpu_pool {
static struct ne_cpu_pool ne_cpu_pool;
/**
+ * struct phys_contig_mem_regions - Physical contiguous memory regions
+ * @num: The number of regions that currently has.
+ * @region: The array of physical memory regions.
+ */
+struct phys_contig_mem_regions {
+ unsigned long num;
+ struct range region[0];
+};
+
+/**
* ne_check_enclaves_created() - Verify if at least one enclave has been created.
* @void: No parameters provided.
*
@@ -825,6 +836,33 @@ static int ne_sanity_check_user_mem_region_page(struct ne_enclave *ne_enclave,
}
/**
+ * ne_merge_phys_contig_memory_regions() - Add a memory region and merge the adjacent
+ * regions if they are physical contiguous.
+ * @regions : Private data associated with the physical contiguous memory regions.
+ * @page_paddr: Physical start address of the region to be added.
+ * @page_size : Length of the region to be added.
+ *
+ * Return:
+ * * No return value.
+ */
+static void
+ne_merge_phys_contig_memory_regions(struct phys_contig_mem_regions *regions,
+ u64 page_paddr, u64 page_size)
+{
+ /* Physical contiguous, just merge */
+ if (regions->num &&
+ (regions->region[regions->num - 1].end + 1) == page_paddr) {
+ regions->region[regions->num - 1].end += page_size;
+
+ return;
+ }
+
+ regions->region[regions->num].start = page_paddr;
+ regions->region[regions->num].end = page_paddr + page_size - 1;
+ regions->num++;
+}
+
+/**
* ne_set_user_memory_region_ioctl() - Add user space memory region to the slot
* associated with the current enclave.
* @ne_enclave : Private data associated with the current enclave.
@@ -843,9 +881,9 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave,
unsigned long max_nr_pages = 0;
unsigned long memory_size = 0;
struct ne_mem_region *ne_mem_region = NULL;
- unsigned long nr_phys_contig_mem_regions = 0;
struct pci_dev *pdev = ne_devs.ne_pci_dev->pdev;
- struct page **phys_contig_mem_regions = NULL;
+ struct phys_contig_mem_regions *phys_contig_mem_regions = NULL;
+ size_t size_to_alloc = 0;
int rc = -EINVAL;
rc = ne_sanity_check_user_mem_region(ne_enclave, mem_region);
@@ -866,8 +904,9 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave,
goto free_mem_region;
}
- phys_contig_mem_regions = kcalloc(max_nr_pages, sizeof(*phys_contig_mem_regions),
- GFP_KERNEL);
+ size_to_alloc = sizeof(*phys_contig_mem_regions) +
+ max_nr_pages * sizeof(struct range);
+ phys_contig_mem_regions = kzalloc(size_to_alloc, GFP_KERNEL);
if (!phys_contig_mem_regions) {
rc = -ENOMEM;
@@ -901,26 +940,16 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave,
if (rc < 0)
goto put_pages;
- /*
- * TODO: Update once handled non-contiguous memory regions
- * received from user space or contiguous physical memory regions
- * larger than 2 MiB e.g. 8 MiB.
- */
- phys_contig_mem_regions[i] = ne_mem_region->pages[i];
+ ne_merge_phys_contig_memory_regions(phys_contig_mem_regions,
+ page_to_phys(ne_mem_region->pages[i]),
+ page_size(ne_mem_region->pages[i]));
memory_size += page_size(ne_mem_region->pages[i]);
ne_mem_region->nr_pages++;
} while (memory_size < mem_region.memory_size);
- /*
- * TODO: Update once handled non-contiguous memory regions received
- * from user space or contiguous physical memory regions larger than
- * 2 MiB e.g. 8 MiB.
- */
- nr_phys_contig_mem_regions = ne_mem_region->nr_pages;
-
- if ((ne_enclave->nr_mem_regions + nr_phys_contig_mem_regions) >
+ if ((ne_enclave->nr_mem_regions + phys_contig_mem_regions->num) >
ne_enclave->max_mem_regions) {
dev_err_ratelimited(ne_misc_dev.this_device,
"Reached max memory regions %lld\n",
@@ -931,9 +960,10 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave,
goto put_pages;
}
- for (i = 0; i < nr_phys_contig_mem_regions; i++) {
- u64 phys_region_addr = page_to_phys(phys_contig_mem_regions[i]);
- u64 phys_region_size = page_size(phys_contig_mem_regions[i]);
+ for (i = 0; i < phys_contig_mem_regions->num; i++) {
+ struct range *range = phys_contig_mem_regions->region + i;
+ u64 phys_region_addr = range->start;
+ u64 phys_region_size = range_len(range);
if (phys_region_size & (NE_MIN_MEM_REGION_SIZE - 1)) {
dev_err_ratelimited(ne_misc_dev.this_device,
@@ -959,13 +989,14 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave,
list_add(&ne_mem_region->mem_region_list_entry, &ne_enclave->mem_regions_list);
- for (i = 0; i < nr_phys_contig_mem_regions; i++) {
+ for (i = 0; i < phys_contig_mem_regions->num; i++) {
struct ne_pci_dev_cmd_reply cmd_reply = {};
struct slot_add_mem_req slot_add_mem_req = {};
+ struct range *range = phys_contig_mem_regions->region + i;
slot_add_mem_req.slot_uid = ne_enclave->slot_uid;
- slot_add_mem_req.paddr = page_to_phys(phys_contig_mem_regions[i]);
- slot_add_mem_req.size = page_size(phys_contig_mem_regions[i]);
+ slot_add_mem_req.paddr = range->start;
+ slot_add_mem_req.size = range_len(range);
rc = ne_do_request(pdev, SLOT_ADD_MEM,
&slot_add_mem_req, sizeof(slot_add_mem_req),
--
1.8.3.1
On 09/10/2021 04:32, Longpeng(Mike) wrote:
>
> From: Longpeng <[email protected]>
>
> There can be cases when there are more memory regions that need to be
> set for an enclave than the maximum supported number of memory regions
> per enclave. One example can be when the memory regions are backed by 2
> MiB hugepages (the minimum supported hugepage size).
>
> Let's merge the adjacent regions if they are physical contiguous. This
physical contiguous => physically contiguous
> way the final number of memory regions is less than before merging and
> could potentially avoid reaching maximum.
>
> Signed-off-by: Longpeng <[email protected]>
> ---
> Changes v2 -> v3:
> - update the commit title and commit message. [Andra]
> - use 'struct range' to instead of 'struct phys_mem_region'. [Andra, Greg KH]
> - add comments before the function definition. [Andra]
> - rename several variables, parameters and function. [Andra]
> ---
> drivers/virt/nitro_enclaves/ne_misc_dev.c | 79 +++++++++++++++++++++----------
> 1 file changed, 55 insertions(+), 24 deletions(-)
>
> diff --git a/drivers/virt/nitro_enclaves/ne_misc_dev.c b/drivers/virt/nitro_enclaves/ne_misc_dev.c
> index e21e1e8..eea53e9 100644
> --- a/drivers/virt/nitro_enclaves/ne_misc_dev.c
> +++ b/drivers/virt/nitro_enclaves/ne_misc_dev.c
> @@ -26,6 +26,7 @@
> #include <linux/poll.h>
> #include <linux/slab.h>
> #include <linux/types.h>
> +#include <linux/range.h>
Please keep the alphabetical order and move this before "#include
<linux/slab.h>."
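i.e. the include block would then read something like this (sketch):

 #include <linux/poll.h>
+#include <linux/range.h>
 #include <linux/slab.h>
 #include <linux/types.h>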
> #include <uapi/linux/vm_sockets.h>
>
> #include "ne_misc_dev.h"
> @@ -126,6 +127,16 @@ struct ne_cpu_pool {
> static struct ne_cpu_pool ne_cpu_pool;
>
> /**
> + * struct phys_contig_mem_regions - Physical contiguous memory regions
Physical contiguous memory regions => Contiguous physical memory regions
> + * @num: The number of regions that currently has.
> + * @region: The array of physical memory regions.
> + */
> +struct phys_contig_mem_regions {
> + unsigned long num;
> + struct range region[0];
Should use "struct range *regions" instead, to keep a similar style with
regard to arrays throughout the code.
Please align the field names (e.g. [1]) so that it's easier to
distinguish between the field types and names.
Let's also prefix the data structure name with "ne", e.g.
ne_phys_contig_mem_regions, so that it is similar to other data structures
defined in the NE kernel driver codebase.
[1]
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/virt/nitro_enclaves/ne_misc_dev.c#n118
> +};
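For reference, a minimal sketch of the struct with the suggestions above
folded in (the "ne_" prefix, the pointer field and the aligned names are the
proposal here, to be confirmed in v4):

/**
 * struct ne_phys_contig_mem_regions - Contiguous physical memory regions.
 * @num:	The number of regions added so far.
 * @regions:	The array of contiguous physical memory regions.
 */
struct ne_phys_contig_mem_regions {
	unsigned long	num;
	struct range	*regions;
};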
> +
> +/**
> * ne_check_enclaves_created() - Verify if at least one enclave has been created.
> * @void: No parameters provided.
> *
> @@ -825,6 +836,33 @@ static int ne_sanity_check_user_mem_region_page(struct ne_enclave *ne_enclave,
> }
>
> /**
> + * ne_merge_phys_contig_memory_regions() - Add a memory region and merge the adjacent
> + * regions if they are physical contiguous.
physical contiguous => physically contiguous
> + * @regions : Private data associated with the physical contiguous memory regions.
physical contiguous memory regions => contiguous physical memory regions
@regions => @phys_contig_regions
> + * @page_paddr: Physical start address of the region to be added.
> + * @page_size : Length of the region to be added.
> + *
> + * Return:
> + * * No return value.
Please add "Context: Process context. This function is called with the
ne_enclave mutex held." before "Return", similar to other defined functions.
Can remove these lines, as for now there is no return value:
* Return:
* * No return value.
And then can add this in the second patch in the series:
* Context: Process context. This function is called with the ne_enclave
mutex held. (note: already available in the first patch)
* Return: (note: added in the second patch)
* * 0 ...
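So, for this first patch, the kernel-doc block could end up roughly like this
(a sketch, with the wording fixes from above applied; the Return section only
shows up in the second patch):

/**
 * ne_merge_phys_contig_memory_regions() - Add a memory region and merge the
 *					    adjacent regions if they are
 *					    physically contiguous.
 * @phys_contig_regions :	Private data associated with the contiguous
 *				physical memory regions.
 * @page_paddr :		Physical start address of the region to be added.
 * @page_size :			Length of the region to be added.
 *
 * Context: Process context. This function is called with the ne_enclave mutex held.
 */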
> + */
> +static void
> +ne_merge_phys_contig_memory_regions(struct phys_contig_mem_regions *regions,
> + u64 page_paddr, u64 page_size)
phys_contig_mem_regions => ne_phys_contig_mem_regions
*regions => *phys_contig_regions
Can define something like this:
unsigned long num = phys_contig_regions->num;
and then use it inside the function, except in the last line, which
increments num.
> +{
> + /* Physical contiguous, just merge */
Physical contiguous => Physically contiguous
> + if (regions->num &&
> + (regions->region[regions->num - 1].end + 1) == page_paddr) {
e.g. phys_contig_regions->regions[num - 1]
> + regions->region[regions->num - 1].end += page_size;
> +
> + return;
> + }
> +
> + regions->region[regions->num].start = page_paddr;
> + regions->region[regions->num].end = page_paddr + page_size - 1;
> + regions->num++;
> +}
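Putting the renames and the local "num" suggestion together, the helper could
look roughly like this (illustrative sketch only, not the final v4 code):

static void
ne_merge_phys_contig_memory_regions(struct ne_phys_contig_mem_regions *phys_contig_regions,
				    u64 page_paddr, u64 page_size)
{
	unsigned long num = phys_contig_regions->num;

	/* Physically contiguous, just merge */
	if (num && (phys_contig_regions->regions[num - 1].end + 1) == page_paddr) {
		phys_contig_regions->regions[num - 1].end += page_size;

		return;
	}

	phys_contig_regions->regions[num].start = page_paddr;
	phys_contig_regions->regions[num].end = page_paddr + page_size - 1;
	phys_contig_regions->num++;
}

For example, two adjacent 2 MiB hugepages at 0x200000 and 0x400000 would
collapse into a single range [0x200000, 0x5fffff] instead of two entries.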
> +
> +/**
> * ne_set_user_memory_region_ioctl() - Add user space memory region to the slot
> * associated with the current enclave.
> * @ne_enclave : Private data associated with the current enclave.
> @@ -843,9 +881,9 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave,
> unsigned long max_nr_pages = 0;
> unsigned long memory_size = 0;
> struct ne_mem_region *ne_mem_region = NULL;
> - unsigned long nr_phys_contig_mem_regions = 0;
> struct pci_dev *pdev = ne_devs.ne_pci_dev->pdev;
> - struct page **phys_contig_mem_regions = NULL;
> + struct phys_contig_mem_regions *phys_contig_mem_regions = NULL;
struct phys_contig_mem_regions *phys_contig_mem_regions = NULL; =>
struct ne_phys_contig_mem_regions phys_contig_mem_regions = {};
> + size_t size_to_alloc = 0;
> int rc = -EINVAL;
>
> rc = ne_sanity_check_user_mem_region(ne_enclave, mem_region);
> @@ -866,8 +904,9 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave,
> goto free_mem_region;
> }
>
> - phys_contig_mem_regions = kcalloc(max_nr_pages, sizeof(*phys_contig_mem_regions),
> - GFP_KERNEL);
> + size_to_alloc = sizeof(*phys_contig_mem_regions) +
> + max_nr_pages * sizeof(struct range);
> + phys_contig_mem_regions = kzalloc(size_to_alloc, GFP_KERNEL);
Then can alloc memory for the regions field.
phys_contig_mem_regions.regions = kcalloc(max_nr_pages,
sizeof(*phys_contig_mem_regions.regions), GFP_KERNEL);
The "size_of_alloc" will not be needed in this case.
> if (!phys_contig_mem_regions) {
> rc = -ENOMEM;
>
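With the struct kept on the stack, the NULL check then moves to the regions
field as well, roughly (sketch; the error path stays whatever the current
check uses):

	phys_contig_mem_regions.regions = kcalloc(max_nr_pages,
						  sizeof(*phys_contig_mem_regions.regions),
						  GFP_KERNEL);
	if (!phys_contig_mem_regions.regions) {
		rc = -ENOMEM;

		/* goto the same error label the current NULL check uses */
	}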
> @@ -901,26 +940,16 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave,
> if (rc < 0)
> goto put_pages;
>
> - /*
> - * TODO: Update once handled non-contiguous memory regions
> - * received from user space or contiguous physical memory regions
> - * larger than 2 MiB e.g. 8 MiB.
> - */
> - phys_contig_mem_regions[i] = ne_mem_region->pages[i];
> + ne_merge_phys_contig_memory_regions(phys_contig_mem_regions,
And can pass it here like this: &phys_contig_mem_regions.
> + page_to_phys(ne_mem_region->pages[i]),
> + page_size(ne_mem_region->pages[i]));
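Fully assembled, the call would then read (sketch):

		ne_merge_phys_contig_memory_regions(&phys_contig_mem_regions,
						    page_to_phys(ne_mem_region->pages[i]),
						    page_size(ne_mem_region->pages[i]));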
>
> memory_size += page_size(ne_mem_region->pages[i]);
>
> ne_mem_region->nr_pages++;
> } while (memory_size < mem_region.memory_size);
>
> - /*
> - * TODO: Update once handled non-contiguous memory regions received
> - * from user space or contiguous physical memory regions larger than
> - * 2 MiB e.g. 8 MiB.
> - */
> - nr_phys_contig_mem_regions = ne_mem_region->nr_pages;
> -
> - if ((ne_enclave->nr_mem_regions + nr_phys_contig_mem_regions) >
> + if ((ne_enclave->nr_mem_regions + phys_contig_mem_regions->num) >
> ne_enclave->max_mem_regions) {
> dev_err_ratelimited(ne_misc_dev.this_device,
> "Reached max memory regions %lld\n",
> @@ -931,9 +960,10 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave,
> goto put_pages;
> }
>
> - for (i = 0; i < nr_phys_contig_mem_regions; i++) {
> - u64 phys_region_addr = page_to_phys(phys_contig_mem_regions[i]);
> - u64 phys_region_size = page_size(phys_contig_mem_regions[i]);
> + for (i = 0; i < phys_contig_mem_regions->num; i++) {
> + struct range *range = phys_contig_mem_regions->region + i;
> + u64 phys_region_addr = range->start;
> + u64 phys_region_size = range_len(range);
With the updated data structure field, could be:
u64 phys_region_addr = phys_contig_mem_regions.regions[i].start;
u64 phys_region_size = range_len(&phys_contig_mem_regions.regions[i]);
>
> if (phys_region_size & (NE_MIN_MEM_REGION_SIZE - 1)) {
> dev_err_ratelimited(ne_misc_dev.this_device,
> @@ -959,13 +989,14 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave,
>
> list_add(&ne_mem_region->mem_region_list_entry, &ne_enclave->mem_regions_list);
>
> - for (i = 0; i < nr_phys_contig_mem_regions; i++) {
> + for (i = 0; i < phys_contig_mem_regions->num; i++) {
> struct ne_pci_dev_cmd_reply cmd_reply = {};
> struct slot_add_mem_req slot_add_mem_req = {};
> + struct range *range = phys_contig_mem_regions->region + i;
>
> slot_add_mem_req.slot_uid = ne_enclave->slot_uid;
> - slot_add_mem_req.paddr = page_to_phys(phys_contig_mem_regions[i]);
> - slot_add_mem_req.size = page_size(phys_contig_mem_regions[i]);
> + slot_add_mem_req.paddr = range->start;
> + slot_add_mem_req.size = range_len(range);
Similarly here:
slot_add_mem_req.paddr = phys_contig_mem_regions.regions[i].start;
slot_add_mem_req.size = range_len(&phys_contig_mem_regions.regions[i]);
Then the kfree paths would free "phys_contig_mem_regions.regions"
instead of "phys_contig_mem_regions".
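e.g. in the exit paths:

	kfree(phys_contig_mem_regions.regions);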
Thanks,
Andra
>
> rc = ne_do_request(pdev, SLOT_ADD_MEM,
> &slot_add_mem_req, sizeof(slot_add_mem_req),
> --
> 1.8.3.1
>
Hi Andra,
Sorry for the long delay.
I've read all your suggestions for each patch and have no objections. I'll
send v4 later; please review when you are free, thanks.
> -----Original Message-----
> From: Paraschiv, Andra-Irina [mailto:[email protected]]
> Sent: Friday, October 15, 2021 9:34 PM
> To: Longpeng (Mike, Cloud Infrastructure Service Product Dept.)
> <[email protected]>
> Cc: Gonglei (Arei) <[email protected]>; [email protected];
> [email protected]; [email protected]; [email protected];
> [email protected]; [email protected]; [email protected];
> [email protected]; [email protected]; [email protected]
> Subject: Re: [PATCH v3 1/4] nitro_enclaves: Merge contiguous physical memory
> regions
On 03/11/2021 15:54, Longpeng (Mike, Cloud Infrastructure Service
Product Dept.) wrote:
>
> Hi Andra,
>
> Sorry for the long delay.
>
> I've read all your suggestions for each patch and have no objections. I'll
> send v4 later; please review when you are free, thanks.
Thank you, no problem. I'll review the patch series by the end of this week.
Andra