Short-circuit the whole function on 32-bit.

Replace the loop that determines the number of 1GB pages with
arithmetic.

Fix one minor bug: if the end of the region is aligned on a 1GB
boundary, the current code will not use the last available 1GB page,
due to an off-by-one error.
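
For example, given a region [1GB, 3GB), two 1GB pages fit, but the old
loop counts only one because of its strict comparison: after the first
iteration 'size' is exactly PUD_SIZE, so 'size > PUD_SIZE' is false and
the loop exits with i == 1. The arithmetic version computes

	pud_end - pud_start = ALIGN_DOWN(3GB, PUD_SIZE) - ALIGN(1GB, PUD_SIZE)
			    = 2GB,

i.e. two 1GB pages, as expected.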
Signed-off-by: Arvind Sankar <[email protected]>
---
arch/x86/boot/compressed/kaslr.c | 48 ++++++++++++++------------------
1 file changed, 21 insertions(+), 27 deletions(-)
diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
index 850e131121f7..29ec964b1330 100644
--- a/arch/x86/boot/compressed/kaslr.c
+++ b/arch/x86/boot/compressed/kaslr.c
@@ -546,49 +546,43 @@ static void store_slot_info(struct mem_vector *region, unsigned long image_size)
static void
process_gb_huge_pages(struct mem_vector *region, unsigned long image_size)
{
- unsigned long addr, size = 0;
+ unsigned long pud_start, pud_end, gb_huge_pages;
struct mem_vector tmp;
- int i = 0;

- if (!max_gb_huge_pages) {
+ if (IS_ENABLED(CONFIG_X86_32) || !max_gb_huge_pages) {
store_slot_info(region, image_size);
return;
}

- addr = ALIGN(region->start, PUD_SIZE);
- /* Did we raise the address above the passed in memory entry? */
- if (addr < region->start + region->size)
- size = region->size - (addr - region->start);
-
- /* Check how many 1GB huge pages can be filtered out: */
- while (size > PUD_SIZE && max_gb_huge_pages) {
- size -= PUD_SIZE;
- max_gb_huge_pages--;
- i++;
- }
+ /* Are there any 1GB pages in the region? */
+ pud_start = ALIGN(region->start, PUD_SIZE);
+ pud_end = ALIGN_DOWN(region->start + region->size, PUD_SIZE);

/* No good 1GB huge pages found: */
- if (!i) {
+ if (pud_start >= pud_end) {
store_slot_info(region, image_size);
return;
}

- /*
- * Skip those 'i'*1GB good huge pages, and continue checking and
- * processing the remaining head or tail part of the passed region
- * if available.
- */
-
- if (addr >= region->start + image_size) {
+ /* Check if the head part of the region is usable. */
+ if (pud_start - region->start >= image_size) {
tmp.start = region->start;
- tmp.size = addr - region->start;
+ tmp.size = pud_start - region->start;
store_slot_info(&tmp, image_size);
}

- size = region->size - (addr - region->start) - i * PUD_SIZE;
- if (size >= image_size) {
- tmp.start = addr + i * PUD_SIZE;
- tmp.size = size;
+ /* Skip the good 1GB pages. */
+ gb_huge_pages = (pud_end - pud_start) >> PUD_SHIFT;
+ if (gb_huge_pages > max_gb_huge_pages) {
+ pud_end = pud_start + (max_gb_huge_pages << PUD_SHIFT);
+ max_gb_huge_pages = 0;
+ } else
+ max_gb_huge_pages -= gb_huge_pages;
+
+ /* Check if the tail part of the region is usable. */
+ if (region->start + region->size - pud_end >= image_size) {
+ tmp.start = pud_end;
+ tmp.size = region->start + region->size - pud_end;
store_slot_info(&tmp, image_size);
}
}
--
2.26.2
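
[ For illustration, a minimal standalone sketch of the new arithmetic;
  PUD_SHIFT, PUD_SIZE, ALIGN and ALIGN_DOWN are redefined here for a
  userspace build (in the kernel they come from existing headers), and
  this is not part of the patch:

	#include <stdio.h>

	#define PUD_SHIFT	30			/* 1GB pages */
	#define PUD_SIZE	(1UL << PUD_SHIFT)
	#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
	#define ALIGN(x, a)		ALIGN_DOWN((x) + (a) - 1, (a))

	int main(void)
	{
		/* Example region: starts at 512MB, ends exactly at 3GB. */
		unsigned long start = PUD_SIZE / 2;
		unsigned long size  = 3 * PUD_SIZE - start;

		unsigned long pud_start = ALIGN(start, PUD_SIZE);
		unsigned long pud_end   = ALIGN_DOWN(start + size, PUD_SIZE);
		unsigned long pages     = (pud_end - pud_start) >> PUD_SHIFT;

		/*
		 * Head [start, pud_start) and tail [pud_end, start + size)
		 * are what would still be handed to store_slot_info().
		 */
		printf("head %lu MB, %lu x 1GB pages, tail %lu MB\n",
		       (pud_start - start) >> 20, pages,
		       (start + size - pud_end) >> 20);
		return 0;
	}

  This prints "head 512 MB, 2 x 1GB pages, tail 0 MB": the end-aligned
  region keeps both 1GB pages, which the old loop would have missed. ]
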
* Arvind Sankar <[email protected]> wrote:

> Short-circuit the whole function on 32-bit.
>
> Replace the loop that determines the number of 1GB pages with
> arithmetic.
>
> Fix one minor bug: if the end of the region is aligned on a 1GB
> boundary, the current code will not use the last available 1GB page,
> due to an off-by-one error.

Please split this up into two patches: first the minor bugfix, and
then the cleanup, which will then be an invariant patch, in principle.

Thanks,

	Ingo