Get rid of unnecessary temporary variables and redundant tests in
__process_mem_region.
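
For example, an entry that lies entirely below the minimum no longer
needs its own early-return test. A sketch of that degenerate case,
pieced together from the new code below (clamp_val() and ALIGN() are
the existing kernel macros):

	region.start = clamp_val(entry->start, minimum, mem_limit);
	region_end = clamp_val(entry->start + entry->size, minimum, mem_limit);
	/* Entry entirely below minimum: region.start == region_end. */
	region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN);
	/* Either start was raised past region_end, or the size is 0;
	 * both paths return before any slots are stored. */
	if (region.start > region_end)
		return;
	region.size = region_end - region.start;
	if (region.size < image_size)
		return;
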
Fix one minor bug: in case of an overlap, the beginning of the region
should be used even if it is exactly image_size, not just strictly
larger.
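
Concretely, the usable gap in front of the overlap is
overlap.start - region.start. The old test only stored the beginning
of the region when that gap was strictly bigger than image_size; the
new test also accepts a gap of exactly image_size:

	/* Before: a gap of exactly image_size was skipped. */
	if (overlap.start > region.start + image_size)

	/* After: a gap of exactly image_size is also used. */
	if (overlap.start - region.start >= image_size)
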
Change the type of the minimum/image_size arguments in
process_mem_region to unsigned long. These can never actually be
above 4G (even on x86_64), and they are unsigned long in every other
function except this one.

Signed-off-by: Arvind Sankar <[email protected]>
---
arch/x86/boot/compressed/kaslr.c | 50 ++++++++------------------------
1 file changed, 12 insertions(+), 38 deletions(-)
diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
index 758d78433f94..850e131121f7 100644
--- a/arch/x86/boot/compressed/kaslr.c
+++ b/arch/x86/boot/compressed/kaslr.c
@@ -622,42 +622,24 @@ static void __process_mem_region(struct mem_vector *entry,
unsigned long image_size)
{
struct mem_vector region, overlap;
- unsigned long start_orig, end;
- struct mem_vector cur_entry;
+ unsigned long region_end;
- /* Ignore entries entirely below our minimum. */
- if (entry->start + entry->size < minimum)
- return;
-
- /* Ignore entries above memory limit */
- end = min(entry->size + entry->start, mem_limit);
- if (entry->start >= end)
- return;
- cur_entry.start = entry->start;
- cur_entry.size = end - entry->start;
-
- region.start = cur_entry.start;
- region.size = cur_entry.size;
+ /* Clamp region to minimum and memory limit. */
+ region.start = clamp_val(entry->start, minimum, mem_limit);
+ region_end = clamp_val(entry->start + entry->size, minimum, mem_limit);
/* Give up if slot area array is full. */
while (slot_area_index < MAX_SLOT_AREA) {
- start_orig = region.start;
-
- /* Potentially raise address to minimum location. */
- if (region.start < minimum)
- region.start = minimum;
-
/* Potentially raise address to meet alignment needs. */
region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN);
/* Did we raise the address above the passed in memory entry? */
- if (region.start > cur_entry.start + cur_entry.size)
+ if (region.start > region_end)
return;
- /* Reduce size by any delta from the original address. */
- region.size -= region.start - start_orig;
+ region.size = region_end - region.start;
- /* Return if region can't contain decompressed kernel */
+ /* Return if region can't contain decompressed kernel. */
if (region.size < image_size)
return;
@@ -668,27 +650,19 @@ static void __process_mem_region(struct mem_vector *entry,
}
/* Store beginning of region if holds at least image_size. */
- if (overlap.start > region.start + image_size) {
- struct mem_vector beginning;
-
- beginning.start = region.start;
- beginning.size = overlap.start - region.start;
- process_gb_huge_pages(&beginning, image_size);
+ if (overlap.start - region.start >= image_size) {
+ region.size = overlap.start - region.start;
+ process_gb_huge_pages(&region, image_size);
}
- /* Return if overlap extends to or past end of region. */
- if (overlap.start + overlap.size >= region.start + region.size)
- return;
-
/* Clip off the overlapping region and start over. */
- region.size -= overlap.start - region.start + overlap.size;
region.start = overlap.start + overlap.size;
}
}
static bool process_mem_region(struct mem_vector *region,
- unsigned long long minimum,
- unsigned long long image_size)
+ unsigned long minimum,
+ unsigned long image_size)
{
int i;
/*
--
2.26.2

* Arvind Sankar <[email protected]> wrote:
> Get rid of unnecessary temporary variables and redundant tests in
> __process_mem_region.
>
> Fix one minor bug: in case of an overlap, the beginning of the region
> should be used even if it is exactly image_size, not just strictly
> larger.
>
> Change the type of the minimum/image_size arguments in
> process_mem_region to unsigned long. These can never actually be
> above 4G (even on x86_64), and they are unsigned long in every other
> function except this one.

Please split this up into 3 separate patches. The KASLR code is
difficult to debug, so we want to approach changes as conservatively
as possible. This is also reinforced by the fact that this patch was
buggy in v1 already.

Thanks,
Ingo