Subject: [PATCH 0/7] x86: pat.c: reserve_memtype strip down

This patch set will (first of all) clean up reserve_memtype a little
bit to make it easier to read and maintain.

(... and hopefully it doesn't break anything.;-)

Overall diffstat is

arch/x86/mm/ioremap.c | 2
arch/x86/mm/pat.c | 263 ++++++++++++++++---------------------------------
include/asm-x86/e820.h | 1
3 files changed, 93 insertions(+), 173 deletions(-)

Instead of sending one large patch I broke it down into several pieces
to make it easier to review.

Patches are against today's tip/x86/pat (v2.6.26-rc6-107-gdd0c7c4).
Please apply.


Thanks,

Andreas


Subject: [PATCH 1/7] x86: introduce macro to check whether an address range is in the ISA range


Signed-off-by: Andreas Herrmann <[email protected]>
---
arch/x86/mm/ioremap.c | 2 +-
arch/x86/mm/pat.c | 5 ++---
include/asm-x86/e820.h | 1 +
3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 7452eb3..6d9960d 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -142,7 +142,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
/*
* Don't remap the low PCI/ISA area, it's always mapped..
*/
- if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
+ if (is_ISA_range(phys_addr, last_addr))
return (__force void __iomem *)phys_to_virt(phys_addr);

/*
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 227df3c..5bbc22e 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -215,7 +215,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
}

/* Low ISA region is always mapped WB in page table. No need to track */
- if (start >= ISA_START_ADDRESS && (end - 1) <= ISA_END_ADDRESS) {
+ if (is_ISA_range(start, end - 1)) {
if (ret_type)
*ret_type = _PAGE_CACHE_WB;

@@ -415,9 +415,8 @@ int free_memtype(u64 start, u64 end)
}

/* Low ISA region is always mapped WB. No need to track */
- if (start >= ISA_START_ADDRESS && end <= ISA_END_ADDRESS) {
+ if (is_ISA_range(start, end - 1))
return 0;
- }

spin_lock(&memtype_lock);
list_for_each_entry(ml, &memtype_list, nd) {
diff --git a/include/asm-x86/e820.h b/include/asm-x86/e820.h
index 7004251..5103d0b 100644
--- a/include/asm-x86/e820.h
+++ b/include/asm-x86/e820.h
@@ -24,6 +24,7 @@ struct e820map {

#define ISA_START_ADDRESS 0xa0000
#define ISA_END_ADDRESS 0x100000
+#define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)

#define BIOS_BEGIN 0x000a0000
#define BIOS_END 0x00100000
--
1.5.5.4
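
For reference: the new macro takes an inclusive last byte as its second
argument, which is why both pat.c call sites above pass end - 1. A tiny
standalone userspace sketch of that convention (illustrative only, not
kernel code):

#include <stdio.h>
#include <stdint.h>

#define ISA_START_ADDRESS 0xa0000
#define ISA_END_ADDRESS   0x100000
#define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)

int main(void)
{
	uint64_t start = 0xb8000, end = 0xc0000;	/* end is exclusive */

	/* Callers pass end - 1 because the macro's second arg is inclusive. */
	printf("0x%llx-0x%llx in ISA range: %d\n",
	       (unsigned long long)start, (unsigned long long)end,
	       is_ISA_range(start, end - 1));
	return 0;
}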


Subject: [PATCH 4/7] x86: pat.c consolidate error/debug messages in reserve_memtype


... and move the last debug message out of the locked section.

Signed-off-by: Andreas Herrmann <[email protected]>
---
arch/x86/mm/pat.c | 51 +++++++++++----------------------------------------
1 files changed, 11 insertions(+), 40 deletions(-)

diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index c996a36..1118288 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -269,12 +269,6 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
}

if (actual_type != entry->type) {
- printk(
- KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
- current->comm, current->pid,
- start, end,
- cattr_name(actual_type),
- cattr_name(entry->type));
err = -EBUSY;
break;
}
@@ -290,12 +284,6 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
}

if (actual_type != entry->type) {
- printk(
- KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
- current->comm, current->pid,
- start, end,
- cattr_name(actual_type),
- cattr_name(entry->type));
err = -EBUSY;
break;
}
@@ -321,12 +309,6 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
}

if (actual_type != entry->type) {
- printk(
- KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
- current->comm, current->pid,
- start, end,
- cattr_name(actual_type),
- cattr_name(entry->type));
err = -EBUSY;
break;
}
@@ -342,12 +324,6 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
}

if (actual_type != entry->type) {
- printk(
- KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
- current->comm, current->pid,
- start, end,
- cattr_name(actual_type),
- cattr_name(entry->type));
err = -EBUSY;
break;
}
@@ -367,10 +343,12 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
}

if (err) {
- printk(KERN_INFO
- "reserve_memtype failed 0x%Lx-0x%Lx, track %s, req %s\n",
- start, end, cattr_name(new->type),
- cattr_name(req_type));
+ printk(KERN_INFO "%s:%d conflicting memory types "
+ "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
+ end, cattr_name(new->type), cattr_name(entry->type));
+ printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
+ "track %s, req %s\n",
+ start, end, cattr_name(new->type), cattr_name(req_type));
kfree(new);
spin_unlock(&memtype_lock);
return err;
@@ -382,19 +360,12 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
dprintk("New Entry\n");
}

- if (new_type) {
- dprintk(
- "reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
- start, end, cattr_name(actual_type),
- cattr_name(req_type), cattr_name(*new_type));
- } else {
- dprintk(
- "reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s\n",
- start, end, cattr_name(actual_type),
- cattr_name(req_type));
- }
-
spin_unlock(&memtype_lock);
+
+ dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
+ start, end, cattr_name(new->type), cattr_name(req_type),
+ new_type ? cattr_name(*new_type) : "-");
+
return err;
}

--
1.5.5.4
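
The locking pattern this moves to is simply: update the list under
memtype_lock, unlock, then print. A minimal userspace sketch of that
ordering (a pthread mutex stands in for the kernel spinlock; function and
message names are made up):

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t memtype_lock = PTHREAD_MUTEX_INITIALIZER;

static void add_entry_and_log(unsigned long start, unsigned long end)
{
	pthread_mutex_lock(&memtype_lock);
	/* ... insert the new entry into the shared list here ... */
	pthread_mutex_unlock(&memtype_lock);

	/* Debug output after the unlock keeps the critical section short. */
	printf("added 0x%lx-0x%lx\n", start, end);
}

int main(void)
{
	add_entry_and_log(0x100000, 0x200000);
	return 0;
}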


Subject: [PATCH 5/7] x86: pat.c consolidate list_add handling in reserve_memtype


Signed-off-by: Andreas Herrmann <[email protected]>
---
arch/x86/mm/pat.c | 21 ++++++++-------------
1 files changed, 8 insertions(+), 13 deletions(-)

diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 1118288..49dcd96 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -198,6 +198,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
{
struct memtype *new, *entry;
unsigned long actual_type;
+ struct list_head *where;
int err = 0;

BUG_ON(start >= end); /* end is exclusive */
@@ -251,13 +252,12 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
spin_lock(&memtype_lock);

/* Search for existing mapping that overlaps the current range */
+ where = NULL;
list_for_each_entry(entry, &memtype_list, nd) {
struct memtype *saved_ptr;

if (entry->start >= end) {
- dprintk("New Entry\n");
- list_add(&new->nd, entry->nd.prev);
- new = NULL;
+ where = entry->nd.prev;
break;
}

@@ -295,9 +295,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,

dprintk("Overlap at 0x%Lx-0x%Lx\n",
saved_ptr->start, saved_ptr->end);
- /* No conflict. Go ahead and add this new entry */
- list_add(&new->nd, saved_ptr->nd.prev);
- new = NULL;
+ where = saved_ptr->nd.prev;
break;
}

@@ -335,9 +333,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,

dprintk("Overlap at 0x%Lx-0x%Lx\n",
saved_ptr->start, saved_ptr->end);
- /* No conflict. Go ahead and add this new entry */
- list_add(&new->nd, &saved_ptr->nd);
- new = NULL;
+ where = &saved_ptr->nd;
break;
}
}
@@ -354,11 +350,10 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
return err;
}

- if (new) {
- /* No conflict. Not yet added to the list. Add to the tail */
+ if (where)
+ list_add(&new->nd, where);
+ else
list_add_tail(&new->nd, &memtype_list);
- dprintk("New Entry\n");
- }

spin_unlock(&memtype_lock);

--
1.5.5.4
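
The idea is that the search loop now only records the insertion point
("where") and the single list_add()/list_add_tail() happens after the
loop. A small standalone sketch of that pattern, with a hand-rolled
doubly linked list standing in for the kernel's list_head (names are
illustrative, not the actual pat.c code):

#include <stdio.h>
#include <stdlib.h>

struct node {
	unsigned long start, end;
	struct node *prev, *next;
};

static struct node head = { 0, 0, &head, &head };	/* empty circular list */

static void insert_after(struct node *n, struct node *where)
{
	n->prev = where;
	n->next = where->next;
	where->next->prev = n;
	where->next = n;
}

static void add_sorted(unsigned long start, unsigned long end)
{
	struct node *entry, *where = NULL;
	struct node *new = malloc(sizeof(*new));

	if (!new)
		return;
	new->start = start;
	new->end = end;

	/* The loop only records the insertion point... */
	for (entry = head.next; entry != &head; entry = entry->next) {
		if (end <= entry->start) {
			where = entry->prev;	/* insert before this entry */
			break;
		}
	}

	/* ...the single insertion happens after the loop. */
	if (where)
		insert_after(new, where);
	else
		insert_after(new, head.prev);	/* add to the tail */
}

int main(void)
{
	struct node *e;

	add_sorted(0x300, 0x400);
	add_sorted(0x100, 0x200);

	for (e = head.next; e != &head; e = e->next)
		printf("0x%lx-0x%lx\n", e->start, e->end);
	return 0;
}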


Subject: [PATCH 6/7] x86: pat.c introduce function to check for conflicts with existing memtypes


... to strip down loop body in reserve_memtype.

Signed-off-by: Andreas Herrmann <[email protected]>
---
arch/x86/mm/pat.c | 96 ++++++++++++++++++----------------------------------
1 files changed, 33 insertions(+), 63 deletions(-)

diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 49dcd96..281ac64 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -178,6 +178,33 @@ static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
return req_type;
}

+static int chk_conflict(struct memtype *new, struct memtype *entry,
+ unsigned long *type)
+{
+ if (new->type != entry->type) {
+ if (type) {
+ new->type = entry->type;
+ *type = entry->type;
+ } else
+ goto conflict;
+ }
+
+ /* check overlaps with more than one entry in the list */
+ list_for_each_entry_continue(entry, &memtype_list, nd) {
+ if (new->end <= entry->start)
+ break;
+ else if (new->type != entry->type)
+ goto conflict;
+ }
+ return 0;
+
+ conflict:
+ printk(KERN_INFO "%s:%d conflicting memory types "
+ "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
+ new->end, cattr_name(new->type), cattr_name(entry->type));
+ return -EBUSY;
+}
+
/*
* req_type typically has one of the:
* - _PAGE_CACHE_WB
@@ -254,94 +281,37 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
/* Search for existing mapping that overlaps the current range */
where = NULL;
list_for_each_entry(entry, &memtype_list, nd) {
- struct memtype *saved_ptr;
-
if (entry->start >= end) {
where = entry->nd.prev;
break;
}

if (start <= entry->start && end >= entry->start) {
- if (actual_type != entry->type && new_type) {
- actual_type = entry->type;
- *new_type = actual_type;
- new->type = actual_type;
- }
-
- if (actual_type != entry->type) {
- err = -EBUSY;
- break;
- }
-
- saved_ptr = entry;
- /*
- * Check to see whether the request overlaps more
- * than one entry in the list
- */
- list_for_each_entry_continue(entry, &memtype_list, nd) {
- if (end <= entry->start) {
- break;
- }
-
- if (actual_type != entry->type) {
- err = -EBUSY;
- break;
- }
- }
-
+ err = chk_conflict(new, entry, new_type);
if (err) {
break;
}

dprintk("Overlap at 0x%Lx-0x%Lx\n",
- saved_ptr->start, saved_ptr->end);
- where = saved_ptr->nd.prev;
+ entry->start, entry->end);
+ where = entry->nd.prev;
break;
}

if (start < entry->end) {
- if (actual_type != entry->type && new_type) {
- actual_type = entry->type;
- *new_type = actual_type;
- new->type = actual_type;
- }
-
- if (actual_type != entry->type) {
- err = -EBUSY;
- break;
- }
-
- saved_ptr = entry;
- /*
- * Check to see whether the request overlaps more
- * than one entry in the list
- */
- list_for_each_entry_continue(entry, &memtype_list, nd) {
- if (end <= entry->start) {
- break;
- }
-
- if (actual_type != entry->type) {
- err = -EBUSY;
- break;
- }
- }
-
+ err = chk_conflict(new, entry, new_type);
if (err) {
break;
}

dprintk("Overlap at 0x%Lx-0x%Lx\n",
- saved_ptr->start, saved_ptr->end);
- where = &saved_ptr->nd;
+ entry->start, entry->end);
+ where = &entry->nd;
break;
}
}

if (err) {
- printk(KERN_INFO "%s:%d conflicting memory types "
- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
- end, cattr_name(new->type), cattr_name(entry->type));
printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
"track %s, req %s\n",
start, end, cattr_name(new->type), cattr_name(req_type));
--
1.5.5.4


Subject: [PATCH 7/7] x86: pat.c final cleanup of loop body in reserve_memtype


Signed-off-by: Andreas Herrmann <[email protected]>
---
arch/x86/mm/pat.c | 30 +++++++++++-------------------
1 files changed, 11 insertions(+), 19 deletions(-)

diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 281ac64..a885a10 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -281,32 +281,24 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
/* Search for existing mapping that overlaps the current range */
where = NULL;
list_for_each_entry(entry, &memtype_list, nd) {
- if (entry->start >= end) {
+ if (end <= entry->start) {
where = entry->nd.prev;
break;
- }
-
- if (start <= entry->start && end >= entry->start) {
+ } else if (start <= entry->start) { /* end > entry->start */
err = chk_conflict(new, entry, new_type);
- if (err) {
- break;
+ if (!err) {
+ dprintk("Overlap at 0x%Lx-0x%Lx\n",
+ entry->start, entry->end);
+ where = entry->nd.prev;
}
-
- dprintk("Overlap at 0x%Lx-0x%Lx\n",
- entry->start, entry->end);
- where = entry->nd.prev;
break;
- }
-
- if (start < entry->end) {
+ } else if (start < entry->end) { /* start > entry->start */
err = chk_conflict(new, entry, new_type);
- if (err) {
- break;
+ if (!err) {
+ dprintk("Overlap at 0x%Lx-0x%Lx\n",
+ entry->start, entry->end);
+ where = &entry->nd;
}
-
- dprintk("Overlap at 0x%Lx-0x%Lx\n",
- entry->start, entry->end);
- where = &entry->nd;
break;
}
}
--
1.5.5.4
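
With this change the loop body is a plain three-way classification of the
new range [start, end) against each existing entry. A standalone sketch of
just that classification, using exclusive range ends as pat.c does
(illustrative only):

#include <stdio.h>

static const char *classify(unsigned long start, unsigned long end,
			    unsigned long e_start, unsigned long e_end)
{
	if (end <= e_start)
		return "new range ends before entry: insert before it";
	else if (start <= e_start)		/* and end > e_start */
		return "overlap from the start: check conflict, insert before entry";
	else if (start < e_end)			/* and start > e_start */
		return "overlap inside entry: check conflict, insert after entry";
	return "no overlap yet: keep scanning the list";
}

int main(void)
{
	/* existing entry covers [0x1000, 0x2000) */
	printf("%s\n", classify(0x0800, 0x0c00, 0x1000, 0x2000));
	printf("%s\n", classify(0x0800, 0x1800, 0x1000, 0x2000));
	printf("%s\n", classify(0x1800, 0x2800, 0x1000, 0x2000));
	printf("%s\n", classify(0x3000, 0x4000, 0x1000, 0x2000));
	return 0;
}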


Subject: [PATCH 2/7] x86: pat.c choose more crisp variable names

Change some variable names

- parse/ml => entry (within list_for_each and friends)
- new_entry => new
- ret_type => new_type (to avoid confusion with req_type)

(... to make it more readable...)

Signed-off-by: Andreas Herrmann <[email protected]>
---
arch/x86/mm/pat.c | 127 ++++++++++++++++++++++++++---------------------------
1 files changed, 62 insertions(+), 65 deletions(-)

diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 5bbc22e..a6507bf 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -188,37 +188,34 @@ static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
* req_type will have a special case value '-1', when requester want to inherit
* the memory type from mtrr (if WB), existing PAT, defaulting to UC_MINUS.
*
- * If ret_type is NULL, function will return an error if it cannot reserve the
- * region with req_type. If ret_type is non-null, function will return
- * available type in ret_type in case of no error. In case of any error
+ * If new_type is NULL, function will return an error if it cannot reserve the
+ * region with req_type. If new_type is non-NULL, function will return
+ * available type in new_type in case of no error. In case of any error
* it will return a negative return value.
*/
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
- unsigned long *ret_type)
+ unsigned long *new_type)
{
- struct memtype *new_entry = NULL;
- struct memtype *parse;
+ struct memtype *new, *entry;
unsigned long actual_type;
int err = 0;

/* Only track when pat_enabled */
if (!pat_enabled) {
/* This is identical to page table setting without PAT */
- if (ret_type) {
- if (req_type == -1) {
- *ret_type = _PAGE_CACHE_WB;
- } else {
- *ret_type = req_type & _PAGE_CACHE_MASK;
- }
+ if (new_type) {
+ if (req_type == -1)
+ *new_type = _PAGE_CACHE_WB;
+ else
+ *new_type = req_type & _PAGE_CACHE_MASK;
}
return 0;
}

/* Low ISA region is always mapped WB in page table. No need to track */
if (is_ISA_range(start, end - 1)) {
- if (ret_type)
- *ret_type = _PAGE_CACHE_WB;
-
+ if (new_type)
+ *new_type = _PAGE_CACHE_WB;
return 0;
}

@@ -243,65 +240,65 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
actual_type = pat_x_mtrr_type(start, end, req_type);
}

- new_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL);
- if (!new_entry)
+ new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
+ if (!new)
return -ENOMEM;

- new_entry->start = start;
- new_entry->end = end;
- new_entry->type = actual_type;
+ new->start = start;
+ new->end = end;
+ new->type = actual_type;

- if (ret_type)
- *ret_type = actual_type;
+ if (new_type)
+ *new_type = actual_type;

spin_lock(&memtype_lock);

/* Search for existing mapping that overlaps the current range */
- list_for_each_entry(parse, &memtype_list, nd) {
+ list_for_each_entry(entry, &memtype_list, nd) {
struct memtype *saved_ptr;

- if (parse->start >= end) {
+ if (entry->start >= end) {
dprintk("New Entry\n");
- list_add(&new_entry->nd, parse->nd.prev);
- new_entry = NULL;
+ list_add(&new->nd, entry->nd.prev);
+ new = NULL;
break;
}

- if (start <= parse->start && end >= parse->start) {
- if (actual_type != parse->type && ret_type) {
- actual_type = parse->type;
- *ret_type = actual_type;
- new_entry->type = actual_type;
+ if (start <= entry->start && end >= entry->start) {
+ if (actual_type != entry->type && new_type) {
+ actual_type = entry->type;
+ *new_type = actual_type;
+ new->type = actual_type;
}

- if (actual_type != parse->type) {
+ if (actual_type != entry->type) {
printk(
KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
current->comm, current->pid,
start, end,
cattr_name(actual_type),
- cattr_name(parse->type));
+ cattr_name(entry->type));
err = -EBUSY;
break;
}

- saved_ptr = parse;
+ saved_ptr = entry;
/*
* Check to see whether the request overlaps more
* than one entry in the list
*/
- list_for_each_entry_continue(parse, &memtype_list, nd) {
- if (end <= parse->start) {
+ list_for_each_entry_continue(entry, &memtype_list, nd) {
+ if (end <= entry->start) {
break;
}

- if (actual_type != parse->type) {
+ if (actual_type != entry->type) {
printk(
KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
current->comm, current->pid,
start, end,
cattr_name(actual_type),
- cattr_name(parse->type));
+ cattr_name(entry->type));
err = -EBUSY;
break;
}
@@ -314,46 +311,46 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
dprintk("Overlap at 0x%Lx-0x%Lx\n",
saved_ptr->start, saved_ptr->end);
/* No conflict. Go ahead and add this new entry */
- list_add(&new_entry->nd, saved_ptr->nd.prev);
- new_entry = NULL;
+ list_add(&new->nd, saved_ptr->nd.prev);
+ new = NULL;
break;
}

- if (start < parse->end) {
- if (actual_type != parse->type && ret_type) {
- actual_type = parse->type;
- *ret_type = actual_type;
- new_entry->type = actual_type;
+ if (start < entry->end) {
+ if (actual_type != entry->type && new_type) {
+ actual_type = entry->type;
+ *new_type = actual_type;
+ new->type = actual_type;
}

- if (actual_type != parse->type) {
+ if (actual_type != entry->type) {
printk(
KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
current->comm, current->pid,
start, end,
cattr_name(actual_type),
- cattr_name(parse->type));
+ cattr_name(entry->type));
err = -EBUSY;
break;
}

- saved_ptr = parse;
+ saved_ptr = entry;
/*
* Check to see whether the request overlaps more
* than one entry in the list
*/
- list_for_each_entry_continue(parse, &memtype_list, nd) {
- if (end <= parse->start) {
+ list_for_each_entry_continue(entry, &memtype_list, nd) {
+ if (end <= entry->start) {
break;
}

- if (actual_type != parse->type) {
+ if (actual_type != entry->type) {
printk(
KERN_INFO "%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
current->comm, current->pid,
start, end,
cattr_name(actual_type),
- cattr_name(parse->type));
+ cattr_name(entry->type));
err = -EBUSY;
break;
}
@@ -366,8 +363,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
dprintk("Overlap at 0x%Lx-0x%Lx\n",
saved_ptr->start, saved_ptr->end);
/* No conflict. Go ahead and add this new entry */
- list_add(&new_entry->nd, &saved_ptr->nd);
- new_entry = NULL;
+ list_add(&new->nd, &saved_ptr->nd);
+ new = NULL;
break;
}
}
@@ -375,24 +372,24 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
if (err) {
printk(KERN_INFO
"reserve_memtype failed 0x%Lx-0x%Lx, track %s, req %s\n",
- start, end, cattr_name(new_entry->type),
+ start, end, cattr_name(new->type),
cattr_name(req_type));
- kfree(new_entry);
+ kfree(new);
spin_unlock(&memtype_lock);
return err;
}

- if (new_entry) {
+ if (new) {
/* No conflict. Not yet added to the list. Add to the tail */
- list_add_tail(&new_entry->nd, &memtype_list);
+ list_add_tail(&new->nd, &memtype_list);
dprintk("New Entry\n");
}

- if (ret_type) {
+ if (new_type) {
dprintk(
"reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
start, end, cattr_name(actual_type),
- cattr_name(req_type), cattr_name(*ret_type));
+ cattr_name(req_type), cattr_name(*new_type));
} else {
dprintk(
"reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s\n",
@@ -406,7 +403,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,

int free_memtype(u64 start, u64 end)
{
- struct memtype *ml;
+ struct memtype *entry;
int err = -EINVAL;

/* Only track when pat_enabled */
@@ -419,10 +416,10 @@ int free_memtype(u64 start, u64 end)
return 0;

spin_lock(&memtype_lock);
- list_for_each_entry(ml, &memtype_list, nd) {
- if (ml->start == start && ml->end == end) {
- list_del(&ml->nd);
- kfree(ml);
+ list_for_each_entry(entry, &memtype_list, nd) {
+ if (entry->start == start && entry->end == end) {
+ list_del(&entry->nd);
+ kfree(entry);
err = 0;
break;
}
--
1.5.5.4


Subject: [PATCH 3/7] x86: pat.c more trivial changes


- add a BUG_ON() to catch invalid start and end parameters
- no need to track the actual type in both req_type and actual_type --
  keep req_type unchanged
- remove (IMHO) superfluous comments

Signed-off-by: Andreas Herrmann <[email protected]>
---
arch/x86/mm/pat.c | 21 ++++++++-------------
1 files changed, 8 insertions(+), 13 deletions(-)

diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index a6507bf..c996a36 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -200,7 +200,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
unsigned long actual_type;
int err = 0;

- /* Only track when pat_enabled */
+ BUG_ON(start >= end); /* end is exclusive */
+
if (!pat_enabled) {
/* This is identical to page table setting without PAT */
if (new_type) {
@@ -228,17 +229,13 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
*/
u8 mtrr_type = mtrr_type_lookup(start, end);

- if (mtrr_type == MTRR_TYPE_WRBACK) {
- req_type = _PAGE_CACHE_WB;
+ if (mtrr_type == MTRR_TYPE_WRBACK)
actual_type = _PAGE_CACHE_WB;
- } else {
- req_type = _PAGE_CACHE_UC_MINUS;
+ else
actual_type = _PAGE_CACHE_UC_MINUS;
- }
- } else {
- req_type &= _PAGE_CACHE_MASK;
- actual_type = pat_x_mtrr_type(start, end, req_type);
- }
+ } else
+ actual_type = pat_x_mtrr_type(start, end,
+ req_type & _PAGE_CACHE_MASK);

new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
if (!new)
@@ -406,10 +403,8 @@ int free_memtype(u64 start, u64 end)
struct memtype *entry;
int err = -EINVAL;

- /* Only track when pat_enabled */
- if (!pat_enabled) {
+ if (!pat_enabled)
return 0;
- }

/* Low ISA region is always mapped WB. No need to track */
if (is_ISA_range(start, end - 1))
--
1.5.5.4


2008-06-21 06:21:08

by Brice Goglin

Subject: Re: [PATCH 6/7] x86: pat.c introduce function to check for conflicts with existing memtypes

Andreas Herrmann wrote:
> ... to strip down loop body in reserve_memtype.
>
> Signed-off-by: Andreas Herrmann <[email protected]>
> ---
> arch/x86/mm/pat.c | 96 ++++++++++++++++++----------------------------------
> 1 files changed, 33 insertions(+), 63 deletions(-)
>
> diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
> index 49dcd96..281ac64 100644
> --- a/arch/x86/mm/pat.c
> +++ b/arch/x86/mm/pat.c
> @@ -178,6 +178,33 @@ static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
> return req_type;
> }
>
> +static int chk_conflict(struct memtype *new, struct memtype *entry,
> + unsigned long *type)
>

Looking at this, I thought it might be nice to add (and export to modules)
a function that checks whether a range of memory has some page attribute
applied, namely WC. The idea would be to help drivers call mtrr_add only
if ioremap_wc failed to set up WC. I am not sure it would work, but it may
be easier than adding mtrr_add into ioremap_wc() as proposed in
http://lkml.org/lkml/2008/5/31/252
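
Something along these lines, perhaps -- a rough, completely untested
sketch of such a query helper (hypothetical name; it would have to live
in pat.c next to the existing memtype_list walkers and take memtype_lock
like they do):

/* Hypothetical, untested -- only meant to show the idea. */
int memtype_range_is_wc(u64 start, u64 end)
{
	struct memtype *entry;
	u64 covered = start;

	spin_lock(&memtype_lock);
	list_for_each_entry(entry, &memtype_list, nd) {
		if (entry->end <= covered)
			continue;		/* entirely below the range */
		if (entry->start > covered)
			break;			/* hole: range not fully tracked */
		if (entry->type != _PAGE_CACHE_WC)
			break;			/* tracked, but not as WC */
		covered = entry->end;
		if (covered >= end)
			break;			/* range fully covered as WC */
	}
	spin_unlock(&memtype_lock);

	return covered >= end;
}
EXPORT_SYMBOL_GPL(memtype_range_is_wc);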

Brice

2008-06-24 11:08:46

by Ingo Molnar

Subject: Re: [PATCH 0/7] x86: pat.c: reserve_memtype strip down


* Andreas Herrmann <[email protected]> wrote:

> This patch set will (first of all) clean up reserve_memtype a little
> bit to make it easier to read and maintain.
>
> (... and hopefully it doesn't break anything.;-)

applied to tip/x86/pat - thanks Andreas.

> Instead of sending one large patch I broke it down into several pieces
> to make it easier to review.

thanks!

A nice splitup not only makes review easier, but in the (I know, very
unlikely ;-) case of breakage it also narrows down the problem change.

It also makes maintenance and integration easier.

Especially for architecture code it is very important to keep individual
changes as small as possible.

Ingo

2008-06-24 21:56:05

by Pallipadi, Venkatesh

Subject: RE: [PATCH 0/7] x86: pat.c: reserve_memtype strip down


Thanks for all the cleanup patches. Overall we end up with fewer lines of code, which is a nice bonus.

Acked-by: Venkatesh Pallipadi <[email protected]>
