This patch is meant to improve overall system performance when making use of
the __phys_addr call on 64 bit x86 systems. To do this I have implemented
several changes.
First, if CONFIG_DEBUG_VIRTUAL is not defined, __phys_addr is made an inline
function, similar to how this is currently handled in 32 bit. However, in order
to do this it is required to export phys_base so that it is available if
__phys_addr is used in kernel modules.
The second change was to streamline the code by making use of the carry flag
on an add operation instead of performing a compare on a 64 bit value. The
advantage to this is that it allows us to reduce the overall size of the call.
On my Xeon E5 system the entire __phys_addr inline call consumes 30 bytes and
5 instructions. I also applied similar logic to the debug version of the
function. My testing shows that the debug version of the function with this
patch applied is slightly faster than the non-debug version without the patch.
Finally, when building the kernel with the first two changes applied, I saw
build warnings about the __START_KERNEL_map and PAGE_OFFSET constants not
fitting in their type. In order to resolve the build warnings, I changed their
type from UL to ULL.
Signed-off-by: Alexander Duyck <[email protected]>
---
arch/x86/include/asm/page_64_types.h | 16 ++++++++++++++--
arch/x86/kernel/x8664_ksyms_64.c | 3 +++
arch/x86/mm/physaddr.c | 20 ++++++++++++++------
3 files changed, 31 insertions(+), 8 deletions(-)
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 320f7bb..a951e4d 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -30,14 +30,14 @@
* hypervisor to fit. Choosing 16 slots here is arbitrary, but it's
* what Xen requires.
*/
-#define __PAGE_OFFSET _AC(0xffff880000000000, UL)
+#define __PAGE_OFFSET _AC(0xffff880000000000, ULL)
#define __PHYSICAL_START ((CONFIG_PHYSICAL_START + \
(CONFIG_PHYSICAL_ALIGN - 1)) & \
~(CONFIG_PHYSICAL_ALIGN - 1))
#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
-#define __START_KERNEL_map _AC(0xffffffff80000000, UL)
+#define __START_KERNEL_map _AC(0xffffffff80000000, ULL)
/* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */
#define __PHYSICAL_MASK_SHIFT 46
@@ -58,7 +58,19 @@ void copy_page(void *to, void *from);
extern unsigned long max_pfn;
extern unsigned long phys_base;
+#ifdef CONFIG_DEBUG_VIRTUAL
extern unsigned long __phys_addr(unsigned long);
+#else
+static inline unsigned long __phys_addr(unsigned long x)
+{
+ unsigned long y = x - __START_KERNEL_map;
+
+ /* use the carry flag to determine if x was < __START_KERNEL_map */
+ x = y + ((x > y) ? phys_base : (__START_KERNEL_map - PAGE_OFFSET));
+
+ return x;
+}
+#endif
#define __phys_reloc_hide(x) (x)
#define vmemmap ((struct page *)VMEMMAP_START)
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
index 1330dd1..b014d94 100644
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ b/arch/x86/kernel/x8664_ksyms_64.c
@@ -59,6 +59,9 @@ EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(__memcpy);
EXPORT_SYMBOL(memmove);
+#ifndef CONFIG_DEBUG_VIRTUAL
+EXPORT_SYMBOL(phys_base);
+#endif
EXPORT_SYMBOL(empty_zero_page);
#ifndef CONFIG_PARAVIRT
EXPORT_SYMBOL(native_load_gs_index);
diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
index d2e2735..f63bec5 100644
--- a/arch/x86/mm/physaddr.c
+++ b/arch/x86/mm/physaddr.c
@@ -8,20 +8,28 @@
#ifdef CONFIG_X86_64
+#ifdef CONFIG_DEBUG_VIRTUAL
unsigned long __phys_addr(unsigned long x)
{
- if (x >= __START_KERNEL_map) {
- x -= __START_KERNEL_map;
- VIRTUAL_BUG_ON(x >= KERNEL_IMAGE_SIZE);
- x += phys_base;
+ unsigned long y = x - __START_KERNEL_map;
+
+ /* use the carry flag to determine if x was < __START_KERNEL_map */
+ if (unlikely(x > y)) {
+ x = y + phys_base;
+
+ VIRTUAL_BUG_ON(y >= KERNEL_IMAGE_SIZE);
} else {
- VIRTUAL_BUG_ON(x < PAGE_OFFSET);
- x -= PAGE_OFFSET;
+ x = y + (__START_KERNEL_map - PAGE_OFFSET);
+
+ /* carry flag will be set if starting x was >= PAGE_OFFSET */
+ VIRTUAL_BUG_ON(x > y);
VIRTUAL_BUG_ON(!phys_addr_valid(x));
}
+
return x;
}
EXPORT_SYMBOL(__phys_addr);
+#endif
bool __virt_addr_valid(unsigned long x)
{
> The second change was to streamline the code by making use of the carry flag
> on an add operation instead of performing a compare on a 64 bit value. The
> advantage to this is that it allows us to reduce the overall size of the call.
> On my Xeon E5 system the entire __phys_addr inline call consumes 30 bytes and
> 5 instructions. I also applied similar logic to the debug version of the
> function. My testing shows that the debug version of the function with this
> patch applied is slightly faster than the non-debug version without the patch.
Looks good. Thanks.
Probably should still split the callers though (or have a pa_symbol_fast
that does not do the check)
-Andi
On 10/10/2012 06:58 AM, Andi Kleen wrote:
>> The second change was to streamline the code by making use of the carry flag
>> on an add operation instead of performing a compare on a 64 bit value. The
>> advantage to this is that it allows us to reduce the overall size of the call.
>> On my Xeon E5 system the entire __phys_addr inline call consumes 30 bytes and
>> 5 instructions. I also applied similar logic to the debug version of the
>> function. My testing shows that the debug version of the function with this
>> patch applied is slightly faster than the non-debug version without the patch.
> Looks good. Thanks.
>
> Probably should still split the callers though (or have a pa_symbol_fast
> that does not do the check)
>
> -Andi
I hadn't thought of that. I couldn't drop support for symbols from
__pa, but I can get away with dropping support for regular addresses
from __pa_symbol.
I just submitted a patch to drop support for standard virtual addresses
from __pa_symbol. I will also submit some patches tomorrow morning for
cleaning up a number of places I had found where we were calling
__pa/virt_to_phys when we should have been calling __pa_symbol.
Thanks,
Alex
* Alexander Duyck <[email protected]> wrote:
> This patch is meant to improve overall system performance when
> making use of the __phys_addr call on 64 bit x86 systems. To
> do this I have implemented several changes.
>
> First if CONFIG_DEBUG_VIRTUAL is not defined __phys_addr is
> made an inline, similar to how this is currently handled in 32
> bit. However in order to do this it is required to export
> phys_base so that it is available if __phys_addr is used in
> kernel modules.
>
> The second change was to streamline the code by making use of
> the carry flag on an add operation instead of performing a
> compare on a 64 bit value. The advantage to this is that it
> allows us to reduce the overall size of the call. On my Xeon
> E5 system the entire __phys_addr inline call consumes 30 bytes
> and 5 instructions. I also applied similar logic to the debug
> version of the function. My testing shows that the debug
> version of the function with this patch applied is slightly
> faster than the non-debug version without the patch.
>
> Finally, when building the kernel with the first two changes
> applied I saw build warnings about __START_KERNEL_map and
> PAGE_OFFSET constants not fitting in their type. In order to
> resolve the build warning I changed their type from UL to ULL.
>
> Signed-off-by: Alexander Duyck <[email protected]>
> ---
>
> arch/x86/include/asm/page_64_types.h | 16 ++++++++++++++--
> arch/x86/kernel/x8664_ksyms_64.c | 3 +++
> arch/x86/mm/physaddr.c | 20 ++++++++++++++------
> 3 files changed, 31 insertions(+), 8 deletions(-)
> +#ifdef CONFIG_DEBUG_VIRTUAL
> extern unsigned long __phys_addr(unsigned long);
> +#else
> +static inline unsigned long __phys_addr(unsigned long x)
> +{
> + unsigned long y = x - __START_KERNEL_map;
> +
> + /* use the carry flag to determine if x was < __START_KERNEL_map */
> + x = y + ((x > y) ? phys_base : (__START_KERNEL_map - PAGE_OFFSET));
> +
> + return x;
> +}
This is a rather frequently used primitive. By how much does
this patch increase a 'make defconfig' kernel's vmlinux, as
measured via 'size vmlinux'?
Thanks,
Ingo
On 10/24/2012 03:25 AM, Ingo Molnar wrote:
> * Alexander Duyck <[email protected]> wrote:
>
>> This patch is meant to improve overall system performance when
>> making use of the __phys_addr call on 64 bit x86 systems. To
>> do this I have implemented several changes.
>>
>> First if CONFIG_DEBUG_VIRTUAL is not defined __phys_addr is
>> made an inline, similar to how this is currently handled in 32
>> bit. However in order to do this it is required to export
>> phys_base so that it is available if __phys_addr is used in
>> kernel modules.
>>
>> The second change was to streamline the code by making use of
>> the carry flag on an add operation instead of performing a
>> compare on a 64 bit value. The advantage to this is that it
>> allows us to reduce the overall size of the call. On my Xeon
>> E5 system the entire __phys_addr inline call consumes 30 bytes
>> and 5 instructions. I also applied similar logic to the debug
>> version of the function. My testing shows that the debug
>> version of the function with this patch applied is slightly
>> faster than the non-debug version without the patch.
>>
>> Finally, when building the kernel with the first two changes
>> applied I saw build warnings about __START_KERNEL_map and
>> PAGE_OFFSET constants not fitting in their type. In order to
>> resolve the build warning I changed their type from UL to ULL.
>>
>> Signed-off-by: Alexander Duyck <[email protected]>
>> ---
>>
>> arch/x86/include/asm/page_64_types.h | 16 ++++++++++++++--
>> arch/x86/kernel/x8664_ksyms_64.c | 3 +++
>> arch/x86/mm/physaddr.c | 20 ++++++++++++++------
>> 3 files changed, 31 insertions(+), 8 deletions(-)
>> +#ifdef CONFIG_DEBUG_VIRTUAL
>> extern unsigned long __phys_addr(unsigned long);
>> +#else
>> +static inline unsigned long __phys_addr(unsigned long x)
>> +{
>> + unsigned long y = x - __START_KERNEL_map;
>> +
>> + /* use the carry flag to determine if x was < __START_KERNEL_map */
>> + x = y + ((x > y) ? phys_base : (__START_KERNEL_map - PAGE_OFFSET));
>> +
>> + return x;
>> +}
> This is a rather frequently used primitive. By how much does
> this patch increase a 'make defconfig' kernel's vmlinux, as
> measured via 'size vmlinux'?
>
> Thanks,
>
> Ingo
Here is the before and after:
Before
text data bss dec hex filename
10368528 1047480 1122304 12538312 bf51c8 vmlinux
After
text data bss dec hex filename
10372216 1047480 1122304 12542000 bf6030 vmlinux
I also have some patches that are going into the swiotlb. With them in, the
size is reduced a bit, but it still doesn't get us back to the original size:
After SWIOTLB
text data bss dec hex filename
10371860 1047480 1122304 12541644 bf5ecc vmlinux
The total increase in size amounts to about 3.6K without the SWIOTLB
changes, and about 3.3K with.
Thanks,
Alex