2015-05-04 02:45:41

by Jiang Liu

[permalink] [raw]
Subject: [Patch 0/2] Optimize CPU vector allocation for NUMA systems

Hi all,
This is a small patch set based on tip/x86/apic.
This first patch is a bugfix for tip/x86/apic. And the second patch
is an enhancement to optimize CPU vector allocation on NUMA systems.
It introduces a mechanism to allocate CPU vectors from device local
NUMA node and a kernel parameter to enable/disable the optimization.

Thanks!
Gerry

Jiang Liu (2):
irq_remapping/vt-d: Fix regression caused by commit b106ee63abcc
x86, irq: Support CPU vector allocation policies

Documentation/kernel-parameters.txt | 5 +++
arch/x86/kernel/apic/vector.c | 83 +++++++++++++++++++++++++++++++----
drivers/iommu/intel_irq_remapping.c | 16 ++++---
3 files changed, 90 insertions(+), 14 deletions(-)

--
1.7.10.4


2015-05-04 02:45:53

by Jiang Liu

[permalink] [raw]
Subject: [Patch 1/2] irq_remapping/vt-d: Fix regression caused by commit b106ee63abcc

Commit b106ee63abcc ("irq_remapping/vt-d: Enhance Intel IR driver to
support hierarchical irqdomains") caused a regression, which forgot
to initialize remapping data structures other than the first entry
when setting up remapping entries for multiple MSIs.

Code is written by Thomas and commit message is written by Jiang.

Signed-off-by: Thomas Gleixner <[email protected]>
Signed-off-by: Jiang Liu <[email protected]>
---
Hi Thomas,
I missed this patch when rebasing my patch set. It may be
folded into commit b106ee63abcc ("irq_remapping/vt-d: Enhance Intel IR
driver to support hierarchical irqdomains").
Thanks!
Gerry
---
drivers/iommu/intel_irq_remapping.c | 16 +++++++++++-----
1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 14d95694fc1b..7ecc6b3180ba 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -1113,7 +1113,7 @@ static int intel_irq_remapping_alloc(struct irq_domain *domain,
{
struct intel_iommu *iommu = domain->host_data;
struct irq_alloc_info *info = arg;
- struct intel_ir_data *data;
+ struct intel_ir_data *data, *ird;
struct irq_data *irq_data;
struct irq_cfg *irq_cfg;
int i, ret, index;
@@ -1158,14 +1158,20 @@ static int intel_irq_remapping_alloc(struct irq_domain *domain,
}

if (i > 0) {
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
+ ird = kzalloc(sizeof(*ird), GFP_KERNEL);
+ if (!ird)
goto out_free_data;
+ /* Initialize the common data */
+ ird->irq_2_iommu = data->irq_2_iommu;
+ ird->irq_2_iommu.sub_handle = i;
+ } else {
+ ird = data;
}
+
irq_data->hwirq = (index << 16) + i;
- irq_data->chip_data = data;
+ irq_data->chip_data = ird;
irq_data->chip = &intel_ir_chip;
- intel_irq_remapping_prepare_irte(data, irq_cfg, info, index, i);
+ intel_irq_remapping_prepare_irte(ird, irq_cfg, info, index, i);
irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
}
return 0;
--
1.7.10.4

2015-05-04 02:46:18

by Jiang Liu

[permalink] [raw]
Subject: [Patch 2/2] x86, irq: Support CPU vector allocation policies

On NUMA systems, an IO device may be associated with a NUMA node.
It may improve IO performance to allocate resources, such as memory
and interrupts, from device local node.

This patch introduces a mechanism to support CPU vector allocation
policies, so users may choose the best suitable CPU vector allocation
policy. Currently there are two supported allocation policies:
1) allocate CPU vectors from CPUs on device local node
2) allocate CPU vectors from all online CPUs

This mechanism may be used to support NumaConnect systems to allocate
CPU vectors from device local node.

Signed-off-by: Jiang Liu <[email protected]>
Cc: Daniel J Blueman <[email protected]>
---
Documentation/kernel-parameters.txt | 5 +++
arch/x86/kernel/apic/vector.c | 83 +++++++++++++++++++++++++++++++----
2 files changed, 79 insertions(+), 9 deletions(-)

diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 274252f205b7..5e8b1c6f0677 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -3840,6 +3840,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
vector= [IA-64,SMP]
vector=percpu: enable percpu vector domain

+ vector_alloc= [x86,SMP]
+ vector_alloc=node: try to allocate CPU vectors from CPUs on
+ device local node first, fallback to all online CPUs
+ vector_alloc=global: allocate CPU vector from all online CPUs
+
video= [FB] Frame buffer configuration
See Documentation/fb/modedb.txt.

diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 1c7dd42b98c1..96ce5068a926 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -28,6 +28,17 @@ struct apic_chip_data {
u8 move_in_progress : 1;
};

+enum {
+ /* Allocate CPU vectors from CPUs on device local node */
+ X86_VECTOR_POL_NODE = 0x1,
+ /* Allocate CPU vectors from all online CPUs */
+ X86_VECTOR_POL_GLOBAL = 0x2,
+ /* Allocate CPU vectors from caller specified CPUs */
+ X86_VECTOR_POL_CALLER = 0x4,
+ X86_VECTOR_POL_MIN = X86_VECTOR_POL_NODE,
+ X86_VECTOR_POL_MAX = X86_VECTOR_POL_CALLER,
+};
+
struct irq_domain *x86_vector_domain;
static DEFINE_RAW_SPINLOCK(vector_lock);
static cpumask_var_t vector_cpumask;
@@ -35,6 +46,9 @@ static struct irq_chip lapic_controller;
#ifdef CONFIG_X86_IO_APIC
static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY];
#endif
+static unsigned int vector_alloc_policy = X86_VECTOR_POL_NODE |
+ X86_VECTOR_POL_GLOBAL |
+ X86_VECTOR_POL_CALLER;

void lock_vector_lock(void)
{
@@ -258,12 +272,6 @@ void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
memset(dst, 0, sizeof(*dst));
}

-static inline const struct cpumask *
-irq_alloc_info_get_mask(struct irq_alloc_info *info)
-{
- return (!info || !info->mask) ? apic->target_cpus() : info->mask;
-}
-
static void x86_vector_free_irqs(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs)
{
@@ -284,12 +292,58 @@ static void x86_vector_free_irqs(struct irq_domain *domain,
}
}

+static int assign_irq_vector_policy(int irq, int node,
+ struct apic_chip_data *data,
+ struct irq_alloc_info *info)
+{
+ int err = -EBUSY;
+ unsigned int policy;
+ const struct cpumask *mask;
+
+ if (info && info->mask)
+ policy = X86_VECTOR_POL_CALLER;
+ else
+ policy = X86_VECTOR_POL_MIN;
+
+ for (; policy <= X86_VECTOR_POL_MAX; policy <<= 1) {
+ if (!(vector_alloc_policy & policy))
+ continue;
+
+ switch (policy) {
+ case X86_VECTOR_POL_NODE:
+ if (node >= 0)
+ mask = cpumask_of_node(node);
+ else
+ mask = NULL;
+ break;
+ case X86_VECTOR_POL_GLOBAL:
+ mask = apic->target_cpus();
+ break;
+ case X86_VECTOR_POL_CALLER:
+ if (info && info->mask)
+ mask = info->mask;
+ else
+ mask = NULL;
+ break;
+ default:
+ mask = NULL;
+ break;
+ }
+ if (mask) {
+ err = assign_irq_vector(irq, data, mask);
+ if (!err)
+ return 0;
+ }
+ }
+
+ return err;
+}
+
static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *arg)
{
struct irq_alloc_info *info = arg;
struct apic_chip_data *data;
- const struct cpumask *mask;
struct irq_data *irq_data;
int i, err;

@@ -300,7 +354,6 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
if ((info->flags & X86_IRQ_ALLOC_CONTIGUOUS_VECTORS) && nr_irqs > 1)
return -ENOSYS;

- mask = irq_alloc_info_get_mask(info);
for (i = 0; i < nr_irqs; i++) {
irq_data = irq_domain_get_irq_data(domain, virq + i);
BUG_ON(!irq_data);
@@ -318,7 +371,8 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
irq_data->chip = &lapic_controller;
irq_data->chip_data = data;
irq_data->hwirq = virq + i;
- err = assign_irq_vector(virq, data, mask);
+ err = assign_irq_vector_policy(virq, irq_data->node, data,
+ info);
if (err)
goto error;
}
@@ -809,6 +863,17 @@ static __init int setup_show_lapic(char *arg)
}
__setup("show_lapic=", setup_show_lapic);

+static int __init apic_parse_vector_policy(char *str)
+{
+ if (!strncmp(str, "node", 4))
+ vector_alloc_policy |= X86_VECTOR_POL_NODE;
+ else if (!strncmp(str, "global", 6))
+ vector_alloc_policy &= ~X86_VECTOR_POL_NODE;
+
+ return 1;
+}
+__setup("vector_alloc=", apic_parse_vector_policy);
+
static int __init print_ICs(void)
{
if (apic_verbosity == APIC_QUIET)
--
1.7.10.4

Subject: [tip:x86/apic] irq_remapping/vt-d: Init all MSI entries not just the first one

Commit-ID: 9d4c0313f24a05e5252e7106636bf3c5b6318f5d
Gitweb: http://git.kernel.org/tip/9d4c0313f24a05e5252e7106636bf3c5b6318f5d
Author: Thomas Gleixner <[email protected]>
AuthorDate: Mon, 4 May 2015 10:47:40 +0800
Committer: Thomas Gleixner <[email protected]>
CommitDate: Tue, 5 May 2015 11:14:48 +0200

irq_remapping/vt-d: Init all MSI entries not just the first one

Commit b106ee63abcc ("irq_remapping/vt-d: Enhance Intel IR driver to
support hierarchical irqdomains") caused a regression, which forgot
to initialize remapping data structures other than the first entry
when setting up remapping entries for multiple MSIs.

[ Jiang: Commit message ]

Fixes: b106ee63abcc ("irq_remapping/vt-d: Enhance Intel IR driver to support hierarchical irqdomains")
Signed-off-by: Thomas Gleixner <[email protected]>
Signed-off-by: Jiang Liu <[email protected]>
Cc: Konrad Rzeszutek Wilk <[email protected]>
Cc: David Cohen <[email protected]>
Cc: Sander Eikelenboom <[email protected]>
Cc: David Vrabel <[email protected]>
Cc: Tony Luck <[email protected]>
Cc: Greg Kroah-Hartman <[email protected]>
Cc: [email protected]
Cc: Bjorn Helgaas <[email protected]>
Cc: Benjamin Herrenschmidt <[email protected]>
Cc: Rafael J. Wysocki <[email protected]>
Cc: Randy Dunlap <[email protected]>
Cc: Yinghai Lu <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Dimitri Sivanich <[email protected]>
Cc: Joerg Roedel <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Thomas Gleixner <[email protected]>
---
drivers/iommu/intel_irq_remapping.c | 16 +++++++++++-----
1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 14d9569..7ecc6b3 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -1113,7 +1113,7 @@ static int intel_irq_remapping_alloc(struct irq_domain *domain,
{
struct intel_iommu *iommu = domain->host_data;
struct irq_alloc_info *info = arg;
- struct intel_ir_data *data;
+ struct intel_ir_data *data, *ird;
struct irq_data *irq_data;
struct irq_cfg *irq_cfg;
int i, ret, index;
@@ -1158,14 +1158,20 @@ static int intel_irq_remapping_alloc(struct irq_domain *domain,
}

if (i > 0) {
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
+ ird = kzalloc(sizeof(*ird), GFP_KERNEL);
+ if (!ird)
goto out_free_data;
+ /* Initialize the common data */
+ ird->irq_2_iommu = data->irq_2_iommu;
+ ird->irq_2_iommu.sub_handle = i;
+ } else {
+ ird = data;
}
+
irq_data->hwirq = (index << 16) + i;
- irq_data->chip_data = data;
+ irq_data->chip_data = ird;
irq_data->chip = &intel_ir_chip;
- intel_irq_remapping_prepare_irte(data, irq_cfg, info, index, i);
+ intel_irq_remapping_prepare_irte(ird, irq_cfg, info, index, i);
irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
}
return 0;

2015-05-05 19:25:35

by Thomas Gleixner

[permalink] [raw]
Subject: Re: [Patch 2/2] x86, irq: Support CPU vector allocation policies

On Mon, 4 May 2015, Jiang Liu wrote:
> +enum {
> + /* Allocate CPU vectors from CPUs on device local node */
> + X86_VECTOR_POL_NODE = 0x1,
> + /* Allocate CPU vectors from all online CPUs */
> + X86_VECTOR_POL_GLOBAL = 0x2,
> + /* Allocate CPU vectors from caller specified CPUs */
> + X86_VECTOR_POL_CALLER = 0x4,
> + X86_VECTOR_POL_MIN = X86_VECTOR_POL_NODE,
> + X86_VECTOR_POL_MAX = X86_VECTOR_POL_CALLER,
> +}

> +static unsigned int vector_alloc_policy = X86_VECTOR_POL_NODE |
> + X86_VECTOR_POL_GLOBAL |
> + X86_VECTOR_POL_CALLER;

> +static int __init apic_parse_vector_policy(char *str)
> +{
> + if (!strncmp(str, "node", 4))
> + vector_alloc_policy |= X86_VECTOR_POL_NODE;

This does not make sense. X86_VECTOR_POL_NODE is already set.

> + else if (!strncmp(str, "global", 6))
> + vector_alloc_policy &= ~X86_VECTOR_POL_NODE;

Why would one disable node aware allocation? We fall back to the
global allocation anyway, if the node aware allocation fails.

I'm completely missing the value of this command line option.

Thanks,

tglx

2015-05-06 05:18:09

by Jiang Liu

[permalink] [raw]
Subject: Re: [Patch 2/2] x86, irq: Support CPU vector allocation policies

On 2015/5/6 3:25, Thomas Gleixner wrote:
> On Mon, 4 May 2015, Jiang Liu wrote:
>> +enum {
>> + /* Allocate CPU vectors from CPUs on device local node */
>> + X86_VECTOR_POL_NODE = 0x1,
>> + /* Allocate CPU vectors from all online CPUs */
>> + X86_VECTOR_POL_GLOBAL = 0x2,
>> + /* Allocate CPU vectors from caller specified CPUs */
>> + X86_VECTOR_POL_CALLER = 0x4,
>> + X86_VECTOR_POL_MIN = X86_VECTOR_POL_NODE,
>> + X86_VECTOR_POL_MAX = X86_VECTOR_POL_CALLER,
>> +}
>
>> +static unsigned int vector_alloc_policy = X86_VECTOR_POL_NODE |
>> + X86_VECTOR_POL_GLOBAL |
>> + X86_VECTOR_POL_CALLER;
>
>> +static int __init apic_parse_vector_policy(char *str)
>> +{
>> + if (!strncmp(str, "node", 4))
>> + vector_alloc_policy |= X86_VECTOR_POL_NODE;
>
> This does not make sense. X86_VECTOR_POL_NODE is already set.
>
>> + else if (!strncmp(str, "global", 6))
>> + vector_alloc_policy &= ~X86_VECTOR_POL_NODE;
>
> Why would one disable node aware allocation? We fall back to the
> global allocation anyway, if the node aware allocation fails.
>
> I'm completely missing the value of this command line option.
Hi Thomas,
You are right. Originally I wanted a method to disable the
per-node allocation policy. Thinking it over, it seems unnecessary
after all. Enabling the per-node allocation policy by default shouldn't
cause serious issues, and users may change the irq affinity setting
if the default affinity isn't desired.
So we don't need the kernel parameter at all. Will update
the patch.

Thanks!
Gerry

>
> Thanks,
>
> tglx
> --
> To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
> the body of a message to [email protected]
> More majordomo info at http://vger.kernel.org/majordomo-info.html
> Please read the FAQ at http://www.tux.org/lkml/
>

2015-05-06 08:34:04

by Jiang Liu

[permalink] [raw]
Subject: [Patch v2] x86, irq: Support CPU vector allocation policies

On NUMA systems, an IO device may be associated with a NUMA node.
It may improve IO performance to allocate resources, such as memory
and interrupts, from device local node.

This patch introduces a mechanism to support CPU vector allocation
policies. It tries to allocate CPU vectors from CPUs on device local
node first, and then fallback to all online(global) CPUs.

This mechanism may be used to support NumaConnect systems to allocate
CPU vectors from device local node.

Signed-off-by: Jiang Liu <[email protected]>
Cc: Daniel J Blueman <[email protected]>
---
Hi Thomas,
This is the simplified version, which removed the kernel parameter.
Seems much simpler:)

Thanks!
Gerry
---
arch/x86/kernel/apic/vector.c | 66 +++++++++++++++++++++++++++++++++++------
1 file changed, 57 insertions(+), 9 deletions(-)

diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 1c7dd42b98c1..44363ccce9b5 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -28,6 +28,17 @@ struct apic_chip_data {
u8 move_in_progress : 1;
};

+enum {
+ /* Allocate CPU vectors from CPUs on device local node */
+ X86_VECTOR_POL_NODE = 0x1,
+ /* Allocate CPU vectors from all online CPUs */
+ X86_VECTOR_POL_GLOBAL = 0x2,
+ /* Allocate CPU vectors from caller specified CPUs */
+ X86_VECTOR_POL_CALLER = 0x4,
+ X86_VECTOR_POL_MIN = X86_VECTOR_POL_NODE,
+ X86_VECTOR_POL_MAX = X86_VECTOR_POL_CALLER,
+};
+
struct irq_domain *x86_vector_domain;
static DEFINE_RAW_SPINLOCK(vector_lock);
static cpumask_var_t vector_cpumask;
@@ -258,12 +269,6 @@ void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
memset(dst, 0, sizeof(*dst));
}

-static inline const struct cpumask *
-irq_alloc_info_get_mask(struct irq_alloc_info *info)
-{
- return (!info || !info->mask) ? apic->target_cpus() : info->mask;
-}
-
static void x86_vector_free_irqs(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs)
{
@@ -284,12 +289,55 @@ static void x86_vector_free_irqs(struct irq_domain *domain,
}
}

+static int assign_irq_vector_policy(int irq, int node,
+ struct apic_chip_data *data,
+ struct irq_alloc_info *info)
+{
+ int err = -EBUSY;
+ unsigned int policy;
+ const struct cpumask *mask;
+
+ if (info && info->mask)
+ policy = X86_VECTOR_POL_CALLER;
+ else
+ policy = X86_VECTOR_POL_MIN;
+
+ for (; policy <= X86_VECTOR_POL_MAX; policy <<= 1) {
+ switch (policy) {
+ case X86_VECTOR_POL_NODE:
+ if (node >= 0)
+ mask = cpumask_of_node(node);
+ else
+ mask = NULL;
+ break;
+ case X86_VECTOR_POL_GLOBAL:
+ mask = apic->target_cpus();
+ break;
+ case X86_VECTOR_POL_CALLER:
+ if (info && info->mask)
+ mask = info->mask;
+ else
+ mask = NULL;
+ break;
+ default:
+ mask = NULL;
+ break;
+ }
+ if (mask) {
+ err = assign_irq_vector(irq, data, mask);
+ if (!err)
+ return 0;
+ }
+ }
+
+ return err;
+}
+
static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *arg)
{
struct irq_alloc_info *info = arg;
struct apic_chip_data *data;
- const struct cpumask *mask;
struct irq_data *irq_data;
int i, err;

@@ -300,7 +348,6 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
if ((info->flags & X86_IRQ_ALLOC_CONTIGUOUS_VECTORS) && nr_irqs > 1)
return -ENOSYS;

- mask = irq_alloc_info_get_mask(info);
for (i = 0; i < nr_irqs; i++) {
irq_data = irq_domain_get_irq_data(domain, virq + i);
BUG_ON(!irq_data);
@@ -318,7 +365,8 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
irq_data->chip = &lapic_controller;
irq_data->chip_data = data;
irq_data->hwirq = virq + i;
- err = assign_irq_vector(virq, data, mask);
+ err = assign_irq_vector_policy(virq, irq_data->node, data,
+ info);
if (err)
goto error;
}
--
1.7.10.4

2015-05-06 10:22:22

by Thomas Gleixner

[permalink] [raw]
Subject: Re: [Patch v2] x86, irq: Support CPU vector allocation policies

On Wed, 6 May 2015, Jiang Liu wrote:
> Hi Thomas,
> This is the simplified version, which removed the kernel parameter.
> Seems much simpler:)

But it can be made even simpler. :)

> +enum {
> + /* Allocate CPU vectors from CPUs on device local node */
> + X86_VECTOR_POL_NODE = 0x1,
> + /* Allocate CPU vectors from all online CPUs */
> + X86_VECTOR_POL_GLOBAL = 0x2,
> + /* Allocate CPU vectors from caller specified CPUs */
> + X86_VECTOR_POL_CALLER = 0x4,
> + X86_VECTOR_POL_MIN = X86_VECTOR_POL_NODE,
> + X86_VECTOR_POL_MAX = X86_VECTOR_POL_CALLER,
> +};


> +static int assign_irq_vector_policy(int irq, int node,
> + struct apic_chip_data *data,
> + struct irq_alloc_info *info)
> +{
> + int err = -EBUSY;
> + unsigned int policy;
> + const struct cpumask *mask;
> +
> + if (info && info->mask)
> + policy = X86_VECTOR_POL_CALLER;
> + else
> + policy = X86_VECTOR_POL_MIN;
> +
> + for (; policy <= X86_VECTOR_POL_MAX; policy <<= 1) {
> + switch (policy) {
> + case X86_VECTOR_POL_NODE:
> + if (node >= 0)
> + mask = cpumask_of_node(node);
> + else
> + mask = NULL;
> + break;
> + case X86_VECTOR_POL_GLOBAL:
> + mask = apic->target_cpus();
> + break;
> + case X86_VECTOR_POL_CALLER:
> + if (info && info->mask)
> + mask = info->mask;
> + else
> + mask = NULL;
> + break;
> + default:
> + mask = NULL;
> + break;
> + }
> + if (mask) {
> + err = assign_irq_vector(irq, data, mask);
> + if (!err)
> + return 0;
> + }
> + }

This looks pretty overengineered now that you don't have that parameter check.

if (info && info->mask)
return assign_irq_vector(irq, data, info->mask);

if (node >= 0) {
err = assign_irq_vector(irq, data, cpumask_of_node(node));
if (!err)
return 0;
}

return assign_irq_vector(irq, data, apic->target_cpus());

Should do the same, right?

Thanks,

tglx

2015-05-07 02:52:20

by Jiang Liu

[permalink] [raw]
Subject: [Patch v3] x86, irq: Allocate CPU vectors from device local CPUs if possible

On NUMA systems, an IO device may be associated with a NUMA node.
It may improve IO performance to allocate resources, such as memory
and interrupts, from device local node.

This patch introduces a mechanism to support CPU vector allocation
policies. It tries to allocate CPU vectors from CPUs on device local
node first, and then fallback to all online(global) CPUs.

This mechanism may be used to support NumaConnect systems to allocate
CPU vectors from device local node.

Signed-off-by: Jiang Liu <[email protected]>
Cc: Daniel J Blueman <[email protected]>
---
Hi Thomas,
I feel this should be the simplest version now:)
Thanks!
Gerry
---
arch/x86/kernel/apic/vector.c | 23 ++++++++++++++---------
1 file changed, 14 insertions(+), 9 deletions(-)

diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 1c7dd42b98c1..eb65c6b98de0 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -210,6 +210,18 @@ static int assign_irq_vector(int irq, struct apic_chip_data *data,
return err;
}

+static int assign_irq_vector_policy(int irq, int node,
+ struct apic_chip_data *data,
+ struct irq_alloc_info *info)
+{
+ if (info && info->mask)
+ return assign_irq_vector(irq, data, info->mask);
+ if (node != NUMA_NO_NODE &&
+ assign_irq_vector(irq, data, cpumask_of_node(node)) == 0)
+ return 0;
+ return assign_irq_vector(irq, data, apic->target_cpus());
+}
+
static void clear_irq_vector(int irq, struct apic_chip_data *data)
{
int cpu, vector;
@@ -258,12 +270,6 @@ void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
memset(dst, 0, sizeof(*dst));
}

-static inline const struct cpumask *
-irq_alloc_info_get_mask(struct irq_alloc_info *info)
-{
- return (!info || !info->mask) ? apic->target_cpus() : info->mask;
-}
-
static void x86_vector_free_irqs(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs)
{
@@ -289,7 +295,6 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
{
struct irq_alloc_info *info = arg;
struct apic_chip_data *data;
- const struct cpumask *mask;
struct irq_data *irq_data;
int i, err;

@@ -300,7 +305,6 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
if ((info->flags & X86_IRQ_ALLOC_CONTIGUOUS_VECTORS) && nr_irqs > 1)
return -ENOSYS;

- mask = irq_alloc_info_get_mask(info);
for (i = 0; i < nr_irqs; i++) {
irq_data = irq_domain_get_irq_data(domain, virq + i);
BUG_ON(!irq_data);
@@ -318,7 +322,8 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
irq_data->chip = &lapic_controller;
irq_data->chip_data = data;
irq_data->hwirq = virq + i;
- err = assign_irq_vector(virq, data, mask);
+ err = assign_irq_vector_policy(virq, irq_data->node, data,
+ info);
if (err)
goto error;
}
--
1.7.10.4

2015-05-08 07:22:01

by Daniel J Blueman

[permalink] [raw]
Subject: Re: [Patch v3] x86, irq: Allocate CPU vectors from device local CPUs if possible

On Thu, May 7, 2015 at 10:53 AM, Jiang Liu <[email protected]>
wrote:
> On NUMA systems, an IO device may be associated with a NUMA node.
> It may improve IO performance to allocate resources, such as memory
> and interrupts, from device local node.
>
> This patch introduces a mechanism to support CPU vector allocation
> policies. It tries to allocate CPU vectors from CPUs on device local
> node first, and then fallback to all online(global) CPUs.
>
> This mechanism may be used to support NumaConnect systems to allocate
> CPU vectors from device local node.
>
> Signed-off-by: Jiang Liu <[email protected]>
> Cc: Daniel J Blueman <[email protected]>
> ---
> Hi Thomas,
> I feel this should be simpliest version now:)
> Thanks!
> Gerry
> ---
> arch/x86/kernel/apic/vector.c | 23 ++++++++++++++---------
> 1 file changed, 14 insertions(+), 9 deletions(-)
>
> diff --git a/arch/x86/kernel/apic/vector.c
> b/arch/x86/kernel/apic/vector.c
> index 1c7dd42b98c1..eb65c6b98de0 100644
> --- a/arch/x86/kernel/apic/vector.c
> +++ b/arch/x86/kernel/apic/vector.c
> @@ -210,6 +210,18 @@ static int assign_irq_vector(int irq, struct
> apic_chip_data *data,
> return err;
> }
>
> +static int assign_irq_vector_policy(int irq, int node,
> + struct apic_chip_data *data,
> + struct irq_alloc_info *info)
> +{
> + if (info && info->mask)
> + return assign_irq_vector(irq, data, info->mask);
> + if (node != NUMA_NO_NODE &&
> + assign_irq_vector(irq, data, cpumask_of_node(node)) == 0)
> + return 0;
> + return assign_irq_vector(irq, data, apic->target_cpus());
> +}
> +
> static void clear_irq_vector(int irq, struct apic_chip_data *data)
> {
> int cpu, vector;
> @@ -258,12 +270,6 @@ void copy_irq_alloc_info(struct irq_alloc_info
> *dst, struct irq_alloc_info *src)
> memset(dst, 0, sizeof(*dst));
> }
>
> -static inline const struct cpumask *
> -irq_alloc_info_get_mask(struct irq_alloc_info *info)
> -{
> - return (!info || !info->mask) ? apic->target_cpus() : info->mask;
> -}
> -
> static void x86_vector_free_irqs(struct irq_domain *domain,
> unsigned int virq, unsigned int nr_irqs)
> {
> @@ -289,7 +295,6 @@ static int x86_vector_alloc_irqs(struct
> irq_domain *domain, unsigned int virq,
> {
> struct irq_alloc_info *info = arg;
> struct apic_chip_data *data;
> - const struct cpumask *mask;
> struct irq_data *irq_data;
> int i, err;
>
> @@ -300,7 +305,6 @@ static int x86_vector_alloc_irqs(struct
> irq_domain *domain, unsigned int virq,
> if ((info->flags & X86_IRQ_ALLOC_CONTIGUOUS_VECTORS) && nr_irqs > 1)
> return -ENOSYS;
>
> - mask = irq_alloc_info_get_mask(info);
> for (i = 0; i < nr_irqs; i++) {
> irq_data = irq_domain_get_irq_data(domain, virq + i);
> BUG_ON(!irq_data);
> @@ -318,7 +322,8 @@ static int x86_vector_alloc_irqs(struct
> irq_domain *domain, unsigned int virq,
> irq_data->chip = &lapic_controller;
> irq_data->chip_data = data;
> irq_data->hwirq = virq + i;
> - err = assign_irq_vector(virq, data, mask);
> + err = assign_irq_vector_policy(virq, irq_data->node, data,
> + info);
> if (err)
> goto error;
> }

Testing x86/tip/apic with this patch on a 192 core/24 node NumaConnect
system, all the PCIe bridge, GPU, SATA, NIC etc interrupts are
allocated on the correct NUMA nodes, so it works great. Tested-by:
Daniel J Blueman <[email protected]>

Many thanks!
Daniel

Subject: [tip:x86/apic] x86, irq: Allocate CPU vectors from device local CPUs if possible

Commit-ID: 486ca539caa082c7f2929c207af1b3ce2a304489
Gitweb: http://git.kernel.org/tip/486ca539caa082c7f2929c207af1b3ce2a304489
Author: Jiang Liu <[email protected]>
AuthorDate: Thu, 7 May 2015 10:53:56 +0800
Committer: Thomas Gleixner <[email protected]>
CommitDate: Wed, 13 May 2015 09:50:24 +0200

x86, irq: Allocate CPU vectors from device local CPUs if possible

On NUMA systems, an IO device may be associated with a NUMA node.
It may improve IO performance to allocate resources, such as memory
and interrupts, from device local node.

This patch introduces a mechanism to support CPU vector allocation
policies. It tries to allocate CPU vectors from CPUs on device local
node first, and then fallback to all online(global) CPUs.

This mechanism may be used to support NumaConnect systems to allocate
CPU vectors from device local node.

Signed-off-by: Jiang Liu <[email protected]>
Tested-by: Daniel J Blueman <[email protected]>
Cc: Konrad Rzeszutek Wilk <[email protected]>
Cc: Tony Luck <[email protected]>
Cc: Bjorn Helgaas <[email protected]>
Cc: Benjamin Herrenschmidt <[email protected]>
Cc: Rafael J. Wysocki <[email protected]>
Cc: Randy Dunlap <[email protected]>
Cc: Yinghai Lu <[email protected]>
Cc: Borislav Petkov <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Thomas Gleixner <[email protected]>
---
arch/x86/kernel/apic/vector.c | 23 ++++++++++++++---------
1 file changed, 14 insertions(+), 9 deletions(-)

diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 2766747..b590c9d 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -210,6 +210,18 @@ static int assign_irq_vector(int irq, struct apic_chip_data *data,
return err;
}

+static int assign_irq_vector_policy(int irq, int node,
+ struct apic_chip_data *data,
+ struct irq_alloc_info *info)
+{
+ if (info && info->mask)
+ return assign_irq_vector(irq, data, info->mask);
+ if (node != NUMA_NO_NODE &&
+ assign_irq_vector(irq, data, cpumask_of_node(node)) == 0)
+ return 0;
+ return assign_irq_vector(irq, data, apic->target_cpus());
+}
+
static void clear_irq_vector(int irq, struct apic_chip_data *data)
{
int cpu, vector;
@@ -258,12 +270,6 @@ void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
memset(dst, 0, sizeof(*dst));
}

-static inline const struct cpumask *
-irq_alloc_info_get_mask(struct irq_alloc_info *info)
-{
- return (!info || !info->mask) ? apic->target_cpus() : info->mask;
-}
-
static void x86_vector_free_irqs(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs)
{
@@ -289,7 +295,6 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
{
struct irq_alloc_info *info = arg;
struct apic_chip_data *data;
- const struct cpumask *mask;
struct irq_data *irq_data;
int i, err;

@@ -300,7 +305,6 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
if ((info->flags & X86_IRQ_ALLOC_CONTIGUOUS_VECTORS) && nr_irqs > 1)
return -ENOSYS;

- mask = irq_alloc_info_get_mask(info);
for (i = 0; i < nr_irqs; i++) {
irq_data = irq_domain_get_irq_data(domain, virq + i);
BUG_ON(!irq_data);
@@ -318,7 +322,8 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
irq_data->chip = &lapic_controller;
irq_data->chip_data = data;
irq_data->hwirq = virq + i;
- err = assign_irq_vector(virq, data, mask);
+ err = assign_irq_vector_policy(virq, irq_data->node, data,
+ info);
if (err)
goto error;
}