2007-08-23 07:36:10

by Laurent Vivier

Subject: [PATCH] Implement missing x86_64 function smp_call_function_mask()

This patch defines the missing function smp_call_function_mask() for x86_64;
it is more or less a cut&paste of the i386 function.

Index: kvm/arch/x86_64/kernel/smp.c
===================================================================
--- kvm.orig/arch/x86_64/kernel/smp.c 2007-08-22 18:31:37.000000000 +0200
+++ kvm/arch/x86_64/kernel/smp.c 2007-08-22 18:34:32.000000000 +0200
@@ -321,42 +321,6 @@ void unlock_ipi_call_lock(void)
}

/*
- * this function sends a 'generic call function' IPI to one other CPU
- * in the system.
- *
- * cpu is a standard Linux logical CPU number.
- */
-static void
-__smp_call_function_single(int cpu, void (*func) (void *info), void *info,
- int nonatomic, int wait)
-{
- struct call_data_struct data;
- int cpus = 1;
-
- data.func = func;
- data.info = info;
- atomic_set(&data.started, 0);
- data.wait = wait;
- if (wait)
- atomic_set(&data.finished, 0);
-
- call_data = &data;
- wmb();
- /* Send a message to all other CPUs and wait for them to respond */
- send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);
-
- /* Wait for response */
- while (atomic_read(&data.started) != cpus)
- cpu_relax();
-
- if (!wait)
- return;
-
- while (atomic_read(&data.finished) != cpus)
- cpu_relax();
-}
-
-/*
* smp_call_function_single - Run a function on a specific CPU
* @func: The function to run. This must be fast and non-blocking.
* @info: An arbitrary pointer to pass to the function.
@@ -373,6 +337,7 @@ int smp_call_function_single (int cpu, v
int nonatomic, int wait)
{
/* prevent preemption and reschedule on another processor */
+ int ret;
int me = get_cpu();

/* Can deadlock when called with interrupts disabled */
@@ -386,9 +351,8 @@ int smp_call_function_single (int cpu, v
return 0;
}

- spin_lock(&call_lock);
- __smp_call_function_single(cpu, func, info, nonatomic, wait);
- spin_unlock(&call_lock);
+ ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
+
put_cpu();
return 0;
}
@@ -430,6 +394,75 @@ static void __smp_call_function (void (*
cpu_relax();
}

+/**
+ * smp_call_function_mask(): Run a function on a set of other CPUs.
+ * @mask: The set of cpus to run on. Must not include the current cpu.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+int smp_call_function_mask(cpumask_t mask,
+ void (*func)(void *), void *info,
+ int wait)
+{
+ struct call_data_struct data;
+ cpumask_t allbutself;
+ int cpus;
+
+ /* Can deadlock when called with interrupts disabled */
+ WARN_ON(irqs_disabled());
+
+ /* Holding any lock stops cpus from going down. */
+ spin_lock(&call_lock);
+
+ allbutself = cpu_online_map;
+ cpu_clear(smp_processor_id(), allbutself);
+
+ cpus_and(mask, mask, allbutself);
+ cpus = cpus_weight(mask);
+
+ if (!cpus) {
+ spin_unlock(&call_lock);
+ return 0;
+ }
+
+ data.func = func;
+ data.info = info;
+ atomic_set(&data.started, 0);
+ data.wait = wait;
+ if (wait)
+ atomic_set(&data.finished, 0);
+
+ call_data = &data;
+ wmb();
+
+ /* Send a message to other CPUs */
+ if (cpus_equal(mask, allbutself))
+ send_IPI_allbutself(CALL_FUNCTION_VECTOR);
+ else
+ send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
+
+ /* Wait for response */
+ while (atomic_read(&data.started) != cpus)
+ cpu_relax();
+
+ if (wait)
+ while (atomic_read(&data.finished) != cpus)
+ cpu_relax();
+ spin_unlock(&call_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(smp_call_function_mask);
+
/*
* smp_call_function - run a function on all other CPUs.
* @func: The function to run. This must be fast and non-blocking.
@@ -448,10 +481,7 @@ static void __smp_call_function (void (*
int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
int wait)
{
- spin_lock(&call_lock);
- __smp_call_function(func,info,nonatomic,wait);
- spin_unlock(&call_lock);
- return 0;
+ return smp_call_function_mask(cpu_online_map, func, info, wait);
}
EXPORT_SYMBOL(smp_call_function);

Index: kvm/include/asm-x86_64/smp.h
===================================================================
--- kvm.orig/include/asm-x86_64/smp.h 2007-08-22 18:31:37.000000000 +0200
+++ kvm/include/asm-x86_64/smp.h 2007-08-22 18:31:48.000000000 +0200
@@ -37,6 +37,8 @@ extern void lock_ipi_call_lock(void);
extern void unlock_ipi_call_lock(void);
extern int smp_num_siblings;
extern void smp_send_reschedule(int cpu);
+extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
+ void *info, int wait);

extern cpumask_t cpu_sibling_map[NR_CPUS];
extern cpumask_t cpu_core_map[NR_CPUS];


Attachments:
smp_call_function_mask-x86_64 (4.76 kB)
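
As a usage reference, here is a minimal sketch of a caller of the interface added
above. The callback and the mask variable are hypothetical, and, per the kerneldoc
in the patch, the call must be made from process context with interrupts enabled:

static void drain_local_state(void *info)
{
	/* Runs on each target cpu from the IPI handler; must be fast
	 * and must not sleep. */
}

static void drain_other_cpus(cpumask_t targets)
{
	/* Sends CALL_FUNCTION_VECTOR to every online cpu in 'targets'
	 * except the current one and, because wait is 1, spins until
	 * drain_local_state() has completed everywhere. */
	smp_call_function_mask(targets, drain_local_state, NULL, 1);
}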

2007-08-23 09:25:55

by Andi Kleen

Subject: Re: [PATCH] Implement missing x86_64 function smp_call_function_mask()

On Thursday 23 August 2007 09:36:05 Laurent Vivier wrote:
> This patch defines the missing function smp_call_function_mask() for x86_64;
> it is more or less a cut&paste of the i386 function.

What do you need it for?

-Andi

2007-08-23 09:34:53

by Laurent Vivier

Subject: Re: [PATCH] Implement missing x86_64 function smp_call_function_mask()

Andi Kleen wrote:
> On Thursday 23 August 2007 09:36:05 Laurent Vivier wrote:
>> This patch defines the missing function smp_call_function_mask() for x86_64;
>> it is more or less a cut&paste of the i386 function.
>
> What do you need it for?

Hi Andi,

it is for KVM:

drivers/kvm/kvm_main.c:

203 void kvm_flush_remote_tlbs(struct kvm *kvm)
204 {
...
227 /*
228 * We really want smp_call_function_mask() here. But that's not
229 * available, so ipi all cpus in parallel and wait for them
230 * to complete.
231 */
232 for (cpu = first_cpu(cpus); cpu != NR_CPUS; cpu = next_cpu(cpu, cpus))
233 smp_call_function_single(cpu, ack_flush, &completed, 1, 0);
234 while (atomic_read(&completed) != needed) {
235 cpu_relax();
236 barrier();
237 }
238 }
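
For illustration only, a rough sketch (not the actual KVM change) of what the
per-cpu loop and completion wait above could collapse into once
smp_call_function_mask() is available. It assumes ack_flush() is reworked to
ignore its argument, since wait=1 already guarantees the callback has completed
on every cpu in the mask before the call returns; 'cpus' is the mask built
earlier in kvm_flush_remote_tlbs():

static void ack_flush(void *unused)
{
	/* Nothing to do: with wait=1, smp_call_function_mask() spins
	 * until this handler has finished on every target cpu. */
}

	/* One IPI covers all cpus in 'cpus'; the current cpu is
	 * filtered out by smp_call_function_mask() itself. */
	smp_call_function_mask(cpus, ack_flush, NULL, 1);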

Regards,
Laurent
--
------------- [email protected] --------------
"Software is hard" - Donald Knuth


2007-08-23 10:02:49

by Andi Kleen

Subject: Re: [discuss] [PATCH] Implement missing x86_64 function smp_call_function_mask()

On Thu, Aug 23, 2007 at 11:34:45AM +0200, Laurent Vivier wrote:
> Andi Kleen wrote:
> > On Thursday 23 August 2007 09:36:05 Laurent Vivier wrote:
> >> This patch defines the missing function smp_call_function_mask() for x86_64;
> >> it is more or less a cut&paste of the i386 function.
> >
> > What do you need it for?
>
> Hi Andi,
>
> it is for KVM:

So put that into the description please.

-Andi

2007-08-24 11:22:01

by Laurent Vivier

Subject: [PATCH][RESEND] Implement missing x86_64 function smp_call_function_mask()

This patch defines the missing function smp_call_function_mask() for x86_64;
it is more or less a cut&paste of the i386 function. It also removes some
duplicate code.

This function is needed by KVM to execute a function on all CPUs.

 arch/x86_64/kernel/smp.c | 118 ++++++++++++++++++++++++-----------------------
 include/asm-x86_64/smp.h |   2
 2 files changed, 64 insertions(+), 56 deletions(-)

Signed-off-by: Laurent Vivier <[email protected]>

Index: kvm/arch/x86_64/kernel/smp.c
===================================================================
--- kvm.orig/arch/x86_64/kernel/smp.c 2007-08-23 17:36:13.000000000 +0200
+++ kvm/arch/x86_64/kernel/smp.c 2007-08-23 17:37:56.000000000 +0200
@@ -321,17 +321,30 @@ void unlock_ipi_call_lock(void)
}

/*
- * this function sends a 'generic call function' IPI to one other CPU
- * in the system.
- *
- * cpu is a standard Linux logical CPU number.
+ * this function sends a 'generic call function' IPI to all other CPU
+ * of the system defined in the mask.
*/
-static void
-__smp_call_function_single(int cpu, void (*func) (void *info), void *info,
- int nonatomic, int wait)
+
+static int
+__smp_call_function_mask(cpumask_t mask,
+ void (*func)(void *), void *info,
+ int wait)
{
struct call_data_struct data;
- int cpus = 1;
+ cpumask_t allbutself;
+ int cpus;
+
+ /* Can deadlock when called with interrupts disabled */
+ WARN_ON(irqs_disabled());
+
+ allbutself = cpu_online_map;
+ cpu_clear(smp_processor_id(), allbutself);
+
+ cpus_and(mask, mask, allbutself);
+ cpus = cpus_weight(mask);
+
+ if (!cpus)
+ return 0;

data.func = func;
data.info = info;
@@ -342,19 +355,51 @@ __smp_call_function_single(int cpu, void

call_data = &data;
wmb();
- /* Send a message to all other CPUs and wait for them to respond */
- send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);
+
+ /* Send a message to other CPUs */
+ if (cpus_equal(mask, allbutself))
+ send_IPI_allbutself(CALL_FUNCTION_VECTOR);
+ else
+ send_IPI_mask(mask, CALL_FUNCTION_VECTOR);

/* Wait for response */
while (atomic_read(&data.started) != cpus)
cpu_relax();

if (!wait)
- return;
+ return 0;

while (atomic_read(&data.finished) != cpus)
cpu_relax();
+
+ return 0;
+}
+/**
+ * smp_call_function_mask(): Run a function on a set of other CPUs.
+ * @mask: The set of cpus to run on. Must not include the current cpu.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+int smp_call_function_mask(cpumask_t mask,
+ void (*func)(void *), void *info,
+ int wait)
+{
+ int ret;
+ spin_lock(&call_lock);
+ ret = __smp_call_function_mask(mask, func, info, wait);
+ spin_unlock(&call_lock);
+ return ret;
}
+EXPORT_SYMBOL(smp_call_function_mask);

/*
* smp_call_function_single - Run a function on a specific CPU
@@ -373,6 +418,7 @@ int smp_call_function_single (int cpu, v
int nonatomic, int wait)
{
/* prevent preemption and reschedule on another processor */
+ int ret;
int me = get_cpu();

/* Can deadlock when called with interrupts disabled */
@@ -386,51 +432,14 @@ int smp_call_function_single (int cpu, v
return 0;
}

- spin_lock(&call_lock);
- __smp_call_function_single(cpu, func, info, nonatomic, wait);
- spin_unlock(&call_lock);
+ ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
+
put_cpu();
- return 0;
+ return ret;
}
EXPORT_SYMBOL(smp_call_function_single);

/*
- * this function sends a 'generic call function' IPI to all other CPUs
- * in the system.
- */
-static void __smp_call_function (void (*func) (void *info), void *info,
- int nonatomic, int wait)
-{
- struct call_data_struct data;
- int cpus = num_online_cpus()-1;
-
- if (!cpus)
- return;
-
- data.func = func;
- data.info = info;
- atomic_set(&data.started, 0);
- data.wait = wait;
- if (wait)
- atomic_set(&data.finished, 0);
-
- call_data = &data;
- wmb();
- /* Send a message to all other CPUs and wait for them to respond */
- send_IPI_allbutself(CALL_FUNCTION_VECTOR);
-
- /* Wait for response */
- while (atomic_read(&data.started) != cpus)
- cpu_relax();
-
- if (!wait)
- return;
-
- while (atomic_read(&data.finished) != cpus)
- cpu_relax();
-}
-
-/*
* smp_call_function - run a function on all other CPUs.
* @func: The function to run. This must be fast and non-blocking.
* @info: An arbitrary pointer to pass to the function.
@@ -448,10 +457,7 @@ static void __smp_call_function (void (*
int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
int wait)
{
- spin_lock(&call_lock);
- __smp_call_function(func,info,nonatomic,wait);
- spin_unlock(&call_lock);
- return 0;
+ return smp_call_function_mask(cpu_online_map, func, info, wait);
}
EXPORT_SYMBOL(smp_call_function);

@@ -478,7 +484,7 @@ void smp_send_stop(void)
/* Don't deadlock on the call lock in panic */
nolock = !spin_trylock(&call_lock);
local_irq_save(flags);
- __smp_call_function(stop_this_cpu, NULL, 0, 0);
+ __smp_call_function_mask(cpu_online_map, stop_this_cpu, NULL, 0);
if (!nolock)
spin_unlock(&call_lock);
disable_local_APIC();
Index: kvm/include/asm-x86_64/smp.h
===================================================================
--- kvm.orig/include/asm-x86_64/smp.h 2007-08-23 17:36:13.000000000 +0200
+++ kvm/include/asm-x86_64/smp.h 2007-08-23 17:37:56.000000000 +0200
@@ -37,6 +37,8 @@ extern void lock_ipi_call_lock(void);
extern void unlock_ipi_call_lock(void);
extern int smp_num_siblings;
extern void smp_send_reschedule(int cpu);
+extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
+ void *info, int wait);

extern cpumask_t cpu_sibling_map[NR_CPUS];
extern cpumask_t cpu_core_map[NR_CPUS];


Attachments:
smp_call_function_mask-x86_64 (5.58 kB)
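
One design note on the resent version: the work is split into an internal
__smp_call_function_mask(), which assumes call_lock is already held, and the
exported smp_call_function_mask(), which takes the lock around it. That keeps
the spin_trylock() panic handling in smp_send_stop() working, as the last
smp.c hunk shows. A minimal sketch of the two calling conventions, with the
mask, callback, and info arguments as placeholders:

	/* Ordinary callers go through the exported wrapper, which
	 * serializes access to call_data via call_lock. */
	smp_call_function_mask(mask, func, info, 1);

	/* The stop/panic path mirrors smp_send_stop(): take the lock
	 * if possible, but do not deadlock if another cpu died while
	 * holding it, and call the lock-free variant directly. */
	nolock = !spin_trylock(&call_lock);
	__smp_call_function_mask(cpu_online_map, stop_this_cpu, NULL, 0);
	if (!nolock)
		spin_unlock(&call_lock);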

2007-08-24 12:33:36

by Andi Kleen

Subject: Re: [PATCH][RESEND] Implement missing x86_64 function smp_call_function_mask()

On Friday 24 August 2007 13:21:57 Laurent Vivier wrote:
> This patch defines the missing function smp_call_function_mask() for x86_64;
> it is more or less a cut&paste of the i386 function. It also removes some
> duplicate code.
>
> This function is needed by KVM to execute a function on all CPUs.
>
> arch/x86_64/kernel/smp.c | 118 ++++++++++++++++++++++++-----------------------
> include/asm-x86_64/smp.h | 2
> 2 files changed, 64 insertions(+), 56 deletions(-)
>
> Signed-off-by: Laurent Vivier <[email protected]>


Applied thanks
-Andi