Rename the current larch_insn_patch_text() to patch_text_nosync(), then
redefine larch_insn_patch_text() to modify instructions under
stop_machine_cpuslocked(), so that other CPUs cannot race with the code
modification. This is a preparation for a later patch.
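For example (hypothetical sketch, not part of this patch), a caller that
needs to replace a live instruction with a NOP while other CPUs may be
executing it could then do:

  #include <asm/inst.h>

  /* Hypothetical caller; larch_insn_gen_nop() already exists in inst.c. */
  static int disarm_example(void *addr)
  {
          /* All online CPUs rendezvous in stop_machine before the write. */
          return larch_insn_patch_text(addr, larch_insn_gen_nop());
  }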
Signed-off-by: Tiezhu Yang <[email protected]>
---
arch/loongarch/kernel/inst.c | 33 ++++++++++++++++++++++++++++++++-
1 file changed, 32 insertions(+), 1 deletion(-)
diff --git a/arch/loongarch/kernel/inst.c b/arch/loongarch/kernel/inst.c
index aaaf9de..d842405 100644
--- a/arch/loongarch/kernel/inst.c
+++ b/arch/loongarch/kernel/inst.c
@@ -3,6 +3,7 @@
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/sizes.h>
+#include <linux/stop_machine.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
@@ -10,6 +11,12 @@

static DEFINE_RAW_SPINLOCK(patch_lock);

+struct patch_insn {
+ void *addr;
+ u32 insn;
+ atomic_t cpu_count;
+};
+
int larch_insn_read(void *addr, u32 *insnp)
{
int ret;
@@ -34,7 +41,7 @@ int larch_insn_write(void *addr, u32 insn)
return ret;
}

-int larch_insn_patch_text(void *addr, u32 insn)
+static int patch_text_nosync(void *addr, u32 insn)
{
int ret;
u32 *tp = addr;
@@ -50,6 +57,30 @@ int larch_insn_patch_text(void *addr, u32 insn)
return ret;
}

+static int patch_text_cb(void *data)
+{
+ struct patch_insn *param = data;
+ int ret = 0;
+
+ if (atomic_inc_return(&param->cpu_count) == num_online_cpus()) {
+ ret = patch_text_nosync(param->addr, param->insn);
+ atomic_inc(&param->cpu_count);
+ } else {
+ while (atomic_read(&param->cpu_count) <= num_online_cpus())
+ cpu_relax();
+ smp_mb();
+ }
+
+ return ret;
+}
+
+int larch_insn_patch_text(void *addr, u32 insn)
+{
+ struct patch_insn param = { addr, insn, ATOMIC_INIT(0) };
+
+ return stop_machine_cpuslocked(patch_text_cb, &param, cpu_online_mask);
+}
+
u32 larch_insn_gen_nop(void)
{
return INSN_NOP;
--
2.1.0
Hi, Tiezhu,
On Wed, Nov 30, 2022 at 4:19 PM Tiezhu Yang <[email protected]> wrote:
>
> Rename the current larch_insn_patch_text() to patch_text_nosync(), then
> redefine larch_insn_patch_text() to modify instructions under
> stop_machine_cpuslocked(), so that other CPUs cannot race with the code
> modification. This is a preparation for a later patch.
I don't think stop_machine is a good idea for this because it is too
expensive; maybe Masami can give us a better suggestion.
Huacai
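A minimal sketch of the cost concern (hypothetical caller, not from the
patch): every larch_insn_patch_text() call now performs one stop_machine
rendezvous across all online CPUs, so patching n sites in a loop stops the
whole machine n times:

  #include <asm/inst.h>

  /* Hypothetical example; each iteration stalls every online CPU once. */
  static void patch_many_example(u32 **addrs, u32 *insns, int n)
  {
          int i;

          for (i = 0; i < n; i++)
                  larch_insn_patch_text(addrs[i], insns[i]);
  }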