2014-04-14 17:44:59

by Sasha Levin

Subject: [PATCH 1/4] kmemcheck: add additional selfchecks

kmemcheck has its own tiny opcode decoder, and is not using the
kernel's decoder for historical reasons.

While the decoder works for most cases, it fails on quite a few
opcodes and returns incorrect values, which leads either to a
failure to detect an issue or to a false positive.

This patch adds selftest entries for a few of those opcodes (setne,
btsl and callq), causing the selfcheck to fail during boot.

Signed-off-by: Sasha Levin <[email protected]>
---
arch/x86/mm/kmemcheck/selftest.c | 9 +++++++++
1 file changed, 9 insertions(+)

diff --git a/arch/x86/mm/kmemcheck/selftest.c b/arch/x86/mm/kmemcheck/selftest.c
index aef7140..c898d33 100644
--- a/arch/x86/mm/kmemcheck/selftest.c
+++ b/arch/x86/mm/kmemcheck/selftest.c
@@ -23,6 +23,12 @@ static const struct selftest_opcode selftest_opcodes[] = {
{1, "\x66\x0f\xbe\x51\xf8", "movswq <mem8>, <reg16>"},
{1, "\x0f\xbe\x51\xf8", "movswq <mem8>, <reg32>"},

+ /* SETNE */
+ {1, "\x0f\x95\xc0", "setne <reg8>"},
+
+ /* BTSL */
+ {4, "\x0f\xba\x6b\x10\x00", "btsl <imm8>, <mem32>"},
+
#ifdef CONFIG_X86_64
/* MOVZX / MOVZXD */
{1, "\x49\x0f\xb6\x51\xf8", "movzbq <mem8>, <reg64>"},
@@ -32,6 +38,9 @@ static const struct selftest_opcode selftest_opcodes[] = {
{1, "\x49\x0f\xbe\x51\xf8", "movsbq <mem8>, <reg64>"},
{2, "\x49\x0f\xbf\x51\xf8", "movsbq <mem16>, <reg64>"},
{4, "\x49\x63\x51\xf8", "movslq <mem32>, <reg64>"},
+
+ /* CALLQ */
+ {8, "\xe8\x00\x00\x00", "call <mem64>"},
#endif
};

--
1.7.10.4


2014-04-14 17:45:01

by Sasha Levin

Subject: [PATCH 4/4] kmemcheck: Switch to using kernel disassembler

kmemcheck used to do its own basic instruction decoding, which is
just a duplication of the work done in arch/x86/lib/insn.c.

Instead, switch it to using the existing disassembler, and replace
the magic opcode numbers with something meaningful.

Signed-off-by: Sasha Levin <[email protected]>
---
arch/x86/mm/kmemcheck/Makefile | 2 +-
arch/x86/mm/kmemcheck/kmemcheck.c | 105 ++++++++++++++++++------------------
arch/x86/mm/kmemcheck/opcode.c | 106 -------------------------------------
arch/x86/mm/kmemcheck/opcode.h | 9 ----
arch/x86/mm/kmemcheck/selftest.c | 12 +++--
arch/x86/mm/kmemcheck/selftest.h | 1 +
6 files changed, 61 insertions(+), 174 deletions(-)
delete mode 100644 arch/x86/mm/kmemcheck/opcode.c
delete mode 100644 arch/x86/mm/kmemcheck/opcode.h

diff --git a/arch/x86/mm/kmemcheck/Makefile b/arch/x86/mm/kmemcheck/Makefile
index 520b3bc..f1749ad 100644
--- a/arch/x86/mm/kmemcheck/Makefile
+++ b/arch/x86/mm/kmemcheck/Makefile
@@ -1 +1 @@
-obj-y := error.o kmemcheck.o opcode.o pte.o selftest.o shadow.o
+obj-y := error.o kmemcheck.o pte.o selftest.o shadow.o
diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
index dd89a13..571664d 100644
--- a/arch/x86/mm/kmemcheck/kmemcheck.c
+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
@@ -25,9 +25,10 @@
#include <asm/kmemcheck.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
+#include <asm/insn.h>
+#include <asm/inat-tables.h>

#include "error.h"
-#include "opcode.h"
#include "pte.h"
#include "selftest.h"
#include "shadow.h"
@@ -521,13 +522,30 @@ enum kmemcheck_method {
KMEMCHECK_WRITE,
};

+char kmemcheck_get_addr_size(struct insn *insn)
+{
+ switch (insn->opcode.value) {
+ case 0xa4:
+ case 0xbe0f:
+ case 0xb60f:
+ return 1;
+
+ case 0xbf0f:
+ case 0xb70f:
+ return 2;
+ case 0x63:
+ if (!X86_REX_W(insn->rex_prefix.value))
+ break;
+ return 4;
+ }
+
+ return insn->mem_bytes;
+}
+
static void kmemcheck_access(struct pt_regs *regs,
unsigned long fallback_address, enum kmemcheck_method fallback_method)
{
- const uint8_t *insn;
- const uint8_t *insn_primary;
- unsigned int size;
-
+ struct insn insn;
struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);

/* Recursive fault -- ouch. */
@@ -539,63 +557,42 @@ static void kmemcheck_access(struct pt_regs *regs,

data->busy = true;

- insn = (const uint8_t *) regs->ip;
- insn_primary = kmemcheck_opcode_get_primary(insn);
-
- kmemcheck_opcode_decode(insn, &size);
+ kernel_insn_init(&insn, (const void *)regs->ip);
+ insn_get_length(&insn);

- switch (insn_primary[0]) {
+ switch (insn.mnemonic) {
#ifdef CONFIG_KMEMCHECK_BITOPS_OK
- /* AND, OR, XOR */
- /*
- * Unfortunately, these instructions have to be excluded from
- * our regular checking since they access only some (and not
- * all) bits. This clears out "bogus" bitfield-access warnings.
- */
- case 0x80:
- case 0x81:
- case 0x82:
- case 0x83:
- switch ((insn_primary[1] >> 3) & 7) {
- /* OR */
- case 1:
- /* AND */
- case 4:
- /* XOR */
- case 6:
- kmemcheck_write(regs, fallback_address, size);
- goto out;
-
- /* ADD */
- case 0:
- /* ADC */
- case 2:
- /* SBB */
- case 3:
- /* SUB */
- case 5:
- /* CMP */
- case 7:
- break;
- }
+ /*
+ * Unfortunately, these instructions have to be excluded from
+ * our regular checking since they access only some (and not
+ * all) bits. This clears out "bogus" bitfield-access warnings.
+ */
+ case INSN_OPC_OR:
+ case INSN_OPC_AND:
+ case INSN_OPC_XOR:
+ kmemcheck_write(regs, fallback_address, insn.mem_bytes);
+ goto out;
+
+ case INSN_OPC_ADD:
+ case INSN_OPC_ADC:
+ case INSN_OPC_SBB:
+ case INSN_OPC_SUB:
+ case INSN_OPC_CMP:
break;
#endif
-
- /* MOVS, MOVSB, MOVSW, MOVSD */
- case 0xa4:
- case 0xa5:
+ case INSN_OPC_MOVS_B:
+ case INSN_OPC_MOVS_W_D_Q:
/*
* These instructions are special because they take two
* addresses, but we only get one page fault.
*/
- kmemcheck_copy(regs, regs->si, regs->di, size);
+ kmemcheck_copy(regs, regs->si, regs->di, insn.mem_bytes);
goto out;

- /* CMPS, CMPSB, CMPSW, CMPSD */
- case 0xa6:
- case 0xa7:
- kmemcheck_read(regs, regs->si, size);
- kmemcheck_read(regs, regs->di, size);
+ case INSN_OPC_CMPS_B:
+ case INSN_OPC_CMPS_W_D:
+ kmemcheck_read(regs, regs->si, insn.mem_bytes);
+ kmemcheck_read(regs, regs->di, insn.mem_bytes);
goto out;
}

@@ -606,10 +603,10 @@ static void kmemcheck_access(struct pt_regs *regs,
*/
switch (fallback_method) {
case KMEMCHECK_READ:
- kmemcheck_read(regs, fallback_address, size);
+ kmemcheck_read(regs, fallback_address, insn.mem_bytes);
goto out;
case KMEMCHECK_WRITE:
- kmemcheck_write(regs, fallback_address, size);
+ kmemcheck_write(regs, fallback_address, insn.mem_bytes);
goto out;
}

diff --git a/arch/x86/mm/kmemcheck/opcode.c b/arch/x86/mm/kmemcheck/opcode.c
deleted file mode 100644
index 324aa3f..0000000
--- a/arch/x86/mm/kmemcheck/opcode.c
+++ /dev/null
@@ -1,106 +0,0 @@
-#include <linux/types.h>
-
-#include "opcode.h"
-
-static bool opcode_is_prefix(uint8_t b)
-{
- return
- /* Group 1 */
- b == 0xf0 || b == 0xf2 || b == 0xf3
- /* Group 2 */
- || b == 0x2e || b == 0x36 || b == 0x3e || b == 0x26
- || b == 0x64 || b == 0x65
- /* Group 3 */
- || b == 0x66
- /* Group 4 */
- || b == 0x67;
-}
-
-#ifdef CONFIG_X86_64
-static bool opcode_is_rex_prefix(uint8_t b)
-{
- return (b & 0xf0) == 0x40;
-}
-#else
-static bool opcode_is_rex_prefix(uint8_t b)
-{
- return false;
-}
-#endif
-
-#define REX_W (1 << 3)
-
-/*
- * This is a VERY crude opcode decoder. We only need to find the size of the
- * load/store that caused our #PF and this should work for all the opcodes
- * that we care about. Moreover, the ones who invented this instruction set
- * should be shot.
- */
-void kmemcheck_opcode_decode(const uint8_t *op, unsigned int *size)
-{
- /* Default operand size */
- int operand_size_override = 4;
-
- /* prefixes */
- for (; opcode_is_prefix(*op); ++op) {
- if (*op == 0x66)
- operand_size_override = 2;
- }
-
- /* REX prefix */
- if (opcode_is_rex_prefix(*op)) {
- uint8_t rex = *op;
-
- ++op;
- if (rex & REX_W) {
- switch (*op) {
- case 0x63:
- *size = 4;
- return;
- case 0x0f:
- ++op;
-
- switch (*op) {
- case 0xb6:
- case 0xbe:
- *size = 1;
- return;
- case 0xb7:
- case 0xbf:
- *size = 2;
- return;
- }
-
- break;
- }
-
- *size = 8;
- return;
- }
- }
-
- /* escape opcode */
- if (*op == 0x0f) {
- ++op;
-
- /*
- * This is move with zero-extend and sign-extend, respectively;
- * we don't have to think about 0xb6/0xbe, because this is
- * already handled in the conditional below.
- */
- if (*op == 0xb7 || *op == 0xbf)
- operand_size_override = 2;
- }
-
- *size = (*op & 1) ? operand_size_override : 1;
-}
-
-const uint8_t *kmemcheck_opcode_get_primary(const uint8_t *op)
-{
- /* skip prefixes */
- while (opcode_is_prefix(*op))
- ++op;
- if (opcode_is_rex_prefix(*op))
- ++op;
- return op;
-}
diff --git a/arch/x86/mm/kmemcheck/opcode.h b/arch/x86/mm/kmemcheck/opcode.h
deleted file mode 100644
index 6956aad..0000000
--- a/arch/x86/mm/kmemcheck/opcode.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef ARCH__X86__MM__KMEMCHECK__OPCODE_H
-#define ARCH__X86__MM__KMEMCHECK__OPCODE_H
-
-#include <linux/types.h>
-
-void kmemcheck_opcode_decode(const uint8_t *op, unsigned int *size);
-const uint8_t *kmemcheck_opcode_get_primary(const uint8_t *op);
-
-#endif
diff --git a/arch/x86/mm/kmemcheck/selftest.c b/arch/x86/mm/kmemcheck/selftest.c
index c898d33..8976938 100644
--- a/arch/x86/mm/kmemcheck/selftest.c
+++ b/arch/x86/mm/kmemcheck/selftest.c
@@ -1,7 +1,9 @@
#include <linux/bug.h>
#include <linux/kernel.h>

-#include "opcode.h"
+#include <asm/insn.h>
+#include <asm/inat-tables.h>
+
#include "selftest.h"

struct selftest_opcode {
@@ -46,10 +48,12 @@ static const struct selftest_opcode selftest_opcodes[] = {

static bool selftest_opcode_one(const struct selftest_opcode *op)
{
- unsigned size;
-
- kmemcheck_opcode_decode(op->insn, &size);
+ struct insn insn;
+ char size;

+ kernel_insn_init(&insn, op->insn);
+ insn_get_length(&insn);
+ size = kmemcheck_get_addr_size(&insn);
if (size == op->expected_size)
return true;

diff --git a/arch/x86/mm/kmemcheck/selftest.h b/arch/x86/mm/kmemcheck/selftest.h
index 8fed4fe..4cd1c5e 100644
--- a/arch/x86/mm/kmemcheck/selftest.h
+++ b/arch/x86/mm/kmemcheck/selftest.h
@@ -2,5 +2,6 @@
#define ARCH_X86_MM_KMEMCHECK_SELFTEST_H

bool kmemcheck_selftest(void);
+extern char kmemcheck_get_addr_size(struct insn *insn);

#endif
--
1.7.10.4

2014-04-14 17:44:54

by Sasha Levin

Subject: [PATCH 2/4] x86: Move instruction decoder data into header

Right now we generate data for the instruction decoder and place it
in a code file which gets #included directly (yuck).

Instead, make it a header which will also be usable by other code
that wants to use the data.

Signed-off-by: Sasha Levin <[email protected]>
---
arch/x86/Makefile | 6 ++++++
arch/x86/lib/Makefile | 8 +++++---
arch/x86/lib/inat.c | 2 +-
arch/x86/tools/Makefile | 8 ++++----
4 files changed, 16 insertions(+), 8 deletions(-)

diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 602f57e..26eee4e 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -178,6 +178,12 @@ archscripts: scripts_basic
$(Q)$(MAKE) $(build)=arch/x86/tools relocs

###
+# inat instruction table generation
+
+archprepare:
+ $(Q)$(MAKE) $(build)=arch/x86/lib inat_tables
+
+###
# Syscall table generation

archheaders:
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index eabcb6e..3014da8 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -7,12 +7,12 @@ inat_tables_maps = $(srctree)/arch/x86/lib/x86-opcode-map.txt
quiet_cmd_inat_tables = GEN $@
cmd_inat_tables = $(AWK) -f $(inat_tables_script) $(inat_tables_maps) > $@ || rm -f $@

-$(obj)/inat-tables.c: $(inat_tables_script) $(inat_tables_maps)
+$(obj)/../include/generated/asm/inat-tables.h: $(inat_tables_script) $(inat_tables_maps)
$(call cmd,inat_tables)

-$(obj)/inat.o: $(obj)/inat-tables.c
+$(obj)/inat.o: $(obj)/../include/generated/asm/inat-tables.h

-clean-files := inat-tables.c
+clean-files := ../include/asm/inat-tables.h

obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o

@@ -44,3 +44,5 @@ else
lib-y += copy_user_64.o copy_user_nocache_64.o
lib-y += cmpxchg16b_emu.o
endif
+
+inat_tables: $(obj)/../include/generated/asm/inat-tables.h
diff --git a/arch/x86/lib/inat.c b/arch/x86/lib/inat.c
index c1f01a8..641a996 100644
--- a/arch/x86/lib/inat.c
+++ b/arch/x86/lib/inat.c
@@ -21,7 +21,7 @@
#include <asm/insn.h>

/* Attribute tables are generated from opcode map */
-#include "inat-tables.c"
+#include <asm/inat-tables.h>

/* Attribute search APIs */
insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode)
diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
index e812034..e34e7e3 100644
--- a/arch/x86/tools/Makefile
+++ b/arch/x86/tools/Makefile
@@ -28,14 +28,14 @@ posttest: $(obj)/test_get_len vmlinux $(obj)/insn_sanity
hostprogs-y += test_get_len insn_sanity

# -I needed for generated C source and C source which in the kernel tree.
-HOSTCFLAGS_test_get_len.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x86/include/uapi/ -I$(srctree)/arch/x86/include/ -I$(srctree)/arch/x86/lib/ -I$(srctree)/include/uapi/
+HOSTCFLAGS_test_get_len.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x86/include/uapi/ -I$(srctree)/arch/x86/include/ -I$(srctree)/arch/x86/lib/ -I$(srctree)/include/uapi/ -I$(srctree)/arch/x86/include/generated/

-HOSTCFLAGS_insn_sanity.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x86/include/ -I$(srctree)/arch/x86/lib/ -I$(srctree)/include/
+HOSTCFLAGS_insn_sanity.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x86/include/ -I$(srctree)/arch/x86/lib/ -I$(srctree)/include/ -I$(srctree)/arch/x86/include/generated/

# Dependencies are also needed.
-$(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
+$(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/include/generated/asm/inat-tables.h

-$(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
+$(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/include/generated/asm/inat-tables.h

HOST_EXTRACFLAGS += -I$(srctree)/tools/include
hostprogs-y += relocs
--
1.7.10.4

2014-04-14 17:46:23

by Sasha Levin

Subject: [PATCH 3/4] x86/insn: Extract more information about instructions

arch/x86/lib/x86-opcode-map.txt provides us with quite a lot of information
about instructions. So far we've discarded any information we didn't need
elsewhere.

This patch extracts two more bits of information about instructions:

- Mnemonic. We'd like to refer to instructions by their mnemonic rather
than by their opcode. This makes the code more readable, and also less
confusing and less prone to typos, since a single mnemonic may be
represented by quite a few different opcodes.

- Memory access size. We currently decode the address size and the operand
size (in bytes). kmemcheck would additionally like to know how many bytes
were read from or written to an address by a given instruction, so we also
keep the size of the memory access.

To sum up, this patch translates more bits of
arch/x86/lib/x86-opcode-map.txt into C. No new information is being added
to instructions, only what was already there.

Signed-off-by: Sasha Levin <[email protected]>
---
arch/x86/include/asm/inat.h | 106 +++++++++++++++++-----------------
arch/x86/include/asm/inat_types.h | 9 ++-
arch/x86/include/asm/insn.h | 2 +
arch/x86/kernel/kprobes/core.c | 10 ++--
arch/x86/lib/inat.c | 65 ++++++++++++---------
arch/x86/lib/insn.c | 91 ++++++++++++++++++-----------
arch/x86/tools/gen-insn-attr-x86.awk | 99 ++++++++++++++++++++++++-------
arch/x86/tools/insn_sanity.c | 8 +--
8 files changed, 248 insertions(+), 142 deletions(-)

diff --git a/arch/x86/include/asm/inat.h b/arch/x86/include/asm/inat.h
index 74a2e31..38de08a 100644
--- a/arch/x86/include/asm/inat.h
+++ b/arch/x86/include/asm/inat.h
@@ -96,126 +96,128 @@
#define INAT_MAKE_IMM(imm) (imm << INAT_IMM_OFFS)

/* Attribute search APIs */
-extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode);
+extern const insn_attr_t *inat_get_opcode(insn_byte_t opcode);
extern int inat_get_last_prefix_id(insn_byte_t last_pfx);
-extern insn_attr_t inat_get_escape_attribute(insn_byte_t opcode,
- int lpfx_id,
- insn_attr_t esc_attr);
-extern insn_attr_t inat_get_group_attribute(insn_byte_t modrm,
+extern const insn_attr_t *inat_get_escape(insn_byte_t opcode, int lpfx_id,
+ insn_flags_t esc_flags);
+extern insn_flags_t inat_get_group_flags(insn_byte_t modrm,
int lpfx_id,
- insn_attr_t esc_attr);
-extern insn_attr_t inat_get_avx_attribute(insn_byte_t opcode,
+ insn_flags_t esc_flags);
+extern const insn_attr_t *inat_get_group(insn_byte_t modrm,
+ int lpfx_id,
+ insn_flags_t esc_flags);
+extern const insn_attr_t *inat_get_avx(insn_byte_t opcode,
insn_byte_t vex_m,
insn_byte_t vex_pp);

/* Attribute checking functions */
-static inline int inat_is_legacy_prefix(insn_attr_t attr)
+static inline int inat_is_legacy_prefix(insn_flags_t flags)
{
- attr &= INAT_PFX_MASK;
- return attr && attr <= INAT_LGCPFX_MAX;
+ flags &= INAT_PFX_MASK;
+ return flags && flags <= INAT_LGCPFX_MAX;
}

-static inline int inat_is_address_size_prefix(insn_attr_t attr)
+static inline int inat_is_address_size_prefix(insn_flags_t flags)
{
- return (attr & INAT_PFX_MASK) == INAT_PFX_ADDRSZ;
+ return (flags & INAT_PFX_MASK) == INAT_PFX_ADDRSZ;
}

-static inline int inat_is_operand_size_prefix(insn_attr_t attr)
+static inline int inat_is_operand_size_prefix(insn_flags_t flags)
{
- return (attr & INAT_PFX_MASK) == INAT_PFX_OPNDSZ;
+ return (flags & INAT_PFX_MASK) == INAT_PFX_OPNDSZ;
}

-static inline int inat_is_rex_prefix(insn_attr_t attr)
+static inline int inat_is_rex_prefix(insn_flags_t flags)
{
- return (attr & INAT_PFX_MASK) == INAT_PFX_REX;
+ return (flags & INAT_PFX_MASK) == INAT_PFX_REX;
}

-static inline int inat_last_prefix_id(insn_attr_t attr)
+static inline int inat_last_prefix_id(insn_flags_t flags)
{
- if ((attr & INAT_PFX_MASK) > INAT_LSTPFX_MAX)
+ if ((flags & INAT_PFX_MASK) > INAT_LSTPFX_MAX)
return 0;
else
- return attr & INAT_PFX_MASK;
+ return flags & INAT_PFX_MASK;
}

-static inline int inat_is_vex_prefix(insn_attr_t attr)
+static inline int inat_is_vex_prefix(insn_flags_t flags)
{
- attr &= INAT_PFX_MASK;
- return attr == INAT_PFX_VEX2 || attr == INAT_PFX_VEX3;
+ flags &= INAT_PFX_MASK;
+ return flags == INAT_PFX_VEX2 || flags == INAT_PFX_VEX3;
}

-static inline int inat_is_vex3_prefix(insn_attr_t attr)
+static inline int inat_is_vex3_prefix(insn_flags_t flags)
{
- return (attr & INAT_PFX_MASK) == INAT_PFX_VEX3;
+ return (flags & INAT_PFX_MASK) == INAT_PFX_VEX3;
}

-static inline int inat_is_escape(insn_attr_t attr)
+static inline int inat_is_escape(insn_flags_t flags)
{
- return attr & INAT_ESC_MASK;
+ return flags & INAT_ESC_MASK;
}

-static inline int inat_escape_id(insn_attr_t attr)
+static inline int inat_escape_id(insn_flags_t flags)
{
- return (attr & INAT_ESC_MASK) >> INAT_ESC_OFFS;
+ return (flags & INAT_ESC_MASK) >> INAT_ESC_OFFS;
}

-static inline int inat_is_group(insn_attr_t attr)
+static inline int inat_is_group(insn_flags_t flags)
{
- return attr & INAT_GRP_MASK;
+ return flags & INAT_GRP_MASK;
}

-static inline int inat_group_id(insn_attr_t attr)
+static inline int inat_group_id(insn_flags_t flags)
{
- return (attr & INAT_GRP_MASK) >> INAT_GRP_OFFS;
+ return (flags & INAT_GRP_MASK) >> INAT_GRP_OFFS;
}

-static inline int inat_group_common_attribute(insn_attr_t attr)
+static inline int inat_group_common_flags(insn_flags_t flags)
{
- return attr & ~INAT_GRP_MASK;
+ return flags & ~INAT_GRP_MASK;
}

-static inline int inat_has_immediate(insn_attr_t attr)
+static inline int inat_has_immediate(insn_flags_t flags)
{
- return attr & INAT_IMM_MASK;
+ return flags & INAT_IMM_MASK;
}

-static inline int inat_immediate_size(insn_attr_t attr)
+static inline int inat_immediate_size(insn_flags_t flags)
{
- return (attr & INAT_IMM_MASK) >> INAT_IMM_OFFS;
+ return (flags & INAT_IMM_MASK) >> INAT_IMM_OFFS;
}

-static inline int inat_has_modrm(insn_attr_t attr)
+static inline int inat_has_modrm(insn_flags_t flags)
{
- return attr & INAT_MODRM;
+ return flags & INAT_MODRM;
}

-static inline int inat_is_force64(insn_attr_t attr)
+static inline int inat_is_force64(insn_flags_t flags)
{
- return attr & INAT_FORCE64;
+ return flags & INAT_FORCE64;
}

-static inline int inat_has_second_immediate(insn_attr_t attr)
+static inline int inat_has_second_immediate(insn_flags_t flags)
{
- return attr & INAT_SCNDIMM;
+ return flags & INAT_SCNDIMM;
}

-static inline int inat_has_moffset(insn_attr_t attr)
+static inline int inat_has_moffset(insn_flags_t flags)
{
- return attr & INAT_MOFFSET;
+ return flags & INAT_MOFFSET;
}

-static inline int inat_has_variant(insn_attr_t attr)
+static inline int inat_has_variant(insn_flags_t flags)
{
- return attr & INAT_VARIANT;
+ return flags & INAT_VARIANT;
}

-static inline int inat_accept_vex(insn_attr_t attr)
+static inline int inat_accept_vex(insn_flags_t flags)
{
- return attr & INAT_VEXOK;
+ return flags & INAT_VEXOK;
}

-static inline int inat_must_vex(insn_attr_t attr)
+static inline int inat_must_vex(insn_flags_t flags)
{
- return attr & INAT_VEXONLY;
+ return flags & INAT_VEXONLY;
}
#endif
diff --git a/arch/x86/include/asm/inat_types.h b/arch/x86/include/asm/inat_types.h
index cb3c20c..028275a 100644
--- a/arch/x86/include/asm/inat_types.h
+++ b/arch/x86/include/asm/inat_types.h
@@ -22,7 +22,14 @@
*/

/* Instruction attributes */
-typedef unsigned int insn_attr_t;
+typedef unsigned int insn_flags_t;
+
+typedef struct {
+ insn_flags_t flags;
+ unsigned int mnemonic;
+ char mem_bytes;
+} insn_attr_t;
+
typedef unsigned char insn_byte_t;
typedef signed int insn_value_t;

diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h
index 48eb30a..c4076f8 100644
--- a/arch/x86/include/asm/insn.h
+++ b/arch/x86/include/asm/insn.h
@@ -59,8 +59,10 @@ struct insn {
};

insn_attr_t attr;
+ unsigned int mnemonic;
unsigned char opnd_bytes;
unsigned char addr_bytes;
+ char mem_bytes;
unsigned char length;
unsigned char x86_64;

diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 79a3f96..c9102b6 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -141,15 +141,15 @@ void __kprobes synthesize_relcall(void *from, void *to)
*/
static kprobe_opcode_t *__kprobes skip_prefixes(kprobe_opcode_t *insn)
{
- insn_attr_t attr;
+ insn_flags_t flags;

- attr = inat_get_opcode_attribute((insn_byte_t)*insn);
- while (inat_is_legacy_prefix(attr)) {
+ flags = inat_get_opcode((insn_byte_t)*insn)->flags;
+ while (inat_is_legacy_prefix(flags)) {
insn++;
- attr = inat_get_opcode_attribute((insn_byte_t)*insn);
+ flags = inat_get_opcode((insn_byte_t)*insn)->flags;
}
#ifdef CONFIG_X86_64
- if (inat_is_rex_prefix(attr))
+ if (inat_is_rex_prefix(flags))
insn++;
#endif
return insn;
diff --git a/arch/x86/lib/inat.c b/arch/x86/lib/inat.c
index 641a996..dddb9ff 100644
--- a/arch/x86/lib/inat.c
+++ b/arch/x86/lib/inat.c
@@ -19,26 +19,27 @@
*
*/
#include <asm/insn.h>
+#include <linux/stddef.h>

/* Attribute tables are generated from opcode map */
#include <asm/inat-tables.h>

/* Attribute search APIs */
-insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode)
+const insn_attr_t *inat_get_opcode(insn_byte_t opcode)
{
- return inat_primary_table[opcode];
+ return &inat_primary_table[opcode];
}

int inat_get_last_prefix_id(insn_byte_t last_pfx)
{
- insn_attr_t lpfx_attr;
+ insn_flags_t lpfx_flags;

- lpfx_attr = inat_get_opcode_attribute(last_pfx);
- return inat_last_prefix_id(lpfx_attr);
+ lpfx_flags = inat_get_opcode(last_pfx)->flags;
+ return inat_last_prefix_id(lpfx_flags);
}

-insn_attr_t inat_get_escape_attribute(insn_byte_t opcode, int lpfx_id,
- insn_attr_t esc_attr)
+const insn_attr_t *inat_get_escape(insn_byte_t opcode, int lpfx_id,
+ insn_flags_t esc_attr)
{
const insn_attr_t *table;
int n;
@@ -47,51 +48,61 @@ insn_attr_t inat_get_escape_attribute(insn_byte_t opcode, int lpfx_id,

table = inat_escape_tables[n][0];
if (!table)
- return 0;
- if (inat_has_variant(table[opcode]) && lpfx_id) {
+ return NULL;
+ if (inat_has_variant(table[opcode].flags) && lpfx_id) {
table = inat_escape_tables[n][lpfx_id];
if (!table)
- return 0;
+ return NULL;
}
- return table[opcode];
+ return &table[opcode];
}

-insn_attr_t inat_get_group_attribute(insn_byte_t modrm, int lpfx_id,
- insn_attr_t grp_attr)
+const insn_attr_t *inat_get_group(insn_byte_t modrm, int lpfx_id,
+ insn_flags_t grp_flags)
{
const insn_attr_t *table;
int n;

- n = inat_group_id(grp_attr);
+ n = inat_group_id(grp_flags);

table = inat_group_tables[n][0];
if (!table)
- return inat_group_common_attribute(grp_attr);
- if (inat_has_variant(table[X86_MODRM_REG(modrm)]) && lpfx_id) {
+ return NULL;
+ if (inat_has_variant(table[X86_MODRM_REG(modrm)].flags) && lpfx_id) {
table = inat_group_tables[n][lpfx_id];
if (!table)
- return inat_group_common_attribute(grp_attr);
+ return NULL;
}
- return table[X86_MODRM_REG(modrm)] |
- inat_group_common_attribute(grp_attr);
+ return &table[X86_MODRM_REG(modrm)];
}

-insn_attr_t inat_get_avx_attribute(insn_byte_t opcode, insn_byte_t vex_m,
- insn_byte_t vex_p)
+insn_flags_t inat_get_group_flags(insn_byte_t modrm, int lpfx_id,
+ insn_flags_t grp_flags)
+{
+ const insn_attr_t *attr = inat_get_group(modrm, lpfx_id, grp_flags);
+ insn_flags_t insn_flags = inat_group_common_flags(grp_flags);
+
+ if (attr)
+ insn_flags |= attr->flags;
+
+ return insn_flags;
+}
+
+const insn_attr_t *inat_get_avx(insn_byte_t opcode, insn_byte_t vex_m,
+ insn_byte_t vex_p)
{
const insn_attr_t *table;
if (vex_m > X86_VEX_M_MAX || vex_p > INAT_LSTPFX_MAX)
- return 0;
+ return NULL;
/* At first, this checks the master table */
table = inat_avx_tables[vex_m][0];
if (!table)
- return 0;
- if (!inat_is_group(table[opcode]) && vex_p) {
+ return NULL;
+ if (!inat_is_group(table[opcode].flags) && vex_p) {
/* If this is not a group, get attribute directly */
table = inat_avx_tables[vex_m][vex_p];
if (!table)
- return 0;
+ return NULL;
}
- return table[opcode];
+ return &table[opcode];
}
-
diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
index 54fcffe..9005450 100644
--- a/arch/x86/lib/insn.c
+++ b/arch/x86/lib/insn.c
@@ -74,7 +74,7 @@ void insn_init(struct insn *insn, const void *kaddr, int x86_64)
void insn_get_prefixes(struct insn *insn)
{
struct insn_field *prefixes = &insn->prefixes;
- insn_attr_t attr;
+ insn_flags_t flags;
insn_byte_t b, lb;
int i, nb;

@@ -84,8 +84,8 @@ void insn_get_prefixes(struct insn *insn)
nb = 0;
lb = 0;
b = peek_next(insn_byte_t, insn);
- attr = inat_get_opcode_attribute(b);
- while (inat_is_legacy_prefix(attr)) {
+ flags = inat_get_opcode(b)->flags;
+ while (inat_is_legacy_prefix(flags)) {
/* Skip if same prefix */
for (i = 0; i < nb; i++)
if (prefixes->bytes[i] == b)
@@ -94,13 +94,13 @@ void insn_get_prefixes(struct insn *insn)
/* Invalid instruction */
break;
prefixes->bytes[nb++] = b;
- if (inat_is_address_size_prefix(attr)) {
+ if (inat_is_address_size_prefix(flags)) {
/* address size switches 2/4 or 4/8 */
if (insn->x86_64)
insn->addr_bytes ^= 12;
else
insn->addr_bytes ^= 6;
- } else if (inat_is_operand_size_prefix(attr)) {
+ } else if (inat_is_operand_size_prefix(flags)) {
/* oprand size switches 2/4 */
insn->opnd_bytes ^= 6;
}
@@ -109,7 +109,7 @@ found:
insn->next_byte++;
lb = b;
b = peek_next(insn_byte_t, insn);
- attr = inat_get_opcode_attribute(b);
+ flags = inat_get_opcode(b)->flags;
}
/* Set the last prefix */
if (lb && lb != insn->prefixes.bytes[3]) {
@@ -126,22 +126,24 @@ found:
/* Decode REX prefix */
if (insn->x86_64) {
b = peek_next(insn_byte_t, insn);
- attr = inat_get_opcode_attribute(b);
- if (inat_is_rex_prefix(attr)) {
+ flags = inat_get_opcode(b)->flags;
+ if (inat_is_rex_prefix(flags)) {
insn->rex_prefix.value = b;
insn->rex_prefix.nbytes = 1;
insn->next_byte++;
- if (X86_REX_W(b))
+ if (X86_REX_W(b)) {
/* REX.W overrides opnd_size */
insn->opnd_bytes = 8;
+ insn->mem_bytes = 8;
+ }
}
}
insn->rex_prefix.got = 1;

/* Decode VEX prefix */
b = peek_next(insn_byte_t, insn);
- attr = inat_get_opcode_attribute(b);
- if (inat_is_vex_prefix(attr)) {
+ flags = inat_get_opcode(b)->flags;
+ if (inat_is_vex_prefix(flags)) {
insn_byte_t b2 = peek_nbyte_next(insn_byte_t, insn, 1);
if (!insn->x86_64) {
/*
@@ -154,14 +156,16 @@ found:
}
insn->vex_prefix.bytes[0] = b;
insn->vex_prefix.bytes[1] = b2;
- if (inat_is_vex3_prefix(attr)) {
+ if (inat_is_vex3_prefix(flags)) {
b2 = peek_nbyte_next(insn_byte_t, insn, 2);
insn->vex_prefix.bytes[2] = b2;
insn->vex_prefix.nbytes = 3;
insn->next_byte += 3;
- if (insn->x86_64 && X86_VEX_W(b2))
+ if (insn->x86_64 && X86_VEX_W(b2)) {
/* VEX.W overrides opnd_size */
insn->opnd_bytes = 8;
+ insn->mem_bytes = 8;
+ }
} else {
insn->vex_prefix.nbytes = 2;
insn->next_byte += 2;
@@ -181,7 +185,7 @@ err_out:
* @insn: &struct insn containing instruction
*
* Populates @insn->opcode, updates @insn->next_byte to point past the
- * opcode byte(s), and set @insn->attr (except for groups).
+ * opcode byte(s), and set @insn->attr.flags (except for groups).
* If necessary, first collects any preceding (prefix) bytes.
* Sets @insn->opcode.value = opcode1. No effect if @insn->opcode.got
* is already 1.
@@ -206,25 +210,38 @@ void insn_get_opcode(struct insn *insn)
insn_byte_t m, p;
m = insn_vex_m_bits(insn);
p = insn_vex_p_bits(insn);
- insn->attr = inat_get_avx_attribute(op, m, p);
- if (!inat_accept_vex(insn->attr) && !inat_is_group(insn->attr))
- insn->attr = 0; /* This instruction is bad */
+ insn->attr.flags = inat_get_avx(op, m, p)->flags;
+ insn->mnemonic = inat_get_avx(op, m, p)->mnemonic;
+ if (!insn->mem_bytes)
+ insn->mem_bytes = inat_get_avx(op, m, p)->mem_bytes;
+ if (!inat_accept_vex(insn->attr.flags) &&
+ !inat_is_group(insn->attr.flags))
+ insn->attr.flags = 0; /* This instruction is bad */
goto end; /* VEX has only 1 byte for opcode */
}

- insn->attr = inat_get_opcode_attribute(op);
- while (inat_is_escape(insn->attr)) {
+ insn->attr.flags = inat_get_opcode(op)->flags;
+ if (!insn->mem_bytes)
+ insn->mem_bytes = inat_get_opcode(op)->mem_bytes;
+ insn->mnemonic = inat_get_opcode(op)->mnemonic;
+ while (inat_is_escape(insn->attr.flags)) {
+ insn_flags_t flags = insn->attr.flags;
/* Get escaped opcode */
op = get_next(insn_byte_t, insn);
opcode->bytes[opcode->nbytes++] = op;
pfx_id = insn_last_prefix_id(insn);
- insn->attr = inat_get_escape_attribute(op, pfx_id, insn->attr);
+ insn->attr.flags =
+ inat_get_escape(op, pfx_id, insn->attr.flags)->flags;
+ insn->mnemonic = inat_get_escape(op, pfx_id, flags)->mnemonic;
+ if (!insn->mem_bytes)
+ insn->mem_bytes = inat_get_escape(op, pfx_id, flags)->mem_bytes;
}
- if (inat_must_vex(insn->attr))
- insn->attr = 0; /* This instruction is bad */
+ if (inat_must_vex(insn->attr.flags))
+ insn->attr.flags = 0; /* This instruction is bad */
end:
opcode->got = 1;

+
err_out:
return;
}
@@ -246,21 +263,27 @@ void insn_get_modrm(struct insn *insn)
if (!insn->opcode.got)
insn_get_opcode(insn);

- if (inat_has_modrm(insn->attr)) {
+ if (inat_has_modrm(insn->attr.flags)) {
mod = get_next(insn_byte_t, insn);
modrm->value = mod;
modrm->nbytes = 1;
- if (inat_is_group(insn->attr)) {
+ if (inat_is_group(insn->attr.flags)) {
+ insn_flags_t flags = insn->attr.flags;
pfx_id = insn_last_prefix_id(insn);
- insn->attr = inat_get_group_attribute(mod, pfx_id,
- insn->attr);
- if (insn_is_avx(insn) && !inat_accept_vex(insn->attr))
- insn->attr = 0; /* This is bad */
+ insn->attr.flags = inat_get_group(mod, pfx_id, insn->attr.flags)->flags;
+ insn->mnemonic = inat_get_group(mod, pfx_id, flags)->mnemonic;
+ if (!insn->mem_bytes)
+ insn->mem_bytes = inat_get_group(mod, pfx_id, flags)->mem_bytes;
+ if (insn_is_avx(insn) &&
+ !inat_accept_vex(insn->attr.flags))
+ insn->attr.flags = 0; /* This is bad */
}
}

- if (insn->x86_64 && inat_is_force64(insn->attr))
+ if (insn->x86_64 && inat_is_force64(insn->attr.flags)) {
insn->opnd_bytes = 8;
+ insn->mem_bytes = 8;
+ }
modrm->got = 1;

err_out:
@@ -506,17 +529,17 @@ void insn_get_immediate(struct insn *insn)
if (!insn->displacement.got)
insn_get_displacement(insn);

- if (inat_has_moffset(insn->attr)) {
+ if (inat_has_moffset(insn->attr.flags)) {
if (!__get_moffset(insn))
goto err_out;
goto done;
}

- if (!inat_has_immediate(insn->attr))
+ if (!inat_has_immediate(insn->attr.flags))
/* no immediates */
goto done;

- switch (inat_immediate_size(insn->attr)) {
+ switch (inat_immediate_size(insn->attr.flags)) {
case INAT_IMM_BYTE:
insn->immediate.value = get_next(char, insn);
insn->immediate.nbytes = 1;
@@ -551,7 +574,7 @@ void insn_get_immediate(struct insn *insn)
/* Here, insn must have an immediate, but failed */
goto err_out;
}
- if (inat_has_second_immediate(insn->attr)) {
+ if (inat_has_second_immediate(insn->attr.flags)) {
insn->immediate2.value = get_next(char, insn);
insn->immediate2.nbytes = 1;
}
@@ -575,6 +598,8 @@ void insn_get_length(struct insn *insn)
return;
if (!insn->immediate.got)
insn_get_immediate(insn);
+ if (insn->mem_bytes == -1)
+ insn->mem_bytes = (insn->opnd_bytes < 4)?insn->opnd_bytes:4;
insn->length = (unsigned char)((unsigned long)insn->next_byte
- (unsigned long)insn->kaddr);
}
diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk
index 093a892..aa753ae 100644
--- a/arch/x86/tools/gen-insn-attr-x86.awk
+++ b/arch/x86/tools/gen-insn-attr-x86.awk
@@ -41,6 +41,8 @@ BEGIN {
delete etable
delete gtable
delete atable
+ delete opcode_list
+ opcode_cnt = 1

opnd_expr = "^[A-Za-z/]"
ext_expr = "^\\("
@@ -61,6 +63,17 @@ BEGIN {
imm_flag["Ov"] = "INAT_MOFFSET"
imm_flag["Lx"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"

+ mem_expr = "^[EQXY][a-z]"
+ mem_flag["Ev"] = "-1"
+ mem_flag["Eb"] = "1"
+ mem_flag["Ew"] = "2"
+ mem_flag["Ed"] = "4"
+ mem_flag["Yb"] = "1"
+ mem_flag["Xb"] = "1"
+ mem_flag["Yv"] = "-1"
+ mem_flag["Xv"] = "-1"
+ mem_flag["Qd"] = "8"
+
modrm_expr = "^([CDEGMNPQRSUVW/][a-z]+|NTA|T[012])"
force64_expr = "\\([df]64\\)"
rex_expr = "^REX(\\.[XRWB]+)*"
@@ -155,11 +168,22 @@ function array_size(arr, i,c) {

function print_table(tbl,name,fmt,n)
{
- print "const insn_attr_t " name " = {"
+ print "static const insn_attr_t " name " = {"
for (i = 0; i < n; i++) {
id = sprintf(fmt, i)
- if (tbl[id])
- print " [" id "] = " tbl[id] ","
+ if (!tbl[id,"mnem"] && !tbl[id,"flags"])
+ continue
+ OLD_ORS = ORS
+ ORS = ""
+ print " [" id "] = { "
+ if (tbl[id,"flags"])
+ print ".flags = " tbl[id,"flags"] ", "
+ if (tbl[id,"mnem"])
+ print ".mnemonic = " tbl[id,"mnem"] ", "
+ if (tbl[id,"mem"])
+ print ".mem_bytes = " tbl[id,"mem"] ", "
+ ORS = OLD_ORS
+ print "} ,"
}
print "};"
}
@@ -232,7 +256,7 @@ function add_flags(old,new) {
}

# convert operands to flags.
-function convert_operands(count,opnd, i,j,imm,mod)
+function convert_operands(count,opnd,i,j,imm,mod)
{
imm = null
mod = null
@@ -247,12 +271,25 @@ function convert_operands(count,opnd, i,j,imm,mod)
imm = add_flags(imm, "INAT_SCNDIMM")
} else
imm = imm_flag[i]
- } else if (match(i, modrm_expr))
+ } else if (match(i, modrm_expr)) {
mod = "INAT_MODRM"
+ } else if (match(i, mem_expr)) {
+ mem = mem_flag[i]
+ }
}
return add_flags(imm, mod)
}

+function get_mem_bytes(count,opnd,i,j,imm,mod)
+{
+ for (j = 1; j <= count; j++) {
+ i = opnd[j]
+ if (match(i, mem_expr))
+ return mem_flag[i];
+ }
+ return "0"
+}
+
/^[0-9a-f]+\:/ {
if (NR == 1)
next
@@ -272,7 +309,7 @@ function convert_operands(count,opnd, i,j,imm,mod)
semantic_error("Redefine escape (" ref ")")
escape[ref] = geid
geid++
- table[idx] = "INAT_MAKE_ESCAPE(" escape[ref] ")"
+ table[idx,"flags"] = "INAT_MAKE_ESCAPE(" escape[ref] ")"
next
}

@@ -281,15 +318,23 @@ function convert_operands(count,opnd, i,j,imm,mod)
i = 2
while (i <= NF) {
opcode = $(i++)
+ if (!(opcode in opcode_list)) {
+ opcode_list[opcode] = opcode
+ gsub(/[^A-Za-z0-9 \t]/, "_", opcode_list[opcode])
+ print "#define INSN_OPC_" opcode_list[opcode] " " opcode_cnt
+ opcode_cnt++
+ }
delete opnds
ext = null
flags = null
opnd = null
+ mem_bytes = 0
# parse one opcode
if (match($i, opnd_expr)) {
opnd = $i
count = split($(i++), opnds, ",")
flags = convert_operands(count, opnds)
+ mem_bytes = get_mem_bytes(count, opnds)
}
if (match($i, ext_expr))
ext = $(i++)
@@ -330,27 +375,41 @@ function convert_operands(count,opnd, i,j,imm,mod)
semantic_error("Unknown prefix: " opcode)
flags = add_flags(flags, "INAT_MAKE_PREFIX(" prefix_num[opcode] ")")
}
- if (length(flags) == 0)
- continue
# check if last prefix
if (match(ext, lprefix1_expr)) {
- lptable1[idx] = add_flags(lptable1[idx],flags)
- variant = "INAT_VARIANT"
+ lptable1[idx,"mnem"] = "INSN_OPC_" opcode_list[opcode]
+ lptable1[idx,"mem"] = mem_bytes
+ if (length(flags)) {
+ lptable1[idx,"flags"] = add_flags(lptable1[idx,"flags"],flags)
+ variant = "INAT_VARIANT"
+ }
}
if (match(ext, lprefix2_expr)) {
- lptable2[idx] = add_flags(lptable2[idx],flags)
- variant = "INAT_VARIANT"
+ lptable2[idx,"mnem"] = "INSN_OPC_" opcode_list[opcode]
+ lptable2[idx,"mem"] = mem_bytes
+ if (length(flags)) {
+ lptable2[idx,"flags"] = add_flags(lptable2[idx,"flags"],flags)
+ variant = "INAT_VARIANT"
+ }
}
if (match(ext, lprefix3_expr)) {
- lptable3[idx] = add_flags(lptable3[idx],flags)
- variant = "INAT_VARIANT"
+ lptable3[idx,"mnem"] = "INSN_OPC_" opcode_list[opcode]
+ lptable3[idx,"mem"] = mem_bytes
+ if (length(flags)) {
+ lptable3[idx,"flags"] = add_flags(lptable3[idx,"flags"],flags)
+ variant = "INAT_VARIANT"
+ }
}
- if (!match(ext, lprefix_expr)){
- table[idx] = add_flags(table[idx],flags)
+ if (!match(ext, lprefix_expr)) {
+ table[idx,"mnem"] = "INSN_OPC_" opcode_list[opcode]
+ table[idx,"mem"] = mem_bytes
+ if (length(flags)) {
+ table[idx,"flags"] = add_flags(table[idx,"flags"],flags)
+ }
}
}
if (variant)
- table[idx] = add_flags(table[idx],variant)
+ table[idx,"flags"] = add_flags(table[idx,"flags"],variant)
}

END {
@@ -358,7 +417,7 @@ END {
exit 1
# print escape opcode map's array
print "/* Escape opcode map array */"
- print "const insn_attr_t * const inat_escape_tables[INAT_ESC_MAX + 1]" \
+ print "static const insn_attr_t * const inat_escape_tables[INAT_ESC_MAX + 1]" \
"[INAT_LSTPFX_MAX + 1] = {"
for (i = 0; i < geid; i++)
for (j = 0; j < max_lprefix; j++)
@@ -367,7 +426,7 @@ END {
print "};\n"
# print group opcode map's array
print "/* Group opcode map array */"
- print "const insn_attr_t * const inat_group_tables[INAT_GRP_MAX + 1]"\
+ print "static const insn_attr_t * const inat_group_tables[INAT_GRP_MAX + 1]"\
"[INAT_LSTPFX_MAX + 1] = {"
for (i = 0; i < ggid; i++)
for (j = 0; j < max_lprefix; j++)
@@ -376,7 +435,7 @@ END {
print "};\n"
# print AVX opcode map's array
print "/* AVX opcode map array */"
- print "const insn_attr_t * const inat_avx_tables[X86_VEX_M_MAX + 1]"\
+ print "static const insn_attr_t * const inat_avx_tables[X86_VEX_M_MAX + 1]"\
"[INAT_LSTPFX_MAX + 1] = {"
for (i = 0; i < gaid; i++)
for (j = 0; j < max_lprefix; j++)
diff --git a/arch/x86/tools/insn_sanity.c b/arch/x86/tools/insn_sanity.c
index 872eb60..377d273 100644
--- a/arch/x86/tools/insn_sanity.c
+++ b/arch/x86/tools/insn_sanity.c
@@ -89,10 +89,10 @@ static void dump_insn(FILE *fp, struct insn *insn)
dump_field(fp, "displacement", "\t", &insn->displacement);
dump_field(fp, "immediate1", "\t", &insn->immediate1);
dump_field(fp, "immediate2", "\t", &insn->immediate2);
- fprintf(fp, "\t.attr = %x, .opnd_bytes = %d, .addr_bytes = %d,\n",
- insn->attr, insn->opnd_bytes, insn->addr_bytes);
- fprintf(fp, "\t.length = %d, .x86_64 = %d, .kaddr = %p}\n",
- insn->length, insn->x86_64, insn->kaddr);
+ fprintf(fp, "\t.attr.flags = %x, .opnd_bytes = %d, .addr_bytes = %d, .mem_bytes = %d,\n",
+ insn->attr.flags, insn->opnd_bytes, insn->addr_bytes, insn->mem_bytes);
+ fprintf(fp, "\t.length = %d, t.mnemonic = %d, .x86_64 = %d, .kaddr = %p}\n",
+ insn->length, insn->mnemonic, insn->x86_64, insn->kaddr);
}

static void dump_stream(FILE *fp, const char *msg, unsigned long nr_iter,
--
1.7.10.4

Subject: Re: [PATCH 2/4] x86: Move instruction decoder data into header

(2014/04/15 2:44), Sasha Levin wrote:
> Right now we generate data for the instruction decoder and place it
> as a code file which gets #included directly (yuck).
>
> Instead, make it a header which will also be usable by other code
> that wants to use the data in there.

Hmm, making the generated data into a header file may clone the
data table instances in each object file that includes it. Since the
inat tables are not so small, I think we'd better just export the tables.

Thank you,

--
Masami HIRAMATSU
Software Platform Research Dept. Linux Technology Center
Hitachi, Ltd., Yokohama Research Laboratory
E-mail: [email protected]

2014-04-15 02:29:08

by Sasha Levin

Subject: Re: [PATCH 2/4] x86: Move instruction decoder data into header

On 04/14/2014 09:41 PM, Masami Hiramatsu wrote:
> (2014/04/15 2:44), Sasha Levin wrote:
>> > Right now we generate data for the instruction decoder and place it
>> > as a code file which gets #included directly (yuck).
>> >
>> > Instead, make it a header which will also be usable by other code
>> > that wants to use the data in there.
> Hmm, making the generated data into a header file may clone
> the data table instances for each object file. Since the inat
> table is not so small, I think we'd better just export the tables.

The tables are defined as static, so the compiler drops them
once it detects they are not used.
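
For example (a toy illustration, not from this patchset; the file names
and the table are made up):

    /* demo-tables.h: stand-in for the generated inat-tables.h */
    static const int demo_table[4096] = { 1, 2, 3 };

    /* user.c: includes the header but never references demo_table, so
     * the compiler drops the table and emits no copy of it into user.o.
     * Check with:  cc -c user.c && size user.o
     */
    #include "demo-tables.h"
    int unrelated(void) { return 42; }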

I feel it would be easier to let the compiler do its job rather
than do optimizations we don't need and which would complicate
the code quite a bit.


Thanks,
Sasha

Subject: Re: Re: [PATCH 2/4] x86: Move instruction decoder data into header

(2014/04/15 11:28), Sasha Levin wrote:
> On 04/14/2014 09:41 PM, Masami Hiramatsu wrote:
>> (2014/04/15 2:44), Sasha Levin wrote:
>>>> Right now we generate data for the instruction decoder and place it
>>>> as a code file which gets #included directly (yuck).
>>>>
>>>> Instead, make it a header which will also be usable by other code
>>>> that wants to use the data in there.
>> Hmm, making the generated data into a header file may clone
>> the data table instances for each object file. Since the inat
>> table is not so small, I think we'd better just export the tables.
>
> The tables are defined as static, so the compiler drops them
> once it detects they are not used.

No, I meant that if the table is used in different object files,
won't copies of the tables be compiled into several different
instances?

And I can't see the part of this patch which makes the tables static...

> I feel it would be easier to let the compiler do it's job rather
> than do optimizations we don't need to do and which will complicate
> the code quite a bit.

I wasn't trying to optimize it, just to encapsulate it and hide it from
other parts.

Thank you,


--
Masami HIRAMATSU
Software Platform Research Dept. Linux Technology Center
Hitachi, Ltd., Yokohama Research Laboratory
E-mail: [email protected]

Subject: Re: [PATCH 3/4] x86/insn: Extract more information about instructions

(2014/04/15 2:44), Sasha Levin wrote:
> arch/x86/lib/x86-opcode-map.txt provides us quite a lot of information about
> instructions. So far we've discarded information we didn't need to use
> elsewhere.
>
> This patch extracts two more bits of information about instructions:

This information looks obscure to me. What information (documents)
is it based on? Could you tell me how you got it?

> - Mnemonic. We'd like to refer to instructions by their mnemonic, and not
> by their opcode. This both makes code readable, and less confusing and
> prone to typos since a single mnemonic may have quite a few different
> opcodes representing it.

I don't like calling it a "mnemonic"; it is just an "operation".

> - Memory access size. We're currently decoding the size (in bytes) of an
> address size, and operand size. kmemcheck would like to know in addition
> how many bytes were read/written from/to an address by a given instruction,
> so we also keep the size of the memory access.

Also, at least at this point, since the operation/mem_size are only
used by kmemcheck, you should generate a separate table for them in
kmemcheck from x86-opcode-map.txt.

Hm, could you check out my private project repository of in-kernel disasm?
https://github.com/mhiramat/linux/tree/inkernel-disasm-20130414

it's a bit outdated, but I think you can do a similar thing. :)


> /* Attribute checking functions */
> -static inline int inat_is_legacy_prefix(insn_attr_t attr)
> +static inline int inat_is_legacy_prefix(insn_flags_t flags)
> {
> - attr &= INAT_PFX_MASK;
> - return attr && attr <= INAT_LGCPFX_MAX;
> + flags &= INAT_PFX_MASK;
> + return flags && flags <= INAT_LGCPFX_MAX;
> }

Since "inat" stands for "instruction-attribute", it should have insn_attr_t.
And,

> @@ -141,15 +141,15 @@ void __kprobes synthesize_relcall(void *from, void *to)
> */
> static kprobe_opcode_t *__kprobes skip_prefixes(kprobe_opcode_t *insn)
> {
> - insn_attr_t attr;
> + insn_flags_t flags;
>
> - attr = inat_get_opcode_attribute((insn_byte_t)*insn);
> - while (inat_is_legacy_prefix(attr)) {
> + flags = inat_get_opcode((insn_byte_t)*insn)->flags;

Do not dereference a member of the return value directly. If it returns
NULL, the kernel will just crash!

> @@ -61,6 +63,17 @@ BEGIN {
> imm_flag["Ov"] = "INAT_MOFFSET"
> imm_flag["Lx"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
>
> + mem_expr = "^[EQXY][a-z]"
> + mem_flag["Ev"] = "-1"
> + mem_flag["Eb"] = "1"
> + mem_flag["Ew"] = "2"
> + mem_flag["Ed"] = "4"
> + mem_flag["Yb"] = "1"
> + mem_flag["Xb"] = "1"
> + mem_flag["Yv"] = "-1"
> + mem_flag["Xv"] = "-1"
> + mem_flag["Qd"] = "8"
> +

mem_flag? mem_size?

> @@ -232,7 +256,7 @@ function add_flags(old,new) {
> }
>
> # convert operands to flags.
> -function convert_operands(count,opnd, i,j,imm,mod)
> +function convert_operands(count,opnd,i,j,imm,mod)

Don't remove this space; it separates the input arguments from the local
variables.


> @@ -281,15 +318,23 @@ function convert_operands(count,opnd, i,j,imm,mod)
> i = 2
> while (i <= NF) {
> opcode = $(i++)
> + if (!(opcode in opcode_list)) {
> + opcode_list[opcode] = opcode
> + gsub(/[^A-Za-z0-9 \t]/, "_", opcode_list[opcode])
> + print "#define INSN_OPC_" opcode_list[opcode] " " opcode_cnt
> + opcode_cnt++
> + }

No, don't do that. Defining immediate macros in an auto-generated header
makes code maintenance hard.


BTW, could you make a cover mail for the series?

Thank you,

--
Masami HIRAMATSU
Software Platform Research Dept. Linux Technology Center
Hitachi, Ltd., Yokohama Research Laboratory
E-mail: [email protected]

Subject: Re: [PATCH 3/4] x86/insn: Extract more information about instructions

(2014/04/15 12:12), Masami Hiramatsu wrote:
> (2014/04/15 2:44), Sasha Levin wrote:
>> arch/x86/lib/x86-opcode-map.txt provides us quite a lot of information about
>> instructions. So far we've discarded information we didn't need to use
>> elsewhere.
>>
>> This patch extracts two more bits of information about instructions:
>
> These information looks obscure to me. What information (documents)
> does it based on? Could you give me how would you get it?
>
>> - Mnemonic. We'd like to refer to instructions by their mnemonic, and not
>> by their opcode. This both makes code readable, and less confusing and
>> prone to typos since a single mnemonic may have quite a few different
>> opcodes representing it.
>
> I don't like to call it as "mnemonic", it is just "operation".

Ah, I see what you are doing now. Hmm, you'd like to generate a mnemonic-ID
and its macro for each opcode, wouldn't you?
Even so, it seems a waste of memory to keep both the opcode and the
mnemonic-ID; can you integrate them? For example, you could use
insn->opcode.value instead of checking the individual opcode bytes.

Thank you,

--
Masami HIRAMATSU
Software Platform Research Dept. Linux Technology Center
Hitachi, Ltd., Yokohama Research Laboratory
E-mail: [email protected]

2014-04-15 08:17:26

by Pekka Enberg

Subject: Re: [PATCH 4/4] kmemcheck: Switch to using kernel disassembler

On 04/14/2014 08:44 PM, Sasha Levin wrote:
> kmemcheck used to do it's own basic instruction decoding, which is
> just a duplication of the work done in arch/x86/lib/insn.c.
>
> Instead, switch it to using the already existing dissasembler, and
> switch the magic opcode numbers into something meaningful.
>
> Signed-off-by: Sasha Levin <[email protected]>

Vegard probably should take a closer look at this but:

Acked-by: Pekka Enberg <[email protected]>

2014-04-15 14:25:07

by Sasha Levin

Subject: Re: [PATCH 2/4] x86: Move instruction decoder data into header

On 04/14/2014 11:10 PM, Masami Hiramatsu wrote:
> (2014/04/15 11:28), Sasha Levin wrote:
>> On 04/14/2014 09:41 PM, Masami Hiramatsu wrote:
>>> (2014/04/15 2:44), Sasha Levin wrote:
>>>>> Right now we generate data for the instruction decoder and place it
>>>>> as a code file which gets #included directly (yuck).
>>>>>
>>>>> Instead, make it a header which will also be usable by other code
>>>>> that wants to use the data in there.
>>> Hmm, making the generated data into a header file may clone
>>> the data table instances for each object file. Since the inat
>>> table is not so small, I think we'd better just export the tables.
>>
>> The tables are defined as static, so the compiler drops them
>> once it detects they are not used.
>
> No, I meant that if the table is used in the different object files,
> will the copies of the tables be compiled in several different
> instances?

That's true, but there was, and after this patchset there will still
be, only one user that touches the tables. I also doubt that more users
will appear, since users of the tables should go through the API rather
than touch them directly, so I don't think it should be a concern at
this point.

> And I can't see the part which makes the tables static in this patch...

Right, it sneaked into the next patch in this patchset. I'll pull it
into this one in the next version.

>> I feel it would be easier to let the compiler do it's job rather
>> than do optimizations we don't need to do and which will complicate
>> the code quite a bit.
>
> I haven't tend to optimize it, but just encapsulate it, to hide from other parts.

We could hide it under an #ifdef, but that wouldn't change anything for
the user or for the generated code itself.

Splitting code generation into two different files would complicate
everything IMO.


Thanks,
Sasha

2014-04-15 15:11:13

by Sasha Levin

Subject: Re: [PATCH 3/4] x86/insn: Extract more information about instructions

Thanks for the review!

Some comments below.

On 04/14/2014 11:12 PM, Masami Hiramatsu wrote:
> (2014/04/15 2:44), Sasha Levin wrote:
>> arch/x86/lib/x86-opcode-map.txt provides us quite a lot of information about
>> instructions. So far we've discarded information we didn't need to use
>> elsewhere.
>>
>> This patch extracts two more bits of information about instructions:
>
> These information looks obscure to me. What information (documents)
> does it based on? Could you give me how would you get it?

It's the two bits of information described below. Both are extracted
from arch/x86/lib/x86-opcode-map.txt, which is based on the Intel
instruction manual.

>> - Mnemonic. We'd like to refer to instructions by their mnemonic, and not
>> by their opcode. This both makes code readable, and less confusing and
>> prone to typos since a single mnemonic may have quite a few different
>> opcodes representing it.
>
> I don't like to call it as "mnemonic", it is just "operation".

> Ah, I see what you are doing now. Hmm, you'd like to generate a mnemonic-ID
> and its macros for each opcode, wouldn't you?
> Even though, it seems waste of memory that we have both opcode and mnemonic-ID,
> can you integrate that? for example, you can use insn->opcode.value instead of
> checking each opcode bytes.

Mnemonics don't have a 1:1 relationship with opcodes. So, for example,
if kmemcheck needs to check (and it does) whether a given instruction
is an "ADD", it would need to compare it against 9 different opcodes.

The translation table/code to do that would be bigger than just adding
a mnemonic field to the instruction.
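
Roughly (toy code, not the kmemcheck patch; the group-1 handling is from
the Intel manual, and the INSN_OPC_ADD value is an arbitrary stand-in for
whatever the awk script would generate):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Matching "ADD" by raw opcode: 0x00-0x05 are unambiguous, but the
     * group-1 opcodes 0x80/0x81/0x83 also need ModRM.reg == 0, since the
     * same bytes encode OR/AND/SUB/... for other reg values. */
    static bool is_add_by_opcode(uint8_t op, uint8_t modrm)
    {
        switch (op) {
        case 0x00: case 0x01: case 0x02:
        case 0x03: case 0x04: case 0x05:
            return true;
        case 0x80: case 0x81: case 0x83:
            return ((modrm >> 3) & 7) == 0; /* /0 extension is ADD */
        default:
            return false;
        }
    }

    /* Matching by mnemonic ID: a single comparison, no opcode details. */
    #define INSN_OPC_ADD 7 /* stand-in for the generated constant */

    static bool is_add_by_mnemonic(unsigned int mnemonic)
    {
        return mnemonic == INSN_OPC_ADD;
    }

    int main(void)
    {
        printf("%d %d\n", is_add_by_opcode(0x83, 0x00),
               is_add_by_mnemonic(INSN_OPC_ADD));
        return 0;
    }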

>> - Memory access size. We're currently decoding the size (in bytes) of an
>> address size, and operand size. kmemcheck would like to know in addition
>> how many bytes were read/written from/to an address by a given instruction,
>> so we also keep the size of the memory access.
>
> And also, at least at this time, since the operation/mem_size are
> only used in kmemcheck, you should generate another table for that in kmemcheck
> from x86-opcode-map.txt.

I don't want to "teach" kmemcheck to parse x86-opcode-map.txt, that
should be something that the instruction API does.

kmemcheck would also be the 3rd in-kernel user of that API, so it's
not fair to push it as an exception :)

It's also just one more byte in 'struct insn'...

> Hm, could you check out my private project repository of in-kernel disasm?
> https://github.com/mhiramat/linux/tree/inkernel-disasm-20130414
>
> it's a bit outdated, but I think you can do similar thing. :)

I checked it out; it seems to be close to what we need. Some of the
differences between it and this patchset come down to how the data
is used.

For example, you save mnemonics as strings because you need to print
them nicely for the user, while I save them as an ordinal number
because I need to match mnemonics to instructions.

>> /* Attribute checking functions */
>> -static inline int inat_is_legacy_prefix(insn_attr_t attr)
>> +static inline int inat_is_legacy_prefix(insn_flags_t flags)
>> {
>> - attr &= INAT_PFX_MASK;
>> - return attr && attr <= INAT_LGCPFX_MAX;
>> + flags &= INAT_PFX_MASK;
>> + return flags && flags <= INAT_LGCPFX_MAX;
>> }
>
> Since "inat" stands for "instruction-attribute", it should have insn_attr_t.
> And,
>
>> @@ -141,15 +141,15 @@ void __kprobes synthesize_relcall(void *from, void *to)
>> */
>> static kprobe_opcode_t *__kprobes skip_prefixes(kprobe_opcode_t *insn)
>> {
>> - insn_attr_t attr;
>> + insn_flags_t flags;
>>
>> - attr = inat_get_opcode_attribute((insn_byte_t)*insn);
>> - while (inat_is_legacy_prefix(attr)) {
>> + flags = inat_get_opcode((insn_byte_t)*insn)->flags;
>
> Do not refer to a member of the return value directly. If it returns NULL,
> the kernel just crashes!

Right, I'll fix that. Probably by adding a dummy "empty" instruction
just so we won't have to deal with too many NULL checks.

>> @@ -61,6 +63,17 @@ BEGIN {
>> imm_flag["Ov"] = "INAT_MOFFSET"
>> imm_flag["Lx"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
>>
>> + mem_expr = "^[EQXY][a-z]"
>> + mem_flag["Ev"] = "-1"
>> + mem_flag["Eb"] = "1"
>> + mem_flag["Ew"] = "2"
>> + mem_flag["Ed"] = "4"
>> + mem_flag["Yb"] = "1"
>> + mem_flag["Xb"] = "1"
>> + mem_flag["Yv"] = "-1"
>> + mem_flag["Xv"] = "-1"
>> + mem_flag["Qd"] = "8"
>> +
>
> mem_flag? mem_size?

Yup, that makes more sense. I'll change that.

>> @@ -232,7 +256,7 @@ function add_flags(old,new) {
>> }
>>
>> # convert operands to flags.
>> -function convert_operands(count,opnd, i,j,imm,mod)
>> +function convert_operands(count,opnd,i,j,imm,mod)
>
> Don't remove this space, that separates input arguments and local variables.

Will fix.

>> @@ -281,15 +318,23 @@ function convert_operands(count,opnd, i,j,imm,mod)
>> i = 2
>> while (i <= NF) {
>> opcode = $(i++)
>> + if (!(opcode in opcode_list)) {
>> + opcode_list[opcode] = opcode
>> + gsub(/[^A-Za-z0-9 \t]/, "_", opcode_list[opcode])
>> + print "#define INSN_OPC_" opcode_list[opcode] " " opcode_cnt
>> + opcode_cnt++
>> + }
>
> No, don't do that. Defining some immediate macros in auto-generated header makes
> code maintenance hard.

Do you have a better suggestion to do the above? The definitions
are pretty simple and straightforward ("INSN_OPC_[mnemonic]") so
I don't feel they will be difficult to maintain. I'm not generating
complex macros here.

> BTW, could you make a cover mail for the series?

Sure.


Thanks,
Sasha

Subject: Re: [PATCH 2/4] x86: Move instruction decoder data into header

(2014/04/15 23:24), Sasha Levin wrote:
> On 04/14/2014 11:10 PM, Masami Hiramatsu wrote:
>> (2014/04/15 11:28), Sasha Levin wrote:
>>> On 04/14/2014 09:41 PM, Masami Hiramatsu wrote:
>>>> (2014/04/15 2:44), Sasha Levin wrote:
>>>>>> Right now we generate data for the instruction decoder and place it
>>>>>> as a code file which gets #included directly (yuck).
>>>>>>
>>>>>> Instead, make it a header which will also be usable by other code
>>>>>> that wants to use the data in there.
>>>> Hmm, making the generated data into a header file may clone
>>>> the data table instances for each object file. Since the inat
>>>> table is not so small, I think we'd better just export the tables.
>>>
>>> The tables are defined as static, so the compiler drops them
>>> once it detects they are not used.
>>
>> No, I meant that if the table is used in different object files,
>> will copies of the table be compiled into several different
>> instances?
>
> That's true, but there was and after this patchset there will still
> be only one user that touches the table. I also doubt that more users
> will appear since users of the table should be going through the API
> and not touching it directly, so I don't think it should be a concern
> at this point.

Agreed, so I'd rather not expose it. If no one else needs the tables, then
leave them with their owner. That would be the safest.

>> And I can't see the part which makes the tables static in this patch...
>
> Right, it sneaked into the next patch in this patchset. I'll pull it
> into this one in the next version.
>
>>> I feel it would be easier to let the compiler do its job rather
>>> than do optimizations we don't need to do and which will complicate
>>> the code quite a bit.
>>
>> I wasn't trying to optimize it, just to encapsulate it, to hide it from other parts.
>
> We could hide it under #ifdef, but that wouldn't change anything for
> the user or for the generated code itself.
>
> Splitting code generation into two different files would complicate
> everything IMO.

No, mixing data (definitions) and declarations in one header should be
avoided, especially in one that is generated during the build. I'd like to ask
you to split them into a header and a body. I know it will be harder to
implement, but it is worth it for maintainability.

Thank you,

--
Masami HIRAMATSU
Software Platform Research Dept. Linux Technology Center
Hitachi, Ltd., Yokohama Research Laboratory
E-mail: [email protected]

2014-04-16 03:27:09

by H. Peter Anvin

[permalink] [raw]
Subject: Re: [PATCH 3/4] x86/insn: Extract more information about instructions

On 04/15/2014 08:10 AM, Sasha Levin wrote:
>
> Mnemonics don't have a 1:1 relationship with opcodes. So, for example,
> if kmemcheck needs to check (and it does) whether a given instruction
> is an "ADD", it would need to compare it to 9 different opcodes.
>

Excuse me, but on what planet does it make sense to ask if a
particular instruction is a "MOV", for example? The trends in x86
opcodes have varied over the years and at some points it seems to have
been trendy to have very general mnemonics (consider MOV CR, MOV DR) and
at some points quite the opposite (hence MOVD, MOVQ, MOVDQA, MOVDQU,
MOVAPS, MOVUPS, MOVAPD, MOVUPD, VMOVxxx).

So it is not at all clear that this makes any kind of sense whatsoever,
and is more likely just going to be abused.

-hpa

2014-04-16 03:48:07

by Sasha Levin

[permalink] [raw]
Subject: Re: [PATCH 3/4] x86/insn: Extract more information about instructions

On 04/15/2014 11:26 PM, H. Peter Anvin wrote:
> On 04/15/2014 08:10 AM, Sasha Levin wrote:
>>
>> Mnemonics don't have a 1:1 relationship with opcodes. So, for example,
>> if kmemcheck needs to check (and it does) whether a given instruction
>> is an "ADD", it would need to compare it to 9 different opcodes.
>>
>
> Excuse me, but on what planet does it make sense to ask if a
> particular instruction is a "MOV", for example? The trends in x86
> opcodes have varied over the years and at some points it seems to have
> been trendy to have very general mnemonics (consider MOV CR, MOV DR) and
> at some points quite the opposite (hence MOVD, MOVQ, MOVDQA, MOVDQU,
> MOVAPS, MOVUPS, MOVAPD, MOVUPD, VMOVxxx).
>
> So it is not at all clear that this makes any kind of sense whatsoever,
> and is more likely just going to be abused.

Looking at kmemcheck, and "AND" vs "MOV" for example, we need to know if a
given instruction is AND because AND may operate on only part of the memory
it is accessing. So some accesses to what kmemcheck sees as "uninitialized
memory" are actually valid ones because we don't touch the uninitialized
part.

So for kmemcheck, AND and MOV (for example) are different because ANDing
a value and MOVing a value mean different things with respect to uninitialized memory.

Yes, if kmemcheck for some reason needs to figure out if an instruction
is a MOV variant we'll need to list quite a few mnemonics, but that list
will be much shorter and more readable than a corresponding list of opcodes.


Thanks,
Sasha

2014-04-16 03:54:34

by H. Peter Anvin

[permalink] [raw]
Subject: Re: [PATCH 3/4] x86/insn: Extract more information about instructions

On 04/15/2014 08:47 PM, Sasha Levin wrote:
>
> Yes, if kmemcheck for some reason needs to figure out if an instruction
> is a MOV variant we'll need to list quite a few mnemonics, but that list
> will be much shorter and more readable than a corresponding list of opcodes.
>

You're completely missing my point. If you are looking at MOV, with
80%+ probability you're doing something very, very wrong, because you
will be including instructions that do something completely different
from what you thought.

This is true for a lot of the x86 instructions.

-hpa

2014-04-16 04:04:09

by Sasha Levin

[permalink] [raw]
Subject: Re: [PATCH 3/4] x86/insn: Extract more information about instructions

On 04/15/2014 11:54 PM, H. Peter Anvin wrote:
> On 04/15/2014 08:47 PM, Sasha Levin wrote:
>> >
>> > Yes, if kmemcheck for some reason needs to figure out if an instruction
>> > is a MOV variant we'll need to list quite a few mnemonics, but that list
>> > will be much shorter and more readable than a corresponding list of opcodes.
>> >
> You're completely missing my point. If you are looking at MOV, with
> 80%+ probability you're doing something very, very wrong, because you
> will be including instructions that do something completely different
> from what you thought.
>
> This is true for a lot of the x86 instructions.

Right, but assuming that the AND example I presented earlier makes sense, I
can't create mnemonic entries only for instructions where doing so would
"probably" be right.

If there are use cases where working with mnemonics is correct, we should
be doing that in kmemcheck. If the way kmemcheck deals with mnemonics is
incorrect we should go ahead and fix kmemcheck.


Thanks,
Sasha

2014-04-16 04:32:27

by H. Peter Anvin

[permalink] [raw]
Subject: Re: [PATCH 3/4] x86/insn: Extract more information about instructions

I really wonder if it makes sense...

On April 15, 2014 9:03:48 PM PDT, Sasha Levin <[email protected]> wrote:
>On 04/15/2014 11:54 PM, H. Peter Anvin wrote:
>> On 04/15/2014 08:47 PM, Sasha Levin wrote:
>>> >
>>> > Yes, if kmemcheck for some reason needs to figure out if an instruction
>>> > is a MOV variant we'll need to list quite a few mnemonics, but that list
>>> > will be much shorter and more readable than a corresponding list of opcodes.
>>> >
>> You're completely missing my point. If you are looking at MOV, with
>> 80%+ probability you're doing something very, very wrong, because you
>> will be including instructions that do something completely different
>> from what you thought.
>>
>> This is true for a lot of the x86 instructions.
>
>Right, but assuming that the AND example I presented earlier makes sense, I
>can't create mnemonic entries only for instructions where doing so would
>"probably" be right.
>
>If there are use cases where working with mnemonics is correct, we should
>be doing that in kmemcheck. If the way kmemcheck deals with mnemonics is
>incorrect we should go ahead and fix kmemcheck.
>
>
>Thanks,
>Sasha

--
Sent from my mobile phone. Please pardon brevity and lack of formatting.

Subject: Re: Re: [PATCH 3/4] x86/insn: Extract more information about instructions

(2014/04/16 13:03), Sasha Levin wrote:
> On 04/15/2014 11:54 PM, H. Peter Anvin wrote:
>> On 04/15/2014 08:47 PM, Sasha Levin wrote:
>>>>
>>>> Yes, if kmemcheck for some reason needs to figure out if an instruction
>>>> is a MOV variant we'll need to list quite a few mnemonics, but that list
>>>> will be much shorter and more readable than a corresponding list of opcodes.
>>>>
>> You're completely missing my point. If you are looking at MOV, with
>> 80%+ probability you're doing something very, very wrong, because you
>> will be including instructions that do something completely different
>> from what you thought.
>>
>> This is true for a lot of the x86 instructions.
>
> Right, but assuming that the AND example I presented earlier makes sense, I
> can't create mnemonic entries only for instructions where doing so would
> "probably" be right.
>
> If there are use cases where working with mnemonics is correct, we should
> be doing that in kmemcheck. If the way kmemcheck deals with mnemonics is
> incorrect we should go ahead and fix kmemcheck.

In that case, as I said, the mnemonics classifier should be built in
kmemcheck at this point, since we cannot provide a general mnemonic
classifier for that purpose. If it becomes generic enough, and accurate,
it would be better to consolidate both, I think.

Thank you,

--
Masami HIRAMATSU
Software Platform Research Dept. Linux Technology Research Center
Hitachi, Ltd., Yokohama Research Laboratory
E-mail: [email protected]

Subject: Re: Re: [PATCH 3/4] x86/insn: Extract more information about instructions

(2014/04/16 0:10), Sasha Levin wrote:
>>> - Memory access size. We're currently decoding the size (in bytes) of an
>>> address size, and operand size. kmemcheck would like to know in addition
>>> how many bytes were read/written from/to an address by a given instruction,
>>> so we also keep the size of the memory access.
>>
>> And also, at least at this time, since the operation/mem_size are
>> only used in kmemcheck, you should generate another table for that in kmemcheck
>> from x86-opcode-map.txt.
>
> I don't want to "teach" kmemcheck to parse x86-opcode-map.txt, that
> should be something that the instruction API does.
>
> kmemcheck would also be the 3rd in-kernel user of that API, so it's
> not fair to push it as an exception :)

OK, I think we can push the size information bits into current insn_attr_t.
I don't think we should have another byte for that.

For example, here is the operand size detector pulled from my disasm code:

----
static int get_operand_size(struct insn *insn, const char *opnd)
{
	/* default to the decoded operand size */
	int size = insn->opnd_bytes;

	/* opnd is the operand type string from x86-opcode-map.txt, e.g. "Ev" */
	switch (opnd[1]) {
	case 'b':
	case 'B':
		size = 1;
		break;
	case 'w':
		size = 2;
		break;
	case 'd':
		if (opnd[2] == 'q')	/* "dq": double quadword */
			size = 16;
		else
			size = 4;
		break;
	case 'p':
		if (opnd[2] == 's' || opnd[2] == 'd')	/* packed single/double */
			size = insn_vex_l_bit(insn) ? 32 : 16;
		break;
	case 'q':
		if (opnd[2] == 'q')	/* "qq": quad quadword */
			size = 32;
		else
			size = 8;
		break;
	case 's':
		if (opnd[2] == 's' || opnd[2] == 'd')	/* scalar single/double */
			size = 16;
		break;
	case 'x':
		size = insn_vex_l_bit(insn) ? 32 : 16;
		break;
	case 'z':
		if (size == 8)	/* 'z': word or dword, never 8 bytes */
			size = 4;
		break;
	}
	return size;
}
----

Same thing can be done in awk part and insn.c, and we can encode it by

#define INAT_MAKE_MEMSZ(size) (size << INAT_MEMSZ_OFFS)

And decode it by

insn->memsz_bytes = 1 << ((attr & INAT_MEMSZ_MASK) >> INAT_MEMSZ_OFFS)

Thus, we only need 3 bits to represent 1, 2, 4, 8, 16 and 32. :)
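
Note that for the encode and decode sides above to match, the stored value
has to be log2 of the access size in bytes (0..5 for 1..32), i.e. the awk
side would emit the exponent. A minimal sketch, using the names from this
mail rather than anything that exists in inat.h today:

----
/* encode (awk/generated side): store log2(size) */
#define INAT_MAKE_MEMSZ(log2_size)	((log2_size) << INAT_MEMSZ_OFFS)

/* decode (insn.c side): recover the size in bytes */
insn->memsz_bytes = 1 << ((attr & INAT_MEMSZ_MASK) >> INAT_MEMSZ_OFFS);
----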

> It's also just one more byte in 'struct insn'...

I actually don't want to expand insn_attr_t; I'd like to keep it as
an immediate value.

[...]
>>> @@ -141,15 +141,15 @@ void __kprobes synthesize_relcall(void *from, void *to)
>>> */
>>> static kprobe_opcode_t *__kprobes skip_prefixes(kprobe_opcode_t *insn)
>>> {
>>> - insn_attr_t attr;
>>> + insn_flags_t flags;
>>>
>>> - attr = inat_get_opcode_attribute((insn_byte_t)*insn);
>>> - while (inat_is_legacy_prefix(attr)) {
>>> + flags = inat_get_opcode((insn_byte_t)*insn)->flags;
>>
>> Do not refer a member from the return value directly. If it returns NULL,
>> the kernel just crashes!
>
> Right, I'll fix that. Probably by adding a dummy "empty" instruction
> just so we won't have to deal with too many NULL checks.

Note that if we can put them all in one value, we can avoid such ugly NULL checks.


Thank you,

--
Masami HIRAMATSU
Software Platform Research Dept. Linux Technology Center
Hitachi, Ltd., Yokohama Research Laboratory
E-mail: [email protected]

2014-04-17 15:20:51

by Sasha Levin

[permalink] [raw]
Subject: Re: [PATCH 3/4] x86/insn: Extract more information about instructions

On 04/16/2014 01:30 AM, Masami Hiramatsu wrote:
> (2014/04/16 13:03), Sasha Levin wrote:
>> > On 04/15/2014 11:54 PM, H. Peter Anvin wrote:
>>> >> On 04/15/2014 08:47 PM, Sasha Levin wrote:
>>>>> >>>>
>>>>> >>>> Yes, if kmemcheck for some reason needs to figure out if an instruction
>>>>> >>>> is a MOV variant we'll need to list quite a few mnemonics, but that list
>>>>> >>>> will be much shorter and more readable than a corresponding list of opcodes.
>>>>> >>>>
>>> >> You're completely missing my point. If you are looking at MOV, with
>>> >> 80%+ probability you're doing something very, very wrong, because you
>>> >> will be including instructions that do something completely different
>>> >> from what you thought.
>>> >>
>>> >> This is true for a lot of the x86 instructions.
>> >
>> > Right, but assuming that the AND example I presented earlier makes sense, I
>> > can't create mnemonic entries only for instructions where doing so would
>> > "probably" be right.
>> >
>> > If there are use cases where working with mnemonics is correct, we should
>> > be doing that in kmemcheck. If the way kmemcheck deals with mnemonics is
>> > incorrect we should go ahead and fix kmemcheck.
> In that case, as I said, the mnemonics classifier should be built in
> kmemcheck at this point, since we cannot provide a general mnemonic
> classifier for that purpose. If it becomes generic enough, and accurate,
> it would be better to consolidate both, I think.

kmemcheck isn't an instruction decoder; it should not be parsing
x86-opcode-map.txt. That's why there's an API in inat.c to access that data.

Basically you're saying that you don't want to extend the API to extract the extra
mnemonic field which already exists in the data we're extracting from anyway, and
that kmemcheck should go around the API and do it itself.

It's not like the instruction decoder is a generic piece of code right now anyway;
it mostly serves [k,u]probes and was built around their requirements, and
now everybody is surprised that kmemcheck has different requirements than kprobes.


Thanks,
Sasha

2014-04-17 15:29:19

by H. Peter Anvin

[permalink] [raw]
Subject: Re: [PATCH 3/4] x86/insn: Extract more information about instructions

On 04/17/2014 08:20 AM, Sasha Levin wrote:
>
>> It's not like the instruction decoder is a generic piece of code right now anyway;
>> it mostly serves [k,u]probes and was built around their requirements, and
>> now everybody is surprised that kmemcheck has different requirements than kprobes.
>

What *ARE* kmemcheck's requirements? That's the real issue, I believe.

I also have seen several attempts at using the generic instruction
decoder which has resulted in more complexity, not less, because of
excess generality, so it is not an obvious thing.

-hpa

2014-04-17 15:33:40

by Sasha Levin

[permalink] [raw]
Subject: Re: [PATCH 3/4] x86/insn: Extract more information about instructions

On 04/16/2014 01:44 AM, Masami Hiramatsu wrote:
> Same thing can be done in awk part and insn.c, and we can encode it by
>
> #define INAT_MAKE_MEMSZ(size) (size << INAT_MEMSZ_OFFS)
>
> And decode it by
>
> insn->memsz_bytes = 1 << ((attr & INAT_MEMSZ_MASK) >> INAT_MEMSZ_OFFS)
>
> Thus, we only need 3 bits to represent 1, 2, 4, 8, 16 and 32. :)

We'll need 4 so that we could do 64 too :)

btw, why aren't we using regular bitfields? this manual encoding
thingie seems to be a bit confusing (try figuring out how many
bits are left...).
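
For comparison, a bitfield layout could look something like this (field names
and widths here are purely illustrative, not a concrete proposal):

----
/* the compiler does the bit bookkeeping for us */
typedef struct {
	unsigned int pfx_or_esc:4;	/* prefix/escape information */
	unsigned int imm_type:4;	/* immediate type */
	unsigned int memsz_log2:4;	/* log2 of the memory access size */
	unsigned int flags:20;		/* everything else */
} insn_attr_t;
----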


Thanks,
Sasha

2014-04-17 17:32:00

by Sasha Levin

[permalink] [raw]
Subject: Re: [PATCH 3/4] x86/insn: Extract more information about instructions

On 04/17/2014 11:28 AM, H. Peter Anvin wrote:
> On 04/17/2014 08:20 AM, Sasha Levin wrote:
>>
>> It's not like the instruction decoder is a generic piece of code right now anyway;
>> it mostly serves [k,u]probes and was built around their requirements, and
>> now everybody is surprised that kmemcheck has different requirements than kprobes.
>>
>
> What *ARE* kmemcheck's requirements? That's the real issue, I believe.

As far as kmemcheck is concerned, it needs to decode instructions so that
it can learn whether an instruction will access memory and, if it does,
the size of the memory the instruction attempts to access.

Note that right now kmemcheck implements a dumb instruction decoder (see
arch/x86/mm/kmemcheck/opcode.c) which works for most cases, but breaks on
more than a few. The first patch in this series adds a few cases where that
decoder will fail.

This is the main purpose of the patch series. We want to switch kmemcheck
to use the generic instruction decoder to avoid the bugs found in the
dumbed-down instruction decoder used by kmemcheck.

On top of that, there are several features which need to know more about
instructions:

1. kmemcheck wants to know if a given instruction may act on only part of
the memory it addresses, or on all of it. This solves the problem of partially
initialized bitfields where uninitialized bits are never accessed (see the
sketch after this list).

For the above, kmemcheck needs to recognize whether an instruction is AND,
OR or XOR.

2. There are two sets of instructions, MOVS and CMPS, which work a bit
differently from the rest. They take two memory addresses but will
generate only one page fault on access, which will make kmemcheck miss
a memory access.

kmemcheck needs to know if an instruction is a MOVS or CMPS to deal with
that corner case properly.
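
As a concrete illustration of the first point, consider this sketch (not
actual kmemcheck code, just the bitfield scenario described above):

----
struct flags {
	unsigned int a:4;	/* written below */
	unsigned int b:4;	/* never initialized */
};

void set_a(struct flags *f)
{
	/*
	 * The compiler will typically emit a read-modify-write of the
	 * whole byte here (load, AND with a mask, OR in the new value,
	 * store). The load touches the uninitialized bits of 'b', but
	 * they are masked off and never used, so kmemcheck should not
	 * warn about this access.
	 */
	f->a = 3;
}
----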

> I also have seen several attempts at using the generic instruction
> decoder which has resulted in more complexity, not less, because of
> excess generality, so it is not an obvious thing.

Let's split this patchset into two:

We have one part which moves kmemcheck to the generic instruction decoder
and adds memory access size to the instruction decoder. There seems to be
no objection to that part beyond technical issues regarding how we store
the new size value.

The other part is adding mnemonics to the instruction decoder. If my
explanation above makes sense, and kmemcheck does need to know about AND,
OR, XOR, MOVS and CMPS then let me know how to proceed about changing
the instruction decoder to add that functionality.

If I'm wrong about needing to know about those mnemonics, I'd be happy
to fix that issue in kmemcheck, but for that I need to know why I'm
wrong.


Thanks,
Sasha

Subject: Re: [PATCH 3/4] x86/insn: Extract more information about instructions

(2014/04/18 0:33), Sasha Levin wrote:
> On 04/16/2014 01:44 AM, Masami Hiramatsu wrote:
>> Same thing can be done in awk part and insn.c, and we can encode it by
>>
>> #define INAT_MAKE_MEMSZ(size) (size << INAT_MEMSZ_OFFS)
>>
>> And decode it by
>>
>> insn->memsz_bytes = 1 << ((attr & INAT_MEMSZ_MASK) >> INAT_MEMSZ_OFFS)
>>
>> Thus, we only need 3 bits to represent 1, 2, 4, 8, 16 and 32. :)
>
> We'll need 4 so that we could do 64 too :)

Do you mean AVX512? Actually that's not supported currently :(
Anyway, that's OK with me. Also, we need another 2 bits for
the operands which depend on the address-size and operand-size prefixes.

> btw, why aren't we using regular bitfields? this manual encoding
> thingie seems to be a bit confusing (try figuring out how many
> bits are left...).

Ah, right. OK, I'll try to do that :)

Thank you!

--
Masami HIRAMATSU
Software Platform Research Dept. Linux Technology Research Center
Hitachi, Ltd., Yokohama Research Laboratory
E-mail: [email protected]

Subject: Re: [PATCH 3/4] x86/insn: Extract more information about instructions

(2014/04/18 2:31), Sasha Levin wrote:
>> I also have seen several attempts at using the generic instruction
>> decoder which has resulted in more complexity, not less, because of
>> excess generality, so it is not an obvious thing.
>
> Let's split this patchset into two:
>
> We have one part which moves kmemcheck to the generic instruction decoder
> and adds memory access size to the instruction decoder. There seems to be
> no objection to that part beyond technical issues regarding how we store
> the new size value.

This looks OK to me.

> The other part is adding mnemonics to the instruction decoder. If my
> explanation above makes sense, and kmemcheck does need to know about AND,
> OR, XOR, MOVS and CMPS then let me know how to proceed about changing
> the instruction decoder to add that functionality.

I don't think we need to add such things to the instruction decoder.
You'd better start by clarifying the bit patterns of those instructions
and making macros or inlines which evaluate insn->opcode.value.

Using automatically generated macros for immediates in the source code always
leads to misunderstanding and abuse, and is hard to fix if a bug is there.
I strongly recommend that you define the instruction classification macros
by hand for this use. That's easy to review too.
Actually, x86 has a long history and its mnemonics are not so simple...
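
For example, something along these lines (a sketch only: the opcode values
come from the SDM, the helper names are made up here, and only the
single-byte, non-group forms are handled):

----
/* MOVS (0xa4/0xa5) and CMPS (0xa6/0xa7) string instructions */
static inline bool insn_is_movs_or_cmps(struct insn *insn)
{
	insn_byte_t op = insn->opcode.bytes[0];

	return op >= 0xa4 && op <= 0xa7;
}

/* single-byte AND (0x20-0x25), OR (0x08-0x0d), XOR (0x30-0x35) forms */
static inline bool insn_is_bitwise(struct insn *insn)
{
	insn_byte_t op = insn->opcode.bytes[0];

	if ((op & ~0x07) == 0x20 || (op & ~0x07) == 0x08 || (op & ~0x07) == 0x30)
		return (op & 0x07) <= 0x05;

	/* the group-1 forms (0x80/0x81/0x83) also need ModRM.reg checked */
	return false;
}
----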

Thank you,

--
Masami HIRAMATSU
Software Platform Research Dept. Linux Technology Research Center
Hitachi, Ltd., Yokohama Research Laboratory
E-mail: [email protected]

2014-04-18 03:46:01

by H. Peter Anvin

[permalink] [raw]
Subject: Re: [PATCH 3/4] x86/insn: Extract more information about instructions

On 04/17/2014 08:40 PM, Masami Hiramatsu wrote:
> (2014/04/18 2:31), Sasha Levin wrote:
>>> I also have seen several attempts at using the generic instruction
>>> decoder which has resulted in more complexity, not less, because of
>>> excess generality, so it is not an obvious thing.
>>
>> Let's split this patchset into two:
>>
>> We have one part which moves kmemcheck to the generic instruction decoder
>> and adds memory access size to the instruction decoder. There seems to be
>> no objection to that part beyond technical issues regarding how we store
>> the new size value.
>
> This looks OK to me.
>
>> The other part is adding mnemonics to the instruction decoder. If my
>> explanation above makes sense, and kmemcheck does need to know about AND,
>> OR, XOR, MOVS and CMPS then let me know how to proceed about changing
>> the instruction decoder to add that functionality.
>
> I don't think we need to add such things to the instruction decoder.
> You'd better start by clarifying the bit patterns of those instructions
> and making macros or inlines which evaluate insn->opcode.value.
>
> Using automatically generated macros for immediates in the source code always
> leads to misunderstanding and abuse, and is hard to fix if a bug is there.
> I strongly recommend that you define the instruction classification macros
> by hand for this use. That's easy to review too.
> Actually, x86 has a long history and its mnemonics are not so simple...
>

What it sounds like it really wants is a "bitwise" flag on the instruction.

-hpa

2014-04-18 15:48:14

by Sasha Levin

[permalink] [raw]
Subject: Re: [PATCH 3/4] x86/insn: Extract more information about instructions

On 04/17/2014 11:45 PM, H. Peter Anvin wrote:
>>> The other part is adding mnemonics to the instruction decoder. If my
>>> >> explanation above makes sense, and kmemcheck does need to know about AND,
>>> >> OR, XOR, MOVS and CMPS then let me know how to proceed about changing
>>> >> the instruction decoder to add that functionality.
>> >
>> > I don't think we need to add such things to the instruction decoder.
>> > You'd better start by clarifying the bit patterns of those instructions
>> > and making macros or inlines which evaluate insn->opcode.value.

There are very specific mnemonics that kmemcheck wants to detect and treat
as a corner case.

What you're saying here is that while the instruction decoder already has
knowledge of mnemonics, kmemcheck shouldn't use it and should instead write
its own opcode -> mnemonic parser.

Note that it won't be enough to decode just the opcodes, as variants of the
instructions we need to detect are hidden inside groups, so we'd need to parse
mod/rm in addition to the opcode.

This means we're adding a tiny instruction parser to kmemcheck, which is
exactly the thing we're trying to remove with the previous part of this
patchset.

>> > Using automatically generated macros for immediates in the source code always
>> > leads to misunderstanding and abuse, and is hard to fix if a bug is there.
>> > I strongly recommend that you define the instruction classification macros
>> > by hand for this use. That's easy to review too.
>> > Actually, x86 has a long history and its mnemonics are not so simple...

If the issue is that they get dynamically generated I'm fine with making that
a static list and updating it by hand whenever new instructions or
mnemonics are introduced.

> What it sounds like it really wants is a "bitwise" flag on the instruction.

A flag like that would solve part of the problem (we'd still need to work with
CMPS and MOVS), and sounds very kmemcheck specific.
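
For reference, such a flag would presumably look like the other inat
attribute bits; a hypothetical sketch (the name and the assumption that a
free bit exists are both made up here):

----
/* mark bit-combining instructions (AND/OR/XOR/TEST) */
#define INAT_BITWISE	(1 << 30)	/* assumes this attribute bit is free */

static inline int inat_is_bitwise(insn_attr_t attr)
{
	return attr & INAT_BITWISE;
}
----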


Thanks,
Sasha

2014-04-18 16:49:32

by H. Peter Anvin

[permalink] [raw]
Subject: Re: [PATCH 3/4] x86/insn: Extract more information about instructions

On 04/18/2014 08:47 AM, Sasha Levin wrote:
>
> There are very specific mnemonics that kmemcheck wants to detect and treat
> as a corner case.
>
> What you're saying here is that while the instruction decoder already has
> knowledge of mnemonics, kmemcheck shouldn't use it and should instead write
> its own opcode -> mnemonic parser.
>

I think that involving mnemonics is "bonghits" level of crazy. It's a
solution in search of a problem, but it is a hack, and a pretty
horrific one.

>
>> What it sounds like it really wants is a "bitwise" flag on the instruction.
>
> A flag like that would solve part of the problem (we'd still need to work with
> CMPS and MOVS), and sounds very kmemcheck specific.
>

Well, guess what, the whole point is to export information to the users
that need it.

-hpa