2023-11-29 07:36:35

by M K, Muralidhara

[permalink] [raw]
Subject: [PATCH v2 0/6] Address Translation support for MI200 and MI300 models

From: Muralidhara M K <[email protected]>

This patchset adds support for MI200 heterogeneous address translation and
MI300A address translation, plus a few fixups on HBM3 memory address maps to
convert an on-die (MCA decoded) address to a normalized address.

The patch set depends on Yazen's patch series "AMD Address Translation Library":
https://lore.kernel.org/r/[email protected]

The patchset does the following

Patch 1:
MI200 heterogeneous address translation support.

Patch 2:
MI300 heterogeneous address translation support.

Patch 3:
Convert HBM3 MCA Decoded address to Normalized address.

Patch 4:
lookup table to get the correct cs instance id for HBM3.

Patch 5:
Convert physical cs id to logical cs id by static lookup
table.

Patch 6:
Identify all 8 column system physical addresses from each HBM3 row and retire all
column addresses when the error is injected to avoid future errors.

Muralidhara M K (6):
RAS: Add Address Translation support for MI200
RAS: Add Address Translation support for MI300
RAS: Add MCA Error address conversion for UMC
RAS: Add static lookup table to get CS physical ID
RAS: Add fixed Physical to logical CS ID mapping table
RAS: EDAC/amd64: Retire all system physical address from HBM3 row

drivers/edac/amd64_edac.c | 3 +
drivers/ras/amd/atl/core.c | 5 +-
drivers/ras/amd/atl/dehash.c | 149 ++++++++++++++++
drivers/ras/amd/atl/denormalize.c | 110 +++++++++++-
drivers/ras/amd/atl/internal.h | 27 ++-
drivers/ras/amd/atl/map.c | 158 ++++++++++++++---
drivers/ras/amd/atl/reg_fields.h | 34 ++++
drivers/ras/amd/atl/system.c | 4 +
drivers/ras/amd/atl/umc.c | 284 +++++++++++++++++++++++++++++-
include/linux/amd-atl.h | 2 +
10 files changed, 747 insertions(+), 29 deletions(-)

--
2.25.1


2023-11-29 07:36:37

by M K, Muralidhara

[permalink] [raw]
Subject: [PATCH v2 2/6] RAS: Add Address Translation support for MI300

From: Muralidhara M K <[email protected]>

Add support for address translation on Data Fabric version 4.5
for MI300 systems.
Add new interleaving modes for APU model support and adjust how
the DRAM address maps are found early in the translation for
certain cases.

Signed-off-by: Muralidhara M K <[email protected]>
Co-developed-by: Yazen Ghannam <[email protected]>
Signed-off-by: Yazen Ghannam <[email protected]>
---
Changes:
v1 -> v2 : None

drivers/ras/amd/atl/core.c | 5 +-
drivers/ras/amd/atl/dehash.c | 89 +++++++++++++++++++++++++++++++
drivers/ras/amd/atl/denormalize.c | 79 +++++++++++++++++++++++++++
drivers/ras/amd/atl/internal.h | 12 ++++-
drivers/ras/amd/atl/map.c | 53 +++++++++++-------
drivers/ras/amd/atl/reg_fields.h | 5 ++
drivers/ras/amd/atl/system.c | 3 ++
drivers/ras/amd/atl/umc.c | 28 +++++++++-
8 files changed, 250 insertions(+), 24 deletions(-)

diff --git a/drivers/ras/amd/atl/core.c b/drivers/ras/amd/atl/core.c
index 8c997c7ae8a6..cbbaf82f1ee1 100644
--- a/drivers/ras/amd/atl/core.c
+++ b/drivers/ras/amd/atl/core.c
@@ -56,7 +56,7 @@ static int add_legacy_hole(struct addr_ctx *ctx)
if (df_cfg.rev >= DF4)
func = 7;

- if (df_indirect_read_broadcast(ctx->node_id, func, 0x104, &dram_hole_base))
+ if (df_indirect_read_broadcast(ctx->df_acc_id, func, 0x104, &dram_hole_base))
return -EINVAL;

dram_hole_base &= DF_DRAM_HOLE_BASE_MASK;
@@ -103,7 +103,7 @@ static bool late_hole_remove(struct addr_ctx *ctx)
return false;
}

-int norm_to_sys_addr(u8 socket_id, u8 die_id, u8 cs_inst_id, u64 *addr)
+int norm_to_sys_addr(u16 df_acc_id, u8 socket_id, u8 die_id, u8 cs_inst_id, u64 *addr)
{
struct addr_ctx ctx;

@@ -115,6 +115,7 @@ int norm_to_sys_addr(u8 socket_id, u8 die_id, u8 cs_inst_id, u64 *addr)
/* We start from the normalized address */
ctx.ret_addr = *addr;
ctx.inst_id = cs_inst_id;
+ ctx.df_acc_id = df_acc_id;

if (determine_node_id(&ctx, socket_id, die_id)) {
pr_warn("Failed to determine Node ID");
diff --git a/drivers/ras/amd/atl/dehash.c b/drivers/ras/amd/atl/dehash.c
index 5760e6bca194..ddfada2eb7b4 100644
--- a/drivers/ras/amd/atl/dehash.c
+++ b/drivers/ras/amd/atl/dehash.c
@@ -450,6 +450,90 @@ static int mi200_dehash_addr(struct addr_ctx *ctx)
return 0;
}

+/*
+ * MI300 hash bits
+ * 4K 64K 2M 1G 1T 1T
+ * CSSelect[0] = XOR of addr{8, 12, 15, 22, 29, 36, 43}
+ * CSSelect[1] = XOR of addr{9, 13, 16, 23, 30, 37, 44}
+ * CSSelect[2] = XOR of addr{10, 14, 17, 24, 31, 38, 45}
+ * CSSelect[3] = XOR of addr{11, 18, 25, 32, 39, 46}
+ * CSSelect[4] = XOR of addr{14, 19, 26, 33, 40, 47} aka Stack
+ * DieID[0] = XOR of addr{12, 20, 27, 34, 41 }
+ * DieID[1] = XOR of addr{13, 21, 28, 35, 42 }
+ */
+static int mi300_dehash_addr(struct addr_ctx *ctx)
+{
+ bool hash_ctl_4k, hash_ctl_64k, hash_ctl_2M, hash_ctl_1G, hash_ctl_1T;
+ u8 hashed_bit, intlv_bit, num_intlv_bits, base_bit, i;
+
+ if (ctx->map.intlv_bit_pos != 8) {
+ pr_warn("%s: Invalid interleave bit: %u",
+ __func__, ctx->map.intlv_bit_pos);
+ return -EINVAL;
+ }
+
+ if (ctx->map.num_intlv_sockets > 1) {
+ pr_warn("%s: Invalid number of interleave sockets: %u",
+ __func__, ctx->map.num_intlv_sockets);
+ return -EINVAL;
+ }
+
+ hash_ctl_4k = FIELD_GET(DF4p5_HASH_CTL_4K, ctx->map.ctl);
+ hash_ctl_64k = FIELD_GET(DF4p5_HASH_CTL_64K, ctx->map.ctl);
+ hash_ctl_2M = FIELD_GET(DF4p5_HASH_CTL_2M, ctx->map.ctl);
+ hash_ctl_1G = FIELD_GET(DF4p5_HASH_CTL_1G, ctx->map.ctl);
+ hash_ctl_1T = FIELD_GET(DF4p5_HASH_CTL_1T, ctx->map.ctl);
+
+ /* Channel bits */
+ num_intlv_bits = ilog2(ctx->map.num_intlv_chan);
+
+ for (i = 0; i < num_intlv_bits; i++) {
+ base_bit = 8 + i;
+
+ /* CSSelect[4] jumps to a base bit of 14. */
+ if (i == 4)
+ base_bit = 14;
+
+ intlv_bit = atl_get_bit(base_bit, ctx->ret_addr);
+
+ hashed_bit = intlv_bit;
+
+ /* 4k hash bit only applies to the first 3 bits. */
+ if (i <= 2)
+ hashed_bit ^= atl_get_bit(12 + i, ctx->ret_addr) & hash_ctl_4k;
+
+ hashed_bit ^= atl_get_bit(15 + i, ctx->ret_addr) & hash_ctl_64k;
+ hashed_bit ^= atl_get_bit(22 + i, ctx->ret_addr) & hash_ctl_2M;
+ hashed_bit ^= atl_get_bit(29 + i, ctx->ret_addr) & hash_ctl_1G;
+ hashed_bit ^= atl_get_bit(36 + i, ctx->ret_addr) & hash_ctl_1T;
+ hashed_bit ^= atl_get_bit(43 + i, ctx->ret_addr) & hash_ctl_1T;
+
+ if (hashed_bit != intlv_bit)
+ ctx->ret_addr ^= BIT_ULL(base_bit);
+ }
+
+ /* Die bits */
+ num_intlv_bits = ilog2(ctx->map.num_intlv_dies);
+
+ for (i = 0; i < num_intlv_bits; i++) {
+ base_bit = 12 + i;
+
+ intlv_bit = atl_get_bit(base_bit, ctx->ret_addr);
+
+ hashed_bit = intlv_bit;
+
+ hashed_bit ^= atl_get_bit(20 + i, ctx->ret_addr) & hash_ctl_64k;
+ hashed_bit ^= atl_get_bit(27 + i, ctx->ret_addr) & hash_ctl_2M;
+ hashed_bit ^= atl_get_bit(34 + i, ctx->ret_addr) & hash_ctl_1G;
+ hashed_bit ^= atl_get_bit(41 + i, ctx->ret_addr) & hash_ctl_1T;
+
+ if (hashed_bit != intlv_bit)
+ ctx->ret_addr ^= BIT_ULL(base_bit);
+ }
+
+ return 0;
+}
+
int dehash_address(struct addr_ctx *ctx)
{
switch (ctx->map.intlv_mode) {
@@ -512,6 +596,11 @@ int dehash_address(struct addr_ctx *ctx)
case MI2_HASH_32CHAN:
return mi200_dehash_addr(ctx);

+ case MI3_HASH_8CHAN:
+ case MI3_HASH_16CHAN:
+ case MI3_HASH_32CHAN:
+ return mi300_dehash_addr(ctx);
+
default:
ATL_BAD_INTLV_MODE(ctx->map.intlv_mode);
return -EINVAL;
diff --git a/drivers/ras/amd/atl/denormalize.c b/drivers/ras/amd/atl/denormalize.c
index 03eb1eea68f9..b233a26f68fc 100644
--- a/drivers/ras/amd/atl/denormalize.c
+++ b/drivers/ras/amd/atl/denormalize.c
@@ -85,6 +85,46 @@ static u64 make_space_for_cs_id_split_2_1(struct addr_ctx *ctx)
return expand_bits(12, ctx->map.total_intlv_bits - 1, denorm_addr);
}

+/*
+ * Make space for CS ID at bits [14:8] as follows:
+ *
+ * 8 channels -> bits [10:8]
+ * 16 channels -> bits [11:8]
+ * 32 channels -> bits [14,11:8]
+ *
+ * 1 die -> N/A
+ * 2 dies -> bit [12]
+ * 4 dies -> bits [13:12]
+ */
+static u64 make_space_for_cs_id_mi300(struct addr_ctx *ctx)
+{
+ u8 num_intlv_bits = order_base_2(ctx->map.num_intlv_chan);
+ u64 denorm_addr;
+
+ if (ctx->map.intlv_bit_pos != 8) {
+ pr_warn("%s: Invalid interleave bit: %u", __func__, ctx->map.intlv_bit_pos);
+ return -1;
+ }
+
+ /* Channel bits. Covers up to 4 bits at [11:8]. */
+ if (num_intlv_bits > 4)
+ denorm_addr = expand_bits(8, 4, ctx->ret_addr);
+ else
+ denorm_addr = expand_bits(ctx->map.intlv_bit_pos, num_intlv_bits, ctx->ret_addr);
+
+ /* Die bits. Always starts at [12]. */
+ if (ctx->map.num_intlv_dies > 1)
+ denorm_addr = expand_bits(12,
+ ctx->map.total_intlv_bits - num_intlv_bits,
+ denorm_addr);
+
+ /* Additional channel bit at [14]. */
+ if (num_intlv_bits > 4)
+ denorm_addr = expand_bits(14, 1, denorm_addr);
+
+ return denorm_addr;
+}
+
/*
* Take the current calculated address and shift enough bits in the middle
* to make a gap where the interleave bits will be inserted.
@@ -116,6 +156,11 @@ static u64 make_space_for_cs_id(struct addr_ctx *ctx)
case DF4p5_NPS1_16CHAN_2K_HASH:
return make_space_for_cs_id_split_2_1(ctx);

+ case MI3_HASH_8CHAN:
+ case MI3_HASH_16CHAN:
+ case MI3_HASH_32CHAN:
+ return make_space_for_cs_id_mi300(ctx);
+
case DF4p5_NPS2_4CHAN_1K_HASH:
//TODO
case DF4p5_NPS1_8CHAN_1K_HASH:
@@ -219,6 +264,32 @@ static u16 get_cs_id_df4(struct addr_ctx *ctx)
return cs_id;
}

+/*
+ * MI300 hash has:
+ * (C)hannel[3:0] = cs_id[3:0]
+ * (S)tack[0] = cs_id[4]
+ * (D)ie[1:0] = cs_id[6:5]
+ *
+ * Hashed cs_id is swizzled so that Stack bit is at the end.
+ * cs_id = SDDCCCC
+ */
+static u16 get_cs_id_mi300(struct addr_ctx *ctx)
+{
+ u8 channel_bits, die_bits, stack_bit;
+ u16 die_id;
+
+ /* Subtract the "base" Destination Fabric ID. */
+ ctx->cs_fabric_id -= get_dst_fabric_id(ctx);
+
+ die_id = (ctx->cs_fabric_id & df_cfg.die_id_mask) >> df_cfg.die_id_shift;
+
+ channel_bits = FIELD_GET(GENMASK(3, 0), ctx->cs_fabric_id);
+ stack_bit = FIELD_GET(BIT(4), ctx->cs_fabric_id) << 6;
+ die_bits = die_id << 4;
+
+ return stack_bit | die_bits | channel_bits;
+}
+
/*
* Derive the correct CS ID that represents the interleave bits
* used within the system physical address. This accounts for the
@@ -252,6 +323,11 @@ static u16 calculate_cs_id(struct addr_ctx *ctx)
case DF4p5_NPS1_16CHAN_2K_HASH:
return get_cs_id_df4(ctx);

+ case MI3_HASH_8CHAN:
+ case MI3_HASH_16CHAN:
+ case MI3_HASH_32CHAN:
+ return get_cs_id_mi300(ctx);
+
/* CS ID is simply the CS Fabric ID adjusted by the Destination Fabric ID. */
case DF4p5_NPS2_4CHAN_1K_HASH:
case DF4p5_NPS1_8CHAN_1K_HASH:
@@ -305,6 +381,9 @@ static u64 insert_cs_id(struct addr_ctx *ctx, u64 denorm_addr, u16 cs_id)
case MI2_HASH_8CHAN:
case MI2_HASH_16CHAN:
case MI2_HASH_32CHAN:
+ case MI3_HASH_8CHAN:
+ case MI3_HASH_16CHAN:
+ case MI3_HASH_32CHAN:
case DF2_2CHAN_HASH:
return insert_cs_id_at_intlv_bit(ctx, denorm_addr, cs_id);

diff --git a/drivers/ras/amd/atl/internal.h b/drivers/ras/amd/atl/internal.h
index 0ce3fd9ef4d4..3747d91a52f5 100644
--- a/drivers/ras/amd/atl/internal.h
+++ b/drivers/ras/amd/atl/internal.h
@@ -27,8 +27,12 @@
/* PCI IDs for Genoa DF Function 0. */
#define DF_FUNC0_ID_GENOA 0x14AD1022

+/* PCI IDs for MI300 DF Function 0. */
+#define DF_FUNC0_ID_MI300 0x15281022
+
/* Shift needed for adjusting register values to true values. */
#define DF_DRAM_BASE_LIMIT_LSB 28
+#define MI300_DRAM_LIMIT_LSB 20

/* Cache Coherent Moderator Instance Type value on register */
#define DF_INST_TYPE_CCM 0
@@ -74,6 +78,9 @@ enum intlv_modes {
DF4_NPS1_12CHAN_HASH = 0x15,
DF4_NPS2_5CHAN_HASH = 0x16,
DF4_NPS1_10CHAN_HASH = 0x17,
+ MI3_HASH_8CHAN = 0x18,
+ MI3_HASH_16CHAN = 0x19,
+ MI3_HASH_32CHAN = 0x1A,
MI2_HASH_8CHAN = 0x1C,
MI2_HASH_16CHAN = 0x1D,
MI2_HASH_32CHAN = 0x1E,
@@ -219,6 +226,9 @@ struct addr_ctx {
* System-wide ID that includes 'node' bits.
*/
u16 cs_fabric_id;
+
+ /* ID calculated from topology */
+ u16 df_acc_id;
};

int df_indirect_read_instance(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo);
@@ -235,7 +245,7 @@ u16 get_dst_fabric_id(struct addr_ctx *ctx);

int dehash_address(struct addr_ctx *ctx);

-int norm_to_sys_addr(u8 socket_id, u8 die_id, u8 cs_inst_id, u64 *addr);
+int norm_to_sys_addr(u16 df_acc_id, u8 socket_id, u8 die_id, u8 cs_inst_id, u64 *addr);

/*
* Helper to use test_bit() without needing to do
diff --git a/drivers/ras/amd/atl/map.c b/drivers/ras/amd/atl/map.c
index cf6127b1387b..2a246b603562 100644
--- a/drivers/ras/amd/atl/map.c
+++ b/drivers/ras/amd/atl/map.c
@@ -63,6 +63,10 @@ static int df4p5_get_intlv_mode(struct addr_ctx *ctx)
if (ctx->map.intlv_mode <= NOHASH_32CHAN)
return 0;

+ if (ctx->map.intlv_mode >= MI3_HASH_8CHAN &&
+ ctx->map.intlv_mode <= MI3_HASH_32CHAN)
+ return 0;
+
/*
* Modes matching the ranges above are returned as-is.
*
@@ -117,6 +121,9 @@ static u64 get_hi_addr_offset(u32 reg_dram_offset)
ATL_BAD_DF_REV;
}

+ if (df_cfg.rev == DF4p5 && df_cfg.flags.heterogeneous)
+ shift = MI300_DRAM_LIMIT_LSB;
+
return hi_addr_offset << shift;
}

@@ -138,13 +145,13 @@ static int get_dram_offset(struct addr_ctx *ctx, bool *enabled, u64 *norm_offset

if (df_cfg.rev >= DF4) {
/* Read D18F7x140 (DramOffset) */
- if (df_indirect_read_instance(ctx->node_id, 7, 0x140 + (4 * map_num),
+ if (df_indirect_read_instance(ctx->df_acc_id, 7, 0x140 + (4 * map_num),
ctx->inst_id, &reg_dram_offset))
return -EINVAL;

} else {
/* Read D18F0x1B4 (DramOffset) */
- if (df_indirect_read_instance(ctx->node_id, 0, 0x1B4 + (4 * map_num),
+ if (df_indirect_read_instance(ctx->df_acc_id, 0, 0x1B4 + (4 * map_num),
ctx->inst_id, &reg_dram_offset))
return -EINVAL;
}
@@ -170,7 +177,7 @@ static int df3_6ch_get_dram_addr_map(struct addr_ctx *ctx)
offset = 0x68;

/* Read D18F0x06{0,8} (DF::Skt0CsTargetRemap0)/(DF::Skt0CsTargetRemap1) */
- if (df_indirect_read_broadcast(ctx->node_id, 0, offset, &reg))
+ if (df_indirect_read_broadcast(ctx->df_acc_id, 0, offset, &reg))
return -EINVAL;

/* Save 8 remap entries. */
@@ -191,12 +198,12 @@ static int df3_6ch_get_dram_addr_map(struct addr_ctx *ctx)
static int df2_get_dram_addr_map(struct addr_ctx *ctx)
{
/* Read D18F0x110 (DramBaseAddress). */
- if (df_indirect_read_instance(ctx->node_id, 0, 0x110 + (8 * ctx->map.num),
+ if (df_indirect_read_instance(ctx->df_acc_id, 0, 0x110 + (8 * ctx->map.num),
ctx->inst_id, &ctx->map.base))
return -EINVAL;

/* Read D18F0x114 (DramLimitAddress). */
- if (df_indirect_read_instance(ctx->node_id, 0, 0x114 + (8 * ctx->map.num),
+ if (df_indirect_read_instance(ctx->df_acc_id, 0, 0x114 + (8 * ctx->map.num),
ctx->inst_id, &ctx->map.limit))
return -EINVAL;

@@ -209,7 +216,7 @@ static int df3_get_dram_addr_map(struct addr_ctx *ctx)
return -EINVAL;

/* Read D18F0x3F8 (DfGlobalCtl). */
- if (df_indirect_read_instance(ctx->node_id, 0, 0x3F8,
+ if (df_indirect_read_instance(ctx->df_acc_id, 0, 0x3F8,
ctx->inst_id, &ctx->map.ctl))
return -EINVAL;

@@ -222,22 +229,22 @@ static int df4_get_dram_addr_map(struct addr_ctx *ctx)
u32 remap_reg;

/* Read D18F7xE00 (DramBaseAddress). */
- if (df_indirect_read_instance(ctx->node_id, 7, 0xE00 + (16 * ctx->map.num),
+ if (df_indirect_read_instance(ctx->df_acc_id, 7, 0xE00 + (16 * ctx->map.num),
ctx->inst_id, &ctx->map.base))
return -EINVAL;

/* Read D18F7xE04 (DramLimitAddress). */
- if (df_indirect_read_instance(ctx->node_id, 7, 0xE04 + (16 * ctx->map.num),
+ if (df_indirect_read_instance(ctx->df_acc_id, 7, 0xE04 + (16 * ctx->map.num),
ctx->inst_id, &ctx->map.limit))
return -EINVAL;

/* Read D18F7xE08 (DramAddressCtl). */
- if (df_indirect_read_instance(ctx->node_id, 7, 0xE08 + (16 * ctx->map.num),
+ if (df_indirect_read_instance(ctx->df_acc_id, 7, 0xE08 + (16 * ctx->map.num),
ctx->inst_id, &ctx->map.ctl))
return -EINVAL;

/* Read D18F7xE0C (DramAddressIntlv). */
- if (df_indirect_read_instance(ctx->node_id, 7, 0xE0C + (16 * ctx->map.num),
+ if (df_indirect_read_instance(ctx->df_acc_id, 7, 0xE0C + (16 * ctx->map.num),
ctx->inst_id, &ctx->map.intlv))
return -EINVAL;

@@ -252,7 +259,7 @@ static int df4_get_dram_addr_map(struct addr_ctx *ctx)
remap_sel = FIELD_GET(DF4_REMAP_SEL, ctx->map.ctl);

/* Read D18F7x180 (CsTargetRemap0A). */
- if (df_indirect_read_instance(ctx->node_id, 7, 0x180 + (8 * remap_sel),
+ if (df_indirect_read_instance(ctx->df_acc_id, 7, 0x180 + (8 * remap_sel),
ctx->inst_id, &remap_reg))
return -EINVAL;

@@ -261,7 +268,7 @@ static int df4_get_dram_addr_map(struct addr_ctx *ctx)
ctx->map.remap_array[i] = (remap_reg >> (j * shift)) & mask;

/* Read D18F7x184 (CsTargetRemap0B). */
- if (df_indirect_read_instance(ctx->node_id, 7, 0x184 + (8 * remap_sel),
+ if (df_indirect_read_instance(ctx->df_acc_id, 7, 0x184 + (8 * remap_sel),
ctx->inst_id, &remap_reg))
return -EINVAL;

@@ -278,22 +285,22 @@ static int df4p5_get_dram_addr_map(struct addr_ctx *ctx)
u32 remap_reg;

/* Read D18F7x200 (DramBaseAddress). */
- if (df_indirect_read_instance(ctx->node_id, 7, 0x200 + (16 * ctx->map.num),
+ if (df_indirect_read_instance(ctx->df_acc_id, 7, 0x200 + (16 * ctx->map.num),
ctx->inst_id, &ctx->map.base))
return -EINVAL;

/* Read D18F7x204 (DramLimitAddress). */
- if (df_indirect_read_instance(ctx->node_id, 7, 0x204 + (16 * ctx->map.num),
+ if (df_indirect_read_instance(ctx->df_acc_id, 7, 0x204 + (16 * ctx->map.num),
ctx->inst_id, &ctx->map.limit))
return -EINVAL;

/* Read D18F7x208 (DramAddressCtl). */
- if (df_indirect_read_instance(ctx->node_id, 7, 0x208 + (16 * ctx->map.num),
+ if (df_indirect_read_instance(ctx->df_acc_id, 7, 0x208 + (16 * ctx->map.num),
ctx->inst_id, &ctx->map.ctl))
return -EINVAL;

/* Read D18F7x20C (DramAddressIntlv). */
- if (df_indirect_read_instance(ctx->node_id, 7, 0x20C + (16 * ctx->map.num),
+ if (df_indirect_read_instance(ctx->df_acc_id, 7, 0x20C + (16 * ctx->map.num),
ctx->inst_id, &ctx->map.intlv))
return -EINVAL;

@@ -308,7 +315,7 @@ static int df4p5_get_dram_addr_map(struct addr_ctx *ctx)
remap_sel = FIELD_GET(DF4_REMAP_SEL, ctx->map.ctl);

/* Read D18F7x180 (CsTargetRemap0A). */
- if (df_indirect_read_instance(ctx->node_id, 7, 0x180 + (24 * remap_sel),
+ if (df_indirect_read_instance(ctx->df_acc_id, 7, 0x180 + (24 * remap_sel),
ctx->inst_id, &remap_reg))
return -EINVAL;

@@ -317,7 +324,7 @@ static int df4p5_get_dram_addr_map(struct addr_ctx *ctx)
ctx->map.remap_array[i] = (remap_reg >> (j * shift)) & mask;

/* Read D18F7x184 (CsTargetRemap0B). */
- if (df_indirect_read_instance(ctx->node_id, 7, 0x184 + (24 * remap_sel),
+ if (df_indirect_read_instance(ctx->df_acc_id, 7, 0x184 + (24 * remap_sel),
ctx->inst_id, &remap_reg))
return -EINVAL;

@@ -326,7 +333,7 @@ static int df4p5_get_dram_addr_map(struct addr_ctx *ctx)
ctx->map.remap_array[i] = (remap_reg >> (j * shift)) & mask;

/* Read D18F7x188 (CsTargetRemap0C). */
- if (df_indirect_read_instance(ctx->node_id, 7, 0x188 + (24 * remap_sel),
+ if (df_indirect_read_instance(ctx->df_acc_id, 7, 0x188 + (24 * remap_sel),
ctx->inst_id, &remap_reg))
return -EINVAL;

@@ -455,7 +462,7 @@ static int lookup_cs_fabric_id(struct addr_ctx *ctx)
u32 reg;

/* Read D18F0x50 (FabricBlockInstanceInformation3). */
- if (df_indirect_read_instance(ctx->node_id, 0, 0x50, ctx->inst_id, &reg))
+ if (df_indirect_read_instance(ctx->df_acc_id, 0, 0x50, ctx->inst_id, &reg))
return -EINVAL;

if (df_cfg.rev < DF4p5)
@@ -463,6 +470,9 @@ static int lookup_cs_fabric_id(struct addr_ctx *ctx)
else
ctx->cs_fabric_id = FIELD_GET(DF4p5_CS_FABRIC_ID, reg);

+ if (df_cfg.rev == DF4p5 && df_cfg.flags.heterogeneous)
+ ctx->cs_fabric_id |= ctx->node_id << df_cfg.node_id_shift;
+
return 0;
}

@@ -578,6 +588,7 @@ static u8 get_num_intlv_chan(enum intlv_modes intlv_mode)
case DF3_COD1_8CHAN_HASH:
case DF4_NPS1_8CHAN_HASH:
case MI2_HASH_8CHAN:
+ case MI3_HASH_8CHAN:
case DF4p5_NPS1_8CHAN_1K_HASH:
case DF4p5_NPS1_8CHAN_2K_HASH:
return 8;
@@ -591,6 +602,7 @@ static u8 get_num_intlv_chan(enum intlv_modes intlv_mode)
return 12;
case NOHASH_16CHAN:
case MI2_HASH_16CHAN:
+ case MI3_HASH_16CHAN:
case DF4p5_NPS1_16CHAN_1K_HASH:
case DF4p5_NPS1_16CHAN_2K_HASH:
return 16;
@@ -599,6 +611,7 @@ static u8 get_num_intlv_chan(enum intlv_modes intlv_mode)
return 24;
case NOHASH_32CHAN:
case MI2_HASH_32CHAN:
+ case MI3_HASH_32CHAN:
return 32;
default:
ATL_BAD_INTLV_MODE(intlv_mode);
diff --git a/drivers/ras/amd/atl/reg_fields.h b/drivers/ras/amd/atl/reg_fields.h
index b85ab157773e..c3853a25217b 100644
--- a/drivers/ras/amd/atl/reg_fields.h
+++ b/drivers/ras/amd/atl/reg_fields.h
@@ -251,6 +251,11 @@
#define DF4_HASH_CTL_2M BIT(9)
#define DF4_HASH_CTL_1G BIT(10)
#define DF4_HASH_CTL_1T BIT(15)
+#define DF4p5_HASH_CTL_4K BIT(7)
+#define DF4p5_HASH_CTL_64K BIT(8)
+#define DF4p5_HASH_CTL_2M BIT(9)
+#define DF4p5_HASH_CTL_1G BIT(10)
+#define DF4p5_HASH_CTL_1T BIT(15)

/*
* High Address Offset
diff --git a/drivers/ras/amd/atl/system.c b/drivers/ras/amd/atl/system.c
index 656aac3f6c59..d80f24798a1e 100644
--- a/drivers/ras/amd/atl/system.c
+++ b/drivers/ras/amd/atl/system.c
@@ -124,6 +124,9 @@ static int df4_determine_df_rev(u32 reg)
if (reg == DF_FUNC0_ID_GENOA)
df_cfg.flags.genoa_quirk = 1;

+ if (reg == DF_FUNC0_ID_MI300)
+ df_cfg.flags.heterogeneous = 1;
+
return df4_get_fabric_id_mask_registers();
}

diff --git a/drivers/ras/amd/atl/umc.c b/drivers/ras/amd/atl/umc.c
index 80030db6b8a5..f334be0dc034 100644
--- a/drivers/ras/amd/atl/umc.c
+++ b/drivers/ras/amd/atl/umc.c
@@ -17,8 +17,16 @@ static u8 get_socket_id(struct mce *m)
return m->socketid;
}

+#define MCA_IPID_INST_ID_HI GENMASK_ULL(47, 44)
static u8 get_die_id(struct mce *m)
{
+ /* The "AMD Node ID" is provided in MCA_IPID[InstanceIdHi] */
+ if (df_cfg.rev == DF4p5 && df_cfg.flags.heterogeneous) {
+ u8 node_id = FIELD_GET(MCA_IPID_INST_ID_HI, m->ipid);
+
+ return node_id / 4;
+ }
+
/*
* For CPUs, this is the AMD Node ID modulo the number
* of AMD Nodes per socket.
@@ -37,14 +45,32 @@ static u8 get_cs_inst_id(struct mce *m)
return FIELD_GET(UMC_CHANNEL_NUM, m->ipid);
}

+/*
+ * Use CPU's AMD Node ID for all cases.
+ *
+ * This is needed to read DF registers which can only be
+ * done on CPU-attached DFs even in heterogeneous cases.
+ *
+ * Future systems may report MCA errors across AMD Nodes.
+ * For example, errors from CPU socket 1 are reported to a
+ * CPU on socket 0. When this happens, the assumption below
+ * will break. But the AMD Node ID will be reported in
+ * MCA_IPID[InstanceIdHi] at that time.
+ */
+static u16 get_df_acc_id(struct mce *m)
+{
+ return topology_die_id(m->extcpu);
+}
+
int umc_mca_addr_to_sys_addr(struct mce *m, u64 *sys_addr)
{
u8 cs_inst_id = get_cs_inst_id(m);
u8 socket_id = get_socket_id(m);
u64 addr = get_norm_addr(m);
u8 die_id = get_die_id(m);
+ u16 df_acc_id = get_df_acc_id(m);

- if (norm_to_sys_addr(socket_id, die_id, cs_inst_id, &addr))
+ if (norm_to_sys_addr(df_acc_id, socket_id, die_id, cs_inst_id, &addr))
return -EINVAL;

*sys_addr = addr;
--
2.25.1

2023-11-29 07:36:41

by M K, Muralidhara

[permalink] [raw]
Subject: [PATCH v2 6/6] RAS: EDAC/amd64: Retire all system physical address from HBM3 row

From: Muralidhara M K <[email protected]>

AMD systems have HBM memory embedded within the chip, and the entire memory
is managed by the host OS. Error containment needs to be reliable, because
HBM memory cannot be replaced.

HBM3 memory has 8 columns in each row, and the column bits are c2, c3 and c4,
which gives 8 possible combinations of addresses in each row.

Identify all these system physical addresses in an HBM row and retire all of
the system physical addresses to get rid of intermittent or recurrent memory
errors.

Signed-off-by: Muralidhara M K <[email protected]>
---
Changes:
v1 -> v2 : Rename and modify function amd_umc_retire_column_spa_from_row()

drivers/edac/amd64_edac.c | 3 ++
drivers/ras/amd/atl/umc.c | 77 +++++++++++++++++++++++++++++++++++++++
include/linux/amd-atl.h | 2 +
3 files changed, 82 insertions(+)

diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 623f84c53d2d..9872ede7eca9 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -2831,6 +2831,9 @@ static void decode_umc_error(int node_id, struct mce *m)

error_address_to_page_and_offset(sys_addr, &err);

+ if (pvt->fam == 0x19 && (pvt->model >= 0x90 && pvt->model <= 0x9f))
+ amd_umc_retire_column_spa_from_row(m);
+
log_error:
__log_ecc_error(mci, &err, ecc_type);
}
diff --git a/drivers/ras/amd/atl/umc.c b/drivers/ras/amd/atl/umc.c
index 3533db279cec..de51b666b20e 100644
--- a/drivers/ras/amd/atl/umc.c
+++ b/drivers/ras/amd/atl/umc.c
@@ -255,3 +255,80 @@ int umc_mca_addr_to_sys_addr(struct mce *m, u64 *sys_addr)
return 0;
}
EXPORT_SYMBOL_GPL(umc_mca_addr_to_sys_addr);
+
+/*
+ * High Bandwidth Memory (HBM v3) has fixed number of columns in a row.
+ * In specific, HBMv3 has 8 columns in one row.
+ * Extract column bits in a row to find all the combination of masks and
+ * to retire all the system physical addresses in that particular row.
+ */
+#define MAX_COLUMNS_IN_HBM_ROW 8
+
+/* Column 2, 3 and 4th bits in Normalized Address */
+#define UMC_NA_C2_BIT BIT(8)
+#define UMC_NA_C3_BIT BIT(9)
+#define UMC_NA_C4_BIT BIT(14)
+
+/* Possible combinations of column address masks in a HBM v3 row */
+#define C_1_1_1_MASK (UMC_NA_C4_BIT | UMC_NA_C3_BIT | UMC_NA_C2_BIT)
+#define C_1_1_0_MASK (UMC_NA_C4_BIT | UMC_NA_C3_BIT)
+#define C_1_0_1_MASK (UMC_NA_C4_BIT | UMC_NA_C2_BIT)
+#define C_1_0_0_MASK (UMC_NA_C4_BIT)
+#define C_0_1_1_MASK (UMC_NA_C3_BIT | UMC_NA_C2_BIT)
+#define C_0_1_0_MASK (UMC_NA_C3_BIT)
+#define C_0_0_1_MASK (UMC_NA_C2_BIT)
+#define C_0_0_0_MASK ~C_1_1_1_MASK
+
+/* Identify system address physical addresses of all columns in a HBM v3 row */
+static void identify_column_spa_from_row(struct mce *m, u64 *col)
+{
+ u8 cs_inst_id = get_cs_inst_id(m);
+ u8 socket_id = get_socket_id(m);
+ u64 norm_addr = get_norm_addr(m);
+ u8 die_id = get_die_id(m);
+ u16 df_acc_id = get_df_acc_id(m);
+
+ u64 retire_addr, column;
+ u64 column_masks[] = { 0, C_0_0_1_MASK, C_0_1_0_MASK, C_0_1_1_MASK,
+ C_1_0_0_MASK, C_1_0_1_MASK, C_1_1_0_MASK, C_1_1_1_MASK };
+
+ /* clear and loop for all possibilities of [c4 c3 c2] */
+ norm_addr &= C_0_0_0_MASK;
+
+ for (column = 0; column < ARRAY_SIZE(column_masks); column++) {
+ retire_addr = norm_addr | column_masks[column];
+
+ if (norm_to_sys_addr(df_acc_id, socket_id, die_id, cs_inst_id, &retire_addr))
+ pr_warn("Failed norm_to_sys_addr for column[%lld]\n", column);
+ else
+ col[column] = retire_addr;
+ }
+}
+
+void amd_umc_retire_column_spa_from_row(struct mce *m)
+{
+ u64 col[MAX_COLUMNS_IN_HBM_ROW];
+ u64 tmp[MAX_COLUMNS_IN_HBM_ROW];
+ int i, j, count = 0;
+ unsigned long pfn;
+
+ pr_info("Identify SPA of all columns from row for MCE Addr:0x%llx\n", m->addr);
+ identify_column_spa_from_row(m, col);
+
+ /* Find duplicate column SPA in a row */
+ for (i = 0; i < MAX_COLUMNS_IN_HBM_ROW; i++) {
+ for (j = 0; j < count; j++) {
+ if (col[i] == tmp[j])
+ break;
+ }
+ if (j == count) {
+ tmp[count] = col[i];
+ /* do page retirement, except for duplicate addresses */
+ pr_debug("Retire column spa:0x%llx ", tmp[count]);
+ pfn = PHYS_PFN(tmp[count]);
+ memory_failure(pfn, 0);
+ count++;
+ }
+ }
+}
+EXPORT_SYMBOL(amd_umc_retire_column_spa_from_row);
diff --git a/include/linux/amd-atl.h b/include/linux/amd-atl.h
index c625ea3ab5d0..6cba39be63ca 100644
--- a/include/linux/amd-atl.h
+++ b/include/linux/amd-atl.h
@@ -25,4 +25,6 @@ static inline int amd_umc_mca_addr_to_sys_addr(struct mce *m, u64 *sys_addr)
return umc_mca_addr_to_sys_addr(m, sys_addr);
}

+void amd_umc_retire_column_spa_from_row(struct mce *m);
+
#endif /* _AMD_ATL_H */
--
2.25.1

2023-11-29 07:36:44

by M K, Muralidhara

[permalink] [raw]
Subject: [PATCH v2 3/6] RAS: Add MCA Error address conversion for UMC

From: Muralidhara M K <[email protected]>

On AMD systems with HBM3 memory, the reported MCA address is a DRAM
address which needs to be converted to a normalized address before the
data fabric address translation.

MI300A models have on-chip HBM3 memory capable of On-Die ECC support.
Unlike the MI200's UMC ECC, an On-Die ECC error address reported to MCA
is an encoded address carrying a DRAM address (PC/SID/Bank/ROW/COL)
instead of a normalized address, due to the implementation difference
between HBM3 On-Die ECC and HBM2 host ECC.
Because On-Die ECC address reporting is done in the back-end of the UMC,
the normalized address is no longer available at that point.
So software needs to convert the reported MCA error address back to a
normalized address.

Signed-off-by: Muralidhara M K <[email protected]>
---
Changes:
v1 -> v2 : None

drivers/ras/amd/atl/umc.c | 145 ++++++++++++++++++++++++++++++++++++++
1 file changed, 145 insertions(+)

diff --git a/drivers/ras/amd/atl/umc.c b/drivers/ras/amd/atl/umc.c
index f334be0dc034..94eb2c5eae3b 100644
--- a/drivers/ras/amd/atl/umc.c
+++ b/drivers/ras/amd/atl/umc.c
@@ -12,6 +12,147 @@

#include "internal.h"

+static bool internal_bit_wise_xor(u32 inp)
+{
+ bool tmp = 0;
+ int i;
+
+ for (i = 0; i < 32; i++)
+ tmp = tmp ^ ((inp >> i) & 0x1);
+
+ return tmp;
+}
+
+/*
+ * Mapping of MCA decoded error address bit location to
+ * normalized address on MI300A systems.
+ */
+static const u8 umc_mca2na_mapping[] = {
+ 0, 5, 6, 8, 9, 14, 12, 13,
+ 10, 11, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28,
+ 7, 29, 30,
+};
+
+/* AddrHashBank and AddrHashPC/PC2 umc register bit fields */
+static struct {
+ u32 xor_enable :1;
+ u32 col_xor :13;
+ u32 row_xor :18;
+} addr_hash_pc, addr_hash_bank[4];
+
+static struct {
+ u32 bank_xor :6;
+} addr_hash_pc2;
+
+#define COLUMN_LOCATION GENMASK(5, 1)
+#define ROW_LOCATION GENMASK(23, 10)
+/*
+ * The location of bank, column and row are fixed.
+ * location of column bit must be NA[5].
+ * Row bits are always placed in a contiguous stretch of NA above the
+ * column and bank bits.
+ * Bits below the row bits can be either column or bank in any order,
+ * with the exception that NA[5] must be a column bit.
+ * Stack ID(SID) bits are placed in the MSB position of the NA.
+ */
+static int umc_ondie_addr_to_normaddr(u64 mca_addr, u16 nid)
+{
+ u32 bank[4], bank_hash[4], pc_hash;
+ u32 col, row, rawbank = 0, pc;
+ int i, temp = 0, err;
+ u64 mca2na;
+
+ /* Default umc base address on MI300A systems */
+ u32 gpu_umc_base = 0x90000;
+
+ /*
+ * Error address logged on MI300A systems is ondie MCA address
+ * in the format MCA_Addr[27:0] =
+ * {SID[1:0],PC[0],row[14:0],bank[3:0],col[4:0],1'b0}
+ * The bit locations are calculated as per umc_mca2na_mapping[]
+ * to find normalized address.
+ * Refer F19 M90h BKDG Section 20.3.1.3 for clarifications
+ *
+ * XORs need to be applied based on the hash settings below.
+ */
+
+ /* Calculate column and row */
+ col = FIELD_GET(COLUMN_LOCATION, mca_addr);
+ row = FIELD_GET(ROW_LOCATION, mca_addr);
+
+ /* Apply hashing on below banks for bank calculation */
+ for (i = 0; i < 4; i++)
+ bank_hash[i] = (mca_addr >> (6 + i)) & 0x1;
+
+ /* bank hash algorithm */
+ for (i = 0; i < 4; i++) {
+ /* Read AMD PPR UMC::AddrHashBank register */
+ err = amd_smn_read(nid, gpu_umc_base + 0xC8 + (i * 4), &temp);
+ if (err)
+ return err;
+
+ addr_hash_bank[i].xor_enable = temp & 1;
+ addr_hash_bank[i].col_xor = FIELD_GET(GENMASK(13, 1), temp);
+ addr_hash_bank[i].row_xor = FIELD_GET(GENMASK(31, 14), temp);
+ /* bank hash selection */
+ bank[i] = bank_hash[i] ^ (addr_hash_bank[i].xor_enable &
+ (internal_bit_wise_xor(col & addr_hash_bank[i].col_xor) ^
+ internal_bit_wise_xor(row & addr_hash_bank[i].row_xor)));
+ }
+
+ /* To apply hash on pc bit */
+ pc_hash = (mca_addr >> 25) & 0x1;
+
+ /* Read AMD PPR UMC::CH::AddrHashPC register */
+ err = amd_smn_read(nid, gpu_umc_base + 0xE0, &temp);
+ if (err)
+ return err;
+
+ addr_hash_pc.xor_enable = temp & 1;
+ addr_hash_pc.col_xor = FIELD_GET(GENMASK(13, 1), temp);
+ addr_hash_pc.row_xor = FIELD_GET(GENMASK(31, 14), temp);
+
+ /* Read AMD PPR UMC::CH::AddrHashPC2 register*/
+ err = amd_smn_read(nid, gpu_umc_base + 0xE4, &temp);
+ if (err)
+ return err;
+
+ addr_hash_pc2.bank_xor = FIELD_GET(GENMASK(5, 0), temp);
+
+ /* Calculate bank value from bank[0..3], bank[4] and bank[5] */
+ for (i = 0; i < 4; i++)
+ rawbank |= (bank[i] & 1) << i;
+
+ rawbank |= (mca_addr >> 22) & 0x30;
+
+ /* pseudochannel(pc) hash selection */
+ pc = pc_hash ^ (addr_hash_pc.xor_enable &
+ (internal_bit_wise_xor(col & addr_hash_pc.col_xor) ^
+ internal_bit_wise_xor(row & addr_hash_pc.row_xor) ^
+ internal_bit_wise_xor(rawbank & addr_hash_pc2.bank_xor)));
+
+ /* Mask b'25(pc_bit) and b'[9:6](bank) */
+ mca_addr &= ~0x20003c0ULL;
+
+ for (i = 0; i < 4; i++)
+ mca_addr |= (bank[i] << (6 + i));
+
+ mca_addr |= (pc << 25);
+
+ /* NA[4..0] is fixed */
+ mca2na = 0x0;
+ /* convert mca error address to normalized address */
+ for (i = 1; i < ARRAY_SIZE(umc_mca2na_mapping); i++)
+ mca2na |= ((mca_addr >> i) & 0x1) << umc_mca2na_mapping[i];
+
+ mca_addr = mca2na;
+ pr_debug("Error Addr 0x%016llx\n", mca_addr);
+ pr_debug("Error hit on Bank %d Row %d Column %d\n", rawbank, row, col);
+
+ return mca_addr;
+}
+
static u8 get_socket_id(struct mce *m)
{
return m->socketid;
@@ -36,6 +177,10 @@ static u8 get_die_id(struct mce *m)

static u64 get_norm_addr(struct mce *m)
{
+ /* MI300: DRAM->Normalized translation */
+ if (df_cfg.rev == DF4p5 && df_cfg.flags.heterogeneous)
+ return umc_ondie_addr_to_normaddr(m->addr, get_socket_id(m));
+
return m->addr;
}

--
2.25.1

2023-11-29 07:36:45

by M K, Muralidhara

[permalink] [raw]
Subject: [PATCH v2 1/6] RAS: Add Address Translation support for MI200

From: Muralidhara M K <[email protected]>

Add support for address translation on Data Fabric version 3.5.
Add new interleaving modes for heterogeneous model support and
adjust how the DRAM address maps are found early in the
translation for certain cases.

Signed-off-by: Muralidhara M K <[email protected]>
Co-developed-by: Yazen Ghannam <[email protected]>
Signed-off-by: Yazen Ghannam <[email protected]>
---
Changes:
v1 -> v2 : None

drivers/ras/amd/atl/dehash.c | 60 +++++++++++++++++
drivers/ras/amd/atl/denormalize.c | 11 +++-
drivers/ras/amd/atl/internal.h | 15 ++++-
drivers/ras/amd/atl/map.c | 105 +++++++++++++++++++++++++++++-
drivers/ras/amd/atl/reg_fields.h | 29 +++++++++
drivers/ras/amd/atl/system.c | 1 +
6 files changed, 217 insertions(+), 4 deletions(-)

diff --git a/drivers/ras/amd/atl/dehash.c b/drivers/ras/amd/atl/dehash.c
index e501f2e918d7..5760e6bca194 100644
--- a/drivers/ras/amd/atl/dehash.c
+++ b/drivers/ras/amd/atl/dehash.c
@@ -395,6 +395,61 @@ static int df4p5_dehash_addr(struct addr_ctx *ctx)
return 0;
}

+/*
+ * MI200 hash bits
+ * 64K 2M 1G
+ * CSSelect[0] = XOR of addr{8, 16, 21, 30};
+ * CSSelect[1] = XOR of addr{9, 17, 22, 31};
+ * CSSelect[2] = XOR of addr{10, 18, 23, 32};
+ * CSSelect[3] = XOR of addr{11, 19, 24, 33}; - 16 and 32 channel only
+ * CSSelect[4] = XOR of addr{12, 20, 25, 34}; - 32 channel only
+ */
+static int mi200_dehash_addr(struct addr_ctx *ctx)
+{
+ u8 num_intlv_bits = ctx->map.total_intlv_bits;
+ bool hash_ctl_64k, hash_ctl_2M, hash_ctl_1G;
+ u8 hashed_bit, intlv_bit, i;
+
+ /* Assert that interleave bit is 8. */
+ if (ctx->map.intlv_bit_pos != 8) {
+		pr_warn("%s: Invalid interleave bit: %u\n",
+ __func__, ctx->map.intlv_bit_pos);
+ return -EINVAL;
+ }
+
+ /* Assert that die interleaving is disabled. */
+ if (ctx->map.num_intlv_dies > 1) {
+		pr_warn("%s: Invalid number of interleave dies: %u\n",
+ __func__, ctx->map.num_intlv_dies);
+ return -EINVAL;
+ }
+
+ /* Assert that socket interleaving is disabled. */
+ if (ctx->map.num_intlv_sockets > 1) {
+		pr_warn("%s: Invalid number of interleave sockets: %u\n",
+ __func__, ctx->map.num_intlv_sockets);
+ return -EINVAL;
+ }
+
+ hash_ctl_64k = FIELD_GET(DF3_HASH_CTL_64K, ctx->map.ctl);
+ hash_ctl_2M = FIELD_GET(DF3_HASH_CTL_2M, ctx->map.ctl);
+ hash_ctl_1G = FIELD_GET(DF3_HASH_CTL_1G, ctx->map.ctl);
+
+ for (i = 0; i < num_intlv_bits; i++) {
+ intlv_bit = atl_get_bit(8 + i, ctx->ret_addr);
+
+ hashed_bit = intlv_bit;
+		/* Bit (8 + i) is intlv_bit itself; do not XOR it back in here. */
+ hashed_bit ^= atl_get_bit(16 + i, ctx->ret_addr) & hash_ctl_64k;
+ hashed_bit ^= atl_get_bit(21 + i, ctx->ret_addr) & hash_ctl_2M;
+ hashed_bit ^= atl_get_bit(30 + i, ctx->ret_addr) & hash_ctl_1G;
+
+ if (hashed_bit != intlv_bit)
+ ctx->ret_addr ^= BIT_ULL(8 + i);
+ }
+ return 0;
+}
+
int dehash_address(struct addr_ctx *ctx)
{
switch (ctx->map.intlv_mode) {
@@ -452,6 +507,11 @@ int dehash_address(struct addr_ctx *ctx)
case DF4p5_NPS1_16CHAN_2K_HASH:
return df4p5_dehash_addr(ctx);

+ case MI2_HASH_8CHAN:
+ case MI2_HASH_16CHAN:
+ case MI2_HASH_32CHAN:
+ return mi200_dehash_addr(ctx);
+
default:
ATL_BAD_INTLV_MODE(ctx->map.intlv_mode);
return -EINVAL;
diff --git a/drivers/ras/amd/atl/denormalize.c b/drivers/ras/amd/atl/denormalize.c
index fe1480c8e0d8..03eb1eea68f9 100644
--- a/drivers/ras/amd/atl/denormalize.c
+++ b/drivers/ras/amd/atl/denormalize.c
@@ -16,7 +16,7 @@
* Returns the Destination Fabric ID. This is the first (lowest)
* CS Fabric ID used within a DRAM Address map.
*/
-static u16 get_dst_fabric_id(struct addr_ctx *ctx)
+u16 get_dst_fabric_id(struct addr_ctx *ctx)
{
switch (df_cfg.rev) {
case DF2:
@@ -97,6 +97,9 @@ static u64 make_space_for_cs_id(struct addr_ctx *ctx)
case NOHASH_8CHAN:
case NOHASH_16CHAN:
case NOHASH_32CHAN:
+ case MI2_HASH_8CHAN:
+ case MI2_HASH_16CHAN:
+ case MI2_HASH_32CHAN:
case DF2_2CHAN_HASH:
return make_space_for_cs_id_at_intlv_bit(ctx);

@@ -233,6 +236,9 @@ static u16 calculate_cs_id(struct addr_ctx *ctx)
case DF3_COD4_2CHAN_HASH:
case DF3_COD2_4CHAN_HASH:
case DF3_COD1_8CHAN_HASH:
+ case MI2_HASH_8CHAN:
+ case MI2_HASH_16CHAN:
+ case MI2_HASH_32CHAN:
case DF2_2CHAN_HASH:
return get_cs_id_df2(ctx);

@@ -296,6 +302,9 @@ static u64 insert_cs_id(struct addr_ctx *ctx, u64 denorm_addr, u16 cs_id)
case NOHASH_8CHAN:
case NOHASH_16CHAN:
case NOHASH_32CHAN:
+ case MI2_HASH_8CHAN:
+ case MI2_HASH_16CHAN:
+ case MI2_HASH_32CHAN:
case DF2_2CHAN_HASH:
return insert_cs_id_at_intlv_bit(ctx, denorm_addr, cs_id);

diff --git a/drivers/ras/amd/atl/internal.h b/drivers/ras/amd/atl/internal.h
index f3888c8fd02d..0ce3fd9ef4d4 100644
--- a/drivers/ras/amd/atl/internal.h
+++ b/drivers/ras/amd/atl/internal.h
@@ -30,6 +30,12 @@
/* Shift needed for adjusting register values to true values. */
#define DF_DRAM_BASE_LIMIT_LSB 28

+/* Cache Coherent Moderator Instance Type value on register */
+#define DF_INST_TYPE_CCM 0
+
+/* Maximum possible number of DRAM maps within a Data Fabric. */
+#define DF_NUM_DRAM_MAPS_AVAILABLE 16
+
/*
* Glossary of acronyms used in address translation for Zen-based systems
*
@@ -68,6 +74,9 @@ enum intlv_modes {
DF4_NPS1_12CHAN_HASH = 0x15,
DF4_NPS2_5CHAN_HASH = 0x16,
DF4_NPS1_10CHAN_HASH = 0x17,
+ MI2_HASH_8CHAN = 0x1C,
+ MI2_HASH_16CHAN = 0x1D,
+ MI2_HASH_32CHAN = 0x1E,
DF2_2CHAN_HASH = 0x21,
/* DF4.5 modes are all IntLvNumChan + 0x20 */
DF4p5_NPS1_16CHAN_1K_HASH = 0x2C,
@@ -94,8 +103,9 @@ enum intlv_modes {

struct df_flags {
__u8 legacy_ficaa : 1,
+ heterogeneous : 1,
genoa_quirk : 1,
- __reserved_0 : 6;
+ __reserved_0 : 5;
};

struct df_config {
@@ -220,6 +230,9 @@ int determine_node_id(struct addr_ctx *ctx, u8 socket_num, u8 die_num);
int get_address_map(struct addr_ctx *ctx);

int denormalize_address(struct addr_ctx *ctx);
+
+u16 get_dst_fabric_id(struct addr_ctx *ctx);
+
int dehash_address(struct addr_ctx *ctx);

int norm_to_sys_addr(u8 socket_id, u8 die_id, u8 cs_inst_id, u64 *addr);
diff --git a/drivers/ras/amd/atl/map.c b/drivers/ras/amd/atl/map.c
index 05141da27028..cf6127b1387b 100644
--- a/drivers/ras/amd/atl/map.c
+++ b/drivers/ras/amd/atl/map.c
@@ -355,6 +355,101 @@ static int get_dram_addr_map(struct addr_ctx *ctx)
}
}

+static int find_moderator_instance_id(struct addr_ctx *ctx)
+{
+ u16 num_df_instances;
+ u32 reg;
+
+ /* Read D18F0x40 (FabricBlockInstanceCount). */
+ if (df_indirect_read_broadcast(0, 0, 0x40, &reg))
+ return -EINVAL;
+
+ num_df_instances = FIELD_GET(DF_BLOCK_INSTANCE_COUNT, reg);
+
+ for (ctx->inst_id = 0; ctx->inst_id < num_df_instances; ctx->inst_id++) {
+ /* Read D18F0x44 (FabricBlockInstanceInformation0). */
+ if (df_indirect_read_instance(0, 0, 0x44, ctx->inst_id, &reg))
+ return -EINVAL;
+
+ if (!reg)
+ continue;
+
+ /* Match on the first CCM instance. */
+ if (FIELD_GET(DF_INSTANCE_TYPE, reg) == DF_INST_TYPE_CCM)
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int find_map_by_dst_fabric_id(struct addr_ctx *ctx)
+{
+ u64 mask = df_cfg.node_id_mask;
+
+ for (ctx->map.num = 0; ctx->map.num < DF_NUM_DRAM_MAPS_AVAILABLE ; ctx->map.num++) {
+ if (get_dram_addr_map(ctx))
+ return -EINVAL;
+
+ /*
+ * Match if the Destination Fabric ID in this map is the same as
+ * the Fabric ID for the target memory device.
+ */
+ if ((get_dst_fabric_id(ctx) & mask) == (ctx->cs_fabric_id & mask))
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/* UMC to CS mapping for MI200 die[0]s */
+u8 umc_to_cs_mapping_mi200_die0[] = { 28, 20, 24, 16, 12, 4, 8, 0,
+ 6, 30, 2, 26, 22, 14, 18, 10,
+ 19, 11, 15, 7, 3, 27, 31, 23,
+ 9, 1, 5, 29, 25, 17, 21, 13};
+
+/* UMC to CS mapping for MI200 die[1]s */
+u8 umc_to_cs_mapping_mi200_die1[] = { 19, 11, 15, 7, 3, 27, 31, 23,
+ 9, 1, 5, 29, 25, 17, 21, 13,
+ 28, 20, 24, 16, 12, 4, 8, 0,
+ 6, 30, 2, 26, 22, 14, 18, 10};
+
+static int get_umc_to_cs_mapping(struct addr_ctx *ctx)
+{
+ if (ctx->inst_id >= sizeof(umc_to_cs_mapping_mi200_die0))
+ return -EINVAL;
+
+ /*
+	 * MI200 has 2 dies, which are enumerated alternately:
+ * die0's are enumerated as node 2, 4, 6 and 8
+ * die1's are enumerated as node 1, 3, 5 and 7
+ */
+ if (ctx->node_id % 2)
+ ctx->inst_id = umc_to_cs_mapping_mi200_die1[ctx->inst_id];
+ else
+ ctx->inst_id = umc_to_cs_mapping_mi200_die0[ctx->inst_id];
+
+ return 0;
+}
+
+static int get_address_map_heterogeneous(struct addr_ctx *ctx)
+{
+ if (ctx->node_id >= amd_nb_num()) {
+ if (get_umc_to_cs_mapping(ctx))
+ return -EINVAL;
+ }
+
+ ctx->cs_fabric_id = ctx->inst_id;
+ ctx->cs_fabric_id |= ctx->node_id << df_cfg.node_id_shift;
+
+ if (find_moderator_instance_id(ctx))
+ return -EINVAL;
+
+ if (find_map_by_dst_fabric_id(ctx))
+ return -EINVAL;
+
+ return 0;
+}
+
static int lookup_cs_fabric_id(struct addr_ctx *ctx)
{
u32 reg;
@@ -482,6 +577,7 @@ static u8 get_num_intlv_chan(enum intlv_modes intlv_mode)
case NOHASH_8CHAN:
case DF3_COD1_8CHAN_HASH:
case DF4_NPS1_8CHAN_HASH:
+ case MI2_HASH_8CHAN:
case DF4p5_NPS1_8CHAN_1K_HASH:
case DF4p5_NPS1_8CHAN_2K_HASH:
return 8;
@@ -494,6 +590,7 @@ static u8 get_num_intlv_chan(enum intlv_modes intlv_mode)
case DF4p5_NPS1_12CHAN_2K_HASH:
return 12;
case NOHASH_16CHAN:
+ case MI2_HASH_16CHAN:
case DF4p5_NPS1_16CHAN_1K_HASH:
case DF4p5_NPS1_16CHAN_2K_HASH:
return 16;
@@ -501,6 +598,7 @@ static u8 get_num_intlv_chan(enum intlv_modes intlv_mode)
case DF4p5_NPS0_24CHAN_2K_HASH:
return 24;
case NOHASH_32CHAN:
+ case MI2_HASH_32CHAN:
return 32;
default:
ATL_BAD_INTLV_MODE(intlv_mode);
@@ -645,8 +743,11 @@ int get_address_map(struct addr_ctx *ctx)
{
int ret = 0;

- /* TODO: Add special path for DF3.5 heterogeneous systems. */
- ret = get_address_map_common(ctx);
+ /* Add special path for DF3.5 heterogeneous systems. */
+ if (df_cfg.flags.heterogeneous && df_cfg.rev == DF3p5)
+ ret = get_address_map_heterogeneous(ctx);
+ else
+ ret = get_address_map_common(ctx);
if (ret)
return ret;

diff --git a/drivers/ras/amd/atl/reg_fields.h b/drivers/ras/amd/atl/reg_fields.h
index d48470e12498..b85ab157773e 100644
--- a/drivers/ras/amd/atl/reg_fields.h
+++ b/drivers/ras/amd/atl/reg_fields.h
@@ -601,3 +601,32 @@
#define DF2_SOCKET_ID_SHIFT GENMASK(31, 28)
#define DF3_SOCKET_ID_SHIFT GENMASK(9, 8)
#define DF4_SOCKET_ID_SHIFT GENMASK(11, 8)
+
+/*
+ * Total number of instances of all the blocks in DF
+ *
+ * Access type: Broadcast
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * D18F0x040 [Fabric Block Instance Count]
+ * DF3 BlkInstCount [7:0]
+ * DF3p5 BlkInstCount [7:0]
+ * DF4 BlkInstCount [7:0]
+ * DF4p5 BlkInstCount [9:0]
+ */
+#define DF_BLOCK_INSTANCE_COUNT GENMASK(9, 0)
+
+/*
+ * Information on the block capabilities
+ *
+ * Access type: Instance
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * D18F0x044 [Fabric Block Instance Information 0]
+ * DF3p5		InstanceType	[3:0]
+ */
+#define DF_INSTANCE_TYPE GENMASK(3, 0)
diff --git a/drivers/ras/amd/atl/system.c b/drivers/ras/amd/atl/system.c
index 86488138e120..656aac3f6c59 100644
--- a/drivers/ras/amd/atl/system.c
+++ b/drivers/ras/amd/atl/system.c
@@ -144,6 +144,7 @@ static int determine_df_rev_legacy(void)

if (FIELD_GET(DF4_COMPONENT_ID_MASK, fabric_id_mask0)) {
df_cfg.rev = DF3p5;
+ df_cfg.flags.heterogeneous = 1;

/* Read D18F1x154 (SystemFabricIdMask1) */
if (df_indirect_read_broadcast(0, 1, 0x154, &fabric_id_mask1))
--
2.25.1

2023-11-29 07:36:46

by M K, Muralidhara

[permalink] [raw]
Subject: [PATCH v2 4/6] RAS: Add static lookup table to get CS physical ID

From: Muralidhara M K <[email protected]>

AMD MI300A models have a single Data Fabric (DF) instance per socket,
so the 4 AIDs are not individually software-visible (via PCI Device 18h, etc.).

The MCA_IPID_UMC[InstanceId] field holds the SMN base address for the
UMC instance, and the same SMN address mapping is repeated for each of
the 4 AIDs in a socket.

Add a static lookup table: read the UMC SMN base address from the
MCA_IPID_UMC[InstanceId] field and use that value to look up the
CS physical ID in the table.

Signed-off-by: Muralidhara M K <[email protected]>
---
Changes:
v1 -> v2 : None

drivers/ras/amd/atl/umc.c | 33 +++++++++++++++++++++++++++++++++
1 file changed, 33 insertions(+)

diff --git a/drivers/ras/amd/atl/umc.c b/drivers/ras/amd/atl/umc.c
index 94eb2c5eae3b..3533db279cec 100644
--- a/drivers/ras/amd/atl/umc.c
+++ b/drivers/ras/amd/atl/umc.c
@@ -153,6 +153,35 @@ static int umc_ondie_addr_to_normaddr(u64 mca_addr, u16 nid)
return mca_addr;
}

+/*
+ * MCA_IPID_UMC[InstanceId] holds the SMN Base Address for a UMC instance.
+ * MI-300 has a fixed, model-specific mapping between a UMC instance and its
+ * related Data Fabric CS instance.
+ * Use the UMC SMN Base Address value to find the appropriate CS instance ID.
+ */
+static const u32 csmap[32] = {
+ 0x393f00, 0x293f00, 0x193f00, 0x093f00, 0x392f00, 0x292f00,
+ 0x192f00, 0x092f00, 0x391f00, 0x291f00, 0x191f00, 0x091f00,
+ 0x390f00, 0x290f00, 0x190f00, 0x090f00, 0x793f00, 0x693f00,
+ 0x593f00, 0x493f00, 0x792f00, 0x692f00, 0x592f00, 0x492f00,
+ 0x791f00, 0x691f00, 0x591f00, 0x491f00, 0x790f00, 0x690f00,
+ 0x590f00, 0x490f00 };
+
+/* MCA_IPID[InstanceId] gives the SMN base address of the UMC instance. */
+#define UMC_PHY_INSTANCE_NUM GENMASK(31, 0)
+
+static u8 fixup_cs_inst_id(struct mce *m)
+{
+ u32 smn_addr = FIELD_GET(UMC_PHY_INSTANCE_NUM, m->ipid);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(csmap); i++) {
+ if (smn_addr == csmap[i])
+ break;
+ }
+	return i; /* NOTE: equals ARRAY_SIZE(csmap) when no entry matches — callers should validate. */
+}
+
static u8 get_socket_id(struct mce *m)
{
return m->socketid;
@@ -187,6 +216,10 @@ static u64 get_norm_addr(struct mce *m)
#define UMC_CHANNEL_NUM GENMASK(31, 20)
static u8 get_cs_inst_id(struct mce *m)
{
+ /* MI300: static mapping table for MCA_IPID[InstanceId] to CS physical ID. */
+ if (df_cfg.rev == DF4p5 && df_cfg.flags.heterogeneous)
+ return fixup_cs_inst_id(m);
+
return FIELD_GET(UMC_CHANNEL_NUM, m->ipid);
}

--
2.25.1