Following the advice of Peter, Thomas, Joe and David, try to fix all memory
allocation failure handling in tlb_uv.c.
Compile-tested on x86_64.
Signed-off-by: Zhouyi Zhou <[email protected]>
---
arch/x86/platform/uv/tlb_uv.c | 122 +++++++++++++++++++++++++++++------------
1 file changed, 87 insertions(+), 35 deletions(-)
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index dfe605a..1a45dfa 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1680,7 +1680,7 @@ static int __init uv_ptc_init(void)
/*
* Initialize the sending side's sending buffers.
*/
-static void activation_descriptor_init(int node, int pnode, int base_pnode)
+static int activation_descriptor_init(int node, int pnode, int base_pnode)
{
int i;
int cpu;
@@ -1701,7 +1701,9 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
*/
dsize = sizeof(struct bau_desc) * ADP_SZ * ITEMS_PER_DESC;
bau_desc = kmalloc_node(dsize, GFP_KERNEL, node);
- BUG_ON(!bau_desc);
+ if (!bau_desc)
+ return -ENOMEM;
+
gpa = uv_gpa(bau_desc);
n = uv_gpa_to_gnode(gpa);
@@ -1756,6 +1758,8 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
bcp = &per_cpu(bau_control, cpu);
bcp->descriptor_base = bau_desc;
}
+
+ return 0;
}
/*
@@ -1764,7 +1768,7 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
* - node is first node (kernel memory notion) on the uvhub
* - pnode is the uvhub's physical identifier
*/
-static void pq_init(int node, int pnode)
+static int pq_init(int node, int pnode)
{
int cpu;
size_t plsize;
@@ -1776,12 +1780,16 @@ static void pq_init(int node, int pnode)
unsigned long last;
struct bau_pq_entry *pqp;
struct bau_control *bcp;
+ int ret = 0;
plsize = (DEST_Q_SIZE + 1) * sizeof(struct bau_pq_entry);
vp = kmalloc_node(plsize, GFP_KERNEL, node);
- pqp = (struct bau_pq_entry *)vp;
- BUG_ON(!pqp);
+ if (!vp) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ pqp = (struct bau_pq_entry *)vp;
cp = (char *)pqp + 31;
pqp = (struct bau_pq_entry *)(((unsigned long)cp >> 5) << 5);
@@ -1808,29 +1816,40 @@ static void pq_init(int node, int pnode)
/* in effect, all msg_type's are set to MSG_NOOP */
memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);
+
+ return 0;
+fail:
+ return ret;
}
/*
* Initialization of each UV hub's structures
*/
-static void __init init_uvhub(int uvhub, int vector, int base_pnode)
+static int __init init_uvhub(int uvhub, int vector, int base_pnode)
{
int node;
int pnode;
unsigned long apicid;
+ int ret;
node = uvhub_to_first_node(uvhub);
pnode = uv_blade_to_pnode(uvhub);
- activation_descriptor_init(node, pnode, base_pnode);
+ ret = activation_descriptor_init(node, pnode, base_pnode);
+ if (ret)
+ return ret;
- pq_init(node, pnode);
+ ret = pq_init(node, pnode);
+ if (ret)
+ return ret;
/*
* The below initialization can't be in firmware because the
* messaging IRQ will be determined by the OS.
*/
apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits;
write_mmr_data_config(pnode, ((apicid << 32) | vector));
+
+ return 0;
}
/*
@@ -1926,7 +1945,7 @@ static int __init get_cpu_topology(int base_pnode,
printk(KERN_EMERG
"cpu %d pnode %d-%d beyond %d; BAU disabled\n",
cpu, pnode, base_pnode, UV_DISTRIBUTION_SIZE);
- return 1;
+ return -EINVAL;
}
bcp->osnode = cpu_to_node(cpu);
@@ -1950,7 +1969,7 @@ static int __init get_cpu_topology(int base_pnode,
if (sdp->num_cpus > MAX_CPUS_PER_SOCKET) {
printk(KERN_EMERG "%d cpus per socket invalid\n",
sdp->num_cpus);
- return 1;
+ return -EINVAL;
}
}
return 0;
@@ -1959,29 +1978,29 @@ static int __init get_cpu_topology(int base_pnode,
/*
* Each socket is to get a local array of pnodes/hubs.
*/
-static void make_per_cpu_thp(struct bau_control *smaster)
+static int make_per_cpu_thp(struct bau_control *smaster)
{
int cpu;
size_t hpsz = sizeof(struct hub_and_pnode) * num_possible_cpus();
-
+ int ret;
smaster->thp = kmalloc_node(hpsz, GFP_KERNEL, smaster->osnode);
+ if (!smaster->thp) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
memset(smaster->thp, 0, hpsz);
for_each_present_cpu(cpu) {
smaster->thp[cpu].pnode = uv_cpu_hub_info(cpu)->pnode;
smaster->thp[cpu].uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
}
-}
-
-/*
- * Each uvhub is to get a local cpumask.
- */
-static void make_per_hub_cpumask(struct bau_control *hmaster)
-{
- int sz = sizeof(cpumask_t);
- hmaster->cpumask = kzalloc_node(sz, GFP_KERNEL, hmaster->osnode);
+ return 0;
+fail:
+ return ret;
}
+
/*
* Initialize all the per_cpu information for the cpu's on a given socket,
* given what has been gathered into the socket_desc struct.
@@ -2014,14 +2033,14 @@ static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp,
bcp->uvhub_version = 2;
else {
printk(KERN_EMERG "uvhub version not 1 or 2\n");
- return 1;
+ return -EINVAL;
}
bcp->uvhub_master = *hmasterp;
bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->blade_processor_id;
if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
printk(KERN_EMERG "%d cpus per uvhub invalid\n",
bcp->uvhub_cpu);
- return 1;
+ return -EINVAL;
}
}
return 0;
@@ -2037,6 +2056,7 @@ static int __init summarize_uvhub_sockets(int nuvhubs,
int socket;
int uvhub;
unsigned short socket_mask;
+ int ret = 0;
for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
struct uvhub_desc *bdp;
@@ -2053,16 +2073,30 @@ static int __init summarize_uvhub_sockets(int nuvhubs,
struct socket_desc *sdp;
if ((socket_mask & 1)) {
sdp = &bdp->socket[socket];
- if (scan_sock(sdp, bdp, &smaster, &hmaster))
- return 1;
- make_per_cpu_thp(smaster);
+ ret = scan_sock(sdp, bdp, &smaster, &hmaster);
+ if (ret)
+ goto fail;
+ ret = make_per_cpu_thp(smaster);
+ if (ret)
+ goto fail;
}
socket++;
socket_mask = (socket_mask >> 1);
}
- make_per_hub_cpumask(hmaster);
+/*
+ * Each uvhub is to get a local cpumask.
+ */
+ hmaster->cpumask = kzalloc_node(
+ sizeof(cpumask_t), GFP_KERNEL, hmaster->osnode);
+ if (!hmaster->cpumask) {
+ ret = -ENOMEM;
+ goto fail;
+ }
}
return 0;
+
+fail:
+ return ret;
}
/*
@@ -2073,15 +2107,25 @@ static int __init init_per_cpu(int nuvhubs, int base_part_pnode)
unsigned char *uvhub_mask;
void *vp;
struct uvhub_desc *uvhub_descs;
+ int ret = 0;
timeout_us = calculate_destination_timeout();
- vp = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
+ vp = kcalloc(nuvhubs, sizeof(struct uvhub_desc), GFP_KERNEL);
+ if (!vp) {
+ ret = -ENOMEM;
+ goto fail_descs;
+ }
uvhub_descs = (struct uvhub_desc *)vp;
- memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
+
uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
+ if (!uvhub_mask) {
+ ret = -ENOMEM;
+ goto fail_mask;
+ }
- if (get_cpu_topology(base_part_pnode, uvhub_descs, uvhub_mask))
+ ret = get_cpu_topology(base_part_pnode, uvhub_descs, uvhub_mask);
+ if (ret)
goto fail;
if (summarize_uvhub_sockets(nuvhubs, uvhub_descs, uvhub_mask))
@@ -2093,9 +2137,11 @@ static int __init init_per_cpu(int nuvhubs, int base_part_pnode)
return 0;
fail:
- kfree(uvhub_descs);
kfree(uvhub_mask);
- return 1;
+fail_mask:
+ kfree(uvhub_descs);
+fail_descs:
+ return ret;
}
/*
@@ -2110,13 +2156,16 @@ static int __init uv_bau_init(void)
int cpus;
int vector;
cpumask_var_t *mask;
+ int ret;
if (!is_uv_system())
return 0;
for_each_possible_cpu(cur_cpu) {
mask = &per_cpu(uv_flush_tlb_mask, cur_cpu);
- zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cur_cpu));
+ if (!zalloc_cpumask_var_node(
+ mask, GFP_KERNEL, cpu_to_node(cur_cpu)))
+ return -ENOMEM;
}
nuvhubs = uv_num_possible_blades();
@@ -2139,8 +2188,11 @@ static int __init uv_bau_init(void)
vector = UV_BAU_MESSAGE;
for_each_possible_blade(uvhub)
- if (uv_blade_nr_possible_cpus(uvhub))
- init_uvhub(uvhub, vector, uv_base_pnode);
+ if (uv_blade_nr_possible_cpus(uvhub)) {
+ ret = init_uvhub(uvhub, vector, uv_base_pnode);
+ if (ret)
+ return ret;
+ }
alloc_intr_gate(vector, uv_bau_message_intr1);
--
1.7.10.4
On Thu, 12 Jun 2014, Zhouyi Zhou wrote:
> Following the advice of Peter, Thomas, Joe and David, try to fix all memory
> allocation failure handling in tlb_uv.c.
>
This changelog still isn't complete; it needs to describe what specific
issues the patch is addressing: what kind of memory allocation failure?
What happens currently with such a failure? What changes to that are you
making? Why is this a helpful change?
You'll also need a space between "[PATCH v2]" and "x86/tlb_uv:" in your
subject line.
> Compile-tested on x86_64.
> Signed-off-by: Zhouyi Zhou <[email protected]>
> ---
> arch/x86/platform/uv/tlb_uv.c | 122 +++++++++++++++++++++++++++++------------
> 1 file changed, 87 insertions(+), 35 deletions(-)
>
> diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
> index dfe605a..1a45dfa 100644
> --- a/arch/x86/platform/uv/tlb_uv.c
> +++ b/arch/x86/platform/uv/tlb_uv.c
> @@ -1680,7 +1680,7 @@ static int __init uv_ptc_init(void)
> /*
> * Initialize the sending side's sending buffers.
> */
> -static void activation_descriptor_init(int node, int pnode, int base_pnode)
> +static int activation_descriptor_init(int node, int pnode, int base_pnode)
> {
> int i;
> int cpu;
> @@ -1701,7 +1701,9 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
> */
> dsize = sizeof(struct bau_desc) * ADP_SZ * ITEMS_PER_DESC;
> bau_desc = kmalloc_node(dsize, GFP_KERNEL, node);
> - BUG_ON(!bau_desc);
> + if (!bau_desc)
> + return -ENOMEM;
> +
>
> gpa = uv_gpa(bau_desc);
> n = uv_gpa_to_gnode(gpa);
This is done at boot time, so if we are really out of memory here, then
what is the net result of returning -ENOMEM to the initcall handler? In
other words, what functionality do we lose, and is that acceptable? Is
there any chance that we're left in an inconsistent state?
You've also added a blank line for no reason.
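As to the inconsistent-state question: if an allocation failure is really
going to be tolerated at this point, the init path would also have to leave
the BAU explicitly disabled instead of half-programmed. Just as a sketch of
what I mean (uv_bau_disable() is a made-up name here, not an existing
helper; whatever mechanism tlb_uv.c uses to force the non-BAU TLB flush
path would have to go in its place):

	ret = init_uvhub(uvhub, vector, uv_base_pnode);
	if (ret) {
		/*
		 * Hypothetical: make sure uv_flush_tlb_others() can
		 * never reach the partially initialized descriptors
		 * before giving up on the BAU entirely.
		 */
		uv_bau_disable();
		return ret;
	}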
> @@ -1756,6 +1758,8 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
> bcp = &per_cpu(bau_control, cpu);
> bcp->descriptor_base = bau_desc;
> }
> +
> + return 0;
> }
>
> /*
You've turned a non-failable function into a failable one, but nothing
here shows that the failure can be handled appropriately. It's not
sufficient to simply start returning an errno when the result may be
inconsistent. How have you proven that returning -ENOMEM to the initcall
handler works?
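For reference, this is roughly what the generic initcall code does with
that return value (a paraphrase of init/main.c, not the exact code):

	ret = fn();			/* uv_bau_init() in this case */
	if (ret)
		pr_warn("initcall %pF returned with error code %d\n",
			fn, ret);
	/* booting continues, nothing gets rolled back */

So the -ENOMEM buys you a warning line and nothing else; everything the
earlier init steps already wrote to per-cpu data and the MMRs is still in
place and still reachable.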
> @@ -1764,7 +1768,7 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
> * - node is first node (kernel memory notion) on the uvhub
> * - pnode is the uvhub's physical identifier
> */
> -static void pq_init(int node, int pnode)
> +static int pq_init(int node, int pnode)
> {
> int cpu;
> size_t plsize;
> @@ -1776,12 +1780,16 @@ static void pq_init(int node, int pnode)
> unsigned long last;
> struct bau_pq_entry *pqp;
> struct bau_control *bcp;
> + int ret = 0;
>
> plsize = (DEST_Q_SIZE + 1) * sizeof(struct bau_pq_entry);
> vp = kmalloc_node(plsize, GFP_KERNEL, node);
> - pqp = (struct bau_pq_entry *)vp;
> - BUG_ON(!pqp);
> + if (!vp) {
> + ret = -ENOMEM;
> + goto fail;
> + }
>
> + pqp = (struct bau_pq_entry *)vp;
> cp = (char *)pqp + 31;
> pqp = (struct bau_pq_entry *)(((unsigned long)cp >> 5) << 5);
>
> @@ -1808,29 +1816,40 @@ static void pq_init(int node, int pnode)
>
> /* in effect, all msg_type's are set to MSG_NOOP */
> memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);
> +
> + return 0;
> +fail:
> + return ret;
> }
The same concern applies to pq_init(); it also makes init_uvhub() failable
for the first time, and nothing shows that we can handle that.
>
> /*
> * Initialization of each UV hub's structures
> */
> -static void __init init_uvhub(int uvhub, int vector, int base_pnode)
> +static int __init init_uvhub(int uvhub, int vector, int base_pnode)
> {
> int node;
> int pnode;
> unsigned long apicid;
> + int ret;
>
> node = uvhub_to_first_node(uvhub);
> pnode = uv_blade_to_pnode(uvhub);
>
> - activation_descriptor_init(node, pnode, base_pnode);
> + ret = activation_descriptor_init(node, pnode, base_pnode);
> + if (ret)
> + return ret;
>
> - pq_init(node, pnode);
> + ret = pq_init(node, pnode);
> + if (ret)
> + return ret;
And now I see this entire patch is wrong because you're returning failure
without cleaning up what activation_descriptor_init() did.
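If this gets respun, the pq_init() failure path in init_uvhub() needs to
unwind the descriptor setup first, something along these lines
(free_activation_descriptors() is a made-up helper; it would have to
kfree() the bau_desc block and clear the per-cpu descriptor_base pointers
that activation_descriptor_init() installed):

	ret = activation_descriptor_init(node, pnode, base_pnode);
	if (ret)
		return ret;

	ret = pq_init(node, pnode);
	if (ret) {
		/* hypothetical helper, see above */
		free_activation_descriptors(node);
		return ret;
	}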
If you're going to suggest patches, please ensure that there is an actual
problem being resolved.