The problem is that create_active() is called with a spin lock held and
allocates memory with GFP_KERNEL. The call tree is:

pvcalls_front_accept() holds bedata->socket_lock.
 -> create_active()
    -> __get_free_pages() uses GFP_KERNEL

create_active() is called from pvcalls_front_accept() with the spin lock
held. The allocation is not allowed to sleep in that context, so
GFP_KERNEL is not sufficient.
This issue was detected using the Coccinelle software.
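
For illustration, a minimal sketch of the offending pattern (the names
demo_lock and broken_alloc are hypothetical; this is not the pvcalls code):
spin_lock() enters atomic context, while a GFP_KERNEL allocation may block
for memory reclaim, i.e. it may sleep while the lock is held.

  #include <linux/gfp.h>
  #include <linux/spinlock.h>

  static DEFINE_SPINLOCK(demo_lock);

  static void *broken_alloc(unsigned int order)
  {
          void *pages;

          spin_lock(&demo_lock);
          /* BUG: GFP_KERNEL may sleep, but this is atomic context */
          pages = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
          spin_unlock(&demo_lock);

          return pages;
  }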
v2: Add a function that does the allocations, call it outside the lock,
    and pass the allocated data to create_active().
v3: Use the matching deallocators, i.e. free_page() and free_pages(),
    respectively.
v4: Pre-populate map (struct sock_mapping) rather than introducing
    another new struct.
v5: Since the data is allocated outside of create_active(), it should
    also be freed outside of create_active() when that call fails.
    Move the kzalloc(sizeof(*map2), GFP_ATOMIC) call outside the spinlock
    and use GFP_KERNEL instead; the resulting ordering is sketched below.
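
In short, the resulting ordering is: perform every allocation that may
sleep before taking bedata->socket_lock, and let the callers (rather than
create_active()) free the buffers again if a later step fails. Below is a
condensed, hypothetical sketch of the accept path (demo_accept_path is a
made-up name and the body assumes the driver's existing declarations;
kzalloc(), alloc_active_ring(), free_active_ring(), get_request() and
create_active() are the calls used by this patch):

  static int demo_accept_path(struct pvcalls_bedata *bedata)
  {
          struct sock_mapping *map2;
          int ret, evtchn, req_id;

          map2 = kzalloc(sizeof(*map2), GFP_KERNEL); /* may sleep: lock not held yet */
          if (map2 == NULL)
                  return -ENOMEM;

          ret = alloc_active_ring(map2);          /* GFP_KERNEL, still unlocked */
          if (ret < 0) {
                  kfree(map2);
                  return ret;
          }

          spin_lock(&bedata->socket_lock);        /* only non-sleeping work below */
          ret = get_request(bedata, &req_id);
          if (ret == 0)
                  ret = create_active(map2, &evtchn);
          spin_unlock(&bedata->socket_lock);

          if (ret < 0) {
                  free_active_ring(map2);         /* the caller frees on failure */
                  kfree(map2);
                  return ret;
          }

          /* ... issue the PVCALLS_ACCEPT request and install map2 as before ... */
          return 0;
  }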
Suggested-by: Juergen Gross <[email protected]>
Suggested-by: Boris Ostrovsky <[email protected]>
Suggested-by: Stefano Stabellini <[email protected]>
Signed-off-by: Wen Yang <[email protected]>
CC: Julia Lawall <[email protected]>
CC: Boris Ostrovsky <[email protected]>
CC: Juergen Gross <[email protected]>
CC: Stefano Stabellini <[email protected]>
CC: [email protected]
CC: [email protected]
---
drivers/xen/pvcalls-front.c | 83 +++++++++++++++++++++++++++----------
1 file changed, 61 insertions(+), 22 deletions(-)
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index 77224d8f3e6f..5fd764a7b879 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -335,6 +335,41 @@ int pvcalls_front_socket(struct socket *sock)
return ret;
}
+static void free_active_ring(struct sock_mapping *map)
+{
+ free_pages((unsigned long)map->active.data.in,
+ map->active.ring->ring_order);
+ free_page((unsigned long)map->active.ring);
+}
+
+static int alloc_active_ring(struct sock_mapping *map)
+{
+ void *bytes;
+
+ map->active.ring = NULL;
+ map->active.data.in = NULL;
+ map->active.ring = (struct pvcalls_data_intf *)
+ get_zeroed_page(GFP_KERNEL);
+ if (!map->active.ring)
+ goto out;
+
+ map->active.ring->ring_order = PVCALLS_RING_ORDER;
+ bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ PVCALLS_RING_ORDER);
+ if (!bytes)
+ goto out;
+
+ map->active.data.in = bytes;
+ map->active.data.out = bytes +
+ XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
+
+ return 0;
+
+out:
+ free_active_ring(map);
+ return -ENOMEM;
+}
+
static int create_active(struct sock_mapping *map, int *evtchn)
{
void *bytes;
@@ -343,15 +378,7 @@ static int create_active(struct sock_mapping *map, int *evtchn)
*evtchn = -1;
init_waitqueue_head(&map->active.inflight_conn_req);
- map->active.ring = (struct pvcalls_data_intf *)
- __get_free_page(GFP_KERNEL | __GFP_ZERO);
- if (map->active.ring == NULL)
- goto out_error;
- map->active.ring->ring_order = PVCALLS_RING_ORDER;
- bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
- PVCALLS_RING_ORDER);
- if (bytes == NULL)
- goto out_error;
+ bytes = map->active.data.in;
for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
map->active.ring->ref[i] = gnttab_grant_foreign_access(
pvcalls_front_dev->otherend_id,
@@ -361,10 +388,6 @@ static int create_active(struct sock_mapping *map, int *evtchn)
pvcalls_front_dev->otherend_id,
pfn_to_gfn(virt_to_pfn((void *)map->active.ring)), 0);
- map->active.data.in = bytes;
- map->active.data.out = bytes +
- XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
-
ret = xenbus_alloc_evtchn(pvcalls_front_dev, evtchn);
if (ret)
goto out_error;
@@ -385,8 +408,6 @@ static int create_active(struct sock_mapping *map, int *evtchn)
out_error:
if (*evtchn >= 0)
xenbus_free_evtchn(pvcalls_front_dev, *evtchn);
- free_pages((unsigned long)map->active.data.in, PVCALLS_RING_ORDER);
- free_page((unsigned long)map->active.ring);
return ret;
}
@@ -406,17 +427,24 @@ int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
return PTR_ERR(map);
bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
+ ret = alloc_active_ring(map);
+ if (ret < 0) {
+ pvcalls_exit_sock(sock);
+ return ret;
+ }
spin_lock(&bedata->socket_lock);
ret = get_request(bedata, &req_id);
if (ret < 0) {
spin_unlock(&bedata->socket_lock);
+ free_active_ring(map);
pvcalls_exit_sock(sock);
return ret;
}
ret = create_active(map, &evtchn);
if (ret < 0) {
spin_unlock(&bedata->socket_lock);
+ free_active_ring(map);
pvcalls_exit_sock(sock);
return ret;
}
@@ -780,25 +808,36 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
}
}
- spin_lock(&bedata->socket_lock);
- ret = get_request(bedata, &req_id);
- if (ret < 0) {
+ map2 = kzalloc(sizeof(*map2), GFP_KERNEL);
+ if (map2 == NULL) {
clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
(void *)&map->passive.flags);
- spin_unlock(&bedata->socket_lock);
+ pvcalls_exit_sock(sock);
+ return -ENOMEM;
+ }
+ ret = alloc_active_ring(map2);
+ if (ret < 0) {
+ clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
+ (void *)&map->passive.flags);
+ kfree(map2);
pvcalls_exit_sock(sock);
return ret;
}
- map2 = kzalloc(sizeof(*map2), GFP_ATOMIC);
- if (map2 == NULL) {
+ spin_lock(&bedata->socket_lock);
+ ret = get_request(bedata, &req_id);
+ if (ret < 0) {
clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
(void *)&map->passive.flags);
spin_unlock(&bedata->socket_lock);
+ free_active_ring(map2);
+ kfree(map2);
pvcalls_exit_sock(sock);
- return -ENOMEM;
+ return ret;
}
+
ret = create_active(map2, &evtchn);
if (ret < 0) {
+ free_active_ring(map2);
kfree(map2);
clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
(void *)&map->passive.flags);
--
2.19.1
On Tue, 4 Dec 2018, Wen Yang wrote:
> [...]
> +static int alloc_active_ring(struct sock_mapping *map)
> +{
> + void *bytes;
> +
> + map->active.ring = NULL;
This is superfluous
Aside from this:
Acked-by: Stefano Stabellini <[email protected]>