2018-12-07 04:17:29

by Dongli Zhang

[permalink] [raw]
Subject: [PATCH 1/1] xen/blkback: rework connect_ring() to avoid inconsistent xenstore 'ring-page-order' set by malicious blkfront

The xenstore 'ring-page-order' is used globally for each blkback queue and
therefore should be read from xenstore only once. However, it is obtained
in read_per_ring_refs() which might be called multiple times during the
initialization of each blkback queue.

If the blkfront is malicious and the 'ring-page-order' is set to a different
value by blkfront every time before blkback reads it, this may end up at
the "WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));" in
xen_blkif_disconnect() when frontend is destroyed.

This patch reworks connect_ring() to read xenstore 'ring-page-order' only
once.

Signed-off-by: Dongli Zhang <[email protected]>
---
drivers/block/xen-blkback/xenbus.c | 49 ++++++++++++++++++++++++--------------
1 file changed, 31 insertions(+), 18 deletions(-)

diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index a4bc74e..4a8ce20 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -919,14 +919,15 @@ static void connect(struct backend_info *be)
/*
* Each ring may have multi pages, depends on "ring-page-order".
*/
-static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
+static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir,
+ bool use_ring_page_order)
{
unsigned int ring_ref[XENBUS_MAX_RING_GRANTS];
struct pending_req *req, *n;
int err, i, j;
struct xen_blkif *blkif = ring->blkif;
struct xenbus_device *dev = blkif->be->dev;
- unsigned int ring_page_order, nr_grefs, evtchn;
+ unsigned int nr_grefs, evtchn;

err = xenbus_scanf(XBT_NIL, dir, "event-channel", "%u",
&evtchn);
@@ -936,28 +937,18 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
return err;
}

- err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
- &ring_page_order);
- if (err != 1) {
+ nr_grefs = blkif->nr_ring_pages;
+
+ if (!use_ring_page_order) {
err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u", &ring_ref[0]);
if (err != 1) {
err = -EINVAL;
xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dir);
return err;
}
- nr_grefs = 1;
} else {
unsigned int i;

- if (ring_page_order > xen_blkif_max_ring_order) {
- err = -EINVAL;
- xenbus_dev_fatal(dev, err, "%s/request %d ring page order exceed max:%d",
- dir, ring_page_order,
- xen_blkif_max_ring_order);
- return err;
- }
-
- nr_grefs = 1 << ring_page_order;
for (i = 0; i < nr_grefs; i++) {
char ring_ref_name[RINGREF_NAME_LEN];

@@ -972,7 +963,6 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
}
}
}
- blkif->nr_ring_pages = nr_grefs;

for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
req = kzalloc(sizeof(*req), GFP_KERNEL);
@@ -1030,6 +1020,8 @@ static int connect_ring(struct backend_info *be)
size_t xspathsize;
const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */
unsigned int requested_num_queues = 0;
+ bool use_ring_page_order = false;
+ unsigned int ring_page_order;

pr_debug("%s %s\n", __func__, dev->otherend);

@@ -1075,8 +1067,28 @@ static int connect_ring(struct backend_info *be)
be->blkif->nr_rings, be->blkif->blk_protocol, protocol,
pers_grants ? "persistent grants" : "");

+ err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
+ &ring_page_order);
+
+ if (err != 1) {
+ be->blkif->nr_ring_pages = 1;
+ } else {
+ if (ring_page_order > xen_blkif_max_ring_order) {
+ err = -EINVAL;
+ xenbus_dev_fatal(dev, err,
+ "requested ring page order %d exceed max:%d",
+ ring_page_order,
+ xen_blkif_max_ring_order);
+ return err;
+ }
+
+ use_ring_page_order = true;
+ be->blkif->nr_ring_pages = 1 << ring_page_order;
+ }
+
if (be->blkif->nr_rings == 1)
- return read_per_ring_refs(&be->blkif->rings[0], dev->otherend);
+ return read_per_ring_refs(&be->blkif->rings[0], dev->otherend,
+ use_ring_page_order);
else {
xspathsize = strlen(dev->otherend) + xenstore_path_ext_size;
xspath = kmalloc(xspathsize, GFP_KERNEL);
@@ -1088,7 +1100,8 @@ static int connect_ring(struct backend_info *be)
for (i = 0; i < be->blkif->nr_rings; i++) {
memset(xspath, 0, xspathsize);
snprintf(xspath, xspathsize, "%s/queue-%u", dev->otherend, i);
- err = read_per_ring_refs(&be->blkif->rings[i], xspath);
+ err = read_per_ring_refs(&be->blkif->rings[i], xspath,
+ use_ring_page_order);
if (err) {
kfree(xspath);
return err;
--
2.7.4



2018-12-07 09:41:23

by Paul Durrant

[permalink] [raw]
Subject: RE: [Xen-devel] [PATCH 1/1] xen/blkback: rework connect_ring() to avoid inconsistent xenstore 'ring-page-order' set by malicious blkfront

> -----Original Message-----
> From: Xen-devel [mailto:[email protected]] On Behalf
> Of Dongli Zhang
> Sent: 07 December 2018 04:18
> To: [email protected]; [email protected]; linux-
> [email protected]
> Cc: [email protected]; Roger Pau Monne <[email protected]>;
> [email protected]
> Subject: [Xen-devel] [PATCH 1/1] xen/blkback: rework connect_ring() to
> avoid inconsistent xenstore 'ring-page-order' set by malicious blkfront
>
> The xenstore 'ring-page-order' is used globally for each blkback queue and
> therefore should be read from xenstore only once. However, it is obtained
> in read_per_ring_refs() which might be called multiple times during the
> initialization of each blkback queue.

That is certainly sub-optimal.

>
> If the blkfront is malicious and the 'ring-page-order' is set in different
> value by blkfront every time before blkback reads it, this may end up at
> the "WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));" in
> xen_blkif_disconnect() when frontend is destroyed.

I can't actually see what useful function blkif->nr_ring_pages actually performs any more. Perhaps you could actually get rid of it?

>
> This patch reworks connect_ring() to read xenstore 'ring-page-order' only
> once.

That is certainly a good thing :-)

Paul

>
> Signed-off-by: Dongli Zhang <[email protected]>
> ---
> drivers/block/xen-blkback/xenbus.c | 49 ++++++++++++++++++++++++---------
> -----
> 1 file changed, 31 insertions(+), 18 deletions(-)
>
> diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-
> blkback/xenbus.c
> index a4bc74e..4a8ce20 100644
> --- a/drivers/block/xen-blkback/xenbus.c
> +++ b/drivers/block/xen-blkback/xenbus.c
> @@ -919,14 +919,15 @@ static void connect(struct backend_info *be)
> /*
> * Each ring may have multi pages, depends on "ring-page-order".
> */
> -static int read_per_ring_refs(struct xen_blkif_ring *ring, const char
> *dir)
> +static int read_per_ring_refs(struct xen_blkif_ring *ring, const char
> *dir,
> + bool use_ring_page_order)
> {
> unsigned int ring_ref[XENBUS_MAX_RING_GRANTS];
> struct pending_req *req, *n;
> int err, i, j;
> struct xen_blkif *blkif = ring->blkif;
> struct xenbus_device *dev = blkif->be->dev;
> - unsigned int ring_page_order, nr_grefs, evtchn;
> + unsigned int nr_grefs, evtchn;
>
> err = xenbus_scanf(XBT_NIL, dir, "event-channel", "%u",
> &evtchn);
> @@ -936,28 +937,18 @@ static int read_per_ring_refs(struct xen_blkif_ring
> *ring, const char *dir)
> return err;
> }
>
> - err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
> - &ring_page_order);
> - if (err != 1) {
> + nr_grefs = blkif->nr_ring_pages;
> +
> + if (!use_ring_page_order) {
> err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u",
> &ring_ref[0]);
> if (err != 1) {
> err = -EINVAL;
> xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dir);
> return err;
> }
> - nr_grefs = 1;
> } else {
> unsigned int i;
>
> - if (ring_page_order > xen_blkif_max_ring_order) {
> - err = -EINVAL;
> - xenbus_dev_fatal(dev, err, "%s/request %d ring page
> order exceed max:%d",
> - dir, ring_page_order,
> - xen_blkif_max_ring_order);
> - return err;
> - }
> -
> - nr_grefs = 1 << ring_page_order;
> for (i = 0; i < nr_grefs; i++) {
> char ring_ref_name[RINGREF_NAME_LEN];
>
> @@ -972,7 +963,6 @@ static int read_per_ring_refs(struct xen_blkif_ring
> *ring, const char *dir)
> }
> }
> }
> - blkif->nr_ring_pages = nr_grefs;
>
> for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
> req = kzalloc(sizeof(*req), GFP_KERNEL);
> @@ -1030,6 +1020,8 @@ static int connect_ring(struct backend_info *be)
> size_t xspathsize;
> const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-
> NNN" */
> unsigned int requested_num_queues = 0;
> + bool use_ring_page_order = false;
> + unsigned int ring_page_order;
>
> pr_debug("%s %s\n", __func__, dev->otherend);
>
> @@ -1075,8 +1067,28 @@ static int connect_ring(struct backend_info *be)
> be->blkif->nr_rings, be->blkif->blk_protocol, protocol,
> pers_grants ? "persistent grants" : "");
>
> + err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
> + &ring_page_order);
> +
> + if (err != 1) {
> + be->blkif->nr_ring_pages = 1;
> + } else {
> + if (ring_page_order > xen_blkif_max_ring_order) {
> + err = -EINVAL;
> + xenbus_dev_fatal(dev, err,
> + "requested ring page order %d exceed
> max:%d",
> + ring_page_order,
> + xen_blkif_max_ring_order);
> + return err;
> + }
> +
> + use_ring_page_order = true;
> + be->blkif->nr_ring_pages = 1 << ring_page_order;
> + }
> +
> if (be->blkif->nr_rings == 1)
> - return read_per_ring_refs(&be->blkif->rings[0], dev-
> >otherend);
> + return read_per_ring_refs(&be->blkif->rings[0], dev->otherend,
> + use_ring_page_order);
> else {
> xspathsize = strlen(dev->otherend) + xenstore_path_ext_size;
> xspath = kmalloc(xspathsize, GFP_KERNEL);
> @@ -1088,7 +1100,8 @@ static int connect_ring(struct backend_info *be)
> for (i = 0; i < be->blkif->nr_rings; i++) {
> memset(xspath, 0, xspathsize);
> snprintf(xspath, xspathsize, "%s/queue-%u", dev-
> >otherend, i);
> - err = read_per_ring_refs(&be->blkif->rings[i], xspath);
> + err = read_per_ring_refs(&be->blkif->rings[i], xspath,
> + use_ring_page_order);
> if (err) {
> kfree(xspath);
> return err;
> --
> 2.7.4
>
>
> _______________________________________________
> Xen-devel mailing list
> [email protected]
> https://lists.xenproject.org/mailman/listinfo/xen-devel

2018-12-07 15:11:47

by Dongli Zhang

[permalink] [raw]
Subject: Re: [Xen-devel] [PATCH 1/1] xen/blkback: rework connect_ring() to avoid inconsistent xenstore 'ring-page-order' set by malicious blkfront

Hi Paul,

On 12/07/2018 05:39 PM, Paul Durrant wrote:
>> -----Original Message-----
>> From: Xen-devel [mailto:[email protected]] On Behalf
>> Of Dongli Zhang
>> Sent: 07 December 2018 04:18
>> To: [email protected]; [email protected]; linux-
>> [email protected]
>> Cc: [email protected]; Roger Pau Monne <[email protected]>;
>> [email protected]
>> Subject: [Xen-devel] [PATCH 1/1] xen/blkback: rework connect_ring() to
>> avoid inconsistent xenstore 'ring-page-order' set by malicious blkfront
>>
>> The xenstore 'ring-page-order' is used globally for each blkback queue and
>> therefore should be read from xenstore only once. However, it is obtained
>> in read_per_ring_refs() which might be called multiple times during the
>> initialization of each blkback queue.
>
> That is certainly sub-optimal.
>
>>
>> If the blkfront is malicious and the 'ring-page-order' is set in different
>> value by blkfront every time before blkback reads it, this may end up at
>> the "WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));" in
>> xen_blkif_disconnect() when frontend is destroyed.
>
> I can't actually see what useful function blkif->nr_ring_pages actually performs any more. Perhaps you could actually get rid of it?

How about we keep it? Other than reading from xenstore, it is the only place for
us to know the value from 'ring-page-order'.

This helps calculate the initialized number of elements on all
xen_blkif_ring->pending_free lists. That's how "WARN_ON(i !=
(XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));" is used to double check if
there is no leak of elements reclaimed from all xen_blkif_ring->pending_free.

It helps vmcore analysis as well. Given blkif->nr_ring_pages, we would be able
to double check if the number of ring buffer slots are correct.

I could not see any drawback leaving blkif->nr_ring_pages in the code.

Dongli Zhang

>
>>
>> This patch reworks connect_ring() to read xenstore 'ring-page-order' only
>> once.
>
> That is certainly a good thing :-)
>
> Paul
>
>>
>> Signed-off-by: Dongli Zhang <[email protected]>
>> ---
>> drivers/block/xen-blkback/xenbus.c | 49 ++++++++++++++++++++++++---------
>> -----
>> 1 file changed, 31 insertions(+), 18 deletions(-)
>>
>> diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-
>> blkback/xenbus.c
>> index a4bc74e..4a8ce20 100644
>> --- a/drivers/block/xen-blkback/xenbus.c
>> +++ b/drivers/block/xen-blkback/xenbus.c
>> @@ -919,14 +919,15 @@ static void connect(struct backend_info *be)
>> /*
>> * Each ring may have multi pages, depends on "ring-page-order".
>> */
>> -static int read_per_ring_refs(struct xen_blkif_ring *ring, const char
>> *dir)
>> +static int read_per_ring_refs(struct xen_blkif_ring *ring, const char
>> *dir,
>> + bool use_ring_page_order)
>> {
>> unsigned int ring_ref[XENBUS_MAX_RING_GRANTS];
>> struct pending_req *req, *n;
>> int err, i, j;
>> struct xen_blkif *blkif = ring->blkif;
>> struct xenbus_device *dev = blkif->be->dev;
>> - unsigned int ring_page_order, nr_grefs, evtchn;
>> + unsigned int nr_grefs, evtchn;
>>
>> err = xenbus_scanf(XBT_NIL, dir, "event-channel", "%u",
>> &evtchn);
>> @@ -936,28 +937,18 @@ static int read_per_ring_refs(struct xen_blkif_ring
>> *ring, const char *dir)
>> return err;
>> }
>>
>> - err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
>> - &ring_page_order);
>> - if (err != 1) {
>> + nr_grefs = blkif->nr_ring_pages;
>> +
>> + if (!use_ring_page_order) {
>> err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u",
>> &ring_ref[0]);
>> if (err != 1) {
>> err = -EINVAL;
>> xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dir);
>> return err;
>> }
>> - nr_grefs = 1;
>> } else {
>> unsigned int i;
>>
>> - if (ring_page_order > xen_blkif_max_ring_order) {
>> - err = -EINVAL;
>> - xenbus_dev_fatal(dev, err, "%s/request %d ring page
>> order exceed max:%d",
>> - dir, ring_page_order,
>> - xen_blkif_max_ring_order);
>> - return err;
>> - }
>> -
>> - nr_grefs = 1 << ring_page_order;
>> for (i = 0; i < nr_grefs; i++) {
>> char ring_ref_name[RINGREF_NAME_LEN];
>>
>> @@ -972,7 +963,6 @@ static int read_per_ring_refs(struct xen_blkif_ring
>> *ring, const char *dir)
>> }
>> }
>> }
>> - blkif->nr_ring_pages = nr_grefs;
>>
>> for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
>> req = kzalloc(sizeof(*req), GFP_KERNEL);
>> @@ -1030,6 +1020,8 @@ static int connect_ring(struct backend_info *be)
>> size_t xspathsize;
>> const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-
>> NNN" */
>> unsigned int requested_num_queues = 0;
>> + bool use_ring_page_order = false;
>> + unsigned int ring_page_order;
>>
>> pr_debug("%s %s\n", __func__, dev->otherend);
>>
>> @@ -1075,8 +1067,28 @@ static int connect_ring(struct backend_info *be)
>> be->blkif->nr_rings, be->blkif->blk_protocol, protocol,
>> pers_grants ? "persistent grants" : "");
>>
>> + err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
>> + &ring_page_order);
>> +
>> + if (err != 1) {
>> + be->blkif->nr_ring_pages = 1;
>> + } else {
>> + if (ring_page_order > xen_blkif_max_ring_order) {
>> + err = -EINVAL;
>> + xenbus_dev_fatal(dev, err,
>> + "requested ring page order %d exceed
>> max:%d",
>> + ring_page_order,
>> + xen_blkif_max_ring_order);
>> + return err;
>> + }
>> +
>> + use_ring_page_order = true;
>> + be->blkif->nr_ring_pages = 1 << ring_page_order;
>> + }
>> +
>> if (be->blkif->nr_rings == 1)
>> - return read_per_ring_refs(&be->blkif->rings[0], dev-
>>> otherend);
>> + return read_per_ring_refs(&be->blkif->rings[0], dev->otherend,
>> + use_ring_page_order);
>> else {
>> xspathsize = strlen(dev->otherend) + xenstore_path_ext_size;
>> xspath = kmalloc(xspathsize, GFP_KERNEL);
>> @@ -1088,7 +1100,8 @@ static int connect_ring(struct backend_info *be)
>> for (i = 0; i < be->blkif->nr_rings; i++) {
>> memset(xspath, 0, xspathsize);
>> snprintf(xspath, xspathsize, "%s/queue-%u", dev-
>>> otherend, i);
>> - err = read_per_ring_refs(&be->blkif->rings[i], xspath);
>> + err = read_per_ring_refs(&be->blkif->rings[i], xspath,
>> + use_ring_page_order);
>> if (err) {
>> kfree(xspath);
>> return err;
>> --
>> 2.7.4
>>
>>
>> _______________________________________________
>> Xen-devel mailing list
>> [email protected]
>> https://lists.xenproject.org/mailman/listinfo/xen-devel

2018-12-07 15:16:55

by Paul Durrant

[permalink] [raw]
Subject: RE: [Xen-devel] [PATCH 1/1] xen/blkback: rework connect_ring() to avoid inconsistent xenstore 'ring-page-order' set by malicious blkfront

> -----Original Message-----
> From: Dongli Zhang [mailto:[email protected]]
> Sent: 07 December 2018 15:10
> To: Paul Durrant <[email protected]>; [email protected];
> [email protected]; [email protected]
> Cc: [email protected]; Roger Pau Monne <[email protected]>;
> [email protected]
> Subject: Re: [Xen-devel] [PATCH 1/1] xen/blkback: rework connect_ring() to
> avoid inconsistent xenstore 'ring-page-order' set by malicious blkfront
>
> Hi Paul,
>
> On 12/07/2018 05:39 PM, Paul Durrant wrote:
> >> -----Original Message-----
> >> From: Xen-devel [mailto:[email protected]] On
> Behalf
> >> Of Dongli Zhang
> >> Sent: 07 December 2018 04:18
> >> To: [email protected]; [email protected];
> linux-
> >> [email protected]
> >> Cc: [email protected]; Roger Pau Monne <[email protected]>;
> >> [email protected]
> >> Subject: [Xen-devel] [PATCH 1/1] xen/blkback: rework connect_ring() to
> >> avoid inconsistent xenstore 'ring-page-order' set by malicious blkfront
> >>
> >> The xenstore 'ring-page-order' is used globally for each blkback queue
> and
> >> therefore should be read from xenstore only once. However, it is
> obtained
> >> in read_per_ring_refs() which might be called multiple times during the
> >> initialization of each blkback queue.
> >
> > That is certainly sub-optimal.
> >
> >>
> >> If the blkfront is malicious and the 'ring-page-order' is set in
> different
> >> value by blkfront every time before blkback reads it, this may end up
> at
> >> the "WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));"
> in
> >> xen_blkif_disconnect() when frontend is destroyed.
> >
> > I can't actually see what useful function blkif->nr_ring_pages actually
> performs any more. Perhaps you could actually get rid of it?
>
> How about we keep it? Other than reading from xenstore, it is the only
> place for
> us to know the value from 'ring-page-order'.
>
> This helps calculate the initialized number of elements on all
> xen_blkif_ring->pending_free lists. That's how "WARN_ON(i !=
> (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));" is used to double
> check if
> there is no leak of elements reclaimed from all xen_blkif_ring-
> >pending_free.
>
> It helps vmcore analysis as well. Given blkif->nr_ring_pages, we would be
> able
> to double check if the number of ring buffer slots are correct.
>
> I could not see any drawback leaving blkif->nr_ring_pagesin the code.

No, there's no drawback apart from space, but apart from that cross-check and, as you say, core analysis it seems to have little value.

Paul

>
> Dongli Zhang
>
> >
> >>
> >> This patch reworks connect_ring() to read xenstore 'ring-page-order'
> only
> >> once.
> >
> > That is certainly a good thing :-)
> >
> > Paul
> >
> >>
> >> Signed-off-by: Dongli Zhang <[email protected]>
> >> ---
> >> drivers/block/xen-blkback/xenbus.c | 49 ++++++++++++++++++++++++------
> ---
> >> -----
> >> 1 file changed, 31 insertions(+), 18 deletions(-)
> >>
> >> diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-
> >> blkback/xenbus.c
> >> index a4bc74e..4a8ce20 100644
> >> --- a/drivers/block/xen-blkback/xenbus.c
> >> +++ b/drivers/block/xen-blkback/xenbus.c
> >> @@ -919,14 +919,15 @@ static void connect(struct backend_info *be)
> >> /*
> >> * Each ring may have multi pages, depends on "ring-page-order".
> >> */
> >> -static int read_per_ring_refs(struct xen_blkif_ring *ring, const char
> >> *dir)
> >> +static int read_per_ring_refs(struct xen_blkif_ring *ring, const char
> >> *dir,
> >> + bool use_ring_page_order)
> >> {
> >> unsigned int ring_ref[XENBUS_MAX_RING_GRANTS];
> >> struct pending_req *req, *n;
> >> int err, i, j;
> >> struct xen_blkif *blkif = ring->blkif;
> >> struct xenbus_device *dev = blkif->be->dev;
> >> - unsigned int ring_page_order, nr_grefs, evtchn;
> >> + unsigned int nr_grefs, evtchn;
> >>
> >> err = xenbus_scanf(XBT_NIL, dir, "event-channel", "%u",
> >> &evtchn);
> >> @@ -936,28 +937,18 @@ static int read_per_ring_refs(struct
> xen_blkif_ring
> >> *ring, const char *dir)
> >> return err;
> >> }
> >>
> >> - err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
> >> - &ring_page_order);
> >> - if (err != 1) {
> >> + nr_grefs = blkif->nr_ring_pages;
> >> +
> >> + if (!use_ring_page_order) {
> >> err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u",
> >> &ring_ref[0]);
> >> if (err != 1) {
> >> err = -EINVAL;
> >> xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dir);
> >> return err;
> >> }
> >> - nr_grefs = 1;
> >> } else {
> >> unsigned int i;
> >>
> >> - if (ring_page_order > xen_blkif_max_ring_order) {
> >> - err = -EINVAL;
> >> - xenbus_dev_fatal(dev, err, "%s/request %d ring page
> >> order exceed max:%d",
> >> - dir, ring_page_order,
> >> - xen_blkif_max_ring_order);
> >> - return err;
> >> - }
> >> -
> >> - nr_grefs = 1 << ring_page_order;
> >> for (i = 0; i < nr_grefs; i++) {
> >> char ring_ref_name[RINGREF_NAME_LEN];
> >>
> >> @@ -972,7 +963,6 @@ static int read_per_ring_refs(struct xen_blkif_ring
> >> *ring, const char *dir)
> >> }
> >> }
> >> }
> >> - blkif->nr_ring_pages = nr_grefs;
> >>
> >> for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
> >> req = kzalloc(sizeof(*req), GFP_KERNEL);
> >> @@ -1030,6 +1020,8 @@ static int connect_ring(struct backend_info *be)
> >> size_t xspathsize;
> >> const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-
> >> NNN" */
> >> unsigned int requested_num_queues = 0;
> >> + bool use_ring_page_order = false;
> >> + unsigned int ring_page_order;
> >>
> >> pr_debug("%s %s\n", __func__, dev->otherend);
> >>
> >> @@ -1075,8 +1067,28 @@ static int connect_ring(struct backend_info *be)
> >> be->blkif->nr_rings, be->blkif->blk_protocol, protocol,
> >> pers_grants ? "persistent grants" : "");
> >>
> >> + err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
> >> + &ring_page_order);
> >> +
> >> + if (err != 1) {
> >> + be->blkif->nr_ring_pages = 1;
> >> + } else {
> >> + if (ring_page_order > xen_blkif_max_ring_order) {
> >> + err = -EINVAL;
> >> + xenbus_dev_fatal(dev, err,
> >> + "requested ring page order %d exceed
> >> max:%d",
> >> + ring_page_order,
> >> + xen_blkif_max_ring_order);
> >> + return err;
> >> + }
> >> +
> >> + use_ring_page_order = true;
> >> + be->blkif->nr_ring_pages = 1 << ring_page_order;
> >> + }
> >> +
> >> if (be->blkif->nr_rings == 1)
> >> - return read_per_ring_refs(&be->blkif->rings[0], dev-
> >>> otherend);
> >> + return read_per_ring_refs(&be->blkif->rings[0], dev->otherend,
> >> + use_ring_page_order);
> >> else {
> >> xspathsize = strlen(dev->otherend) + xenstore_path_ext_size;
> >> xspath = kmalloc(xspathsize, GFP_KERNEL);
> >> @@ -1088,7 +1100,8 @@ static int connect_ring(struct backend_info *be)
> >> for (i = 0; i < be->blkif->nr_rings; i++) {
> >> memset(xspath, 0, xspathsize);
> >> snprintf(xspath, xspathsize, "%s/queue-%u", dev-
> >>> otherend, i);
> >> - err = read_per_ring_refs(&be->blkif->rings[i], xspath);
> >> + err = read_per_ring_refs(&be->blkif->rings[i], xspath,
> >> + use_ring_page_order);
> >> if (err) {
> >> kfree(xspath);
> >> return err;
> >> --
> >> 2.7.4
> >>
> >>
> >> _______________________________________________
> >> Xen-devel mailing list
> >> [email protected]
> >> https://lists.xenproject.org/mailman/listinfo/xen-devel

2018-12-07 15:52:48

by Dongli Zhang

[permalink] [raw]
Subject: Re: [Xen-devel] [PATCH 1/1] xen/blkback: rework connect_ring() to avoid inconsistent xenstore 'ring-page-order' set by malicious blkfront



On 12/07/2018 11:15 PM, Paul Durrant wrote:
>> -----Original Message-----
>> From: Dongli Zhang [mailto:[email protected]]
>> Sent: 07 December 2018 15:10
>> To: Paul Durrant <[email protected]>; [email protected];
>> [email protected]; [email protected]
>> Cc: [email protected]; Roger Pau Monne <[email protected]>;
>> [email protected]
>> Subject: Re: [Xen-devel] [PATCH 1/1] xen/blkback: rework connect_ring() to
>> avoid inconsistent xenstore 'ring-page-order' set by malicious blkfront
>>
>> Hi Paul,
>>
>> On 12/07/2018 05:39 PM, Paul Durrant wrote:
>>>> -----Original Message-----
>>>> From: Xen-devel [mailto:[email protected]] On
>> Behalf
>>>> Of Dongli Zhang
>>>> Sent: 07 December 2018 04:18
>>>> To: [email protected]; [email protected];
>> linux-
>>>> [email protected]
>>>> Cc: [email protected]; Roger Pau Monne <[email protected]>;
>>>> [email protected]
>>>> Subject: [Xen-devel] [PATCH 1/1] xen/blkback: rework connect_ring() to
>>>> avoid inconsistent xenstore 'ring-page-order' set by malicious blkfront
>>>>
>>>> The xenstore 'ring-page-order' is used globally for each blkback queue
>> and
>>>> therefore should be read from xenstore only once. However, it is
>> obtained
>>>> in read_per_ring_refs() which might be called multiple times during the
>>>> initialization of each blkback queue.
>>>
>>> That is certainly sub-optimal.
>>>
>>>>
>>>> If the blkfront is malicious and the 'ring-page-order' is set in
>> different
>>>> value by blkfront every time before blkback reads it, this may end up
>> at
>>>> the "WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));"
>> in
>>>> xen_blkif_disconnect() when frontend is destroyed.
>>>
>>> I can't actually see what useful function blkif->nr_ring_pages actually
>> performs any more. Perhaps you could actually get rid of it?
>>
>> How about we keep it? Other than reading from xenstore, it is the only
>> place for
>> us to know the value from 'ring-page-order'.
>>
>> This helps calculate the initialized number of elements on all
>> xen_blkif_ring->pending_free lists. That's how "WARN_ON(i !=
>> (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));" is used to double
>> check if
>> there is no leak of elements reclaimed from all xen_blkif_ring-
>>> pending_free.
>>
>> It helps vmcore analysis as well. Given blkif->nr_ring_pages, we would be
>> able
>> to double check if the number of ring buffer slots are correct.
>>
>> I could not see any drawback leaving blkif->nr_ring_pagesin the code.
>
> No, there's no drawback apart from space, but apart from that cross-check and, as you say, core analysis it seems to have little value.
>
> Paul

I will not remove blkif->nr_ring_pages and leave the current patch waiting for
review.

Dongli Zhang



>
>>
>> Dongli Zhang
>>
>>>
>>>>
>>>> This patch reworks connect_ring() to read xenstore 'ring-page-order'
>> only
>>>> once.
>>>
>>> That is certainly a good thing :-)
>>>
>>> Paul
>>>
>>>>
>>>> Signed-off-by: Dongli Zhang <[email protected]>
>>>> ---
>>>> drivers/block/xen-blkback/xenbus.c | 49 ++++++++++++++++++++++++------
>> ---
>>>> -----
>>>> 1 file changed, 31 insertions(+), 18 deletions(-)
>>>>
>>>> diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-
>>>> blkback/xenbus.c
>>>> index a4bc74e..4a8ce20 100644
>>>> --- a/drivers/block/xen-blkback/xenbus.c
>>>> +++ b/drivers/block/xen-blkback/xenbus.c
>>>> @@ -919,14 +919,15 @@ static void connect(struct backend_info *be)
>>>> /*
>>>> * Each ring may have multi pages, depends on "ring-page-order".
>>>> */
>>>> -static int read_per_ring_refs(struct xen_blkif_ring *ring, const char
>>>> *dir)
>>>> +static int read_per_ring_refs(struct xen_blkif_ring *ring, const char
>>>> *dir,
>>>> + bool use_ring_page_order)
>>>> {
>>>> unsigned int ring_ref[XENBUS_MAX_RING_GRANTS];
>>>> struct pending_req *req, *n;
>>>> int err, i, j;
>>>> struct xen_blkif *blkif = ring->blkif;
>>>> struct xenbus_device *dev = blkif->be->dev;
>>>> - unsigned int ring_page_order, nr_grefs, evtchn;
>>>> + unsigned int nr_grefs, evtchn;
>>>>
>>>> err = xenbus_scanf(XBT_NIL, dir, "event-channel", "%u",
>>>> &evtchn);
>>>> @@ -936,28 +937,18 @@ static int read_per_ring_refs(struct
>> xen_blkif_ring
>>>> *ring, const char *dir)
>>>> return err;
>>>> }
>>>>
>>>> - err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
>>>> - &ring_page_order);
>>>> - if (err != 1) {
>>>> + nr_grefs = blkif->nr_ring_pages;
>>>> +
>>>> + if (!use_ring_page_order) {
>>>> err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u",
>>>> &ring_ref[0]);
>>>> if (err != 1) {
>>>> err = -EINVAL;
>>>> xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dir);
>>>> return err;
>>>> }
>>>> - nr_grefs = 1;
>>>> } else {
>>>> unsigned int i;
>>>>
>>>> - if (ring_page_order > xen_blkif_max_ring_order) {
>>>> - err = -EINVAL;
>>>> - xenbus_dev_fatal(dev, err, "%s/request %d ring page
>>>> order exceed max:%d",
>>>> - dir, ring_page_order,
>>>> - xen_blkif_max_ring_order);
>>>> - return err;
>>>> - }
>>>> -
>>>> - nr_grefs = 1 << ring_page_order;
>>>> for (i = 0; i < nr_grefs; i++) {
>>>> char ring_ref_name[RINGREF_NAME_LEN];
>>>>
>>>> @@ -972,7 +963,6 @@ static int read_per_ring_refs(struct xen_blkif_ring
>>>> *ring, const char *dir)
>>>> }
>>>> }
>>>> }
>>>> - blkif->nr_ring_pages = nr_grefs;
>>>>
>>>> for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
>>>> req = kzalloc(sizeof(*req), GFP_KERNEL);
>>>> @@ -1030,6 +1020,8 @@ static int connect_ring(struct backend_info *be)
>>>> size_t xspathsize;
>>>> const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-
>>>> NNN" */
>>>> unsigned int requested_num_queues = 0;
>>>> + bool use_ring_page_order = false;
>>>> + unsigned int ring_page_order;
>>>>
>>>> pr_debug("%s %s\n", __func__, dev->otherend);
>>>>
>>>> @@ -1075,8 +1067,28 @@ static int connect_ring(struct backend_info *be)
>>>> be->blkif->nr_rings, be->blkif->blk_protocol, protocol,
>>>> pers_grants ? "persistent grants" : "");
>>>>
>>>> + err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
>>>> + &ring_page_order);
>>>> +
>>>> + if (err != 1) {
>>>> + be->blkif->nr_ring_pages = 1;
>>>> + } else {
>>>> + if (ring_page_order > xen_blkif_max_ring_order) {
>>>> + err = -EINVAL;
>>>> + xenbus_dev_fatal(dev, err,
>>>> + "requested ring page order %d exceed
>>>> max:%d",
>>>> + ring_page_order,
>>>> + xen_blkif_max_ring_order);
>>>> + return err;
>>>> + }
>>>> +
>>>> + use_ring_page_order = true;
>>>> + be->blkif->nr_ring_pages = 1 << ring_page_order;
>>>> + }
>>>> +
>>>> if (be->blkif->nr_rings == 1)
>>>> - return read_per_ring_refs(&be->blkif->rings[0], dev-
>>>>> otherend);
>>>> + return read_per_ring_refs(&be->blkif->rings[0], dev->otherend,
>>>> + use_ring_page_order);
>>>> else {
>>>> xspathsize = strlen(dev->otherend) + xenstore_path_ext_size;
>>>> xspath = kmalloc(xspathsize, GFP_KERNEL);
>>>> @@ -1088,7 +1100,8 @@ static int connect_ring(struct backend_info *be)
>>>> for (i = 0; i < be->blkif->nr_rings; i++) {
>>>> memset(xspath, 0, xspathsize);
>>>> snprintf(xspath, xspathsize, "%s/queue-%u", dev-
>>>>> otherend, i);
>>>> - err = read_per_ring_refs(&be->blkif->rings[i], xspath);
>>>> + err = read_per_ring_refs(&be->blkif->rings[i], xspath,
>>>> + use_ring_page_order);
>>>> if (err) {
>>>> kfree(xspath);
>>>> return err;
>>>> --
>>>> 2.7.4
>>>>
>>>>
>>>> _______________________________________________
>>>> Xen-devel mailing list
>>>> [email protected]
>>>> https://lists.xenproject.org/mailman/listinfo/xen-devel

2018-12-10 16:22:25

by Roger Pau Monne

[permalink] [raw]
Subject: Re: [PATCH 1/1] xen/blkback: rework connect_ring() to avoid inconsistent xenstore 'ring-page-order' set by malicious blkfront

On Fri, Dec 07, 2018 at 12:18:04PM +0800, Dongli Zhang wrote:
> The xenstore 'ring-page-order' is used globally for each blkback queue and
> therefore should be read from xenstore only once. However, it is obtained
> in read_per_ring_refs() which might be called multiple times during the
> initialization of each blkback queue.
>
> If the blkfront is malicious and the 'ring-page-order' is set in different
> value by blkfront every time before blkback reads it, this may end up at
> the "WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));" in
> xen_blkif_disconnect() when frontend is destroyed.
>
> This patch reworks connect_ring() to read xenstore 'ring-page-order' only
> once.
>
> Signed-off-by: Dongli Zhang <[email protected]>
> ---
> drivers/block/xen-blkback/xenbus.c | 49 ++++++++++++++++++++++++--------------
> 1 file changed, 31 insertions(+), 18 deletions(-)
>
> diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
> index a4bc74e..4a8ce20 100644
> --- a/drivers/block/xen-blkback/xenbus.c
> +++ b/drivers/block/xen-blkback/xenbus.c
> @@ -919,14 +919,15 @@ static void connect(struct backend_info *be)
> /*
> * Each ring may have multi pages, depends on "ring-page-order".
> */
> -static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
> +static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir,
> + bool use_ring_page_order)

Why not change the order of read_per_ring_refs so that it tries to
fetch the grant references from "ring-refXX" first and then fallback
to reading the legacy grant reference if nr_ring_pages == 1?

This will avoid having to pass an extra parameter to it.

> {
> unsigned int ring_ref[XENBUS_MAX_RING_GRANTS];
> struct pending_req *req, *n;
> int err, i, j;
> struct xen_blkif *blkif = ring->blkif;
> struct xenbus_device *dev = blkif->be->dev;
> - unsigned int ring_page_order, nr_grefs, evtchn;
> + unsigned int nr_grefs, evtchn;
>
> err = xenbus_scanf(XBT_NIL, dir, "event-channel", "%u",
> &evtchn);
> @@ -936,28 +937,18 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
> return err;
> }
>
> - err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
> - &ring_page_order);
> - if (err != 1) {
> + nr_grefs = blkif->nr_ring_pages;
> +
> + if (!use_ring_page_order) {
> err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u", &ring_ref[0]);
> if (err != 1) {
> err = -EINVAL;
> xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dir);
> return err;
> }
> - nr_grefs = 1;
> } else {
> unsigned int i;
>
> - if (ring_page_order > xen_blkif_max_ring_order) {
> - err = -EINVAL;
> - xenbus_dev_fatal(dev, err, "%s/request %d ring page order exceed max:%d",
> - dir, ring_page_order,
> - xen_blkif_max_ring_order);
> - return err;
> - }
> -
> - nr_grefs = 1 << ring_page_order;
> for (i = 0; i < nr_grefs; i++) {
> char ring_ref_name[RINGREF_NAME_LEN];
>
> @@ -972,7 +963,6 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
> }
> }
> }
> - blkif->nr_ring_pages = nr_grefs;
>
> for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
> req = kzalloc(sizeof(*req), GFP_KERNEL);
> @@ -1030,6 +1020,8 @@ static int connect_ring(struct backend_info *be)
> size_t xspathsize;
> const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */
> unsigned int requested_num_queues = 0;
> + bool use_ring_page_order = false;
> + unsigned int ring_page_order;
>
> pr_debug("%s %s\n", __func__, dev->otherend);
>
> @@ -1075,8 +1067,28 @@ static int connect_ring(struct backend_info *be)
> be->blkif->nr_rings, be->blkif->blk_protocol, protocol,
> pers_grants ? "persistent grants" : "");
>
> + err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
> + &ring_page_order);

You can use xenbus_read_unsigned IMO, which will simplify the logic
below a little bit.

Thanks, Roger.