Harden the split buffer detachment path by adding boundary checking. Note
that when this fails we may fail to unmap some swiotlb mappings, which could
result in a leak and a DoS. But that's acceptable because a malicious host
can DoS us anyway.
Signed-off-by: Andi Kleen <[email protected]>
---
drivers/virtio/virtio_ring.c | 25 +++++++++++++++++++++----
1 file changed, 21 insertions(+), 4 deletions(-)
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index d37ff5a0ff58..1e9aa1e95e1b 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -651,12 +651,19 @@ static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
return needs_kick;
}
-static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
- void **ctx)
+static int detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
+ void **ctx)
{
unsigned int i, j;
__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
+ /* We'll leak DMA mappings when this happens, but nothing
+ * can be done about that. In the worst case the host
+ * could DOS us, but it can of course do that anyways.
+ */
+ if (!inside_split_ring(vq, head))
+ return -EIO;
+
/* Clear data ptr. */
vq->split.desc_state[head].data = NULL;
@@ -666,6 +673,8 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
while (vq->split.vring.desc[i].flags & nextflag) {
vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
i = virtio16_to_cpu(vq->vq.vdev, vq->split.vring.desc[i].next);
+ if (!inside_split_ring(vq, i))
+ return -EIO;
vq->vq.num_free++;
}
@@ -684,7 +693,7 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
/* Free the indirect table, if any, now that it's unmapped. */
if (!indir_desc)
- return;
+ return 0;
len = virtio32_to_cpu(vq->vq.vdev,
vq->split.vring.desc[head].len);
@@ -701,6 +710,7 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
} else if (ctx) {
*ctx = vq->split.desc_state[head].indir_desc;
}
+ return 0;
}
static inline bool more_used_split(const struct vring_virtqueue *vq)
@@ -717,6 +727,7 @@ static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
void *ret;
unsigned int i;
u16 last_used;
+ int err;
START_USE(vq);
@@ -751,7 +762,12 @@ static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
/* detach_buf_split clears data, so grab it now. */
ret = vq->split.desc_state[i].data;
- detach_buf_split(vq, i, ctx);
+ err = detach_buf_split(vq, i, ctx);
+ if (err) {
+ END_USE(vq);
+ return NULL;
+ }
+
vq->last_used_idx++;
/* If we expect an interrupt for the next entry, tell host
* by writing event index and flush out the write before
@@ -863,6 +879,7 @@ static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
/* detach_buf_split clears data, so grab it now. */
buf = vq->split.desc_state[i].data;
detach_buf_split(vq, i, NULL);
+ /* Don't need to check for error because nothing is returned */
vq->split.avail_idx_shadow--;
vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
vq->split.avail_idx_shadow);
--
2.25.4
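
The inside_split_ring() helper used above is added earlier in this series
and is not shown here. A minimal sketch of what such a bounds check could
look like, assuming it only has to compare a descriptor index against the
ring size (the real helper may also report the error, e.g. via WARN() or
BAD_RING()):

	static inline bool inside_split_ring(struct vring_virtqueue *vq,
					     unsigned int index)
	{
		/* Reject any descriptor index outside the split ring. */
		return index < vq->split.vring.num;
	}
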
On 2021/6/3 8:41 AM, Andi Kleen wrote:
> Harden the split buffer detachment path by adding boundary checking. Note
> that when this fails we may fail to unmap some swiotlb mappings, which could
> result in a leak and a DoS. But that's acceptable because a malicious host
> can DoS us anyway.
>
> Signed-off-by: Andi Kleen <[email protected]>
> ---
> drivers/virtio/virtio_ring.c | 25 +++++++++++++++++++++----
> 1 file changed, 21 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
> index d37ff5a0ff58..1e9aa1e95e1b 100644
> --- a/drivers/virtio/virtio_ring.c
> +++ b/drivers/virtio/virtio_ring.c
> @@ -651,12 +651,19 @@ static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
> return needs_kick;
> }
>
> -static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
> - void **ctx)
> +static int detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
> + void **ctx)
> {
> unsigned int i, j;
> __virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
>
> + /* We'll leak DMA mappings when this happens, but nothing
> + * can be done about that. In the worst case the host
> + * could DOS us, but it can of course do that anyways.
> + */
> + if (!inside_split_ring(vq, head))
> + return -EIO;
I think the caller has already done this for us, with even more checks on
the token (virtqueue_get_buf_ctx_split()):
	if (unlikely(i >= vq->split.vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}

	if (unlikely(!vq->split.desc_state[i].data)) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}
> +
> /* Clear data ptr. */
> vq->split.desc_state[head].data = NULL;
>
> @@ -666,6 +673,8 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
> while (vq->split.vring.desc[i].flags & nextflag) {
> vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
> i = virtio16_to_cpu(vq->vq.vdev, vq->split.vring.desc[i].next);
> + if (!inside_split_ring(vq, i))
> + return -EIO;
Similarly, if we don't depend on the metadata stored in the descriptor,
we don't need this check.
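As an illustration of that alternative (a sketch only, not what this patch
does): if the chain layout were mirrored in driver-private shadow state,
here a hypothetical desc_shadow[] array filled in at add time with flags
kept in CPU byte order, the walk would never take the next index from
device-writable memory and no per-step bounds check would be needed:

	while (vq->split.desc_shadow[i].flags & VRING_DESC_F_NEXT) {
		vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
		/* Chain layout comes from driver memory, not the ring. */
		i = vq->split.desc_shadow[i].next;
		vq->vq.num_free++;
	}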
> vq->vq.num_free++;
> }
>
> @@ -684,7 +693,7 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
>
> /* Free the indirect table, if any, now that it's unmapped. */
> if (!indir_desc)
> - return;
> + return 0;
>
> len = virtio32_to_cpu(vq->vq.vdev,
> vq->split.vring.desc[head].len);
> @@ -701,6 +710,7 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
> } else if (ctx) {
> *ctx = vq->split.desc_state[head].indir_desc;
> }
> + return 0;
> }
>
> static inline bool more_used_split(const struct vring_virtqueue *vq)
> @@ -717,6 +727,7 @@ static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
> void *ret;
> unsigned int i;
> u16 last_used;
> + int err;
>
> START_USE(vq);
>
> @@ -751,7 +762,12 @@ static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
>
> /* detach_buf_split clears data, so grab it now. */
> ret = vq->split.desc_state[i].data;
> - detach_buf_split(vq, i, ctx);
> + err = detach_buf_split(vq, i, ctx);
> + if (err) {
> + END_USE(vq);
This reminds me that we don't call END_USE() after BAD_RING(), which
should be fixed.
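For example, a sketch of that follow-up fix (not part of this patch) in the
existing error paths of virtqueue_get_buf_ctx_split() quoted above:

	if (unlikely(i >= vq->split.vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		END_USE(vq);		/* currently missing */
		return NULL;
	}

	if (unlikely(!vq->split.desc_state[i].data)) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		END_USE(vq);		/* currently missing */
		return NULL;
	}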
Thanks
> + return NULL;
> + }
> +
> vq->last_used_idx++;
> /* If we expect an interrupt for the next entry, tell host
> * by writing event index and flush out the write before
> @@ -863,6 +879,7 @@ static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
> /* detach_buf_split clears data, so grab it now. */
> buf = vq->split.desc_state[i].data;
> detach_buf_split(vq, i, NULL);
> + /* Don't need to check for error because nothing is returned */
> vq->split.avail_idx_shadow--;
> vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
> vq->split.avail_idx_shadow);