2023-08-10 16:05:38

by Richard Weinberger

Subject: [PATCH 1/7] ubi: block: Refactor sg list processing for highmem

Currently sg_virt() is used while filling the sg list from LEB data.
This approach cannot work with highmem.

Refactor ubi_eba_read_leb_sg() to use kmap_atomic() for sg list
access.
Since kmap_atomic() disables preemption, a bounce buffer is needed.
kmap_local_page() is not used to allow easy backporting of this patch
to older kernels.

The follow-up patches in this series will switch to kmap_sg(),
after which we can remove our own helper and the bounce buffer.

Cc: [email protected]
Fixes: 9ff08979e1742 ("UBI: Add initial support for scatter gather")
Reported-by: Stephan Wurm <[email protected]>
Signed-off-by: Richard Weinberger <[email protected]>
---
drivers/mtd/ubi/block.c | 11 ++---
drivers/mtd/ubi/eba.c | 95 ++++++++++++++++++++++++++++-------------
include/linux/mtd/ubi.h | 12 +++---
3 files changed, 76 insertions(+), 42 deletions(-)

diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 437c5b83ffe51..5b2e6c74ac5a8 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -193,13 +193,10 @@ static blk_status_t ubiblock_read(struct request *req)

blk_mq_start_request(req);

- /*
- * It is safe to ignore the return value of blk_rq_map_sg() because
- * the number of sg entries is limited to UBI_MAX_SG_COUNT
- * and ubi_read_sg() will check that limit.
- */
ubi_sgl_init(&pdu->usgl);
- blk_rq_map_sg(req->q, req, pdu->usgl.sg);
+ ret = blk_rq_map_sg(req->q, req, pdu->usgl.sg);
+ ubi_assert(ret > 0 && ret < UBI_MAX_SG_COUNT);
+ pdu->usgl.len = ret;

while (bytes_left) {
/*
@@ -212,7 +209,7 @@ static blk_status_t ubiblock_read(struct request *req)
ret = ubi_read_sg(dev->desc, leb, &pdu->usgl, offset, to_read);
if (ret < 0)
break;
-
+ pdu->usgl.tot_offset += to_read;
bytes_left -= to_read;
to_read = bytes_left;
leb += 1;
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 655ff41863e2b..82c54bf7c2067 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -31,6 +31,7 @@
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/err.h>
+#include <linux/highmem.h>
#include "ubi.h"

/* Number of physical eraseblocks reserved for atomic LEB change operation */
@@ -730,6 +731,44 @@ int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
return err;
}

+/*
+ * Basically a copy of scsi_kmap_atomic_sg().
+ * As long as scsi_kmap_atomic_sg() is not part of lib/scatterlist.c, keep
+ * our own version to avoid a dependency on CONFIG_SCSI.
+ */
+static void *ubi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
+ size_t *offset, size_t *len)
+{
+ int i;
+ size_t sg_len = 0, len_complete = 0;
+ struct scatterlist *sg;
+ struct page *page;
+
+ for_each_sg(sgl, sg, sg_count, i) {
+ len_complete = sg_len; /* Complete sg-entries */
+ sg_len += sg->length;
+ if (sg_len > *offset)
+ break;
+ }
+
+ if (WARN_ON_ONCE(i == sg_count))
+ return NULL;
+
+ /* Offset starting from the beginning of first page in this sg-entry */
+ *offset = *offset - len_complete + sg->offset;
+
+ /* Assumption: contiguous pages can be accessed as "page + i" */
+ page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
+ *offset &= ~PAGE_MASK;
+
+ /* Bytes in this sg-entry from *offset to the end of the page */
+ sg_len = PAGE_SIZE - *offset;
+ if (*len > sg_len)
+ *len = sg_len;
+
+ return kmap_atomic(page);
+}
+
/**
* ubi_eba_read_leb_sg - read data into a scatter gather list.
* @ubi: UBI device description object
@@ -748,40 +787,38 @@ int ubi_eba_read_leb_sg(struct ubi_device *ubi, struct ubi_volume *vol,
struct ubi_sgl *sgl, int lnum, int offset, int len,
int check)
{
- int to_read;
- int ret;
- struct scatterlist *sg;
+ size_t map_len, map_offset, cur_offset;
+ int ret, to_read = len;
+ char *bounce_buf;

- for (;;) {
- ubi_assert(sgl->list_pos < UBI_MAX_SG_COUNT);
- sg = &sgl->sg[sgl->list_pos];
- if (len < sg->length - sgl->page_pos)
- to_read = len;
- else
- to_read = sg->length - sgl->page_pos;
-
- ret = ubi_eba_read_leb(ubi, vol, lnum,
- sg_virt(sg) + sgl->page_pos, offset,
- to_read, check);
- if (ret < 0)
- return ret;
-
- offset += to_read;
- len -= to_read;
- if (!len) {
- sgl->page_pos += to_read;
- if (sgl->page_pos == sg->length) {
- sgl->list_pos++;
- sgl->page_pos = 0;
- }
+ bounce_buf = kvmalloc(to_read, GFP_KERNEL);
+ if (!bounce_buf) {
+ ret = -ENOMEM;
+ goto out;
+ }

- break;
- }
+ ret = ubi_eba_read_leb(ubi, vol, lnum, bounce_buf, offset, to_read, check);
+ if (ret < 0)
+ goto out;
+
+ cur_offset = 0;
+ while (to_read > 0) {
+ char *dst;

- sgl->list_pos++;
- sgl->page_pos = 0;
+ map_len = to_read;
+ map_offset = cur_offset + sgl->tot_offset;
+
+ dst = ubi_kmap_atomic_sg(sgl->sg, sgl->len, &map_offset, &map_len);
+ memcpy(dst + map_offset, bounce_buf + cur_offset, map_len);
+ kunmap_atomic(dst);
+
+ cur_offset += map_len;
+ to_read -= map_len;
}

+ ret = 0;
+out:
+ kvfree(bounce_buf);
return ret;
}

diff --git a/include/linux/mtd/ubi.h b/include/linux/mtd/ubi.h
index a529347fd75b2..521e0e8b3ede3 100644
--- a/include/linux/mtd/ubi.h
+++ b/include/linux/mtd/ubi.h
@@ -115,8 +115,8 @@ struct ubi_volume_info {

/**
* struct ubi_sgl - UBI scatter gather list data structure.
- * @list_pos: current position in @sg[]
- * @page_pos: current position in @sg[@list_pos]
+ * @len: number of elements in @sg[]
+ * @tot_offset: current position in the scatter gather list
* @sg: the scatter gather list itself
*
* ubi_sgl is a wrapper around a scatter list which keeps track of the
@@ -124,8 +124,8 @@ struct ubi_volume_info {
* it can be used across multiple ubi_leb_read_sg() calls.
*/
struct ubi_sgl {
- int list_pos;
- int page_pos;
+ int len;
+ int tot_offset;
struct scatterlist sg[UBI_MAX_SG_COUNT];
};

@@ -138,8 +138,8 @@ struct ubi_sgl {
*/
static inline void ubi_sgl_init(struct ubi_sgl *usgl)
{
- usgl->list_pos = 0;
- usgl->page_pos = 0;
+ usgl->len = 0;
+ usgl->tot_offset = 0;
}

/**
--
2.35.3
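The helper's pointer arguments are in/out: *offset enters as a byte offset
into the whole scatter list and returns as the offset within the mapped
page, while *len enters as the number of bytes wanted and returns clamped
to what fits before the end of that page. A minimal sketch of a copy loop
built on this contract, with a hypothetical wrapper sg_copy_from_bounce()
that is not part of the patch:

/*
 * Sketch only: copy @count bytes from @buf into @sgl starting at byte
 * @skip, using the in/out contract of ubi_kmap_atomic_sg() above.
 */
static void sg_copy_from_bounce(struct scatterlist *sgl, int sg_count,
				const char *buf, size_t skip, size_t count)
{
	size_t done = 0;

	while (done < count) {
		/* in: offset into the whole list, bytes still wanted */
		size_t offset = skip + done;
		size_t len = count - done;
		char *vaddr;

		/* out: offset within the mapped page, bytes mappable now */
		vaddr = ubi_kmap_atomic_sg(sgl, sg_count, &offset, &len);
		if (WARN_ON(!vaddr))
			break;

		memcpy(vaddr + offset, buf + done, len);
		kunmap_atomic(vaddr);
		done += len;
	}
}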



2023-08-10 16:36:30

by Richard Weinberger

Subject: Re: [PATCH 1/7] ubi: block: Refactor sg list processing for highmem

----- Original Message -----
> From: "Christoph Hellwig" <[email protected]>
>> Refactor ubi_eba_read_leb_sg() to use kmap_atomic() for sg list
>> access.
>> Since kmap_atomic() disables preemption, a bounce buffer is needed.
>> kmap_local_page() is not used to allow easy backporting of this patch
>> to older kernels.
>>
>> The follow-up patches in this series will switch to kmap_sg(),
>> after which we can remove our own helper and the bounce buffer.
>
> Please just use kmap_local and avoid the bounce buffering.

Patch 6 does this.

Thanks,
//richard

2023-08-10 16:39:00

by Christoph Hellwig

[permalink] [raw]
Subject: Re: [PATCH 1/7] ubi: block: Refactor sg list processing for highmem

On Thu, Aug 10, 2023 at 06:00:12PM +0200, Richard Weinberger wrote:
> Currently sg_virt() is used while filling the sg list from LEB data.
> This approach cannot work with highmem.
>
> Refactor ubi_eba_read_leb_sg() to use kmap_atomic() for sg list
> access.
> Since kmap_atomic() disables preemption, a bounce buffer is needed.
> kmap_local_page() is not used to allow easy backporting of this patch
> to older kernels.
>
> The follow-up patches in this series will switch to kmap_sg(),
> after which we can remove our own helper and the bounce buffer.

Please just use kmap_local and avoid the bounce buffering.
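For illustration, a bounce-free variant could keep the same page-walk but
map with kmap_local_page(), which leaves preemption enabled so the
(possibly sleeping) LEB read can target the mapped page directly. A rough
sketch, assuming a hypothetical ubi_kmap_local_sg() with the same in/out
contract as ubi_kmap_atomic_sg(); the actual conversion happens in patch 6
and may differ:

	/* Sketch only: read straight into the sg pages, no bounce buffer.
	 * ubi_kmap_local_sg() is assumed to walk the list exactly like
	 * ubi_kmap_atomic_sg(), but to map via kmap_local_page(). */
	cur_offset = 0;
	while (to_read > 0) {
		size_t map_offset = cur_offset + sgl->tot_offset;
		size_t map_len = to_read;
		char *dst;

		dst = ubi_kmap_local_sg(sgl->sg, sgl->len, &map_offset, &map_len);
		if (!dst)
			return -EINVAL;

		/* ubi_eba_read_leb() may sleep; kmap_local mappings are
		 * per-task and survive a context switch */
		ret = ubi_eba_read_leb(ubi, vol, lnum, dst + map_offset,
				       offset + cur_offset, map_len, check);
		kunmap_local(dst);
		if (ret < 0)
			return ret;

		cur_offset += map_len;
		to_read -= map_len;
	}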


2023-08-10 16:49:44

by Christoph Hellwig

Subject: Re: [PATCH 1/7] ubi: block: Refactor sg list processing for highmem

On Thu, Aug 10, 2023 at 06:15:36PM +0200, Richard Weinberger wrote:
> >> The followup patches in this series will switch to kmap_sg()
> >> and we can remove our own helper and the bounce buffer.
> >
> > Please just use kmap_local and avoid the bounce buffering.
>
> Patch 6 does this.

But why add the bounce buffering first if you can avoid it from the
very beginning by just using kmap_local instead of adding a new
caller for the deprecated kmap_atomic()?

2023-08-10 21:12:15

by Richard Weinberger

[permalink] [raw]
Subject: Re: [PATCH 1/7] ubi: block: Refactor sg list processing for highmem

----- Original Message -----
> From: "Christoph Hellwig" <[email protected]>
> To: "richard" <[email protected]>
> On Thu, Aug 10, 2023 at 06:15:36PM +0200, Richard Weinberger wrote:
>> >> The follow-up patches in this series will switch to kmap_sg(),
>> >> after which we can remove our own helper and the bounce buffer.
>> >
>> > Please just use kmap_local and avoid the bounce buffering.
>>
>> Patch 6 does this.
>
> But why add the bounce buffering first if you can avoid it from the
> very beginning by just using kmap_local instead of adding a new
> caller for the deprecated kmap_atomic()?

Because I want this fix also in all stable trees. kmap_local() is rather
new. When backporting patch 1/7, bounce buffers and kmap_atomic()
are needed anyway.
By doing this in patch 1/7 I avoid backport troubles and keep the
delta between upstream and stable trees minimal.

Thanks,
//richard

2023-08-11 08:52:28

by Richard Weinberger

Subject: Re: [PATCH 1/7] ubi: block: Refactor sg list processing for highmem

----- Original Message -----
> From: "Christoph Hellwig" <[email protected]>
> On Thu, Aug 10, 2023 at 09:54:46PM +0200, Richard Weinberger wrote:
>> > But why add the bounce buffering first if you can avoid it from the
>> > very beginning by just using kmap_local instead of adding a new
>> > caller for the deprecated kmap_atomic()?
>>
>> Because I want this fix also in all stable trees. kmap_local() is rather
>> new. When backporting patch 1/7, bounce buffers and kmap_atomic()
>> are needed anyway.
>> By doing this in patch 1/7 I avoid backport troubles and keep the
>> delta between upstream and stable trees minimal.
>
> Just use plain kmap for the historic backports.

Hm, yes. Though for UBIblock kmap might be too expensive.

Thanks,
//richard

2023-08-11 09:26:27

by Christoph Hellwig

Subject: Re: [PATCH 1/7] ubi: block: Refactor sg list processing for highmem

On Fri, Aug 11, 2023 at 10:34:52AM +0200, Richard Weinberger wrote:
> ----- Original Message -----
> > From: "Christoph Hellwig" <[email protected]>
> > On Thu, Aug 10, 2023 at 09:54:46PM +0200, Richard Weinberger wrote:
> >> > But why add the bounce buffering first if you can avoid it from the
> >> > very beginning by just using kmap_local instead of adding a new
> >> > caller for the deprecated kmap_atomic()?
> >>
> >> Because I want this fix also in all stable trees. kmap_local() is rather
> >> new. When backporting patch 1/7, bounce buffers and kmap_atomic()
> >> are needed anyway.
> >> By doing this in patch 1/7 I avoid backport troubles and keep the
> >> delta between upstream and stable trees minimal.
> >
> > Just use plain kmap for the historic backports.
>
> Hm, yes. Though for UBIblock kmap might be too expensive.

? kmap is a no-op for !highmem. And it will always be way cheaper
than bounce buffering for the case where you are actually fed highmem
pages.
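Concretely, a backport fragment using plain kmap() could look like this
(hypothetical sketch; the page-walk and multi-page sg-entry handling are
omitted):

	/* Backport sketch only: kmap() also lets the read sleep while the
	 * page is mapped, and is a no-op without CONFIG_HIGHMEM. The page
	 * and offset come from the same walk as in ubi_kmap_atomic_sg(). */
	struct page *page = nth_page(sg_page(sg), map_offset >> PAGE_SHIFT);
	char *dst = kmap(page);

	ret = ubi_eba_read_leb(ubi, vol, lnum, dst + (map_offset & ~PAGE_MASK),
			       offset, map_len, check);
	kunmap(page);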

2023-08-11 09:32:50

by Christoph Hellwig

Subject: Re: [PATCH 1/7] ubi: block: Refactor sg list processing for highmem

On Thu, Aug 10, 2023 at 09:54:46PM +0200, Richard Weinberger wrote:
> > But why add the bounce buffering first if you can avoid it from the
> > very beginning by just using kmap_local instead of adding a new
> > caller for the deprecated kmap_atomic()?
>
> Because I want this fix also in all stable trees. kmap_local() is rather
> new. When backporting patch 1/7, bounce buffers and kmap_atomic()
> are needed anyway.
> By doing this in patch 1/7 I avoid backport troubles and keep the
> delta between upstream and stable trees minimal.

Just use plain kmap for the historic backports.