From: Leonid Ravich <[email protected]>
prepare for changing alloc size.
Signed-off-by: Leonid Ravich <[email protected]>
---
drivers/dma/ioat/dma.c | 14 ++++++++------
drivers/dma/ioat/dma.h | 10 ++++++----
drivers/dma/ioat/init.c | 2 +-
3 files changed, 15 insertions(+), 11 deletions(-)
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 18c011e..1e0e6c1 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -332,8 +332,8 @@ static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
u8 *pos;
off_t offs;
- chunk = idx / IOAT_DESCS_PER_2M;
- idx &= (IOAT_DESCS_PER_2M - 1);
+ chunk = idx / IOAT_DESCS_PER_CHUNK;
+ idx &= (IOAT_DESCS_PER_CHUNK - 1);
offs = idx * IOAT_DESC_SZ;
pos = (u8 *)ioat_chan->descs[chunk].virt + offs;
phys = ioat_chan->descs[chunk].hw + offs;
@@ -370,7 +370,8 @@ struct ioat_ring_ent **
if (!ring)
return NULL;
- ioat_chan->desc_chunks = chunks = (total_descs * IOAT_DESC_SZ) / SZ_2M;
+ chunks = (total_descs * IOAT_DESC_SZ) / IOAT_CHUNK_SIZE;
+ ioat_chan->desc_chunks = chunks;
for (i = 0; i < chunks; i++) {
struct ioat_descs *descs = &ioat_chan->descs[i];
@@ -382,8 +383,9 @@ struct ioat_ring_ent **
for (idx = 0; idx < i; idx++) {
descs = &ioat_chan->descs[idx];
- dma_free_coherent(to_dev(ioat_chan), SZ_2M,
- descs->virt, descs->hw);
+ dma_free_coherent(to_dev(ioat_chan),
+ IOAT_CHUNK_SIZE,
+ descs->virt, descs->hw);
descs->virt = NULL;
descs->hw = 0;
}
@@ -404,7 +406,7 @@ struct ioat_ring_ent **
for (idx = 0; idx < ioat_chan->desc_chunks; idx++) {
dma_free_coherent(to_dev(ioat_chan),
- SZ_2M,
+ IOAT_CHUNK_SIZE,
ioat_chan->descs[idx].virt,
ioat_chan->descs[idx].hw);
ioat_chan->descs[idx].virt = NULL;
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index b8e8e0b..535aba9 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -81,6 +81,11 @@ struct ioatdma_device {
u32 msixpba;
};
+#define IOAT_MAX_ORDER 16
+#define IOAT_MAX_DESCS (1 << IOAT_MAX_ORDER)
+#define IOAT_CHUNK_SIZE (SZ_2M)
+#define IOAT_DESCS_PER_CHUNK (IOAT_CHUNK_SIZE/IOAT_DESC_SZ)
+
struct ioat_descs {
void *virt;
dma_addr_t hw;
@@ -128,7 +133,7 @@ struct ioatdma_chan {
u16 produce;
struct ioat_ring_ent **ring;
spinlock_t prep_lock;
- struct ioat_descs descs[2];
+ struct ioat_descs descs[IOAT_MAX_DESCS/IOAT_DESCS_PER_CHUNK];
int desc_chunks;
int intr_coalesce;
int prev_intr_coalesce;
@@ -301,9 +306,6 @@ static inline bool is_ioat_bug(unsigned long err)
return !!err;
}
-#define IOAT_MAX_ORDER 16
-#define IOAT_MAX_DESCS 65536
-#define IOAT_DESCS_PER_2M 32768
static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan)
{
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 60e9afb..58d1356 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -651,7 +651,7 @@ static void ioat_free_chan_resources(struct dma_chan *c)
}
for (i = 0; i < ioat_chan->desc_chunks; i++) {
- dma_free_coherent(to_dev(ioat_chan), SZ_2M,
+ dma_free_coherent(to_dev(ioat_chan), IOAT_CHUNK_SIZE,
ioat_chan->descs[i].virt,
ioat_chan->descs[i].hw);
ioat_chan->descs[i].virt = NULL;
--
1.9.3
From: Leonid Ravich <[email protected]>
the current IOAT driver uses big (2MB) allocation chunks for its descriptors,
therefore each ioat dma engine needs 2 such chunks
(64k entries in the ring, each entry 64B = 4MB),
requiring 2 * 2M contiguous memory chunks per dmaengine, which
might fail due to memory fragmentation.
so we decrease the chunk size and use more chunks.
Signed-off-by: Leonid Ravich <[email protected]>
---
drivers/dma/ioat/dma.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 535aba9..e9757bc 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -83,7 +83,7 @@ struct ioatdma_device {
#define IOAT_MAX_ORDER 16
#define IOAT_MAX_DESCS (1 << IOAT_MAX_ORDER)
-#define IOAT_CHUNK_SIZE (SZ_2M)
+#define IOAT_CHUNK_SIZE (SZ_512K)
#define IOAT_DESCS_PER_CHUNK (IOAT_CHUNK_SIZE/IOAT_DESC_SZ)
struct ioat_descs {
--
1.9.3
On 4/2/2020 2:27 AM, [email protected] wrote:
> From: Leonid Ravich <[email protected]>
>
> prepare for changing alloc size.
>
> Signed-off-by: Leonid Ravich <[email protected]>
I'm ok with the changes in the series. Were you able to test this on
hardware? A few formatting nits below
> ---
> drivers/dma/ioat/dma.c | 14 ++++++++------
> drivers/dma/ioat/dma.h | 10 ++++++----
> drivers/dma/ioat/init.c | 2 +-
> 3 files changed, 15 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
> index 18c011e..1e0e6c1 100644
> --- a/drivers/dma/ioat/dma.c
> +++ b/drivers/dma/ioat/dma.c
> @@ -332,8 +332,8 @@ static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
> u8 *pos;
> off_t offs;
>
> - chunk = idx / IOAT_DESCS_PER_2M;
> - idx &= (IOAT_DESCS_PER_2M - 1);
> + chunk = idx / IOAT_DESCS_PER_CHUNK;
> + idx &= (IOAT_DESCS_PER_CHUNK - 1);
> offs = idx * IOAT_DESC_SZ;
> pos = (u8 *)ioat_chan->descs[chunk].virt + offs;
> phys = ioat_chan->descs[chunk].hw + offs;
> @@ -370,7 +370,8 @@ struct ioat_ring_ent **
> if (!ring)
> return NULL;
>
> - ioat_chan->desc_chunks = chunks = (total_descs * IOAT_DESC_SZ) / SZ_2M;
> + chunks = (total_descs * IOAT_DESC_SZ) / IOAT_CHUNK_SIZE;
> + ioat_chan->desc_chunks = chunks;
>
> for (i = 0; i < chunks; i++) {
> struct ioat_descs *descs = &ioat_chan->descs[i];
> @@ -382,8 +383,9 @@ struct ioat_ring_ent **
>
> for (idx = 0; idx < i; idx++) {
> descs = &ioat_chan->descs[idx];
> - dma_free_coherent(to_dev(ioat_chan), SZ_2M,
> - descs->virt, descs->hw);
> + dma_free_coherent(to_dev(ioat_chan),
> + IOAT_CHUNK_SIZE,
> + descs->virt, descs->hw);
> descs->virt = NULL;
> descs->hw = 0;
> }
> @@ -404,7 +406,7 @@ struct ioat_ring_ent **
>
> for (idx = 0; idx < ioat_chan->desc_chunks; idx++) {
> dma_free_coherent(to_dev(ioat_chan),
> - SZ_2M,
> + IOAT_CHUNK_SIZE,
> ioat_chan->descs[idx].virt,
> ioat_chan->descs[idx].hw);
> ioat_chan->descs[idx].virt = NULL;
> diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
> index b8e8e0b..535aba9 100644
> --- a/drivers/dma/ioat/dma.h
> +++ b/drivers/dma/ioat/dma.h
> @@ -81,6 +81,11 @@ struct ioatdma_device {
> u32 msixpba;
> };
>
> +#define IOAT_MAX_ORDER 16
> +#define IOAT_MAX_DESCS (1 << IOAT_MAX_ORDER)
> +#define IOAT_CHUNK_SIZE (SZ_2M)
> +#define IOAT_DESCS_PER_CHUNK (IOAT_CHUNK_SIZE/IOAT_DESC_SZ)
(IOAT_CHUNK_SIZE / IOAT_DESC_SZ)
> +
> struct ioat_descs {
> void *virt;
> dma_addr_t hw;
> @@ -128,7 +133,7 @@ struct ioatdma_chan {
> u16 produce;
> struct ioat_ring_ent **ring;
> spinlock_t prep_lock;
> - struct ioat_descs descs[2];
> + struct ioat_descs descs[IOAT_MAX_DESCS/IOAT_DESCS_PER_CHUNK];
IOAT_MAX_DESCS / IOAT_DESCS_PER_CHUNK
> int desc_chunks;
> int intr_coalesce;
> int prev_intr_coalesce;
> @@ -301,9 +306,6 @@ static inline bool is_ioat_bug(unsigned long err)
> return !!err;
> }
>
> -#define IOAT_MAX_ORDER 16
> -#define IOAT_MAX_DESCS 65536
> -#define IOAT_DESCS_PER_2M 32768
>
> static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan)
> {
> diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
> index 60e9afb..58d1356 100644
> --- a/drivers/dma/ioat/init.c
> +++ b/drivers/dma/ioat/init.c
> @@ -651,7 +651,7 @@ static void ioat_free_chan_resources(struct dma_chan *c)
> }
>
> for (i = 0; i < ioat_chan->desc_chunks; i++) {
> - dma_free_coherent(to_dev(ioat_chan), SZ_2M,
> + dma_free_coherent(to_dev(ioat_chan), IOAT_CHUNK_SIZE,
> ioat_chan->descs[i].virt,
> ioat_chan->descs[i].hw);
> ioat_chan->descs[i].virt = NULL;
>
On 4/2/2020 2:27 AM, [email protected] wrote:
> From: Leonid Ravich <[email protected]>
>
> current IOAT driver using big (2MB) allocations chunk for its descriptors
> therefore each ioat dma engine need 2 such chunks
> (64k entres in ring each entry 64B = 4MB)
> requiring 2 * 2M * dmaengine contiguies memory chunk
> might fail due to memory fragmention.
>
> so we decresging chunk size and using more chunks.
decreasing
>
> Signed-off-by: Leonid Ravich <[email protected]>
Acked-by: Dave Jiang <[email protected]> if the two patches have been
tested on hw.
> ---
> drivers/dma/ioat/dma.h | 2 +-
> 1 file changed, 1 insertion(+), 1 deletion(-)
>
> diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
> index 535aba9..e9757bc 100644
> --- a/drivers/dma/ioat/dma.h
> +++ b/drivers/dma/ioat/dma.h
> @@ -83,7 +83,7 @@ struct ioatdma_device {
>
> #define IOAT_MAX_ORDER 16
> #define IOAT_MAX_DESCS (1 << IOAT_MAX_ORDER)
> -#define IOAT_CHUNK_SIZE (SZ_2M)
> +#define IOAT_CHUNK_SIZE (SZ_512K)
> #define IOAT_DESCS_PER_CHUNK (IOAT_CHUNK_SIZE/IOAT_DESC_SZ)
>
> struct ioat_descs {
>
From: Leonid Ravich <[email protected]>
prepare for changing alloc size.
Acked-by: Dave Jiang <[email protected]>
Signed-off-by: Leonid Ravich <[email protected]>
---
drivers/dma/ioat/dma.c | 14 ++++++++------
drivers/dma/ioat/dma.h | 10 ++++++----
drivers/dma/ioat/init.c | 2 +-
3 files changed, 15 insertions(+), 11 deletions(-)
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 18c011e..1e0e6c1 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -332,8 +332,8 @@ static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
u8 *pos;
off_t offs;
- chunk = idx / IOAT_DESCS_PER_2M;
- idx &= (IOAT_DESCS_PER_2M - 1);
+ chunk = idx / IOAT_DESCS_PER_CHUNK;
+ idx &= (IOAT_DESCS_PER_CHUNK - 1);
offs = idx * IOAT_DESC_SZ;
pos = (u8 *)ioat_chan->descs[chunk].virt + offs;
phys = ioat_chan->descs[chunk].hw + offs;
@@ -370,7 +370,8 @@ struct ioat_ring_ent **
if (!ring)
return NULL;
- ioat_chan->desc_chunks = chunks = (total_descs * IOAT_DESC_SZ) / SZ_2M;
+ chunks = (total_descs * IOAT_DESC_SZ) / IOAT_CHUNK_SIZE;
+ ioat_chan->desc_chunks = chunks;
for (i = 0; i < chunks; i++) {
struct ioat_descs *descs = &ioat_chan->descs[i];
@@ -382,8 +383,9 @@ struct ioat_ring_ent **
for (idx = 0; idx < i; idx++) {
descs = &ioat_chan->descs[idx];
- dma_free_coherent(to_dev(ioat_chan), SZ_2M,
- descs->virt, descs->hw);
+ dma_free_coherent(to_dev(ioat_chan),
+ IOAT_CHUNK_SIZE,
+ descs->virt, descs->hw);
descs->virt = NULL;
descs->hw = 0;
}
@@ -404,7 +406,7 @@ struct ioat_ring_ent **
for (idx = 0; idx < ioat_chan->desc_chunks; idx++) {
dma_free_coherent(to_dev(ioat_chan),
- SZ_2M,
+ IOAT_CHUNK_SIZE,
ioat_chan->descs[idx].virt,
ioat_chan->descs[idx].hw);
ioat_chan->descs[idx].virt = NULL;
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index b8e8e0b..5216c6b 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -81,6 +81,11 @@ struct ioatdma_device {
u32 msixpba;
};
+#define IOAT_MAX_ORDER 16
+#define IOAT_MAX_DESCS (1 << IOAT_MAX_ORDER)
+#define IOAT_CHUNK_SIZE (SZ_2M)
+#define IOAT_DESCS_PER_CHUNK (IOAT_CHUNK_SIZE / IOAT_DESC_SZ)
+
struct ioat_descs {
void *virt;
dma_addr_t hw;
@@ -128,7 +133,7 @@ struct ioatdma_chan {
u16 produce;
struct ioat_ring_ent **ring;
spinlock_t prep_lock;
- struct ioat_descs descs[2];
+ struct ioat_descs descs[IOAT_MAX_DESCS / IOAT_DESCS_PER_CHUNK];
int desc_chunks;
int intr_coalesce;
int prev_intr_coalesce;
@@ -301,9 +306,6 @@ static inline bool is_ioat_bug(unsigned long err)
return !!err;
}
-#define IOAT_MAX_ORDER 16
-#define IOAT_MAX_DESCS 65536
-#define IOAT_DESCS_PER_2M 32768
static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan)
{
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 60e9afb..58d1356 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -651,7 +651,7 @@ static void ioat_free_chan_resources(struct dma_chan *c)
}
for (i = 0; i < ioat_chan->desc_chunks; i++) {
- dma_free_coherent(to_dev(ioat_chan), SZ_2M,
+ dma_free_coherent(to_dev(ioat_chan), IOAT_CHUNK_SIZE,
ioat_chan->descs[i].virt,
ioat_chan->descs[i].hw);
ioat_chan->descs[i].virt = NULL;
--
1.9.3
From: Leonid Ravich <[email protected]>
the current IOAT driver uses big (2MB) allocation chunks for its descriptors,
therefore each ioat dma engine needs 2 such chunks
(64k entries in the ring, each entry 64B = 4MB),
requiring 2 * 2M contiguous memory chunks per dmaengine, which
might fail due to memory fragmentation.
so we decrease the chunk size and use more chunks.
Acked-by: Dave Jiang <[email protected]>
Signed-off-by: Leonid Ravich <[email protected]>
---
drivers/dma/ioat/dma.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 5216c6b..e6b622e 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -83,7 +83,7 @@ struct ioatdma_device {
#define IOAT_MAX_ORDER 16
#define IOAT_MAX_DESCS (1 << IOAT_MAX_ORDER)
-#define IOAT_CHUNK_SIZE (SZ_2M)
+#define IOAT_CHUNK_SIZE (SZ_512K)
#define IOAT_DESCS_PER_CHUNK (IOAT_CHUNK_SIZE / IOAT_DESC_SZ)
struct ioat_descs {
--
1.9.3
On 4/2/2020 9:33 AM, [email protected] wrote:
> From: Leonid Ravich <[email protected]>
>
> prepare for changing alloc size.
>
> Acked-by: Dave Jiang <[email protected]>
> Signed-off-by: Leonid Ravich <[email protected]>
Hi Leonid, I haven't actually acked this patch yet, pending your answer
on if this has been tested on hardware. Thanks.
> ---
> drivers/dma/ioat/dma.c | 14 ++++++++------
> drivers/dma/ioat/dma.h | 10 ++++++----
> drivers/dma/ioat/init.c | 2 +-
> 3 files changed, 15 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
> index 18c011e..1e0e6c1 100644
> --- a/drivers/dma/ioat/dma.c
> +++ b/drivers/dma/ioat/dma.c
> @@ -332,8 +332,8 @@ static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
> u8 *pos;
> off_t offs;
>
> - chunk = idx / IOAT_DESCS_PER_2M;
> - idx &= (IOAT_DESCS_PER_2M - 1);
> + chunk = idx / IOAT_DESCS_PER_CHUNK;
> + idx &= (IOAT_DESCS_PER_CHUNK - 1);
> offs = idx * IOAT_DESC_SZ;
> pos = (u8 *)ioat_chan->descs[chunk].virt + offs;
> phys = ioat_chan->descs[chunk].hw + offs;
> @@ -370,7 +370,8 @@ struct ioat_ring_ent **
> if (!ring)
> return NULL;
>
> - ioat_chan->desc_chunks = chunks = (total_descs * IOAT_DESC_SZ) / SZ_2M;
> + chunks = (total_descs * IOAT_DESC_SZ) / IOAT_CHUNK_SIZE;
> + ioat_chan->desc_chunks = chunks;
>
> for (i = 0; i < chunks; i++) {
> struct ioat_descs *descs = &ioat_chan->descs[i];
> @@ -382,8 +383,9 @@ struct ioat_ring_ent **
>
> for (idx = 0; idx < i; idx++) {
> descs = &ioat_chan->descs[idx];
> - dma_free_coherent(to_dev(ioat_chan), SZ_2M,
> - descs->virt, descs->hw);
> + dma_free_coherent(to_dev(ioat_chan),
> + IOAT_CHUNK_SIZE,
> + descs->virt, descs->hw);
> descs->virt = NULL;
> descs->hw = 0;
> }
> @@ -404,7 +406,7 @@ struct ioat_ring_ent **
>
> for (idx = 0; idx < ioat_chan->desc_chunks; idx++) {
> dma_free_coherent(to_dev(ioat_chan),
> - SZ_2M,
> + IOAT_CHUNK_SIZE,
> ioat_chan->descs[idx].virt,
> ioat_chan->descs[idx].hw);
> ioat_chan->descs[idx].virt = NULL;
> diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
> index b8e8e0b..5216c6b 100644
> --- a/drivers/dma/ioat/dma.h
> +++ b/drivers/dma/ioat/dma.h
> @@ -81,6 +81,11 @@ struct ioatdma_device {
> u32 msixpba;
> };
>
> +#define IOAT_MAX_ORDER 16
> +#define IOAT_MAX_DESCS (1 << IOAT_MAX_ORDER)
> +#define IOAT_CHUNK_SIZE (SZ_2M)
> +#define IOAT_DESCS_PER_CHUNK (IOAT_CHUNK_SIZE / IOAT_DESC_SZ)
> +
> struct ioat_descs {
> void *virt;
> dma_addr_t hw;
> @@ -128,7 +133,7 @@ struct ioatdma_chan {
> u16 produce;
> struct ioat_ring_ent **ring;
> spinlock_t prep_lock;
> - struct ioat_descs descs[2];
> + struct ioat_descs descs[IOAT_MAX_DESCS / IOAT_DESCS_PER_CHUNK];
> int desc_chunks;
> int intr_coalesce;
> int prev_intr_coalesce;
> @@ -301,9 +306,6 @@ static inline bool is_ioat_bug(unsigned long err)
> return !!err;
> }
>
> -#define IOAT_MAX_ORDER 16
> -#define IOAT_MAX_DESCS 65536
> -#define IOAT_DESCS_PER_2M 32768
>
> static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan)
> {
> diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
> index 60e9afb..58d1356 100644
> --- a/drivers/dma/ioat/init.c
> +++ b/drivers/dma/ioat/init.c
> @@ -651,7 +651,7 @@ static void ioat_free_chan_resources(struct dma_chan *c)
> }
>
> for (i = 0; i < ioat_chan->desc_chunks; i++) {
> - dma_free_coherent(to_dev(ioat_chan), SZ_2M,
> + dma_free_coherent(to_dev(ioat_chan), IOAT_CHUNK_SIZE,
> ioat_chan->descs[i].virt,
> ioat_chan->descs[i].hw);
> ioat_chan->descs[i].virt = NULL;
>
On 4/2/2020 9:46 AM, Ravich, Leonid wrote:
> Sorry Dave ,
> sure was tested on Intel Sky Lake-E CBDMA
Thanks Leonid. Acked.
>
> -----Original Message-----
> From: Dave Jiang <[email protected]>
> Sent: Thursday, April 2, 2020 7:43 PM
> To: Ravich, Leonid; [email protected]
> Cc: [email protected]; Vinod Koul; Williams, Dan J; Greg Kroah-Hartman; Zavras, Alexios; Barabash, Alexander; Thomas Gleixner; Kate Stewart; Jilayne Lovejoy; Logan Gunthorpe; [email protected]
> Subject: Re: [PATCH v2 1/2] dmaengine: ioat: fixing chunk sizing macros dependency
>
>
> [EXTERNAL EMAIL]
>
>
>
> On 4/2/2020 9:33 AM, [email protected] wrote:
>> From: Leonid Ravich <[email protected]>
>>
>> prepare for changing alloc size.
>>
>> Acked-by: Dave Jiang <[email protected]>
>> Signed-off-by: Leonid Ravich <[email protected]>
>
> Hi Leonid, I haven't actually acked this patch yet, pending your answer on if this has been tested on hardware. Thanks.
>
>> ---
>> drivers/dma/ioat/dma.c | 14 ++++++++------
>> drivers/dma/ioat/dma.h | 10 ++++++----
>> drivers/dma/ioat/init.c | 2 +-
>> 3 files changed, 15 insertions(+), 11 deletions(-)
>>
>> diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index
>> 18c011e..1e0e6c1 100644
>> --- a/drivers/dma/ioat/dma.c
>> +++ b/drivers/dma/ioat/dma.c
>> @@ -332,8 +332,8 @@ static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
>> u8 *pos;
>> off_t offs;
>>
>> - chunk = idx / IOAT_DESCS_PER_2M;
>> - idx &= (IOAT_DESCS_PER_2M - 1);
>> + chunk = idx / IOAT_DESCS_PER_CHUNK;
>> + idx &= (IOAT_DESCS_PER_CHUNK - 1);
>> offs = idx * IOAT_DESC_SZ;
>> pos = (u8 *)ioat_chan->descs[chunk].virt + offs;
>> phys = ioat_chan->descs[chunk].hw + offs; @@ -370,7 +370,8 @@
>> struct ioat_ring_ent **
>> if (!ring)
>> return NULL;
>>
>> - ioat_chan->desc_chunks = chunks = (total_descs * IOAT_DESC_SZ) / SZ_2M;
>> + chunks = (total_descs * IOAT_DESC_SZ) / IOAT_CHUNK_SIZE;
>> + ioat_chan->desc_chunks = chunks;
>>
>> for (i = 0; i < chunks; i++) {
>> struct ioat_descs *descs = &ioat_chan->descs[i]; @@ -382,8 +383,9
>> @@ struct ioat_ring_ent **
>>
>> for (idx = 0; idx < i; idx++) {
>> descs = &ioat_chan->descs[idx];
>> - dma_free_coherent(to_dev(ioat_chan), SZ_2M,
>> - descs->virt, descs->hw);
>> + dma_free_coherent(to_dev(ioat_chan),
>> + IOAT_CHUNK_SIZE,
>> + descs->virt, descs->hw);
>> descs->virt = NULL;
>> descs->hw = 0;
>> }
>> @@ -404,7 +406,7 @@ struct ioat_ring_ent **
>>
>> for (idx = 0; idx < ioat_chan->desc_chunks; idx++) {
>> dma_free_coherent(to_dev(ioat_chan),
>> - SZ_2M,
>> + IOAT_CHUNK_SIZE,
>> ioat_chan->descs[idx].virt,
>> ioat_chan->descs[idx].hw);
>> ioat_chan->descs[idx].virt = NULL; diff --git
>> a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index
>> b8e8e0b..5216c6b 100644
>> --- a/drivers/dma/ioat/dma.h
>> +++ b/drivers/dma/ioat/dma.h
>> @@ -81,6 +81,11 @@ struct ioatdma_device {
>> u32 msixpba;
>> };
>>
>> +#define IOAT_MAX_ORDER 16
>> +#define IOAT_MAX_DESCS (1 << IOAT_MAX_ORDER) #define IOAT_CHUNK_SIZE
>> +(SZ_2M) #define IOAT_DESCS_PER_CHUNK (IOAT_CHUNK_SIZE / IOAT_DESC_SZ)
>> +
>> struct ioat_descs {
>> void *virt;
>> dma_addr_t hw;
>> @@ -128,7 +133,7 @@ struct ioatdma_chan {
>> u16 produce;
>> struct ioat_ring_ent **ring;
>> spinlock_t prep_lock;
>> - struct ioat_descs descs[2];
>> + struct ioat_descs descs[IOAT_MAX_DESCS / IOAT_DESCS_PER_CHUNK];
>> int desc_chunks;
>> int intr_coalesce;
>> int prev_intr_coalesce;
>> @@ -301,9 +306,6 @@ static inline bool is_ioat_bug(unsigned long err)
>> return !!err;
>> }
>>
>> -#define IOAT_MAX_ORDER 16
>> -#define IOAT_MAX_DESCS 65536
>> -#define IOAT_DESCS_PER_2M 32768
>>
>> static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan)
>> {
>> diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c index
>> 60e9afb..58d1356 100644
>> --- a/drivers/dma/ioat/init.c
>> +++ b/drivers/dma/ioat/init.c
>> @@ -651,7 +651,7 @@ static void ioat_free_chan_resources(struct dma_chan *c)
>> }
>>
>> for (i = 0; i < ioat_chan->desc_chunks; i++) {
>> - dma_free_coherent(to_dev(ioat_chan), SZ_2M,
>> + dma_free_coherent(to_dev(ioat_chan), IOAT_CHUNK_SIZE,
>> ioat_chan->descs[i].virt,
>> ioat_chan->descs[i].hw);
>> ioat_chan->descs[i].virt = NULL;
>>
Sorry Dave ,
sure was tested on Intel Sky Lake-E CBDMA
-----Original Message-----
From: Dave Jiang <[email protected]>
Sent: Thursday, April 2, 2020 7:43 PM
To: Ravich, Leonid; [email protected]
Cc: [email protected]; Vinod Koul; Williams, Dan J; Greg Kroah-Hartman; Zavras, Alexios; Barabash, Alexander; Thomas Gleixner; Kate Stewart; Jilayne Lovejoy; Logan Gunthorpe; [email protected]
Subject: Re: [PATCH v2 1/2] dmaengine: ioat: fixing chunk sizing macros dependency
[EXTERNAL EMAIL]
On 4/2/2020 9:33 AM, [email protected] wrote:
> From: Leonid Ravich <[email protected]>
>
> prepare for changing alloc size.
>
> Acked-by: Dave Jiang <[email protected]>
> Signed-off-by: Leonid Ravich <[email protected]>
Hi Leonid, I haven't actually acked this patch yet, pending your answer on if this has been tested on hardware. Thanks.
> ---
> drivers/dma/ioat/dma.c | 14 ++++++++------
> drivers/dma/ioat/dma.h | 10 ++++++----
> drivers/dma/ioat/init.c | 2 +-
> 3 files changed, 15 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index
> 18c011e..1e0e6c1 100644
> --- a/drivers/dma/ioat/dma.c
> +++ b/drivers/dma/ioat/dma.c
> @@ -332,8 +332,8 @@ static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
> u8 *pos;
> off_t offs;
>
> - chunk = idx / IOAT_DESCS_PER_2M;
> - idx &= (IOAT_DESCS_PER_2M - 1);
> + chunk = idx / IOAT_DESCS_PER_CHUNK;
> + idx &= (IOAT_DESCS_PER_CHUNK - 1);
> offs = idx * IOAT_DESC_SZ;
> pos = (u8 *)ioat_chan->descs[chunk].virt + offs;
> phys = ioat_chan->descs[chunk].hw + offs; @@ -370,7 +370,8 @@
> struct ioat_ring_ent **
> if (!ring)
> return NULL;
>
> - ioat_chan->desc_chunks = chunks = (total_descs * IOAT_DESC_SZ) / SZ_2M;
> + chunks = (total_descs * IOAT_DESC_SZ) / IOAT_CHUNK_SIZE;
> + ioat_chan->desc_chunks = chunks;
>
> for (i = 0; i < chunks; i++) {
> struct ioat_descs *descs = &ioat_chan->descs[i]; @@ -382,8 +383,9
> @@ struct ioat_ring_ent **
>
> for (idx = 0; idx < i; idx++) {
> descs = &ioat_chan->descs[idx];
> - dma_free_coherent(to_dev(ioat_chan), SZ_2M,
> - descs->virt, descs->hw);
> + dma_free_coherent(to_dev(ioat_chan),
> + IOAT_CHUNK_SIZE,
> + descs->virt, descs->hw);
> descs->virt = NULL;
> descs->hw = 0;
> }
> @@ -404,7 +406,7 @@ struct ioat_ring_ent **
>
> for (idx = 0; idx < ioat_chan->desc_chunks; idx++) {
> dma_free_coherent(to_dev(ioat_chan),
> - SZ_2M,
> + IOAT_CHUNK_SIZE,
> ioat_chan->descs[idx].virt,
> ioat_chan->descs[idx].hw);
> ioat_chan->descs[idx].virt = NULL; diff --git
> a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index
> b8e8e0b..5216c6b 100644
> --- a/drivers/dma/ioat/dma.h
> +++ b/drivers/dma/ioat/dma.h
> @@ -81,6 +81,11 @@ struct ioatdma_device {
> u32 msixpba;
> };
>
> +#define IOAT_MAX_ORDER 16
> +#define IOAT_MAX_DESCS (1 << IOAT_MAX_ORDER) #define IOAT_CHUNK_SIZE
> +(SZ_2M) #define IOAT_DESCS_PER_CHUNK (IOAT_CHUNK_SIZE / IOAT_DESC_SZ)
> +
> struct ioat_descs {
> void *virt;
> dma_addr_t hw;
> @@ -128,7 +133,7 @@ struct ioatdma_chan {
> u16 produce;
> struct ioat_ring_ent **ring;
> spinlock_t prep_lock;
> - struct ioat_descs descs[2];
> + struct ioat_descs descs[IOAT_MAX_DESCS / IOAT_DESCS_PER_CHUNK];
> int desc_chunks;
> int intr_coalesce;
> int prev_intr_coalesce;
> @@ -301,9 +306,6 @@ static inline bool is_ioat_bug(unsigned long err)
> return !!err;
> }
>
> -#define IOAT_MAX_ORDER 16
> -#define IOAT_MAX_DESCS 65536
> -#define IOAT_DESCS_PER_2M 32768
>
> static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan)
> {
> diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c index
> 60e9afb..58d1356 100644
> --- a/drivers/dma/ioat/init.c
> +++ b/drivers/dma/ioat/init.c
> @@ -651,7 +651,7 @@ static void ioat_free_chan_resources(struct dma_chan *c)
> }
>
> for (i = 0; i < ioat_chan->desc_chunks; i++) {
> - dma_free_coherent(to_dev(ioat_chan), SZ_2M,
> + dma_free_coherent(to_dev(ioat_chan), IOAT_CHUNK_SIZE,
> ioat_chan->descs[i].virt,
> ioat_chan->descs[i].hw);
> ioat_chan->descs[i].virt = NULL;
>
On 02-04-20, 19:33, [email protected] wrote:
> From: Leonid Ravich <[email protected]>
>
> prepare for changing alloc size.
This does not tell what the change is doing. A patch should describe the
change... please explain the change in size here
>
> Acked-by: Dave Jiang <[email protected]>
> Signed-off-by: Leonid Ravich <[email protected]>
> ---
> drivers/dma/ioat/dma.c | 14 ++++++++------
> drivers/dma/ioat/dma.h | 10 ++++++----
> drivers/dma/ioat/init.c | 2 +-
> 3 files changed, 15 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
> index 18c011e..1e0e6c1 100644
> --- a/drivers/dma/ioat/dma.c
> +++ b/drivers/dma/ioat/dma.c
> @@ -332,8 +332,8 @@ static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
> u8 *pos;
> off_t offs;
>
> - chunk = idx / IOAT_DESCS_PER_2M;
> - idx &= (IOAT_DESCS_PER_2M - 1);
> + chunk = idx / IOAT_DESCS_PER_CHUNK;
> + idx &= (IOAT_DESCS_PER_CHUNK - 1);
> offs = idx * IOAT_DESC_SZ;
> pos = (u8 *)ioat_chan->descs[chunk].virt + offs;
> phys = ioat_chan->descs[chunk].hw + offs;
> @@ -370,7 +370,8 @@ struct ioat_ring_ent **
> if (!ring)
> return NULL;
>
> - ioat_chan->desc_chunks = chunks = (total_descs * IOAT_DESC_SZ) / SZ_2M;
> + chunks = (total_descs * IOAT_DESC_SZ) / IOAT_CHUNK_SIZE;
> + ioat_chan->desc_chunks = chunks;
>
> for (i = 0; i < chunks; i++) {
> struct ioat_descs *descs = &ioat_chan->descs[i];
> @@ -382,8 +383,9 @@ struct ioat_ring_ent **
>
> for (idx = 0; idx < i; idx++) {
> descs = &ioat_chan->descs[idx];
> - dma_free_coherent(to_dev(ioat_chan), SZ_2M,
> - descs->virt, descs->hw);
> + dma_free_coherent(to_dev(ioat_chan),
> + IOAT_CHUNK_SIZE,
> + descs->virt, descs->hw);
> descs->virt = NULL;
> descs->hw = 0;
> }
> @@ -404,7 +406,7 @@ struct ioat_ring_ent **
>
> for (idx = 0; idx < ioat_chan->desc_chunks; idx++) {
> dma_free_coherent(to_dev(ioat_chan),
> - SZ_2M,
> + IOAT_CHUNK_SIZE,
> ioat_chan->descs[idx].virt,
> ioat_chan->descs[idx].hw);
> ioat_chan->descs[idx].virt = NULL;
> diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
> index b8e8e0b..5216c6b 100644
> --- a/drivers/dma/ioat/dma.h
> +++ b/drivers/dma/ioat/dma.h
> @@ -81,6 +81,11 @@ struct ioatdma_device {
> u32 msixpba;
> };
>
> +#define IOAT_MAX_ORDER 16
> +#define IOAT_MAX_DESCS (1 << IOAT_MAX_ORDER)
> +#define IOAT_CHUNK_SIZE (SZ_2M)
> +#define IOAT_DESCS_PER_CHUNK (IOAT_CHUNK_SIZE / IOAT_DESC_SZ)
> +
> struct ioat_descs {
> void *virt;
> dma_addr_t hw;
> @@ -128,7 +133,7 @@ struct ioatdma_chan {
> u16 produce;
> struct ioat_ring_ent **ring;
> spinlock_t prep_lock;
> - struct ioat_descs descs[2];
> + struct ioat_descs descs[IOAT_MAX_DESCS / IOAT_DESCS_PER_CHUNK];
> int desc_chunks;
> int intr_coalesce;
> int prev_intr_coalesce;
> @@ -301,9 +306,6 @@ static inline bool is_ioat_bug(unsigned long err)
> return !!err;
> }
>
> -#define IOAT_MAX_ORDER 16
> -#define IOAT_MAX_DESCS 65536
> -#define IOAT_DESCS_PER_2M 32768
>
> static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan)
> {
> diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
> index 60e9afb..58d1356 100644
> --- a/drivers/dma/ioat/init.c
> +++ b/drivers/dma/ioat/init.c
> @@ -651,7 +651,7 @@ static void ioat_free_chan_resources(struct dma_chan *c)
> }
>
> for (i = 0; i < ioat_chan->desc_chunks; i++) {
> - dma_free_coherent(to_dev(ioat_chan), SZ_2M,
> + dma_free_coherent(to_dev(ioat_chan), IOAT_CHUNK_SIZE,
> ioat_chan->descs[i].virt,
> ioat_chan->descs[i].hw);
> ioat_chan->descs[i].virt = NULL;
> --
> 1.9.3
--
~Vinod
On 02-04-20, 19:33, [email protected] wrote:
> From: Leonid Ravich <[email protected]>
>
> current IOAT driver using big (2MB) allocations chunk for its descriptors
> therefore each ioat dma engine need 2 such chunks
> (64k entres in ring each entry 64B = 4MB)
> requiring 2 * 2M * dmaengine contiguies memory chunk
> might fail due to memory fragmention.
This is quite a decent explanation :) please wrap at up to 72 chars to make it a
better read.
>
> so we decreasing chunk size and using more chunks.
>
> Acked-by: Dave Jiang <[email protected]>
> Signed-off-by: Leonid Ravich <[email protected]>
> ---
> drivers/dma/ioat/dma.h | 2 +-
> 1 file changed, 1 insertion(+), 1 deletion(-)
>
> diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
> index 5216c6b..e6b622e 100644
> --- a/drivers/dma/ioat/dma.h
> +++ b/drivers/dma/ioat/dma.h
> @@ -83,7 +83,7 @@ struct ioatdma_device {
>
> #define IOAT_MAX_ORDER 16
> #define IOAT_MAX_DESCS (1 << IOAT_MAX_ORDER)
> -#define IOAT_CHUNK_SIZE (SZ_2M)
> +#define IOAT_CHUNK_SIZE (SZ_512K)
> #define IOAT_DESCS_PER_CHUNK (IOAT_CHUNK_SIZE / IOAT_DESC_SZ)
>
> struct ioat_descs {
> --
> 1.9.3
--
~Vinod
From: Leonid Ravich <[email protected]>
change the macros that assume a chunk size of 2M,
since the chunk size can be a different value;
this prepares for changing the allocation chunk size.
Acked-by: Dave Jiang <[email protected]>
Signed-off-by: Leonid Ravich <[email protected]>
---
Changing in v3:
- Make the commit message more clearer.
drivers/dma/ioat/dma.c | 14 ++++++++------
drivers/dma/ioat/dma.h | 10 ++++++----
drivers/dma/ioat/init.c | 2 +-
3 files changed, 15 insertions(+), 11 deletions(-)
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 18c011e..1e0e6c1 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -332,8 +332,8 @@ static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
u8 *pos;
off_t offs;
- chunk = idx / IOAT_DESCS_PER_2M;
- idx &= (IOAT_DESCS_PER_2M - 1);
+ chunk = idx / IOAT_DESCS_PER_CHUNK;
+ idx &= (IOAT_DESCS_PER_CHUNK - 1);
offs = idx * IOAT_DESC_SZ;
pos = (u8 *)ioat_chan->descs[chunk].virt + offs;
phys = ioat_chan->descs[chunk].hw + offs;
@@ -370,7 +370,8 @@ struct ioat_ring_ent **
if (!ring)
return NULL;
- ioat_chan->desc_chunks = chunks = (total_descs * IOAT_DESC_SZ) / SZ_2M;
+ chunks = (total_descs * IOAT_DESC_SZ) / IOAT_CHUNK_SIZE;
+ ioat_chan->desc_chunks = chunks;
for (i = 0; i < chunks; i++) {
struct ioat_descs *descs = &ioat_chan->descs[i];
@@ -382,8 +383,9 @@ struct ioat_ring_ent **
for (idx = 0; idx < i; idx++) {
descs = &ioat_chan->descs[idx];
- dma_free_coherent(to_dev(ioat_chan), SZ_2M,
- descs->virt, descs->hw);
+ dma_free_coherent(to_dev(ioat_chan),
+ IOAT_CHUNK_SIZE,
+ descs->virt, descs->hw);
descs->virt = NULL;
descs->hw = 0;
}
@@ -404,7 +406,7 @@ struct ioat_ring_ent **
for (idx = 0; idx < ioat_chan->desc_chunks; idx++) {
dma_free_coherent(to_dev(ioat_chan),
- SZ_2M,
+ IOAT_CHUNK_SIZE,
ioat_chan->descs[idx].virt,
ioat_chan->descs[idx].hw);
ioat_chan->descs[idx].virt = NULL;
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index b8e8e0b..5216c6b 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -81,6 +81,11 @@ struct ioatdma_device {
u32 msixpba;
};
+#define IOAT_MAX_ORDER 16
+#define IOAT_MAX_DESCS (1 << IOAT_MAX_ORDER)
+#define IOAT_CHUNK_SIZE (SZ_2M)
+#define IOAT_DESCS_PER_CHUNK (IOAT_CHUNK_SIZE / IOAT_DESC_SZ)
+
struct ioat_descs {
void *virt;
dma_addr_t hw;
@@ -128,7 +133,7 @@ struct ioatdma_chan {
u16 produce;
struct ioat_ring_ent **ring;
spinlock_t prep_lock;
- struct ioat_descs descs[2];
+ struct ioat_descs descs[IOAT_MAX_DESCS / IOAT_DESCS_PER_CHUNK];
int desc_chunks;
int intr_coalesce;
int prev_intr_coalesce;
@@ -301,9 +306,6 @@ static inline bool is_ioat_bug(unsigned long err)
return !!err;
}
-#define IOAT_MAX_ORDER 16
-#define IOAT_MAX_DESCS 65536
-#define IOAT_DESCS_PER_2M 32768
static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan)
{
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 60e9afb..58d1356 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -651,7 +651,7 @@ static void ioat_free_chan_resources(struct dma_chan *c)
}
for (i = 0; i < ioat_chan->desc_chunks; i++) {
- dma_free_coherent(to_dev(ioat_chan), SZ_2M,
+ dma_free_coherent(to_dev(ioat_chan), IOAT_CHUNK_SIZE,
ioat_chan->descs[i].virt,
ioat_chan->descs[i].hw);
ioat_chan->descs[i].virt = NULL;
--
1.9.3
From: Leonid Ravich <[email protected]>
Requiring a kmalloc of 2M has a high chance to fail in
fragmented memory.
The IOAT ring requires 64k * 64B of memory,
which will be achieved by a 512k * 8 allocation
instead of 2M * 2.
Acked-by: Dave Jiang <[email protected]>
Signed-off-by: Leonid Ravich <[email protected]>
---
Changes in v3:
- Make the commit message clearer.
drivers/dma/ioat/dma.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 5216c6b..e6b622e 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -83,7 +83,7 @@ struct ioatdma_device {
#define IOAT_MAX_ORDER 16
#define IOAT_MAX_DESCS (1 << IOAT_MAX_ORDER)
-#define IOAT_CHUNK_SIZE (SZ_2M)
+#define IOAT_CHUNK_SIZE (SZ_512K)
#define IOAT_DESCS_PER_CHUNK (IOAT_CHUNK_SIZE / IOAT_DESC_SZ)
struct ioat_descs {
--
1.9.3
On 16-04-20, 20:06, [email protected] wrote:
> From: Leonid Ravich <[email protected]>
>
> changing macros whose assumption is a chunk size of 2M,
> which can be another size;
> prepare for changing the allocation chunk size.
Applied both, thanks
--
~Vinod
From: Leonid Ravich <[email protected]>
moving duplicate code from timeout error handling to common
function.
Acked-by: Dave Jiang <[email protected]>
Signed-off-by: Leonid Ravich <[email protected]>
---
drivers/dma/ioat/dma.c | 45 +++++++++++++++++++--------------------------
1 file changed, 19 insertions(+), 26 deletions(-)
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 1e0e6c1..da59b28 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -869,6 +869,23 @@ static void check_active(struct ioatdma_chan *ioat_chan)
mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
}
+static void ioat_reboot_chan(struct ioatdma_chan *ioat_chan)
+{
+ spin_lock_bh(&ioat_chan->prep_lock);
+ set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
+ spin_unlock_bh(&ioat_chan->prep_lock);
+
+ ioat_abort_descs(ioat_chan);
+ dev_warn(to_dev(ioat_chan), "Reset channel...\n");
+ ioat_reset_hw(ioat_chan);
+ dev_warn(to_dev(ioat_chan), "Restart channel...\n");
+ ioat_restart_channel(ioat_chan);
+
+ spin_lock_bh(&ioat_chan->prep_lock);
+ clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
+ spin_unlock_bh(&ioat_chan->prep_lock);
+}
+
void ioat_timer_event(struct timer_list *t)
{
struct ioatdma_chan *ioat_chan = from_timer(ioat_chan, t, timer);
@@ -891,19 +908,7 @@ void ioat_timer_event(struct timer_list *t)
if (test_bit(IOAT_RUN, &ioat_chan->state)) {
spin_lock_bh(&ioat_chan->cleanup_lock);
- spin_lock_bh(&ioat_chan->prep_lock);
- set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
- spin_unlock_bh(&ioat_chan->prep_lock);
-
- ioat_abort_descs(ioat_chan);
- dev_warn(to_dev(ioat_chan), "Reset channel...\n");
- ioat_reset_hw(ioat_chan);
- dev_warn(to_dev(ioat_chan), "Restart channel...\n");
- ioat_restart_channel(ioat_chan);
-
- spin_lock_bh(&ioat_chan->prep_lock);
- clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
- spin_unlock_bh(&ioat_chan->prep_lock);
+ ioat_reboot_chan(ioat_chan);
spin_unlock_bh(&ioat_chan->cleanup_lock);
}
@@ -939,19 +944,7 @@ void ioat_timer_event(struct timer_list *t)
dev_dbg(to_dev(ioat_chan), "Active descriptors: %d\n",
ioat_ring_active(ioat_chan));
- spin_lock_bh(&ioat_chan->prep_lock);
- set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
- spin_unlock_bh(&ioat_chan->prep_lock);
-
- ioat_abort_descs(ioat_chan);
- dev_warn(to_dev(ioat_chan), "Resetting channel...\n");
- ioat_reset_hw(ioat_chan);
- dev_warn(to_dev(ioat_chan), "Restarting channel...\n");
- ioat_restart_channel(ioat_chan);
-
- spin_lock_bh(&ioat_chan->prep_lock);
- clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
- spin_unlock_bh(&ioat_chan->prep_lock);
+ ioat_reboot_chan(ioat_chan);
spin_unlock_bh(&ioat_chan->cleanup_lock);
return;
} else
--
1.9.3