2022-06-08 08:51:45

by Chris Lew

Subject: [PATCH 0/4] Introduction of rpmsg_rx_done

This series proposes an implementation for the rpmsg framework to do
deferred cleanup of buffers provided in the rx callback. The current
implementation assumes that the client is done with the buffer after
returning from the rx callback.

In some cases where the data size is large, the client may want to
avoid copying the data in the rx callback for later processing. This
series proposes two new facilities for a client to signal that it
wants to hold on to a buffer after the rx callback.
They are:
- New API rpmsg_rx_done() to tell the rpmsg framework the client is
done with the buffer
- New return codes for the rx callback to signal that the client will
hold onto a buffer and later call rpmsg_rx_done()

This series implements the qcom_glink_native backend for these new
facilities.
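
As an example of how a client might use these facilities, a rough
sketch (the my_* names are made up for illustration, and the core
rpmsg_rx_done() signature is assumed to mirror the rx_done endpoint
op added by the core patch):

static int my_rx_cb(struct rpmsg_device *rpdev, void *data, int len,
		    void *priv, u32 addr)
{
	struct my_ctx *ctx = priv;

	/* Queue the buffer for later processing instead of copying it */
	my_queue_buf(ctx, data, len);

	/* Tell the rpmsg framework the buffer will be released later */
	return RPMSG_DEFER;
}

/* ... and once the client is done with the payload ... */
rpmsg_rx_done(ept, data);

In the glink implementation, rpmsg_rx_done() fails with -EINVAL if the
buffer was not previously deferred.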

Chris Lew (4):
rpmsg: core: Add rx done hooks
rpmsg: char: Add support to use rpmsg_rx_done
rpmsg: glink: Try to send rx done in irq
rpmsg: glink: Add support for rpmsg_rx_done

drivers/rpmsg/qcom_glink_native.c | 112 ++++++++++++++++++++++++++++++--------
drivers/rpmsg/rpmsg_char.c        |  50 ++++++++++++++++-
drivers/rpmsg/rpmsg_core.c        |  20 +++++++
drivers/rpmsg/rpmsg_internal.h    |   1 +
include/linux/rpmsg.h             |  24 ++++++++
5 files changed, 183 insertions(+), 24 deletions(-)

--
2.7.4


2022-06-08 08:52:33

by Chris Lew

Subject: [PATCH 4/4] rpmsg: glink: Add support for rpmsg_rx_done

Add the implementation for the hooks of rpmsg_rx_done. If a client
signals that it wants to hold on to a buffer by returning RPMSG_DEFER
from the rx callback, glink will move that intent to a deferred cleanup
list. On the new rpmsg rx_done call, the glink transport will search
this deferred cleanup list for the matching buffer and release the
intent.

Signed-off-by: Chris Lew <[email protected]>
---
drivers/rpmsg/qcom_glink_native.c | 54 ++++++++++++++++++++++++++++++++++++---
1 file changed, 51 insertions(+), 3 deletions(-)

diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index 799e602113a1..db0dcc04f393 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -146,6 +146,7 @@ enum {
* @riids: idr of all remote intents
* @intent_work: worker responsible for transmitting rx_done packets
* @done_intents: list of intents that needs to be announced rx_done
+ * @defer_intents: list of intents held by the client released by rpmsg_rx_done
* @buf: receive buffer, for gathering fragments
* @buf_offset: write offset in @buf
* @buf_size: size of current @buf
@@ -174,6 +175,7 @@ struct glink_channel {
struct idr riids;
struct work_struct intent_work;
struct list_head done_intents;
+ struct list_head defer_intents;

struct glink_core_rx_intent *buf;
int buf_offset;
@@ -232,6 +234,7 @@ static struct glink_channel *qcom_glink_alloc_channel(struct qcom_glink *glink,
init_completion(&channel->intent_req_comp);

INIT_LIST_HEAD(&channel->done_intents);
+ INIT_LIST_HEAD(&channel->defer_intents);
INIT_WORK(&channel->intent_work, qcom_glink_rx_done_work);

idr_init(&channel->liids);
@@ -261,6 +264,12 @@ static void qcom_glink_channel_release(struct kref *ref)
kfree(intent);
}
}
+ list_for_each_entry_safe(intent, tmp, &channel->defer_intents, node) {
+ if (!intent->reuse) {
+ kfree(intent->data);
+ kfree(intent);
+ }
+ }

idr_for_each_entry(&channel->liids, tmp, iid) {
kfree(tmp->data);
@@ -549,9 +558,10 @@ static void qcom_glink_rx_done_work(struct work_struct *work)
spin_unlock_irqrestore(&channel->intent_lock, flags);
}

-static void qcom_glink_rx_done(struct qcom_glink *glink,
+static void __qcom_glink_rx_done(struct qcom_glink *glink,
struct glink_channel *channel,
- struct glink_core_rx_intent *intent)
+ struct glink_core_rx_intent *intent,
+ bool defer)
{
int ret = -EAGAIN;

@@ -569,6 +579,14 @@ static void qcom_glink_rx_done(struct qcom_glink *glink,
spin_unlock(&channel->intent_lock);
}

+ /* Move intent to defer list until client calls rpmsg_rx_done */
+ if (defer) {
+ spin_lock(&channel->intent_lock);
+ list_add_tail(&intent->node, &channel->defer_intents);
+ spin_unlock(&channel->intent_lock);
+ return;
+ }
+
/* Schedule the sending of a rx_done indication */
spin_lock(&channel->intent_lock);
if (list_empty(&channel->done_intents))
@@ -581,6 +599,28 @@ static void qcom_glink_rx_done(struct qcom_glink *glink,
spin_unlock(&channel->intent_lock);
}

+static int qcom_glink_rx_done(struct rpmsg_endpoint *ept, void *data)
+{
+ struct glink_channel *channel = to_glink_channel(ept);
+ struct qcom_glink *glink = channel->glink;
+ struct glink_core_rx_intent *intent, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&channel->intent_lock, flags);
+ list_for_each_entry_safe(intent, tmp, &channel->defer_intents, node) {
+ if (intent->data == data) {
+ list_del(&intent->node);
+ spin_unlock_irqrestore(&channel->intent_lock, flags);
+
+ qcom_glink_send_rx_done(glink, channel, intent, true);
+ return 0;
+ }
+ }
+ spin_unlock_irqrestore(&channel->intent_lock, flags);
+
+ return -EINVAL;
+}
+
/**
* qcom_glink_receive_version() - receive version/features from remote system
*
@@ -841,6 +881,7 @@ static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
} __packed hdr;
unsigned int chunk_size;
unsigned int left_size;
+ bool rx_done_defer;
unsigned int rcid;
unsigned int liid;
int ret = 0;
@@ -935,7 +976,12 @@ static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
intent->offset = 0;
channel->buf = NULL;

- qcom_glink_rx_done(glink, channel, intent);
+ if (channel->ept.rx_done && ret == RPMSG_DEFER)
+ rx_done_defer = true;
+ else
+ rx_done_defer = false;
+
+ __qcom_glink_rx_done(glink, channel, intent, rx_done_defer);
}

advance_rx:
@@ -1212,6 +1258,7 @@ static struct rpmsg_endpoint *qcom_glink_create_ept(struct rpmsg_device *rpdev,
ept->cb = cb;
ept->priv = priv;
ept->ops = &glink_endpoint_ops;
+ ept->rx_done = true;

return ept;
}
@@ -1462,6 +1509,7 @@ static const struct rpmsg_endpoint_ops glink_endpoint_ops = {
.sendto = qcom_glink_sendto,
.trysend = qcom_glink_trysend,
.trysendto = qcom_glink_trysendto,
+ .rx_done = qcom_glink_rx_done,
};

static void qcom_glink_rpdev_release(struct device *dev)
--
2.7.4

2022-07-18 08:28:30

by Arnaud POULIQUEN

Subject: Re: [PATCH 0/4] Introduction of rpmsg_rx_done

Hello Chris,

On 6/8/22 03:16, Chris Lew wrote:
> This series proposes an implementation for the rpmsg framework to do
> deferred cleanup of buffers provided in the rx callback. The current
> implementation assumes that the client is done with the buffer after
> returning from the rx callback.
>
> In some cases where the data size is large, the client may want to
> avoid copying the data in the rx callback for later processing. This
> series proposes two new facilities for a client to signal that it
> wants to hold on to a buffer after the rx callback.
> They are:
> - New API rpmsg_rx_done() to tell the rpmsg framework the client is
> done with the buffer
> - New return codes for the rx callback to signal that the client will
> hold onto a buffer and later call rpmsg_rx_done()
>
> This series implements the qcom_glink_native backend for these new
> facilities.

The API you proposed seems to me quite smart and adaptable to the rpmsg
virtio backend.

My main concern is about the release of the buffer when the endpoint
is destroyed.

Should the buffer release be handled by each service or by the
core?

I wonder if the buffer list could be managed by the core by adding
the list to the rpmsg_endpoint structure. On destroy, the core could
call rx_done for each remaining buffer in the list...
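
Something like this rough sketch (untested; struct rpmsg_deferred_buf
and the deferred_bufs list head on struct rpmsg_endpoint are made up
for illustration):

	/* in rpmsg_destroy_ept(), before calling ept->ops->destroy_ept() */
	struct rpmsg_deferred_buf *buf, *tmp;

	list_for_each_entry_safe(buf, tmp, &ept->deferred_bufs, node) {
		list_del(&buf->node);
		/* hand each still-held buffer back to the transport */
		ept->ops->rx_done(ept, buf->data);
	}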

I let Bjorn and Mathieu advise on this...

Thanks,
Arnaud

>
> Chris Lew (4):
> rpmsg: core: Add rx done hooks
> rpmsg: char: Add support to use rpmsg_rx_done
> rpmsg: glink: Try to send rx done in irq
> rpmsg: glink: Add support for rpmsg_rx_done
>
> drivers/rpmsg/qcom_glink_native.c | 112 ++++++++++++++++++++++++++++++--------
> drivers/rpmsg/rpmsg_char.c        |  50 ++++++++++++++++-
> drivers/rpmsg/rpmsg_core.c        |  20 +++++++
> drivers/rpmsg/rpmsg_internal.h    |   1 +
> include/linux/rpmsg.h             |  24 ++++++++
> 5 files changed, 183 insertions(+), 24 deletions(-)
>

2022-07-18 16:57:24

by Mathieu Poirier

Subject: Re: [PATCH 0/4] Introduction of rpmsg_rx_done

On Mon, 18 Jul 2022 at 02:26, Arnaud POULIQUEN
<[email protected]> wrote:
>
> Hello Chris,
>
> On 6/8/22 03:16, Chris Lew wrote:
> > This series proposes an implementation for the rpmsg framework to do
> > deferred cleanup of buffers provided in the rx callback. The current
> > implementation assumes that the client is done with the buffer after
> > returning from the rx callback.
> >
> > In some cases where the data size is large, the client may want to
> > avoid copying the data in the rx callback for later processing. This
> > series proposes two new facilities for a client to signal that it
> > wants to hold on to a buffer after the rx callback.
> > They are:
> > - New API rpmsg_rx_done() to tell the rpmsg framework the client is
> > done with the buffer
> > - New return codes for the rx callback to signal that the client will
> > hold onto a buffer and later call rpmsg_rx_done()
> >
> > This series implements the qcom_glink_native backend for these new
> > facilities.
>
> The API you proposed seems to me quite smart and adaptable to the rpmsg
> virtio backend.
>
> My main concern is about the release of the buffer when the endpoint
> is destroyed.
>
> Should the buffer release be handled by each service or by the
> core?
>
> I wonder if the buffer list could be managed by the core by adding
> the list to the rpmsg_endpoint structure. On destroy, the core could
> call rx_done for each remaining buffer in the list...
>
> I let Bjorn and Mathieu advise on this...

Thanks for taking a look, Arnaud. I'll get to this shortly.

>
> Thanks,
> Arnaud
>
> >
> > Chris Lew (4):
> > rpmsg: core: Add rx done hooks
> > rpmsg: char: Add support to use rpmsg_rx_done
> > rpmsg: glink: Try to send rx done in irq
> > rpmsg: glink: Add support for rpmsg_rx_done
> >
> > drivers/rpmsg/qcom_glink_native.c | 112 ++++++++++++++++++++++++++++++--------
> > drivers/rpmsg/rpmsg_char.c        |  50 ++++++++++++++++-
> > drivers/rpmsg/rpmsg_core.c        |  20 +++++++
> > drivers/rpmsg/rpmsg_internal.h    |   1 +
> > include/linux/rpmsg.h             |  24 ++++++++
> > 5 files changed, 183 insertions(+), 24 deletions(-)
> >

2022-07-26 18:12:39

by Mathieu Poirier

Subject: Re: [PATCH 0/4] Introduction of rpmsg_rx_done

On Tue, Jun 07, 2022 at 06:16:41PM -0700, Chris Lew wrote:
> This series proposes an implementation for the rpmsg framework to do
> deferred cleanup of buffers provided in the rx callback. The current
> implementation assumes that the client is done with the buffer after
> returning from the rx callback.
>
> In some cases where the data size is large, the client may want to
> avoid copying the data in the rx callback for later processing. This
> series proposes two new facilities for a client to signal that it
> wants to hold on to a buffer after the rx callback.
> They are:
> - New API rpmsg_rx_done() to tell the rpmsg framework the client is
> done with the buffer
> - New return codes for the rx callback to signal that the client will
> hold onto a buffer and later call rpmsg_rx_done()
>
> This series implements the qcom_glink_native backend for these new
> facilities.
>
> Chris Lew (4):
> rpmsg: core: Add rx done hooks
> rpmsg: char: Add support to use rpmsg_rx_done
> rpmsg: glink: Try to send rx done in irq
> rpmsg: glink: Add support for rpmsg_rx_done
>
> drivers/rpmsg/qcom_glink_native.c | 112 ++++++++++++++++++++++++++++++--------
> drivers/rpmsg/rpmsg_char.c        |  50 ++++++++++++++++-
> drivers/rpmsg/rpmsg_core.c        |  20 +++++++
> drivers/rpmsg/rpmsg_internal.h    |   1 +
> include/linux/rpmsg.h             |  24 ++++++++
> 5 files changed, 183 insertions(+), 24 deletions(-)

I have started reviewing this set. Comments to come later today or tomorrow.

Thanks,
Mathieu

>
> --
> 2.7.4
>

2022-07-27 18:35:05

by Mathieu Poirier

Subject: Re: [PATCH 0/4] Introduction of rpmsg_rx_done

On Mon, Jul 18, 2022 at 10:54:30AM -0600, Mathieu Poirier wrote:
> On Mon, 18 Jul 2022 at 02:26, Arnaud POULIQUEN
> <[email protected]> wrote:
> >
> > Hello Chris,
> >
> > On 6/8/22 03:16, Chris Lew wrote:
> > > This series proposes an implementation for the rpmsg framework to do
> > > deferred cleanup of buffers provided in the rx callback. The current
> > > implementation assumes that the client is done with the buffer after
> > > returning from the rx callback.
> > >
> > > In some cases where the data size is large, the client may want to
> > > avoid copying the data in the rx callback for later processing. This
> > > series proposes two new facilities for a client to signal that it
> > > wants to hold on to a buffer after the rx callback.
> > > They are:
> > > - New API rpmsg_rx_done() to tell the rpmsg framework the client is
> > > done with the buffer
> > > - New return codes for the rx callback to signal that the client will
> > > hold onto a buffer and later call rpmsg_rx_done()
> > >
> > > This series implements the qcom_glink_native backend for these new
> > > facilities.
> >
> > The API you proposed seems to me quite smart and adaptable to the rpmsg
> > virtio backend.
> >
> > My main concern is about the release of the buffer when the endpoint
> > is destroyed.
> >
> > Should the buffer release be handled by each service or by the
> > core?
> >
> > I wonder if the buffer list could be managed by the core by adding
> > the list to the rpmsg_endpoint structure. On destroy, the core could
> > call rx_done for each remaining buffer in the list...

Arnaud has a valid point, though rpmsg_endpoint_ops::destroy_ept() is there for
this kind of cleanup (and this patchset is making use of it).
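
For reference, patch 4 already drains the defer list in the channel
release path (reached when destroy_ept() drops the last reference),
roughly:

	list_for_each_entry_safe(intent, tmp, &channel->defer_intents, node) {
		if (!intent->reuse) {
			kfree(intent->data);
			kfree(intent);
		}
	}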

I think we can leave things as they are now and consider moving to the core if
we see a trend in future submissions.

Thanks,
Mathieu

> >
> > I let Bjorn and Mathieu advise on this...
>
> Thanks for taking a look, Arnaud. I'll get to this shortly.
>
> >
> > Thanks,
> > Arnaud
> >
> > >
> > > Chris Lew (4):
> > > rpmsg: core: Add rx done hooks
> > > rpmsg: char: Add support to use rpmsg_rx_done
> > > rpmsg: glink: Try to send rx done in irq
> > > rpmsg: glink: Add support for rpmsg_rx_done
> > >
> > > drivers/rpmsg/qcom_glink_native.c | 112 ++++++++++++++++++++++++++++++--------
> > > drivers/rpmsg/rpmsg_char.c        |  50 ++++++++++++++++-
> > > drivers/rpmsg/rpmsg_core.c        |  20 +++++++
> > > drivers/rpmsg/rpmsg_internal.h    |   1 +
> > > include/linux/rpmsg.h             |  24 ++++++++
> > > 5 files changed, 183 insertions(+), 24 deletions(-)
> > >

2022-07-27 19:24:26

by Mathieu Poirier

Subject: Re: [PATCH 4/4] rpmsg: glink: Add support for rpmsg_rx_done

On Tue, Jun 07, 2022 at 06:16:45PM -0700, Chris Lew wrote:
> Add the implementation for the hooks of rpmsg_rx_done. If a client
> signals that it wants to hold on to a buffer by returning RPMSG_DEFER
> from the rx callback, glink will move that intent to a deferred cleanup
> list. On the new rpmsg rx_done call, the glink transport will search
> this deferred cleanup list for the matching buffer and release the
> intent.
>
> Signed-off-by: Chris Lew <[email protected]>
> ---
> drivers/rpmsg/qcom_glink_native.c | 54 ++++++++++++++++++++++++++++++++++++---
> 1 file changed, 51 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
> index 799e602113a1..db0dcc04f393 100644
> --- a/drivers/rpmsg/qcom_glink_native.c
> +++ b/drivers/rpmsg/qcom_glink_native.c
> @@ -146,6 +146,7 @@ enum {
> * @riids: idr of all remote intents
> * @intent_work: worker responsible for transmitting rx_done packets
> * @done_intents: list of intents that needs to be announced rx_done
> + * @defer_intents: list of intents held by the client released by rpmsg_rx_done
> * @buf: receive buffer, for gathering fragments
> * @buf_offset: write offset in @buf
> * @buf_size: size of current @buf
> @@ -174,6 +175,7 @@ struct glink_channel {
> struct idr riids;
> struct work_struct intent_work;
> struct list_head done_intents;
> + struct list_head defer_intents;
>
> struct glink_core_rx_intent *buf;
> int buf_offset;
> @@ -232,6 +234,7 @@ static struct glink_channel *qcom_glink_alloc_channel(struct qcom_glink *glink,
> init_completion(&channel->intent_req_comp);
>
> INIT_LIST_HEAD(&channel->done_intents);
> + INIT_LIST_HEAD(&channel->defer_intents);
> INIT_WORK(&channel->intent_work, qcom_glink_rx_done_work);
>
> idr_init(&channel->liids);
> @@ -261,6 +264,12 @@ static void qcom_glink_channel_release(struct kref *ref)
> kfree(intent);
> }
> }
> + list_for_each_entry_safe(intent, tmp, &channel->defer_intents, node) {
> + if (!intent->reuse) {
> + kfree(intent->data);
> + kfree(intent);
> + }
> + }
>
> idr_for_each_entry(&channel->liids, tmp, iid) {
> kfree(tmp->data);
> @@ -549,9 +558,10 @@ static void qcom_glink_rx_done_work(struct work_struct *work)
> spin_unlock_irqrestore(&channel->intent_lock, flags);
> }
>
> -static void qcom_glink_rx_done(struct qcom_glink *glink,
> +static void __qcom_glink_rx_done(struct qcom_glink *glink,
> struct glink_channel *channel,
> - struct glink_core_rx_intent *intent)
> + struct glink_core_rx_intent *intent,
> + bool defer)
> {
> int ret = -EAGAIN;
>
> @@ -569,6 +579,14 @@ static void qcom_glink_rx_done(struct qcom_glink *glink,
> spin_unlock(&channel->intent_lock);
> }
>
> + /* Move intent to defer list until client calls rpmsg_rx_done */
> + if (defer) {
> + spin_lock(&channel->intent_lock);
> + list_add_tail(&intent->node, &channel->defer_intents);
> + spin_unlock(&channel->intent_lock);
> + return;
> + }
> +
> /* Schedule the sending of a rx_done indication */
> spin_lock(&channel->intent_lock);
> if (list_empty(&channel->done_intents))
> @@ -581,6 +599,28 @@ static void qcom_glink_rx_done(struct qcom_glink *glink,
> spin_unlock(&channel->intent_lock);
> }
>
> +static int qcom_glink_rx_done(struct rpmsg_endpoint *ept, void *data)
> +{
> + struct glink_channel *channel = to_glink_channel(ept);
> + struct qcom_glink *glink = channel->glink;
> + struct glink_core_rx_intent *intent, *tmp;
> + unsigned long flags;
> +
> + spin_lock_irqsave(&channel->intent_lock, flags);
> + list_for_each_entry_safe(intent, tmp, &channel->defer_intents, node) {
> + if (intent->data == data) {
> + list_del(&intent->node);
> + spin_unlock_irqrestore(&channel->intent_lock, flags);
> +
> + qcom_glink_send_rx_done(glink, channel, intent, true);
> + return 0;
> + }
> + }
> + spin_unlock_irqrestore(&channel->intent_lock, flags);
> +
> + return -EINVAL;
> +}
> +
> /**
> * qcom_glink_receive_version() - receive version/features from remote system
> *
> @@ -841,6 +881,7 @@ static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
> } __packed hdr;
> unsigned int chunk_size;
> unsigned int left_size;
> + bool rx_done_defer;
> unsigned int rcid;
> unsigned int liid;
> int ret = 0;
> @@ -935,7 +976,12 @@ static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
> intent->offset = 0;
> channel->buf = NULL;
>
> - qcom_glink_rx_done(glink, channel, intent);
> + if (channel->ept.rx_done && ret == RPMSG_DEFER)

I don't see where @ret could be set to RPMSG_DEFER in this function...
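
Presumably the return value of the client callback needs to be captured
further up in qcom_glink_rx_data(), something like (untested):

	if (channel->ept.cb) {
		ret = channel->ept.cb(channel->ept.rpdev,
				      intent->data,
				      intent->offset,
				      channel->ept.priv,
				      RPMSG_ADDR_ANY);
	}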

Thanks,
Mathieu


> + rx_done_defer = true;
> + else
> + rx_done_defer = false;
> +
> + __qcom_glink_rx_done(glink, channel, intent, rx_done_defer);
> }
>
> advance_rx:
> @@ -1212,6 +1258,7 @@ static struct rpmsg_endpoint *qcom_glink_create_ept(struct rpmsg_device *rpdev,
> ept->cb = cb;
> ept->priv = priv;
> ept->ops = &glink_endpoint_ops;
> + ept->rx_done = true;
>
> return ept;
> }
> @@ -1462,6 +1509,7 @@ static const struct rpmsg_endpoint_ops glink_endpoint_ops = {
> .sendto = qcom_glink_sendto,
> .trysend = qcom_glink_trysend,
> .trysendto = qcom_glink_trysendto,
> + .rx_done = qcom_glink_rx_done,
> };
>
> static void qcom_glink_rpdev_release(struct device *dev)
> --
> 2.7.4
>