CPTS module supports capturing a timestamp for every packet it receives,
add a new function named "am65_cpts_find_rx_ts()" to get the timestamp
of received packets from the CPTS FIFO.
Add another function named "am65_cpts_rx_timestamp()" which internally
calls "am65_cpts_find_rx_ts()" function and timestamps the received
PTP packets.
Signed-off-by: Chintan Vankar <[email protected]>
---
Changes from v1 to v2:
- Add skb_reset_mac_header() in am65_cpts_rx_timestamp() to configure
skb MAC header properties for packets which are to be timestamped.
drivers/net/ethernet/ti/am65-cpts.c | 89 +++++++++++++++++++++--------
drivers/net/ethernet/ti/am65-cpts.h | 11 ++--
2 files changed, 72 insertions(+), 28 deletions(-)
diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
index c66618d91c28..6c1d571c5e0b 100644
--- a/drivers/net/ethernet/ti/am65-cpts.c
+++ b/drivers/net/ethernet/ti/am65-cpts.c
@@ -859,29 +859,6 @@ static long am65_cpts_ts_work(struct ptp_clock_info *ptp)
return delay;
}
-/**
- * am65_cpts_rx_enable - enable rx timestamping
- * @cpts: cpts handle
- * @en: enable
- *
- * This functions enables rx packets timestamping. The CPTS can timestamp all
- * rx packets.
- */
-void am65_cpts_rx_enable(struct am65_cpts *cpts, bool en)
-{
- u32 val;
-
- mutex_lock(&cpts->ptp_clk_lock);
- val = am65_cpts_read32(cpts, control);
- if (en)
- val |= AM65_CPTS_CONTROL_TSTAMP_EN;
- else
- val &= ~AM65_CPTS_CONTROL_TSTAMP_EN;
- am65_cpts_write32(cpts, val, control);
- mutex_unlock(&cpts->ptp_clk_lock);
-}
-EXPORT_SYMBOL_GPL(am65_cpts_rx_enable);
-
static int am65_skb_get_mtype_seqid(struct sk_buff *skb, u32 *mtype_seqid)
{
unsigned int ptp_class = ptp_classify_raw(skb);
@@ -906,6 +883,72 @@ static int am65_skb_get_mtype_seqid(struct sk_buff *skb, u32 *mtype_seqid)
return 1;
}
+static u64 am65_cpts_find_rx_ts(struct am65_cpts *cpts, struct sk_buff *skb,
+ int ev_type, u32 skb_mtype_seqid)
+{
+ struct list_head *this, *next;
+ struct am65_cpts_event *event;
+ unsigned long flags;
+ u32 mtype_seqid;
+ u64 ns = 0;
+
+ am65_cpts_fifo_read(cpts);
+ spin_lock_irqsave(&cpts->lock, flags);
+ list_for_each_safe(this, next, &cpts->events) {
+ event = list_entry(this, struct am65_cpts_event, list);
+ if (time_after(jiffies, event->tmo)) {
+ list_del_init(&event->list);
+ list_add(&event->list, &cpts->pool);
+ continue;
+ }
+
+ mtype_seqid = event->event1 &
+ (AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK |
+ AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK |
+ AM65_CPTS_EVENT_1_EVENT_TYPE_MASK);
+
+ if (mtype_seqid == skb_mtype_seqid) {
+ ns = event->timestamp;
+ list_del_init(&event->list);
+ list_add(&event->list, &cpts->pool);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ return ns;
+}
+
+void am65_cpts_rx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb)
+{
+ struct am65_cpts_skb_cb_data *skb_cb = (struct am65_cpts_skb_cb_data *)skb->cb;
+ struct skb_shared_hwtstamps *ssh;
+ int ret;
+ u64 ns;
+
+ /* am65_cpts_rx_timestamp() is called before eth_type_trans(), so
+ * skb MAC Hdr properties are not configured yet. Hence need to
+ * reset skb MAC header here
+ */
+ skb_reset_mac_header(skb);
+ ret = am65_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid);
+ if (!ret)
+ return; /* if not PTP class packet */
+
+ skb_cb->skb_mtype_seqid |= (AM65_CPTS_EV_RX << AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT);
+
+ dev_dbg(cpts->dev, "%s mtype seqid %08x\n", __func__, skb_cb->skb_mtype_seqid);
+
+ ns = am65_cpts_find_rx_ts(cpts, skb, AM65_CPTS_EV_RX, skb_cb->skb_mtype_seqid);
+ if (!ns)
+ return;
+
+ ssh = skb_hwtstamps(skb);
+ memset(ssh, 0, sizeof(*ssh));
+ ssh->hwtstamp = ns_to_ktime(ns);
+}
+EXPORT_SYMBOL_GPL(am65_cpts_rx_timestamp);
+
/**
* am65_cpts_tx_timestamp - save tx packet for timestamping
* @cpts: cpts handle
diff --git a/drivers/net/ethernet/ti/am65-cpts.h b/drivers/net/ethernet/ti/am65-cpts.h
index 6e14df0be113..6099d772799d 100644
--- a/drivers/net/ethernet/ti/am65-cpts.h
+++ b/drivers/net/ethernet/ti/am65-cpts.h
@@ -22,9 +22,9 @@ void am65_cpts_release(struct am65_cpts *cpts);
struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
struct device_node *node);
int am65_cpts_phc_index(struct am65_cpts *cpts);
+void am65_cpts_rx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb);
void am65_cpts_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb);
void am65_cpts_prep_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb);
-void am65_cpts_rx_enable(struct am65_cpts *cpts, bool en);
u64 am65_cpts_ns_gettime(struct am65_cpts *cpts);
int am65_cpts_estf_enable(struct am65_cpts *cpts, int idx,
struct am65_cpts_estf_cfg *cfg);
@@ -48,17 +48,18 @@ static inline int am65_cpts_phc_index(struct am65_cpts *cpts)
return -1;
}
-static inline void am65_cpts_tx_timestamp(struct am65_cpts *cpts,
+static inline void am65_cpts_rx_timestamp(struct am65_cpts *cpts,
struct sk_buff *skb)
{
}
-static inline void am65_cpts_prep_tx_timestamp(struct am65_cpts *cpts,
- struct sk_buff *skb)
+static inline void am65_cpts_tx_timestamp(struct am65_cpts *cpts,
+ struct sk_buff *skb)
{
}
-static inline void am65_cpts_rx_enable(struct am65_cpts *cpts, bool en)
+static inline void am65_cpts_prep_tx_timestamp(struct am65_cpts *cpts,
+ struct sk_buff *skb)
{
}
--
2.34.1
On Tue, 2024-03-12 at 15:32 +0530, Chintan Vankar wrote:
> diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
> index c66618d91c28..6c1d571c5e0b 100644
> --- a/drivers/net/ethernet/ti/am65-cpts.c
> +++ b/drivers/net/ethernet/ti/am65-cpts.c
> @@ -859,29 +859,6 @@ static long am65_cpts_ts_work(struct ptp_clock_info *ptp)
> return delay;
> }
>
> -/**
> - * am65_cpts_rx_enable - enable rx timestamping
> - * @cpts: cpts handle
> - * @en: enable
> - *
> - * This functions enables rx packets timestamping. The CPTS can timestamp all
> - * rx packets.
> - */
> -void am65_cpts_rx_enable(struct am65_cpts *cpts, bool en)
> -{
> - u32 val;
> -
> - mutex_lock(&cpts->ptp_clk_lock);
> - val = am65_cpts_read32(cpts, control);
> - if (en)
> - val |= AM65_CPTS_CONTROL_TSTAMP_EN;
> - else
> - val &= ~AM65_CPTS_CONTROL_TSTAMP_EN;
> - am65_cpts_write32(cpts, val, control);
> - mutex_unlock(&cpts->ptp_clk_lock);
> -}
> -EXPORT_SYMBOL_GPL(am65_cpts_rx_enable);
It looks like the above chunk will cause a transient build break, as
the function is still in use and the caller will be removed by the next
patch. I guess you should move this chunk there.
> -
> static int am65_skb_get_mtype_seqid(struct sk_buff *skb, u32 *mtype_seqid)
> {
> unsigned int ptp_class = ptp_classify_raw(skb);
> @@ -906,6 +883,72 @@ static int am65_skb_get_mtype_seqid(struct sk_buff *skb, u32 *mtype_seqid)
> return 1;
> }
>
> +static u64 am65_cpts_find_rx_ts(struct am65_cpts *cpts, struct sk_buff *skb,
> + int ev_type, u32 skb_mtype_seqid)
> +{
> + struct list_head *this, *next;
> + struct am65_cpts_event *event;
> + unsigned long flags;
> + u32 mtype_seqid;
> + u64 ns = 0;
> +
> + am65_cpts_fifo_read(cpts);
> + spin_lock_irqsave(&cpts->lock, flags);
> + list_for_each_safe(this, next, &cpts->events) {
> + event = list_entry(this, struct am65_cpts_event, list);
> + if (time_after(jiffies, event->tmo)) {
> + list_del_init(&event->list);
> + list_add(&event->list, &cpts->pool);
> + continue;
> + }
> +
> + mtype_seqid = event->event1 &
> + (AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK |
> + AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK |
> + AM65_CPTS_EVENT_1_EVENT_TYPE_MASK);
> +
> + if (mtype_seqid == skb_mtype_seqid) {
> + ns = event->timestamp;
> + list_del_init(&event->list);
> + list_add(&event->list, &cpts->pool);
> + break;
> + }
> + }
> + spin_unlock_irqrestore(&cpts->lock, flags);
Ouch, you have to acquire an additional lock per packet and a lot of
cacheline dithering.
Not strictly necessary for this series, but I suggest to invest some
time reconsidering this schema, it looks bad from performance pov.
Possibly protecting the list with RCU and leaving the recycle to the
producer could help.
Additionally net-next is currently closed for the merge window, please
post the new revision (including the target tree in the subj prefix)
when net-next will re-open in ~2weeks.
Cheers,
Paolo
On 12/03/24 16:20, Paolo Abeni wrote:
> On Tue, 2024-03-12 at 15:32 +0530, Chintan Vankar wrote:
>> diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
>> index c66618d91c28..6c1d571c5e0b 100644
>> --- a/drivers/net/ethernet/ti/am65-cpts.c
>> +++ b/drivers/net/ethernet/ti/am65-cpts.c
>> @@ -859,29 +859,6 @@ static long am65_cpts_ts_work(struct ptp_clock_info *ptp)
>> return delay;
>> }
>>
>> -/**
>> - * am65_cpts_rx_enable - enable rx timestamping
>> - * @cpts: cpts handle
>> - * @en: enable
>> - *
>> - * This functions enables rx packets timestamping. The CPTS can timestamp all
>> - * rx packets.
>> - */
>> -void am65_cpts_rx_enable(struct am65_cpts *cpts, bool en)
>> -{
>> - u32 val;
>> -
>> - mutex_lock(&cpts->ptp_clk_lock);
>> - val = am65_cpts_read32(cpts, control);
>> - if (en)
>> - val |= AM65_CPTS_CONTROL_TSTAMP_EN;
>> - else
>> - val &= ~AM65_CPTS_CONTROL_TSTAMP_EN;
>> - am65_cpts_write32(cpts, val, control);
>> - mutex_unlock(&cpts->ptp_clk_lock);
>> -}
>> -EXPORT_SYMBOL_GPL(am65_cpts_rx_enable);
>
> It looks like the above chunk will cause a transient build break, as
> the function is still in use and the caller will be removed by the next
> patch. I guess you should move this chunk there.
Thank you for your suggestion. I will update the patch with this
change in the next version.
>
>> -
>> static int am65_skb_get_mtype_seqid(struct sk_buff *skb, u32 *mtype_seqid)
>> {
>> unsigned int ptp_class = ptp_classify_raw(skb);
>> @@ -906,6 +883,72 @@ static int am65_skb_get_mtype_seqid(struct sk_buff *skb, u32 *mtype_seqid)
>> return 1;
>> }
>>
>> +static u64 am65_cpts_find_rx_ts(struct am65_cpts *cpts, struct sk_buff *skb,
>> + int ev_type, u32 skb_mtype_seqid)
>> +{
>> + struct list_head *this, *next;
>> + struct am65_cpts_event *event;
>> + unsigned long flags;
>> + u32 mtype_seqid;
>> + u64 ns = 0;
>> +
>> + am65_cpts_fifo_read(cpts);
>> + spin_lock_irqsave(&cpts->lock, flags);
>> + list_for_each_safe(this, next, &cpts->events) {
>> + event = list_entry(this, struct am65_cpts_event, list);
>> + if (time_after(jiffies, event->tmo)) {
>> + list_del_init(&event->list);
>> + list_add(&event->list, &cpts->pool);
>> + continue;
>> + }
>> +
>> + mtype_seqid = event->event1 &
>> + (AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK |
>> + AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK |
>> + AM65_CPTS_EVENT_1_EVENT_TYPE_MASK);
>> +
>> + if (mtype_seqid == skb_mtype_seqid) {
>> + ns = event->timestamp;
>> + list_del_init(&event->list);
>> + list_add(&event->list, &cpts->pool);
>> + break;
>> + }
>> + }
>> + spin_unlock_irqrestore(&cpts->lock, flags);
>
> Ouch, you have to acquire an additional lock per packet and a lot of
> cacheline dithering.
>
> Not strictly necessary for this series, but I suggest to invest some
> time reconsidering this schema, it looks bad from performance pov.
>
> Possibly protecting the list with RCU and leaving the recycle to the
> producer could help.
Thank you for pointing this out. I will consider your point and spend
some time on it.
>
> Additionally net-next is currently closed for the merge window, please
> post the new revision (including the target tree in the subj prefix)
> when net-next will re-open in ~2weeks.
>
Thank you for the information. I will update the patch and post its next
version with the above-mentioned changes.
> Cheers,
>
> Paolo
>