2022-05-24 13:34:11

by Viktor Barna

[permalink] [raw]
Subject: [RFC v2 85/96] cl8k: add tx.h

From: Viktor Barna <[email protected]>

(Part of the driver split. Please take a look at the cover letter for more
details.)

Signed-off-by: Viktor Barna <[email protected]>
---
drivers/net/wireless/celeno/cl8k/tx.h | 467 ++++++++++++++++++++++++++
1 file changed, 467 insertions(+)
create mode 100644 drivers/net/wireless/celeno/cl8k/tx.h

diff --git a/drivers/net/wireless/celeno/cl8k/tx.h b/drivers/net/wireless/celeno/cl8k/tx.h
new file mode 100644
index 000000000000..d36a7d703df6
--- /dev/null
+++ b/drivers/net/wireless/celeno/cl8k/tx.h
@@ -0,0 +1,467 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/* Copyright(c) 2019-2022, Celeno Communications Ltd. */
+
+#ifndef CL_TX_H
+#define CL_TX_H
+
+#include <linux/interrupt.h>
+
+#include "sta_info.h"
+#include "vif.h"
+#include "ipc_shared.h"
+#include "fw.h"
+#include "wrs.h"
+
+/* Categories of driver TX queues (see struct cl_tx_queues below). */
+enum cl_queue_type {
+ QUEUE_TYPE_SINGLE, /* Non-aggregated, per-STA/AC traffic */
+ QUEUE_TYPE_AGG, /* Aggregated (BA session) traffic */
+ QUEUE_TYPE_BCMC, /* Broadcast/multicast traffic */
+
+ QUEUE_TYPE_MAX
+};
+
+/* Flat index of the single queue for a (station, access category) pair. */
+#define QUEUE_IDX(sta, ac) ((sta) + (ac) * FW_MAX_NUM_STA)
+
+/* Number of entries in the circular TX push logger (see cl_tx_push_cntrs). */
+#define TX_PUSH_LOGGER_SIZE 256
+
+/* NOTE(review): presumably the BCMC confirmation-drain poll budget - confirm at call site. */
+#define BCMC_POLL_TIMEOUT 50
+
+/* Columns recorded per TX push logger entry (see cl_tx_push_cntrs). */
+enum cl_tx_push_logger_param {
+ TX_PUSH_LOGGER_DRV_CNT, /* presumably packets pending in the driver queue - verify */
+ TX_PUSH_LOGGER_FW_CNT, /* presumably descriptors in flight to firmware - verify */
+ TX_PUSH_LOGGER_PKT_PUSHED, /* packets pushed in this batch */
+ TX_PUSH_LOGGER_MAX,
+};
+
+/* Debug statistics gathered on the TX push path. */
+struct cl_tx_push_cntrs {
+ u32 tx_push_cntr_hist[TXDESC_AGG_Q_SIZE_MAX + 1]; /* Histogram of push batch sizes */
+ u32 tx_push_logger[TX_PUSH_LOGGER_SIZE][TX_PUSH_LOGGER_MAX]; /* Circular log, indexed by param */
+ u32 tx_push_logger_idx; /* Next write position in the circular log */
+};
+
+/*
+ * One driver TX queue: buffers frame headers (cl_sw_txhdr) and tracks how
+ * much room the matching firmware queue still has, plus per-queue statistics.
+ */
+struct cl_tx_queue {
+ struct list_head sched_list; /* Link in the TX scheduler list */
+ struct list_head hdrs; /* Queued cl_sw_txhdr entries */
+ struct cl_sta *cl_sta; /* Owning station (hedge: likely NULL for bcmc - verify) */
+ bool sched; /* Queue is currently on the scheduler list */
+ u16 fw_free_space; /* Remaining room in the firmware queue */
+ u16 fw_max_size; /* Total capacity of the firmware queue */
+ u8 type; /* enum cl_queue_type */
+ u8 tid; /* Traffic identifier (agg queues) */
+ u8 hw_index;
+ u16 index;
+ u16 max_packets; /* Driver-side queue depth limit */
+ u16 num_packets; /* Packets currently buffered in the driver */
+ u32 total_packets;
+ u32 total_fw_push_desc; /* Descriptors pushed to firmware (lifetime) */
+ u32 total_fw_push_skb; /* skbs pushed to firmware (lifetime) */
+ u32 total_fw_cfm; /* Confirmations received from firmware */
+ u32 dump_queue_full; /* Drops: driver queue was full */
+ u32 dump_dma_map_fail; /* Drops: DMA mapping failed */
+ u32 stats_hw_amsdu_cnt[CL_AMSDU_TX_PAYLOAD_MAX]; /* HW A-MSDU size histogram */
+ u32 stats_sw_amsdu_cnt[MAX_TX_SW_AMSDU_PACKET]; /* SW A-MSDU size histogram */
+ u32 hist_xmit_to_push[DELAY_HIST_SIZE]; /* Delay histogram: xmit -> push */
+ u32 hist_push_to_cfm[DELAY_HIST_SIZE]; /* Delay histogram: push -> confirmation */
+ struct cl_tx_push_cntrs push_cntrs_db; /* Push-path debug statistics */
+};
+
+/*
+ * struct cl_tx_queues:
+ * This structure holds all driver TX queues.
+ * These queues buffer frames pushed by the upper layer and push them to the
+ * lower IPC layer.
+ */
+struct cl_tx_queues {
+ struct cl_tx_queue agg[IPC_MAX_BA_SESSIONS]; /* One queue per BA session */
+ struct cl_tx_queue single[MAX_SINGLE_QUEUES]; /* Per-(STA, AC) queues, see QUEUE_IDX() */
+ struct cl_tx_queue bcmc; /* Single broadcast/multicast queue */
+};
+
+/* Pending aggregation (BA session) request for one STA/TID pair. */
+struct cl_req_agg_db {
+ bool is_used; /* Slot is occupied */
+ u8 sta_idx;
+ u8 tid;
+};
+
+/*
+ * Advance/rewind an 802.11 sequence-control value by one sequence number.
+ * The sequence number occupies bits 15:4 of seq_ctrl, hence the 0x10 step;
+ * masking with IEEE80211_SCTL_SEQ wraps it and clears the fragment bits.
+ */
+#define INC_SN(sn) (((sn) + 0x10) & IEEE80211_SCTL_SEQ)
+#define DEC_SN(sn) (((sn) - 0x10) & IEEE80211_SCTL_SEQ)
+
+/* Frame lifetime before it is considered stale, in milliseconds. */
+#define CL_TX_LIFETIME_MS 4000
+
+/* Pad bytes needed to align pointer/offset x to CL_SKB_DATA_ALIGN_SZ bytes. */
+#define CL_SKB_DATA_ALIGN_SZ 4
+#define CL_SKB_DATA_ALIGN_MSK (CL_SKB_DATA_ALIGN_SZ - 1)
+#define CL_SKB_DATA_ALIGN_PADS(x) \
+ ((CL_SKB_DATA_ALIGN_SZ - ((ptrdiff_t)(x) & CL_SKB_DATA_ALIGN_MSK)) & CL_SKB_DATA_ALIGN_MSK)
+
+/* Upper frame-length limits enforced on the single and aggregated paths. */
+#define CL_TX_MAX_FRAME_LEN_SINGLE 4096
+#define CL_TX_MAX_FRAME_LEN_AGG 2000
+
+/* Per-frame TX status word (32 bits) as reported by hardware/firmware. */
+struct cl_hw_tx_status {
+ u32 mcs_index : 7; /* [6:0] */
+ u32 is_bcmc : 1; /* [7] */
+ u32 num_mpdu_retries : 4; /* [11:8] */
+ u32 rsv : 4; /* [15:12] */
+ u32 format_mod : 4; /* [19:16] */
+ u32 bw_requested : 2; /* [21:20] */
+ u32 bf : 1; /* [22] */
+ u32 frm_successful : 1; /* [23] */
+ u32 bw_transmitted : 2; /* [25:24] */
+ u32 freespace_inc_skip : 1; /* [26] */
+ u32 keep_skb : 1; /* [27] */
+ u32 gi : 2; /* [29:28] */
+ u32 descriptor_done_sw : 1; /* [30] */
+ u32 descriptor_done_hw : 1; /* [31] */
+};
+
+/* Reasons passed to cl_tx_en() for enabling/disabling transmission. */
+enum cl_tx_flags {
+ CL_TX_EN_DFS, /* TX gated by DFS (radar detection) */
+ CL_TX_EN_SCAN /* TX gated by an ongoing scan */
+};
+
+/* Frame classification used on the single (non-aggregated) TX path. */
+enum cl_tx_single_frame_type {
+ CL_TX_SINGLE_FRAME_TYPE_QOS_DATA,
+ CL_TX_SINGLE_FRAME_TYPE_QOS_NULL,
+ CL_TX_SINGLE_FRAME_TYPE_MANAGEMENT,
+ CL_TX_SINGLE_FRAME_TYPE_OTHER
+};
+
+/* Global TX control knobs. */
+struct cl_tx_db {
+ bool force_amsdu; /* Force A-MSDU aggregation */
+ bool block_bcn; /* Suppress beacon transmission */
+ bool block_prob_resp; /* Suppress probe responses */
+};
+
+/* Counters of frames dropped on the TX path, broken down by reason. */
+struct cl_tx_drop_cntr {
+ u32 radio_off;
+ u32 in_recovery;
+ u32 short_length;
+ u32 pending_full;
+ u32 packet_limit;
+ u32 dev_flags;
+ u32 tx_disable;
+ u32 length_limit;
+ u32 txhdr_alloc_fail; /* cl_sw_txhdr allocation failed */
+ u32 queue_null;
+ u32 amsdu_alloc_fail;
+ u32 amsdu_dma_map_err;
+ u32 build_hdr_fail;
+ u32 key_disable;
+ u32 queue_flush;
+ u32 probe_response;
+ u32 sta_null_in_agg;
+ u32 sta_stop_tx;
+};
+
+/* Counters of frames forwarded at each stage of the TX path. */
+struct cl_tx_forward_cntr {
+ u32 tx_start;
+ u32 drv_fast_agg; /* Fast-path, aggregated */
+ u32 drv_fast_single; /* Fast-path, non-aggregated */
+ u32 to_mac;
+ u32 from_mac_single;
+ u32 from_mac_agg;
+};
+
+/* Counters of frames moved between the single and aggregated paths. */
+struct cl_tx_transfer_cntr {
+ u32 single_to_agg;
+ u32 agg_to_single;
+};
+
+/* Aggregate container for all TX packet accounting. */
+struct cl_tx_packet_cntr {
+ struct cl_tx_forward_cntr forward;
+ struct cl_tx_drop_cntr drop;
+ struct cl_tx_transfer_cntr transfer;
+};
+
+/* Per-CPU counters of TX submissions, split by path. */
+struct cl_cpu_cntr {
+ u32 tx_agg[CPU_MAX_NUM];
+ u32 tx_single[CPU_MAX_NUM];
+};
+
+/* True when mac80211 marked this frame for A-MSDU aggregation. */
+static inline bool cl_tx_ctrl_is_amsdu(struct ieee80211_tx_info *tx_info)
+{
+ return (tx_info->control.flags & IEEE80211_TX_CTRL_AMSDU) != 0;
+}
+
+/* True when mac80211 flagged this frame as a port-control (EAPOL) frame. */
+static inline bool cl_tx_ctrl_is_eapol(struct ieee80211_tx_info *tx_info)
+{
+ return (tx_info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) != 0;
+}
+
+/* Confirmation queue for one aggregation (BA) session. */
+struct cl_agg_cfm_queue {
+ struct list_head head; /* sw_txhdrs awaiting firmware confirmation */
+ struct cl_tx_queue *tx_queue; /* Associated driver TX queue */
+ u16 ssn; /* Current starting sequence number */
+};
+
+/*
+ * Structure containing the parameters of the MM_AGG_TX_REPORT_IND message.
+ * The bitfields are mirrored for big-endian builds so the bit layout matches
+ * what the (little-endian) firmware writes.
+ */
+struct cl_agg_tx_report {
+ __le32 rate_cntrl_info;
+ __le32 rate_cntrl_info_he;
+
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u32 sta_idx : 8,
+ is_sta_ps : 1,
+ bw_requested : 2,
+ is_agg : 1,
+ ba_not_received : 1,
+ ba_received_empty : 1,
+ bf : 1,
+ is_fallback : 1,
+ mu_su_gid : 6,
+ mu_mimo_valid : 1,
+ mu_ofdma_valid : 1,
+ rate_fix_mcs1 : 1,
+ rsv0 : 7;
+
+ /* MPDU outcome counters for the reported aggregate */
+ u32 success : 9,
+ fail : 9,
+ below_baw_cnt : 9,
+ num_prot_retries : 5;
+
+ u32 success_after_retry : 9,
+ success_more_one_retry : 9,
+ retry_limit_reached : 9,
+ is_retry : 1,
+ is_rts_retry_limit_reached : 1,
+ prot_type : 3;
+
+ /* Per-antenna RSSI readings */
+ u32 rssi1 : 8,
+ rssi2 : 8,
+ rssi3 : 8,
+ rssi4 : 8;
+
+ u32 rssi5 : 8,
+ rssi6 : 8,
+ rsv1 : 16;
+#else
+ u32 rsv0 : 7,
+ rate_fix_mcs1 : 1,
+ mu_ofdma_valid : 1,
+ mu_mimo_valid : 1,
+ mu_su_gid : 6,
+ is_fallback : 1,
+ bf : 1,
+ ba_received_empty : 1,
+ ba_not_received : 1,
+ is_agg : 1,
+ bw_requested : 2,
+ is_sta_ps : 1,
+ sta_idx : 8;
+
+ u32 num_prot_retries : 5,
+ below_baw_cnt : 9,
+ fail : 9,
+ success : 9;
+
+ u32 prot_type : 3,
+ is_rts_retry_limit_reached : 1,
+ is_retry : 1,
+ retry_limit_reached : 9,
+ success_more_one_retry : 9,
+ success_after_retry : 9;
+
+ u32 rssi4 : 8,
+ rssi3 : 8,
+ rssi2 : 8,
+ rssi1 : 8;
+
+ u32 rsv1 : 16,
+ rssi6 : 8,
+ rssi5 : 8;
+#endif
+ u16 new_ssn; /* Updated starting sequence number after the BA */
+ u8 tx_queue_idx;
+
+};
+
+/* Aggregation TX report processing (real reports and single-frame emulation). */
+void cl_agg_tx_report_handler(struct cl_hw *cl_hw, struct cl_sta *cl_sta,
+       struct cl_agg_tx_report *agg_report);
+void cl_agg_tx_report_simulate_for_single(struct cl_hw *cl_hw, struct cl_sta *cl_sta,
+       struct cl_hw_tx_status *status);
+
+/* Per RA/TID data for A-MPDU TX (block-ack window state). */
+struct cl_baw {
+ u8 fw_agg_idx; /* Firmware aggregation (BA session) index */
+ bool amsdu; /* A-MSDU permitted inside this session */
+ bool action_start; /* ADDBA action frame sent - verify semantics */
+ u16 ssn; /* Starting sequence number */
+ u16 tid_seq; /* Current sequence number for this TID */
+ struct sk_buff_head pending; /* Frames held while the session is set up */
+};
+
+/* Outcome of cl_tx_amsdu_set() for a candidate frame. */
+enum cl_amsdu_result {
+ CL_AMSDU_ANCHOR_SET, /* Frame became a new A-MSDU anchor */
+ CL_AMSDU_SUB_FRAME_SET, /* Frame appended as a sub-frame */
+ CL_AMSDU_SKIP, /* Frame not eligible for A-MSDU */
+ CL_AMSDU_FAILED /* Aggregation attempt failed */
+};
+
+/* Max size of 802.11 WLAN header */
+#define CL_WLAN_HEADER_MAX_SIZE 36
+
+/* Minimum frame count before A-MSDU aggregation is attempted. */
+#define CL_AMSDU_MIN_AGG_SIZE 3
+/* NOTE(review): fixed per-subframe overhead/reserve in bytes - confirm at use. */
+#define CL_AMSDU_CONST_LEN 256
+
+/* Tracking entry for one A-MSDU sub-frame (skb plus its DMA mapping). */
+struct cl_amsdu_txhdr {
+ struct list_head list; /* Link in the anchor's sub-frame list */
+ struct list_head list_pool; /* Link in the free pool */
+ struct sk_buff *skb;
+ dma_addr_t dma_addr; /* DMA address of the mapped sub-frame */
+};
+
+/* Per-TID A-MSDU build state: the current anchor frame and its limits. */
+struct cl_amsdu_ctrl {
+ struct cl_sw_txhdr *sw_txhdr; /* Current anchor (NULL when none) */
+ u16 rem_len; /* Bytes still available in the A-MSDU */
+ u16 max_len; /* Maximum A-MSDU length */
+ u16 hdrlen; /* 802.11 header length of the anchor */
+ u8 packet_cnt; /* Sub-frames accumulated so far */
+ bool is_sw_amsdu; /* Aggregation done in software, not hardware */
+};
+
+/*
+ * Driver-side TX header: per-frame context that accompanies an skb from the
+ * moment it enters the driver until its firmware confirmation is processed.
+ */
+struct cl_sw_txhdr {
+ struct list_head list_pool; /* Link in the free pool */
+ struct list_head tx_queue_list; /* Link in the driver TX queue */
+ struct list_head cfm_list; /* Link in a confirmation queue */
+ struct ieee80211_hdr *hdr80211; /* 802.11 header within the skb */
+ struct cl_tx_queue *tx_queue;
+ struct cl_sta *cl_sta;
+ struct cl_vif *cl_vif;
+ struct cl_amsdu_txhdr amsdu_txhdr; /* A-MSDU sub-frame tracking */
+ u8 hw_queue : 3,
+ is_bcn : 1,
+ tid : 4;
+ u8 ac : 2,
+ is_sw_amsdu : 1,
+ sw_amsdu_packet_cnt : 4,
+ rsv : 1;
+ /*
+ * Station index kept separately from the cl_sta pointer: when a
+ * confirmation arrives for a packet whose station has already
+ * disconnected, the cl_sta pointer in this struct is invalid, so the
+ * index is used to locate the singles queue that pushed the txdesc to
+ * the IPC layer.
+ */
+ u8 sta_idx;
+ __le16 fc; /* Frame-control field */
+ struct sk_buff *skb;
+ struct txdesc txdesc; /* IPC TX descriptor */
+ size_t map_len; /* DMA-mapped length */
+ u16 total_pkt_len;
+};
+
+/* Main TX entry points and push path. */
+void cl_tx_init(struct cl_hw *cl_hw);
+void cl_tx_check_start_ba_session(struct cl_hw *cl_hw,
+       struct ieee80211_sta *sta,
+       struct sk_buff *skb);
+void cl_tx_bcns_tasklet(unsigned long data);
+void cl_tx_single_free_skb(struct cl_hw *cl_hw, struct sk_buff *skb);
+void cl_tx_single(struct cl_hw *cl_hw, struct cl_sta *cl_sta,
+       struct sk_buff *skb, bool is_vns, bool lock);
+void cl_tx_fast_single(struct cl_hw *cl_hw, struct cl_sta *cl_sta,
+       struct sk_buff *skb, bool lock);
+void cl_tx_agg_prep(struct cl_hw *cl_hw, struct cl_sw_txhdr *sw_txhdr,
+       u16 frame_len, u8 hdr_pads, bool hdr_conv);
+void cl_tx_agg(struct cl_hw *cl_hw, struct cl_sta *cl_sta,
+       struct sk_buff *skb, bool hdr_conv, bool lock);
+void cl_tx_fast_agg(struct cl_hw *cl_hw, struct cl_sta *cl_sta,
+       struct sk_buff *skb, bool lock);
+u16 cl_tx_prepare_wlan_hdr(struct cl_hw *cl_hw, struct cl_sta *cl_sta,
+       struct sk_buff *skb, struct ieee80211_hdr *hdr);
+void cl_tx_wlan_to_8023(struct sk_buff *skb);
+int cl_tx_8023_to_wlan(struct cl_hw *cl_hw, struct sk_buff *skb, struct cl_sta *cl_sta, u8 tid);
+void cl_tx_push(struct cl_hw *cl_hw, struct cl_sw_txhdr *sw_txhdr, struct cl_tx_queue *tx_queue);
+void cl_tx_bcn_mesh_task(unsigned long data);
+void cl_tx_en(struct cl_hw *cl_hw, u8 reason, bool enable);
+void cl_tx_off(struct cl_hw *cl_hw);
+void cl_tx_drop_skb(struct sk_buff *skb);
+void cl_tx_update_hist_tstamp(struct cl_tx_queue *tx_queue, struct sk_buff *skb,
+       u32 tstamp_hist[DELAY_HIST_SIZE], bool update_skb_ktime);
+bool cl_is_tx_allowed(struct cl_hw *cl_hw);
+/* Aggregation confirmation queue handling. */
+void cl_agg_cfm_init(struct cl_hw *cl_hw);
+void cl_agg_cfm_add(struct cl_hw *cl_hw, struct cl_sw_txhdr *sw_txhdr, u8 agg_idx);
+void cl_agg_cfm_free_head_skb(struct cl_hw *cl_hw,
+       struct cl_agg_cfm_queue *cfm_queue,
+       u8 ba_queue_idx);
+void cl_agg_cfm_flush_all(struct cl_hw *cl_hw);
+void cl_agg_cfm_poll_empty(struct cl_hw *cl_hw, u8 agg_idx, bool flush);
+void cl_agg_cfm_poll_empty_sta(struct cl_hw *cl_hw, struct cl_sta *cl_sta);
+void cl_agg_cfm_clear_tim_bit_sta(struct cl_hw *cl_hw, struct cl_sta *cl_sta);
+void cl_agg_cfm_set_ssn(struct cl_hw *cl_hw, u16 ssn, u8 idx);
+void cl_agg_cfm_set_tx_queue(struct cl_hw *cl_hw, struct cl_tx_queue *tx_queue, u8 idx);
+/* Block-ack window (cl_baw) state machine. */
+void cl_baw_init(struct cl_sta *cl_sta);
+void cl_baw_start(struct cl_baw *baw, u16 ssn);
+void cl_baw_operational(struct cl_hw *cl_hw, struct cl_baw *baw,
+       u8 fw_agg_idx, bool amsdu_supported);
+void cl_baw_stop(struct cl_baw *baw);
+void cl_baw_pending_to_agg(struct cl_hw *cl_hw,
+       struct cl_sta *cl_sta,
+       u8 tid);
+void cl_baw_pending_to_single(struct cl_hw *cl_hw,
+       struct cl_sta *cl_sta,
+       struct cl_baw *baw);
+void cl_baw_pending_purge(struct cl_baw *baw);
+
+/* Broadcast/multicast confirmation handling. */
+void cl_bcmc_cfm_init(struct cl_hw *cl_hw);
+void cl_bcmc_cfm_add(struct cl_hw *cl_hw, struct cl_sw_txhdr *sw_txhdr);
+struct cl_sw_txhdr *cl_bcmc_cfm_find(struct cl_hw *cl_hw, dma_addr_t dma_addr,
+       bool keep_in_list);
+void cl_bcmc_cfm_flush_queue(struct cl_hw *cl_hw, struct cl_vif *cl_vif);
+void cl_bcmc_cfm_poll_empty_per_vif(struct cl_hw *cl_hw,
+       struct cl_vif *cl_vif);
+
+/* Confirmation queue for one single (non-aggregated) TX queue. */
+struct cl_single_cfm_queue {
+ struct list_head head; /* sw_txhdrs awaiting firmware confirmation */
+};
+
+/* Single-path confirmation handling. */
+void cl_single_cfm_init(struct cl_hw *cl_hw);
+void cl_single_cfm_add(struct cl_hw *cl_hw, struct cl_sw_txhdr *sw_txhdr, u32 queue_idx);
+struct cl_sw_txhdr *cl_single_cfm_find(struct cl_hw *cl_hw, u32 queue_idx,
+       dma_addr_t dma_addr);
+void cl_single_cfm_flush_all(struct cl_hw *cl_hw);
+void cl_single_cfm_flush_sta(struct cl_hw *cl_hw, u8 sta_idx);
+void cl_single_cfm_poll_empty(struct cl_hw *cl_hw, u32 queue_idx);
+void cl_single_cfm_poll_empty_sta(struct cl_hw *cl_hw, u8 sta_idx);
+void cl_single_cfm_clear_tim_bit_sta(struct cl_hw *cl_hw, u8 sta_idx);
+
+/* cl_sw_txhdr pool management. */
+int cl_sw_txhdr_init(struct cl_hw *cl_hw);
+void cl_sw_txhdr_deinit(struct cl_hw *cl_hw);
+struct cl_sw_txhdr *cl_sw_txhdr_alloc(struct cl_hw *cl_hw);
+void cl_sw_txhdr_free(struct cl_hw *cl_hw, struct cl_sw_txhdr *sw_txhdr);
+/* A-MSDU build/teardown helpers. */
+void cl_tx_amsdu_anchor_init(struct cl_amsdu_ctrl *amsdu_anchor);
+void cl_tx_amsdu_anchor_reset(struct cl_amsdu_ctrl *amsdu_anchor);
+void cl_tx_amsdu_set_max_len(struct cl_hw *cl_hw, struct cl_sta *cl_sta, u8 tid);
+void cl_tx_amsdu_first_sub_frame(struct cl_sw_txhdr *sw_txhdr, struct cl_sta *cl_sta,
+       struct sk_buff *skb, u8 tid);
+void cl_tx_amsdu_flush_sub_frames(struct cl_hw *cl_hw, struct cl_sw_txhdr *sw_txhdr);
+void cl_tx_amsdu_transfer_single(struct cl_hw *cl_hw, struct cl_sw_txhdr *sw_txhdr);
+int cl_tx_amsdu_set(struct cl_hw *cl_hw, struct cl_sta *cl_sta, struct sk_buff *skb, u8 tid);
+void cl_tx_amsdu_unset(struct cl_sw_txhdr *sw_txhdr);
+
+int cl_tx_amsdu_txhdr_init(struct cl_hw *cl_hw);
+void cl_tx_amsdu_txhdr_deinit(struct cl_hw *cl_hw);
+void cl_tx_amsdu_txhdr_free(struct cl_hw *cl_hw, struct cl_amsdu_txhdr *amsdu_txhdr);
+
+/* TX queue (cl_txq) scheduling and lifecycle. */
+void cl_txq_init(struct cl_hw *cl_hw);
+void cl_txq_stop(struct cl_hw *cl_hw);
+struct cl_tx_queue *cl_txq_get(struct cl_hw *cl_hw, struct cl_sw_txhdr *sw_txhdr);
+void cl_txq_push(struct cl_hw *cl_hw, struct cl_sw_txhdr *sw_txhdr);
+void cl_txq_sched(struct cl_hw *cl_hw, struct cl_tx_queue *tx_queue);
+void cl_txq_agg_alloc(struct cl_hw *cl_hw, struct cl_sta *cl_sta,
+       struct mm_ba_add_cfm *ba_add_cfm, u16 buf_size);
+void cl_txq_agg_free(struct cl_hw *cl_hw, struct cl_tx_queue *tx_queue,
+       struct cl_sta *cl_sta, u8 tid);
+void cl_txq_agg_stop(struct cl_sta *cl_sta, u8 tid);
+void cl_txq_sta_add(struct cl_hw *cl_hw, struct cl_sta *cl_sta);
+void cl_txq_sta_remove(struct cl_hw *cl_hw, u8 sta_idx);
+void cl_txq_transfer_agg_to_single(struct cl_hw *cl_hw, struct cl_tx_queue *agg_queue);
+void cl_txq_flush_agg(struct cl_hw *cl_hw, struct cl_tx_queue *tx_queue, bool lock);
+void cl_txq_flush_all_agg(struct cl_hw *cl_hw);
+void cl_txq_flush_all_single(struct cl_hw *cl_hw);
+void cl_txq_flush_sta(struct cl_hw *cl_hw, struct cl_sta *cl_sta);
+void cl_txq_agg_request_add(struct cl_hw *cl_hw, u8 sta_idx, u8 tid);
+void cl_txq_agg_request_del(struct cl_hw *cl_hw, u8 sta_idx, u8 tid);
+bool cl_txq_is_agg_available(struct cl_hw *cl_hw);
+bool cl_txq_is_fw_empty(struct cl_tx_queue *tx_queue);
+bool cl_txq_is_fw_full(struct cl_tx_queue *tx_queue);
+u16 cl_txq_desc_in_fw(struct cl_tx_queue *tx_queue);
+bool cl_txq_frames_pending(struct cl_hw *cl_hw);
+
+#endif /* CL_TX_H */
--
2.36.1