This patchset introduces the Ethernet Switch Driver for Freescale/NXP SoCs
with DPAA2 (DataPath Acceleration Architecture v2). The driver manages
switch objects discovered on the fsl-mc bus. A description of the driver
can be found in the associated README file.
The patchset consists of:
* A set of libraries containing APIs for configuring and controlling
Management Complex (MC) switch objects
* The DPAA2 Ethernet Switch driver
* Patch adding ethtool support
Limitations:
* no support for control traffic to/from CPU
* only DPSW ports can be added to a bridge
Resending in order to add the netdev list (and remove the arm one), based
on Andrew Lunn's suggestion.
Razvan Stefanescu (6):
staging: fsl-dpaa2/ethsw: Add APIs for DPSW object
staging: fsl-dpaa2/ethsw: Add Freescale DPAA2 Ethernet Switch driver
staging: fsl-dpaa2/ethsw: Add ethtool support
staging: fsl-dpaa2/ethsw: Add maintainer for Ethernet Switch driver
staging: fsl-dpaa2/ethsw: Add README
staging: fsl-dpaa2/ethsw: Add TODO
MAINTAINERS | 6 +
drivers/staging/fsl-dpaa2/Kconfig | 8 +
drivers/staging/fsl-dpaa2/Makefile | 1 +
drivers/staging/fsl-dpaa2/ethsw/Makefile | 7 +
drivers/staging/fsl-dpaa2/ethsw/README | 106 ++
drivers/staging/fsl-dpaa2/ethsw/TODO | 14 +
drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h | 371 ++++++
drivers/staging/fsl-dpaa2/ethsw/dpsw.c | 1147 +++++++++++++++++
drivers/staging/fsl-dpaa2/ethsw/dpsw.h | 611 +++++++++
drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c | 207 +++
drivers/staging/fsl-dpaa2/ethsw/ethsw.c | 1526 +++++++++++++++++++++++
drivers/staging/fsl-dpaa2/ethsw/ethsw.h | 91 ++
12 files changed, 4095 insertions(+)
create mode 100644 drivers/staging/fsl-dpaa2/ethsw/Makefile
create mode 100644 drivers/staging/fsl-dpaa2/ethsw/README
create mode 100644 drivers/staging/fsl-dpaa2/ethsw/TODO
create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw.c
create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw.h
create mode 100644 drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c
create mode 100644 drivers/staging/fsl-dpaa2/ethsw/ethsw.c
create mode 100644 drivers/staging/fsl-dpaa2/ethsw/ethsw.h
--
1.9.1
Add the command build/parse APIs for operating on DPSW objects through
the DPAA2 Management Complex.
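
A minimal usage sketch (illustrative only, not part of this patch) of how a
caller might drive these APIs, assuming an MC portal (struct fsl_mc_io) has
already been allocated, "dpsw_dev_id" is the id of a DPSW object found on the
fsl-mc bus, and 0 is passed as cmd_flags (no special command flags):

	/* Illustrative sketch only; the function name and error handling
	 * are hypothetical and kept minimal.
	 */
	static int example_dpsw_bring_up(struct fsl_mc_io *mc_io, int dpsw_dev_id)
	{
		struct dpsw_attr attr;
		u16 token;
		int err;

		err = dpsw_open(mc_io, 0, dpsw_dev_id, &token);
		if (err)
			return err;

		err = dpsw_get_attributes(mc_io, 0, token, &attr);
		if (err)
			goto err_close;

		err = dpsw_enable(mc_io, 0, token);
		if (err)
			goto err_close;

		return 0;

	err_close:
		dpsw_close(mc_io, 0, token);
		return err;
	}

The Ethernet Switch driver added later in this series is the actual consumer
of these APIs.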
Signed-off-by: Razvan Stefanescu <[email protected]>
---
drivers/staging/fsl-dpaa2/Kconfig | 8 +
drivers/staging/fsl-dpaa2/Makefile | 1 +
drivers/staging/fsl-dpaa2/ethsw/Makefile | 7 +
drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h | 358 +++++++++
drivers/staging/fsl-dpaa2/ethsw/dpsw.c | 1115 ++++++++++++++++++++++++++++
drivers/staging/fsl-dpaa2/ethsw/dpsw.h | 579 +++++++++++++++
6 files changed, 2068 insertions(+)
create mode 100644 drivers/staging/fsl-dpaa2/ethsw/Makefile
create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw.c
create mode 100644 drivers/staging/fsl-dpaa2/ethsw/dpsw.h
diff --git a/drivers/staging/fsl-dpaa2/Kconfig b/drivers/staging/fsl-dpaa2/Kconfig
index dfff675..8a508ef 100644
--- a/drivers/staging/fsl-dpaa2/Kconfig
+++ b/drivers/staging/fsl-dpaa2/Kconfig
@@ -16,3 +16,11 @@ config FSL_DPAA2_ETH
---help---
Ethernet driver for Freescale DPAA2 SoCs, using the
Freescale MC bus driver
+
+config FSL_DPAA2_ETHSW
+ tristate "Freescale DPAA2 Ethernet Switch"
+ depends on FSL_DPAA2
+ depends on NET_SWITCHDEV
+ ---help---
+ Driver for Freescale DPAA2 Ethernet Switch. Select
+ BRIDGE to have support for bridge tools.
diff --git a/drivers/staging/fsl-dpaa2/Makefile b/drivers/staging/fsl-dpaa2/Makefile
index 0836ba8..6cfd76b 100644
--- a/drivers/staging/fsl-dpaa2/Makefile
+++ b/drivers/staging/fsl-dpaa2/Makefile
@@ -3,3 +3,4 @@
#
obj-$(CONFIG_FSL_DPAA2_ETH) += ethernet/
+obj-$(CONFIG_FSL_DPAA2_ETHSW) += ethsw/
diff --git a/drivers/staging/fsl-dpaa2/ethsw/Makefile b/drivers/staging/fsl-dpaa2/ethsw/Makefile
new file mode 100644
index 0000000..db137f7
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethsw/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Freescale DPAA2 Ethernet Switch
+#
+
+obj-$(CONFIG_FSL_DPAA2_ETHSW) += dpaa2-ethsw.o
+
+dpaa2-ethsw-objs := dpsw.o
diff --git a/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h b/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
new file mode 100644
index 0000000..ddfd820
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
@@ -0,0 +1,358 @@
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __FSL_DPSW_CMD_H
+#define __FSL_DPSW_CMD_H
+
+/* DPSW Version */
+#define DPSW_VER_MAJOR 8
+#define DPSW_VER_MINOR 0
+
+#define DPSW_CMD_BASE_VERSION 1
+#define DPSW_CMD_ID_OFFSET 4
+
+#define DPSW_CMD_ID(id) (((id) << DPSW_CMD_ID_OFFSET) | DPSW_CMD_BASE_VERSION)
+
+/* Command IDs */
+#define DPSW_CMDID_CLOSE DPSW_CMD_ID(0x800)
+#define DPSW_CMDID_OPEN DPSW_CMD_ID(0x802)
+
+#define DPSW_CMDID_GET_API_VERSION DPSW_CMD_ID(0xa02)
+
+#define DPSW_CMDID_ENABLE DPSW_CMD_ID(0x002)
+#define DPSW_CMDID_DISABLE DPSW_CMD_ID(0x003)
+#define DPSW_CMDID_GET_ATTR DPSW_CMD_ID(0x004)
+#define DPSW_CMDID_RESET DPSW_CMD_ID(0x005)
+
+#define DPSW_CMDID_SET_IRQ_ENABLE DPSW_CMD_ID(0x012)
+
+#define DPSW_CMDID_SET_IRQ_MASK DPSW_CMD_ID(0x014)
+
+#define DPSW_CMDID_GET_IRQ_STATUS DPSW_CMD_ID(0x016)
+#define DPSW_CMDID_CLEAR_IRQ_STATUS DPSW_CMD_ID(0x017)
+
+#define DPSW_CMDID_IF_SET_TCI DPSW_CMD_ID(0x030)
+#define DPSW_CMDID_IF_SET_STP DPSW_CMD_ID(0x031)
+
+#define DPSW_CMDID_IF_GET_COUNTER DPSW_CMD_ID(0x034)
+
+#define DPSW_CMDID_IF_ENABLE DPSW_CMD_ID(0x03D)
+#define DPSW_CMDID_IF_DISABLE DPSW_CMD_ID(0x03E)
+
+#define DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH DPSW_CMD_ID(0x044)
+
+#define DPSW_CMDID_IF_GET_LINK_STATE DPSW_CMD_ID(0x046)
+#define DPSW_CMDID_IF_SET_FLOODING DPSW_CMD_ID(0x047)
+#define DPSW_CMDID_IF_SET_BROADCAST DPSW_CMD_ID(0x048)
+
+#define DPSW_CMDID_VLAN_ADD DPSW_CMD_ID(0x060)
+#define DPSW_CMDID_VLAN_ADD_IF DPSW_CMD_ID(0x061)
+#define DPSW_CMDID_VLAN_ADD_IF_UNTAGGED DPSW_CMD_ID(0x062)
+
+#define DPSW_CMDID_VLAN_REMOVE_IF DPSW_CMD_ID(0x064)
+#define DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED DPSW_CMD_ID(0x065)
+#define DPSW_CMDID_VLAN_REMOVE_IF_FLOODING DPSW_CMD_ID(0x066)
+#define DPSW_CMDID_VLAN_REMOVE DPSW_CMD_ID(0x067)
+
+#define DPSW_CMDID_FDB_ADD_UNICAST DPSW_CMD_ID(0x084)
+#define DPSW_CMDID_FDB_REMOVE_UNICAST DPSW_CMD_ID(0x085)
+#define DPSW_CMDID_FDB_ADD_MULTICAST DPSW_CMD_ID(0x086)
+#define DPSW_CMDID_FDB_REMOVE_MULTICAST DPSW_CMD_ID(0x087)
+#define DPSW_CMDID_FDB_SET_LEARNING_MODE DPSW_CMD_ID(0x088)
+
+/* Macros for accessing command fields smaller than 1 byte */
+#define DPSW_MASK(field) \
+ GENMASK(DPSW_##field##_SHIFT + DPSW_##field##_SIZE - 1, \
+ DPSW_##field##_SHIFT)
+#define dpsw_set_field(var, field, val) \
+ ((var) |= (((val) << DPSW_##field##_SHIFT) & DPSW_MASK(field)))
+#define dpsw_get_field(var, field) \
+ (((var) & DPSW_MASK(field)) >> DPSW_##field##_SHIFT)
+#define dpsw_get_bit(var, bit) \
+ (((var) >> (bit)) & GENMASK(0, 0))
+
+struct dpsw_cmd_open {
+ __le32 dpsw_id;
+};
+
+#define DPSW_COMPONENT_TYPE_SHIFT 0
+#define DPSW_COMPONENT_TYPE_SIZE 4
+
+struct dpsw_cmd_create {
+ /* cmd word 0 */
+ __le16 num_ifs;
+ u8 max_fdbs;
+ u8 max_meters_per_if;
+ /* from LSB: only the first 4 bits */
+ u8 component_type;
+ u8 pad[3];
+ /* cmd word 1 */
+ __le16 max_vlans;
+ __le16 max_fdb_entries;
+ __le16 fdb_aging_time;
+ __le16 max_fdb_mc_groups;
+ /* cmd word 2 */
+ __le64 options;
+};
+
+struct dpsw_cmd_destroy {
+ __le32 dpsw_id;
+};
+
+#define DPSW_ENABLE_SHIFT 0
+#define DPSW_ENABLE_SIZE 1
+
+struct dpsw_rsp_is_enabled {
+ /* from LSB: enable:1 */
+ u8 enabled;
+};
+
+struct dpsw_cmd_set_irq_enable {
+ u8 enable_state;
+ u8 pad[3];
+ u8 irq_index;
+};
+
+struct dpsw_cmd_get_irq_enable {
+ __le32 pad;
+ u8 irq_index;
+};
+
+struct dpsw_rsp_get_irq_enable {
+ u8 enable_state;
+};
+
+struct dpsw_cmd_set_irq_mask {
+ __le32 mask;
+ u8 irq_index;
+};
+
+struct dpsw_cmd_get_irq_mask {
+ __le32 pad;
+ u8 irq_index;
+};
+
+struct dpsw_rsp_get_irq_mask {
+ __le32 mask;
+};
+
+struct dpsw_cmd_get_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+struct dpsw_rsp_get_irq_status {
+ __le32 status;
+};
+
+struct dpsw_cmd_clear_irq_status {
+ __le32 status;
+ u8 irq_index;
+};
+
+#define DPSW_COMPONENT_TYPE_SHIFT 0
+#define DPSW_COMPONENT_TYPE_SIZE 4
+
+struct dpsw_rsp_get_attr {
+ /* cmd word 0 */
+ __le16 num_ifs;
+ u8 max_fdbs;
+ u8 num_fdbs;
+ __le16 max_vlans;
+ __le16 num_vlans;
+ /* cmd word 1 */
+ __le16 max_fdb_entries;
+ __le16 fdb_aging_time;
+ __le32 dpsw_id;
+ /* cmd word 2 */
+ __le16 mem_size;
+ __le16 max_fdb_mc_groups;
+ u8 max_meters_per_if;
+ /* from LSB only the first 4 bits */
+ u8 component_type;
+ __le16 pad;
+ /* cmd word 3 */
+ __le64 options;
+};
+
+struct dpsw_cmd_if_set_flooding {
+ __le16 if_id;
+ /* from LSB: enable:1 */
+ u8 enable;
+};
+
+struct dpsw_cmd_if_set_broadcast {
+ __le16 if_id;
+ /* from LSB: enable:1 */
+ u8 enable;
+};
+
+#define DPSW_VLAN_ID_SHIFT 0
+#define DPSW_VLAN_ID_SIZE 12
+#define DPSW_DEI_SHIFT 12
+#define DPSW_DEI_SIZE 1
+#define DPSW_PCP_SHIFT 13
+#define DPSW_PCP_SIZE 3
+
+struct dpsw_cmd_if_set_tci {
+ __le16 if_id;
+ /* from LSB: VLAN_ID:12 DEI:1 PCP:3 */
+ __le16 conf;
+};
+
+#define DPSW_STATE_SHIFT 0
+#define DPSW_STATE_SIZE 4
+
+struct dpsw_cmd_if_set_stp {
+ __le16 if_id;
+ __le16 vlan_id;
+ /* only the first LSB 4 bits */
+ u8 state;
+};
+
+#define DPSW_COUNTER_TYPE_SHIFT 0
+#define DPSW_COUNTER_TYPE_SIZE 5
+
+struct dpsw_cmd_if_get_counter {
+ __le16 if_id;
+ /* from LSB: type:5 */
+ u8 type;
+};
+
+struct dpsw_rsp_if_get_counter {
+ __le64 pad;
+ __le64 counter;
+};
+
+struct dpsw_cmd_if {
+ __le16 if_id;
+};
+
+struct dpsw_cmd_if_set_max_frame_length {
+ __le16 if_id;
+ __le16 frame_length;
+};
+
+struct dpsw_cmd_if_get_link_state {
+ __le16 if_id;
+};
+
+#define DPSW_UP_SHIFT 0
+#define DPSW_UP_SIZE 1
+
+struct dpsw_rsp_if_get_link_state {
+ /* cmd word 0 */
+ __le32 pad0;
+ u8 up;
+ u8 pad1[3];
+ /* cmd word 1 */
+ __le32 rate;
+ __le32 pad2;
+ /* cmd word 2 */
+ __le64 options;
+};
+
+struct dpsw_vlan_add {
+ __le16 fdb_id;
+ __le16 vlan_id;
+};
+
+struct dpsw_cmd_vlan_manage_if {
+ /* cmd word 0 */
+ __le16 pad0;
+ __le16 vlan_id;
+ __le32 pad1;
+ /* cmd word 1-4 */
+ __le64 if_id[4];
+};
+
+struct dpsw_cmd_vlan_remove {
+ __le16 pad;
+ __le16 vlan_id;
+};
+
+struct dpsw_cmd_fdb_add {
+ __le32 pad;
+ __le16 fdb_aging_time;
+ __le16 num_fdb_entries;
+};
+
+struct dpsw_rsp_fdb_add {
+ __le16 fdb_id;
+};
+
+struct dpsw_cmd_fdb_remove {
+ __le16 fdb_id;
+};
+
+#define DPSW_ENTRY_TYPE_SHIFT 0
+#define DPSW_ENTRY_TYPE_SIZE 4
+
+struct dpsw_cmd_fdb_unicast_op {
+ /* cmd word 0 */
+ __le16 fdb_id;
+ u8 mac_addr[6];
+ /* cmd word 1 */
+ __le16 if_egress;
+ /* only the first 4 bits from LSB */
+ u8 type;
+};
+
+struct dpsw_cmd_fdb_multicast_op {
+ /* cmd word 0 */
+ __le16 fdb_id;
+ __le16 num_ifs;
+ /* only the first 4 bits from LSB */
+ u8 type;
+ u8 pad[3];
+ /* cmd word 1 */
+ u8 mac_addr[6];
+ __le16 pad2;
+ /* cmd word 2-5 */
+ __le64 if_id[4];
+};
+
+#define DPSW_LEARNING_MODE_SHIFT 0
+#define DPSW_LEARNING_MODE_SIZE 4
+
+struct dpsw_cmd_fdb_set_learning_mode {
+ __le16 fdb_id;
+ /* only the first 4 bits from LSB */
+ u8 mode;
+};
+
+struct dpsw_rsp_get_api_version {
+ __le16 version_major;
+ __le16 version_minor;
+};
+
+#endif /* __FSL_DPSW_CMD_H */
diff --git a/drivers/staging/fsl-dpaa2/ethsw/dpsw.c b/drivers/staging/fsl-dpaa2/ethsw/dpsw.c
new file mode 100644
index 0000000..f36b92b
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw.c
@@ -0,0 +1,1115 @@
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "../../fsl-mc/include/mc.h"
+#include "dpsw.h"
+#include "dpsw-cmd.h"
+
+static void build_if_id_bitmap(__le64 *bmap,
+ const u16 *id,
+ const u16 num_ifs)
+{
+ int i;
+
+ for (i = 0; (i < num_ifs) && (i < DPSW_MAX_IF); i++) {
+ if (id[i] < DPSW_MAX_IF)
+ bmap[id[i] / 64] |= BIT_MASK(id[i] % 64);
+ }
+}
+
+/**
+ * dpsw_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpsw_id: DPSW unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpsw_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpsw_id,
+ u16 *token)
+{
+ struct mc_command cmd = { 0 };
+ struct dpsw_cmd_open *cmd_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ cmd_params = (struct dpsw_cmd_open *)cmd.params;
+ cmd_params->dpsw_id = cpu_to_le32(dpsw_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpsw_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLOSE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_enable() - Enable DPSW functionality
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_ENABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_disable() - Disable DPSW functionality
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_disable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_reset() - Reset the DPSW, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_RESET,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_set_irq_enable() - Set overall interrupt state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ * @irq_index: The interrupt index to configure
+ * @en: Interrupt state - enable = 1, disable = 0
+ *
+ * Allows GPP software to control when interrupts are generated.
+ * Each interrupt can have up to 32 causes. The enable/disable controls the
+ * overall interrupt state. If the interrupt is disabled, no causes will
+ * trigger an interrupt.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_set_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 en)
+{
+ struct mc_command cmd = { 0 };
+ struct dpsw_cmd_set_irq_enable *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_ENABLE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_set_irq_enable *)cmd.params;
+ dpsw_set_field(cmd_params->enable_state, ENABLE, en);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_set_irq_mask() - Set interrupt mask.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ * @irq_index: The interrupt index to configure
+ * @mask: Event mask to trigger interrupt;
+ * each bit:
+ * 0 = ignore event
+ * 1 = consider event for asserting IRQ
+ *
+ * Every interrupt can have up to 32 causes and the interrupt model supports
+ * masking/unmasking each cause independently
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_set_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 mask)
+{
+ struct mc_command cmd = { 0 };
+ struct dpsw_cmd_set_irq_mask *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_IRQ_MASK,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_set_irq_mask *)cmd.params;
+ cmd_params->mask = cpu_to_le32(mask);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_get_irq_status() - Get the current status of any pending interrupts
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @irq_index: The interrupt index to configure
+ * @status: Returned interrupts status - one bit per cause:
+ * 0 = no interrupt pending
+ * 1 = interrupt pending
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_get_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *status)
+{
+ struct mc_command cmd = { 0 };
+ struct dpsw_cmd_get_irq_status *cmd_params;
+ struct dpsw_rsp_get_irq_status *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_IRQ_STATUS,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_get_irq_status *)cmd.params;
+ cmd_params->status = cpu_to_le32(*status);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpsw_rsp_get_irq_status *)cmd.params;
+ *status = le32_to_cpu(rsp_params->status);
+
+ return 0;
+}
+
+/**
+ * dpsw_clear_irq_status() - Clear a pending interrupt's status
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPCI object
+ * @irq_index: The interrupt index to configure
+ * @status: bits to clear (W1C) - one bit per cause:
+ * 0 = don't change
+ * 1 = clear status bit
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_clear_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 status)
+{
+ struct mc_command cmd = { 0 };
+ struct dpsw_cmd_clear_irq_status *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_CLEAR_IRQ_STATUS,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_clear_irq_status *)cmd.params;
+ cmd_params->status = cpu_to_le32(status);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_get_attributes() - Retrieve DPSW attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @attr: Returned DPSW attributes
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpsw_attr *attr)
+{
+ struct mc_command cmd = { 0 };
+ struct dpsw_rsp_get_attr *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpsw_rsp_get_attr *)cmd.params;
+ attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
+ attr->max_fdbs = rsp_params->max_fdbs;
+ attr->num_fdbs = rsp_params->num_fdbs;
+ attr->max_vlans = le16_to_cpu(rsp_params->max_vlans);
+ attr->num_vlans = le16_to_cpu(rsp_params->num_vlans);
+ attr->max_fdb_entries = le16_to_cpu(rsp_params->max_fdb_entries);
+ attr->fdb_aging_time = le16_to_cpu(rsp_params->fdb_aging_time);
+ attr->id = le32_to_cpu(rsp_params->dpsw_id);
+ attr->mem_size = le16_to_cpu(rsp_params->mem_size);
+ attr->max_fdb_mc_groups = le16_to_cpu(rsp_params->max_fdb_mc_groups);
+ attr->max_meters_per_if = rsp_params->max_meters_per_if;
+ attr->options = le64_to_cpu(rsp_params->options);
+ attr->component_type = dpsw_get_field(rsp_params->component_type,
+ COMPONENT_TYPE);
+
+ return 0;
+}
+
+/**
+ * dpsw_if_get_link_state() - Return the link state
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface id
+ * @state: Link state: 1 - link up, 0 - link down or disconnected
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_if_get_link_state(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
+ struct dpsw_link_state *state)
+{
+ struct mc_command cmd = { 0 };
+ struct dpsw_cmd_if_get_link_state *cmd_params;
+ struct dpsw_rsp_if_get_link_state *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_LINK_STATE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if_get_link_state *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpsw_rsp_if_get_link_state *)cmd.params;
+ state->rate = le32_to_cpu(rsp_params->rate);
+ state->options = le64_to_cpu(rsp_params->options);
+ state->up = dpsw_get_field(rsp_params->up, UP);
+
+ return 0;
+}
+
+/**
+ * dpsw_if_set_flooding() - Enable/disable flooding for a particular interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @en: 1 - enable, 0 - disable
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_set_flooding(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
+ int en)
+{
+ struct mc_command cmd = { 0 };
+ struct dpsw_cmd_if_set_flooding *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_FLOODING,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if_set_flooding *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+ dpsw_set_field(cmd_params->enable, ENABLE, en);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_if_set_broadcast() - Enable/disable broadcast for a particular interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @en: 1 - enable, 0 - disable
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
+ int en)
+{
+ struct mc_command cmd = { 0 };
+ struct dpsw_cmd_if_set_broadcast *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_BROADCAST,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if_set_broadcast *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+ dpsw_set_field(cmd_params->enable, ENABLE, en);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_if_set_tci() - Set default VLAN Tag Control Information (TCI)
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @cfg: Tag Control Information Configuration
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_set_tci(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
+ const struct dpsw_tci_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+ struct dpsw_cmd_if_set_tci *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_TCI,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if_set_tci *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+ dpsw_set_field(cmd_params->conf, VLAN_ID, cfg->vlan_id);
+ dpsw_set_field(cmd_params->conf, DEI, cfg->dei);
+ dpsw_set_field(cmd_params->conf, PCP, cfg->pcp);
+ cmd_params->conf = cpu_to_le16(cmd_params->conf);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_if_set_stp() - Function sets Spanning Tree Protocol (STP) state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @cfg: STP State configuration parameters
+ *
+ * The following STP states are supported -
+ * blocking, listening, learning, forwarding and disabled.
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_set_stp(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
+ const struct dpsw_stp_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+ struct dpsw_cmd_if_set_stp *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_STP,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if_set_stp *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+ cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id);
+ dpsw_set_field(cmd_params->state, STATE, cfg->state);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_if_get_counter() - Get a specific counter of a particular interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @type: Counter type
+ * @counter: return value
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_get_counter(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
+ enum dpsw_counter type,
+ u64 *counter)
+{
+ struct mc_command cmd = { 0 };
+ struct dpsw_cmd_if_get_counter *cmd_params;
+ struct dpsw_rsp_if_get_counter *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_GET_COUNTER,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if_get_counter *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+ dpsw_set_field(cmd_params->type, COUNTER_TYPE, type);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpsw_rsp_if_get_counter *)cmd.params;
+ *counter = le64_to_cpu(rsp_params->counter);
+
+ return 0;
+}
+
+/**
+ * dpsw_if_enable() - Enable Interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id)
+{
+ struct mc_command cmd = { 0 };
+ struct dpsw_cmd_if *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ENABLE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_if_disable() - Disable Interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_disable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id)
+{
+ struct mc_command cmd = { 0 };
+ struct dpsw_cmd_if *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_DISABLE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_if_set_max_frame_length() - Set Maximum Receive frame length.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface Identifier
+ * @frame_length: Maximum Frame Length
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
+ u16 frame_length)
+{
+ struct mc_command cmd = { 0 };
+ struct dpsw_cmd_if_set_max_frame_length *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_MAX_FRAME_LENGTH,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if_set_max_frame_length *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+ cmd_params->frame_length = cpu_to_le16(frame_length);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_vlan_add() - Adding a new VLAN to the DPSW.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @vlan_id: VLAN Identifier
+ * @cfg: VLAN configuration
+ *
+ * Only VLAN ID and FDB ID are required parameters here.
+ * The 12-bit VLAN ID is defined in IEEE 802.1Q.
+ * Adding a duplicate VLAN ID is not allowed.
+ * FDB ID can be shared across multiple VLANs. Shared learning
+ * is obtained by calling dpsw_vlan_add for multiple VLAN IDs
+ * with the same fdb_id
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_vlan_add(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 vlan_id,
+ const struct dpsw_vlan_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+ struct dpsw_vlan_add *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_vlan_add *)cmd.params;
+ cmd_params->fdb_id = cpu_to_le16(cfg->fdb_id);
+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_vlan_add_if() - Adding a set of interfaces to an existing VLAN.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @vlan_id: VLAN Identifier
+ * @cfg: Set of interfaces to add
+ *
+ * It adds only interfaces not belonging to this VLAN yet,
+ * otherwise an error is generated and the entire command is
+ * ignored. This function can be called numerous times, always
+ * providing the required interfaces delta.
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_vlan_add_if(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 vlan_id,
+ const struct dpsw_vlan_if_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+ struct dpsw_cmd_vlan_manage_if *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_vlan_add_if_untagged() - Defining a set of interfaces that should be
+ * transmitted as untagged.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @vlan_id: VLAN Identifier
+ * @cfg: Set of interfaces that should be transmitted as untagged
+ *
+ * These interfaces should already belong to this VLAN.
+ * By default all interfaces are transmitted as tagged.
+ * Providing a non-existing interface or an interface that is
+ * already configured as untagged generates an error and the entire
+ * command is ignored.
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 vlan_id,
+ const struct dpsw_vlan_if_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+ struct dpsw_cmd_vlan_manage_if *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_ADD_IF_UNTAGGED,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_vlan_remove_if() - Remove interfaces from an existing VLAN.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @vlan_id: VLAN Identifier
+ * @cfg: Set of interfaces that should be removed
+ *
+ * Interfaces must belong to this VLAN, otherwise an error
+ * is returned and the command is ignored
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 vlan_id,
+ const struct dpsw_vlan_if_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+ struct dpsw_cmd_vlan_manage_if *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_vlan_remove_if_untagged() - Define a set of interfaces that should be
+ * converted from transmitting as untagged to transmitting as tagged.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @vlan_id: VLAN Identifier
+ * @cfg: Set of interfaces that should be removed
+ *
+ * Interfaces provided by API have to belong to this VLAN and
+ * configured untagged, otherwise an error is returned and the
+ * command is ignored
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 vlan_id,
+ const struct dpsw_vlan_if_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+ struct dpsw_cmd_vlan_manage_if *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE_IF_UNTAGGED,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_vlan_manage_if *)cmd.params;
+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_vlan_remove() - Remove an entire VLAN
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @vlan_id: VLAN Identifier
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_vlan_remove(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 vlan_id)
+{
+ struct mc_command cmd = { 0 };
+ struct dpsw_cmd_vlan_remove *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_VLAN_REMOVE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_vlan_remove *)cmd.params;
+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_fdb_add_unicast() - Add a unicast entry into the MAC lookup table
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @fdb_id: Forwarding Database Identifier
+ * @cfg: Unicast entry configuration
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 fdb_id,
+ const struct dpsw_fdb_unicast_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+ struct dpsw_cmd_fdb_unicast_op *cmd_params;
+ int i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_UNICAST,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_fdb_unicast_op *)cmd.params;
+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
+ cmd_params->if_egress = cpu_to_le16(cfg->if_egress);
+ for (i = 0; i < 6; i++)
+ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
+ dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_fdb_remove_unicast() - Remove an entry from the MAC lookup table
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @fdb_id: Forwarding Database Identifier
+ * @cfg: Unicast entry configuration
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 fdb_id,
+ const struct dpsw_fdb_unicast_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+ struct dpsw_cmd_fdb_unicast_op *cmd_params;
+ int i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_UNICAST,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_fdb_unicast_op *)cmd.params;
+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
+ for (i = 0; i < 6; i++)
+ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
+ cmd_params->if_egress = cpu_to_le16(cfg->if_egress);
+ dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_fdb_add_multicast() - Add a set of egress interfaces to a multicast group
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @fdb_id: Forwarding Database Identifier
+ * @cfg: Multicast entry configuration
+ *
+ * If the group doesn't exist, it will be created.
+ * It adds only interfaces not belonging to this multicast group
+ * yet, otherwise an error is generated and the command is
+ * ignored.
+ * This function may be called numerous times always providing
+ * required interfaces delta.
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 fdb_id,
+ const struct dpsw_fdb_multicast_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+ struct dpsw_cmd_fdb_multicast_op *cmd_params;
+ int i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_ADD_MULTICAST,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_fdb_multicast_op *)cmd.params;
+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
+ cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
+ dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
+ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+ for (i = 0; i < 6; i++)
+ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_fdb_remove_multicast() - Removing interfaces from an existing multicast
+ * group.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @fdb_id: Forwarding Database Identifier
+ * @cfg: Multicast entry configuration
+ *
+ * Interfaces provided by this API have to exist in the group,
+ * otherwise an error will be returned and the entire command
+ * ignored. If there is no interface left in the group,
+ * the entire group is deleted
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 fdb_id,
+ const struct dpsw_fdb_multicast_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+ struct dpsw_cmd_fdb_multicast_op *cmd_params;
+ int i;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_REMOVE_MULTICAST,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_fdb_multicast_op *)cmd.params;
+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
+ cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
+ dpsw_set_field(cmd_params->type, ENTRY_TYPE, cfg->type);
+ build_if_id_bitmap(cmd_params->if_id, cfg->if_id, cfg->num_ifs);
+ for (i = 0; i < 6; i++)
+ cmd_params->mac_addr[i] = cfg->mac_addr[5 - i];
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_fdb_set_learning_mode() - Define FDB learning mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @fdb_id: Forwarding Database Identifier
+ * @mode: Learning mode
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 fdb_id,
+ enum dpsw_fdb_learning_mode mode)
+{
+ struct mc_command cmd = { 0 };
+ struct dpsw_cmd_fdb_set_learning_mode *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_FDB_SET_LEARNING_MODE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_fdb_set_learning_mode *)cmd.params;
+ cmd_params->fdb_id = cpu_to_le16(fdb_id);
+ dpsw_set_field(cmd_params->mode, LEARNING_MODE, mode);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpsw_get_api_version() - Get Data Path Switch API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path switch API
+ * @minor_ver: Minor version of data path switch API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver)
+{
+ struct mc_command cmd = { 0 };
+ struct dpsw_rsp_get_api_version *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_GET_API_VERSION,
+ cmd_flags,
+ 0);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpsw_rsp_get_api_version *)cmd.params;
+ *major_ver = le16_to_cpu(rsp_params->version_major);
+ *minor_ver = le16_to_cpu(rsp_params->version_minor);
+
+ return 0;
+}
diff --git a/drivers/staging/fsl-dpaa2/ethsw/dpsw.h b/drivers/staging/fsl-dpaa2/ethsw/dpsw.h
new file mode 100644
index 0000000..e9c2906
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw.h
@@ -0,0 +1,579 @@
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __FSL_DPSW_H
+#define __FSL_DPSW_H
+
+/* Data Path L2-Switch API
+ * Contains API for handling DPSW topology and functionality
+ */
+
+struct fsl_mc_io;
+
+/**
+ * DPSW general definitions
+ */
+
+/**
+ * Maximum number of traffic class priorities
+ */
+#define DPSW_MAX_PRIORITIES 8
+/**
+ * Maximum number of interfaces
+ */
+#define DPSW_MAX_IF 64
+
+int dpsw_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpsw_id,
+ u16 *token);
+
+int dpsw_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+/**
+ * DPSW options
+ */
+
+/**
+ * Disable flooding
+ */
+#define DPSW_OPT_FLOODING_DIS 0x0000000000000001ULL
+/**
+ * Disable Multicast
+ */
+#define DPSW_OPT_MULTICAST_DIS 0x0000000000000004ULL
+/**
+ * Disable control interface
+ */
+#define DPSW_OPT_CTRL_IF_DIS 0x0000000000000010ULL
+/**
+ * Disable flooding metering
+ */
+#define DPSW_OPT_FLOODING_METERING_DIS 0x0000000000000020ULL
+/**
+ * Enable metering
+ */
+#define DPSW_OPT_METERING_EN 0x0000000000000040ULL
+
+/**
+ * enum dpsw_component_type - component type of a bridge
+ * @DPSW_COMPONENT_TYPE_C_VLAN: A C-VLAN component of an
+ * enterprise VLAN bridge or of a Provider Bridge used
+ * to process C-tagged frames
+ * @DPSW_COMPONENT_TYPE_S_VLAN: An S-VLAN component of a
+ * Provider Bridge
+ *
+ */
+enum dpsw_component_type {
+ DPSW_COMPONENT_TYPE_C_VLAN = 0,
+ DPSW_COMPONENT_TYPE_S_VLAN
+};
+
+/**
+ * struct dpsw_cfg - DPSW configuration
+ * @num_ifs: Number of external and internal interfaces
+ * @adv: Advanced parameters; default is all zeros;
+ * use this structure to change default settings
+ */
+struct dpsw_cfg {
+ u16 num_ifs;
+ /**
+ * struct adv - Advanced parameters
+ * @options: Enable/Disable DPSW features (bitmap)
+ * @max_vlans: Maximum number of VLANs; 0 - indicates default 16
+ * @max_meters_per_if: Number of meters per interface
+ * @max_fdbs: Maximum number of FDBs; 0 - indicates default 16
+ * @max_fdb_entries: Number of FDB entries for default FDB table;
+ * 0 - indicates default 1024 entries.
+ * @fdb_aging_time: Default FDB aging time for default FDB table;
+ * 0 - indicates default 300 seconds
+ * @max_fdb_mc_groups: Number of multicast groups in each FDB table;
+ * 0 - indicates default 32
+ * @component_type: Indicates the component type of this bridge
+ */
+ struct {
+ u64 options;
+ u16 max_vlans;
+ u8 max_meters_per_if;
+ u8 max_fdbs;
+ u16 max_fdb_entries;
+ u16 fdb_aging_time;
+ u16 max_fdb_mc_groups;
+ enum dpsw_component_type component_type;
+ } adv;
+};
+
+int dpsw_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dpsw_disable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dpsw_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+/**
+ * DPSW IRQ Index and Events
+ */
+
+#define DPSW_IRQ_INDEX_IF 0x0000
+#define DPSW_IRQ_INDEX_L2SW 0x0001
+
+/**
+ * IRQ event - Indicates that the link state changed
+ */
+#define DPSW_IRQ_EVENT_LINK_CHANGED 0x0001
+
+/**
+ * struct dpsw_irq_cfg - IRQ configuration
+ * @addr: Address that must be written to signal a message-based interrupt
+ * @val: Value to write into irq_addr address
+ * @irq_num: A user defined number associated with this IRQ
+ */
+struct dpsw_irq_cfg {
+ u64 addr;
+ u32 val;
+ int irq_num;
+};
+
+int dpsw_set_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 en);
+
+int dpsw_set_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 mask);
+
+int dpsw_get_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *status);
+
+int dpsw_clear_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 status);
+
+/**
+ * struct dpsw_attr - Structure representing DPSW attributes
+ * @id: DPSW object ID
+ * @options: Enable/Disable DPSW features
+ * @max_vlans: Maximum Number of VLANs
+ * @max_meters_per_if: Number of meters per interface
+ * @max_fdbs: Maximum Number of FDBs
+ * @max_fdb_entries: Number of FDB entries for default FDB table;
+ * 0 - indicates default 1024 entries.
+ * @fdb_aging_time: Default FDB aging time for default FDB table;
+ * 0 - indicates default 300 seconds
+ * @max_fdb_mc_groups: Number of multicast groups in each FDB table;
+ * 0 - indicates default 32
+ * @mem_size: DPSW frame storage memory size
+ * @num_ifs: Number of interfaces
+ * @num_vlans: Current number of VLANs
+ * @num_fdbs: Current number of FDBs
+ * @component_type: Component type of this bridge
+ */
+struct dpsw_attr {
+ int id;
+ u64 options;
+ u16 max_vlans;
+ u8 max_meters_per_if;
+ u8 max_fdbs;
+ u16 max_fdb_entries;
+ u16 fdb_aging_time;
+ u16 max_fdb_mc_groups;
+ u16 num_ifs;
+ u16 mem_size;
+ u16 num_vlans;
+ u8 num_fdbs;
+ enum dpsw_component_type component_type;
+};
+
+int dpsw_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpsw_attr *attr);
+
+/**
+ * enum dpsw_action - Action selection for special/control frames
+ * @DPSW_ACTION_DROP: Drop frame
+ * @DPSW_ACTION_REDIRECT: Redirect frame to control port
+ */
+enum dpsw_action {
+ DPSW_ACTION_DROP = 0,
+ DPSW_ACTION_REDIRECT = 1
+};
+
+/**
+ * struct dpsw_link_state - Structure representing DPSW link state
+ * @rate: Rate
+ * @options: Mask of available options; use 'DPSW_LINK_OPT_<X>' values
+ * @up: 0 - covers two cases: down and disconnected, 1 - up
+ */
+struct dpsw_link_state {
+ u32 rate;
+ u64 options;
+ u8 up;
+};
+
+int dpsw_if_get_link_state(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
+ struct dpsw_link_state *state);
+
+int dpsw_if_set_flooding(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
+ int en);
+
+int dpsw_if_set_broadcast(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
+ int en);
+
+/**
+ * struct dpsw_tci_cfg - Tag Control Information (TCI) configuration
+ * @pcp: Priority Code Point (PCP): a 3-bit field which refers
+ * to the IEEE 802.1p priority
+ * @dei: Drop Eligible Indicator (DEI): a 1-bit field. May be used
+ * separately or in conjunction with PCP to indicate frames
+ * eligible to be dropped in the presence of congestion
+ * @vlan_id: VLAN Identifier (VID): a 12-bit field specifying the VLAN
+ * to which the frame belongs. The hexadecimal values
+ * of 0x000 and 0xFFF are reserved;
+ * all other values may be used as VLAN identifiers,
+ * allowing up to 4,094 VLANs
+ */
+struct dpsw_tci_cfg {
+ u8 pcp;
+ u8 dei;
+ u16 vlan_id;
+};
+
+int dpsw_if_set_tci(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
+ const struct dpsw_tci_cfg *cfg);
+
+/**
+ * enum dpsw_stp_state - Spanning Tree Protocol (STP) states
+ * @DPSW_STP_STATE_BLOCKING: Blocking state
+ * @DPSW_STP_STATE_LISTENING: Listening state
+ * @DPSW_STP_STATE_LEARNING: Learning state
+ * @DPSW_STP_STATE_FORWARDING: Forwarding state
+ *
+ */
+enum dpsw_stp_state {
+ DPSW_STP_STATE_DISABLED = 0,
+ DPSW_STP_STATE_LISTENING = 1,
+ DPSW_STP_STATE_LEARNING = 2,
+ DPSW_STP_STATE_FORWARDING = 3,
+ DPSW_STP_STATE_BLOCKING = 0
+};
+
+/**
+ * struct dpsw_stp_cfg - Spanning Tree Protocol (STP) Configuration
+ * @vlan_id: VLAN ID for which the STP state is configured
+ * @state: STP state
+ */
+struct dpsw_stp_cfg {
+ u16 vlan_id;
+ enum dpsw_stp_state state;
+};
+
+int dpsw_if_set_stp(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
+ const struct dpsw_stp_cfg *cfg);
+
+/**
+ * enum dpsw_accepted_frames - Types of frames to accept
+ * @DPSW_ADMIT_ALL: The device accepts VLAN tagged, untagged and
+ * priority tagged frames
+ * @DPSW_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or
+ * Priority-Tagged frames received on this interface.
+ *
+ */
+enum dpsw_accepted_frames {
+ DPSW_ADMIT_ALL = 1,
+ DPSW_ADMIT_ONLY_VLAN_TAGGED = 3
+};
+
+/**
+ * enum dpsw_counter - Counters types
+ * @DPSW_CNT_ING_FRAME: Counts ingress frames
+ * @DPSW_CNT_ING_BYTE: Counts ingress bytes
+ * @DPSW_CNT_ING_FLTR_FRAME: Counts filtered ingress frames
+ * @DPSW_CNT_ING_FRAME_DISCARD: Counts discarded ingress frame
+ * @DPSW_CNT_ING_MCAST_FRAME: Counts ingress multicast frames
+ * @DPSW_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes
+ * @DPSW_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames
+ * @DPSW_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes
+ * @DPSW_CNT_EGR_FRAME: Counts egress frames
+ * @DPSW_CNT_EGR_BYTE: Counts egress bytes
+ * @DPSW_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames
+ * @DPSW_CNT_EGR_STP_FRAME_DISCARD: Counts egress STP discarded frames
+ */
+enum dpsw_counter {
+ DPSW_CNT_ING_FRAME = 0x0,
+ DPSW_CNT_ING_BYTE = 0x1,
+ DPSW_CNT_ING_FLTR_FRAME = 0x2,
+ DPSW_CNT_ING_FRAME_DISCARD = 0x3,
+ DPSW_CNT_ING_MCAST_FRAME = 0x4,
+ DPSW_CNT_ING_MCAST_BYTE = 0x5,
+ DPSW_CNT_ING_BCAST_FRAME = 0x6,
+ DPSW_CNT_ING_BCAST_BYTES = 0x7,
+ DPSW_CNT_EGR_FRAME = 0x8,
+ DPSW_CNT_EGR_BYTE = 0x9,
+ DPSW_CNT_EGR_FRAME_DISCARD = 0xa,
+ DPSW_CNT_EGR_STP_FRAME_DISCARD = 0xb
+};
+
+int dpsw_if_get_counter(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
+ enum dpsw_counter type,
+ u64 *counter);
+
+int dpsw_if_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id);
+
+int dpsw_if_disable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id);
+
+int dpsw_if_set_max_frame_length(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
+ u16 frame_length);
+
+/**
+ * struct dpsw_vlan_cfg - VLAN Configuration
+ * @fdb_id: Forwarding Database (FDB) ID
+ */
+struct dpsw_vlan_cfg {
+ u16 fdb_id;
+};
+
+int dpsw_vlan_add(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 vlan_id,
+ const struct dpsw_vlan_cfg *cfg);
+
+/**
+ * struct dpsw_vlan_if_cfg - Set of VLAN Interfaces
+ * @num_ifs: The number of interfaces that are assigned to the egress
+ * list for this VLAN
+ * @if_id: The set of interfaces that are
+ * assigned to the egress list for this VLAN
+ */
+struct dpsw_vlan_if_cfg {
+ u16 num_ifs;
+ u16 if_id[DPSW_MAX_IF];
+};
+
+int dpsw_vlan_add_if(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 vlan_id,
+ const struct dpsw_vlan_if_cfg *cfg);
+
+int dpsw_vlan_add_if_untagged(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 vlan_id,
+ const struct dpsw_vlan_if_cfg *cfg);
+
+int dpsw_vlan_remove_if(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 vlan_id,
+ const struct dpsw_vlan_if_cfg *cfg);
+
+int dpsw_vlan_remove_if_untagged(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 vlan_id,
+ const struct dpsw_vlan_if_cfg *cfg);
+
+int dpsw_vlan_remove(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 vlan_id);
+
+/**
+ * enum dpsw_fdb_entry_type - FDB Entry type - Static/Dynamic
+ * @DPSW_FDB_ENTRY_STATIC: Static entry
+ * @DPSW_FDB_ENTRY_DINAMIC: Dynamic entry
+ */
+enum dpsw_fdb_entry_type {
+ DPSW_FDB_ENTRY_STATIC = 0,
+ DPSW_FDB_ENTRY_DINAMIC = 1
+};
+
+/**
+ * struct dpsw_fdb_unicast_cfg - Unicast entry configuration
+ * @type: Select static or dynamic entry
+ * @mac_addr: MAC address
+ * @if_egress: Egress interface ID
+ */
+struct dpsw_fdb_unicast_cfg {
+ enum dpsw_fdb_entry_type type;
+ u8 mac_addr[6];
+ u16 if_egress;
+};
+
+int dpsw_fdb_add_unicast(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 fdb_id,
+ const struct dpsw_fdb_unicast_cfg *cfg);
+
+int dpsw_fdb_remove_unicast(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 fdb_id,
+ const struct dpsw_fdb_unicast_cfg *cfg);
+
+/**
+ * struct dpsw_fdb_multicast_cfg - Multi-cast entry configuration
+ * @type: Select static or dynamic entry
+ * @mac_addr: MAC address
+ * @num_ifs: Number of external and internal interfaces
+ * @if_id: Egress interface IDs
+ */
+struct dpsw_fdb_multicast_cfg {
+ enum dpsw_fdb_entry_type type;
+ u8 mac_addr[6];
+ u16 num_ifs;
+ u16 if_id[DPSW_MAX_IF];
+};
+
+int dpsw_fdb_add_multicast(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 fdb_id,
+ const struct dpsw_fdb_multicast_cfg *cfg);
+
+int dpsw_fdb_remove_multicast(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 fdb_id,
+ const struct dpsw_fdb_multicast_cfg *cfg);
+
+/**
+ * enum dpsw_fdb_learning_mode - Auto-learning modes
+ * @DPSW_FDB_LEARNING_MODE_DIS: Disable Auto-learning
+ * @DPSW_FDB_LEARNING_MODE_HW: Enable HW auto-Learning
+ * @DPSW_FDB_LEARNING_MODE_NON_SECURE: Enable non-secure learning by CPU
+ * @DPSW_FDB_LEARNING_MODE_SECURE: Enable secure learning by CPU
+ *
+ * NON-SECURE LEARNING
+ * SMAC found    DMAC found    CTLU Action
+ *     v             v         Forward frame to
+ *                              1.  DMAC destination
+ *     -             v         Forward frame to
+ *                              1.  DMAC destination
+ *                              2.  Control interface
+ *     v             -         Forward frame to
+ *                              1.  Flooding list of interfaces
+ *     -             -         Forward frame to
+ *                              1.  Flooding list of interfaces
+ *                              2.  Control interface
+ * SECURE LEARNING
+ * SMAC found    DMAC found    CTLU Action
+ *     v             v         Forward frame to
+ *                              1.  DMAC destination
+ *     -             v         Forward frame to
+ *                              1.  Control interface
+ *     v             -         Forward frame to
+ *                              1.  Flooding list of interfaces
+ *     -             -         Forward frame to
+ *                              1.  Control interface
+ */
+enum dpsw_fdb_learning_mode {
+ DPSW_FDB_LEARNING_MODE_DIS = 0,
+ DPSW_FDB_LEARNING_MODE_HW = 1,
+ DPSW_FDB_LEARNING_MODE_NON_SECURE = 2,
+ DPSW_FDB_LEARNING_MODE_SECURE = 3
+};
+
+int dpsw_fdb_set_learning_mode(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 fdb_id,
+ enum dpsw_fdb_learning_mode mode);
+
+/**
+ * struct dpsw_fdb_attr - FDB Attributes
+ * @max_fdb_entries: Maximum number of FDB entries
+ * @fdb_aging_time: Aging time in seconds
+ * @learning_mode: Learning mode
+ * @num_fdb_mc_groups: Current number of multicast groups
+ * @max_fdb_mc_groups: Maximum number of multicast groups
+ */
+struct dpsw_fdb_attr {
+ u16 max_fdb_entries;
+ u16 fdb_aging_time;
+ enum dpsw_fdb_learning_mode learning_mode;
+ u16 num_fdb_mc_groups;
+ u16 max_fdb_mc_groups;
+};
+
+int dpsw_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver);
+
+#endif /* __FSL_DPSW_H */
--
1.9.1
Signed-off-by: Razvan Stefanescu <[email protected]>
---
MAINTAINERS | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/MAINTAINERS b/MAINTAINERS
index 2281af4..cfd4f74 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4297,6 +4297,12 @@ L: [email protected]
S: Maintained
F: drivers/staging/fsl-dpaa2/ethernet
+DPAA2 ETHERNET SWITCH DRIVER
+M: Razvan Stefanescu <[email protected]>
+L: [email protected]
+S: Maintained
+F: drivers/staging/fsl-dpaa2/ethsw
+
DPT_I2O SCSI RAID DRIVER
M: Adaptec OEM Raid Solutions <[email protected]>
L: [email protected]
--
1.9.1
Add a TODO file describing what needs to be added/changed before the driver
can be moved out of staging.
Signed-off-by: Razvan Stefanescu <[email protected]>
---
drivers/staging/fsl-dpaa2/ethsw/TODO | 14 ++++++++++++++
1 file changed, 14 insertions(+)
create mode 100644 drivers/staging/fsl-dpaa2/ethsw/TODO
diff --git a/drivers/staging/fsl-dpaa2/ethsw/TODO b/drivers/staging/fsl-dpaa2/ethsw/TODO
new file mode 100644
index 0000000..d3f12c3
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethsw/TODO
@@ -0,0 +1,14 @@
+* Add I/O capabilities on switch port netdevices. This will allow control
+traffic to reach the CPU.
+* Add ACL to redirect control traffic to CPU.
+* Add support for displaying learned FDB entries
+* MC firmware uprev; the DPAA2 objects used by the Ethernet Switch driver
+need to be kept in sync with binary interface changes in MC
+* Refine the README file
+* General code cleanup
+
+NOTE: At least the first three of the above are required before the
+DPAA2 Ethernet Switch driver can be moved out of staging. Another
+requirement is that the fsl-mc bus driver is moved to drivers/bus and the
+dpio driver to drivers/soc (both are required for I/O support).
+
--
1.9.1
Add a README file describing the driver architecture, components and
interfaces.
Signed-off-by: Razvan Stefanescu <[email protected]>
---
drivers/staging/fsl-dpaa2/ethsw/README | 106 +++++++++++++++++++++++++++++++++
1 file changed, 106 insertions(+)
create mode 100644 drivers/staging/fsl-dpaa2/ethsw/README
diff --git a/drivers/staging/fsl-dpaa2/ethsw/README b/drivers/staging/fsl-dpaa2/ethsw/README
new file mode 100644
index 0000000..f6fc07f
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethsw/README
@@ -0,0 +1,106 @@
+DPAA2 Ethernet Switch driver
+============================
+
+This file provides documentation for the DPAA2 Ethernet Switch driver.
+
+
+Contents
+========
+ Supported Platforms
+ Architecture Overview
+ Creating an Ethernet Switch
+ Features
+
+
+Supported Platforms
+===================
+This driver provides networking support for the Freescale LS2085A and
+LS2088A DPAA2 SoCs.
+
+
+Architecture Overview
+=====================
+The Ethernet Switch in the DPAA2 architecture consists of several hardware
+resources that provide the functionality. These are allocated and
+configured via the Management Complex (MC) portals. MC abstracts most of
+these resources as DPAA2 objects and exposes ABIs through which they can
+be configured and controlled.
+
+For a more detailed description of the DPAA2 architecture and its object
+abstractions see:
+ drivers/staging/fsl-mc/README.txt
+
+The Ethernet Switch is built on top of a Datapath Switch (DPSW) object.
+
+Configuration interface:
+
+ ---------------------
+ | DPAA2 Switch driver |
+ ---------------------
+ .
+ .
+ ----------
+ | DPSW API |
+ ----------
+ . software
+ ================= . ==============
+ . hardware
+ ---------------------
+ | MC hardware portals |
+ ---------------------
+ .
+ .
+ ------
+ | DPSW |
+ ------
+
+The driver uses the Ethernet switch device driver model (switchdev) and
+exposes each switch port as a network interface, which can be added to a
+bridge (see the configuration example in the Features section). Traffic
+switched between ports is offloaded into the hardware. The exposed network
+interfaces are used only for configuration, not for I/O; this limitation
+will be addressed in the future.
+
+The DPSW can have ports connected to DPNIs or to PHYs via DPMACs.
+
+
+ [ethA] [ethB] [ethC] [ethD] [ethE] [ethF]
+ : : : : : :
+ : : : : : :
+[eth drv] [eth drv] [ ethsw drv ]
+ : : : : : : kernel
+========================================================================
+ : : : : : : hardware
+ [DPNI] [DPNI] [============= DPSW =================]
+ | | | | | |
+ | ---------- | [DPMAC] [DPMAC]
+ ------------------------------- | |
+ | |
+ [PHY] [PHY]
+
+For a more detailed description of the Ethernet switch device driver model
+see:
+ Documentation/networking/switchdev.txt
+
+Creating an Ethernet Switch
+===========================
+A device is created for the switch objects probed on the MC bus. Each DPSW
+has a number of properties which determine the configuration options and
+associated hardware resources.
+
+A DPSW object (and the other DPAA2 objects needed for a DPAA2 switch) can
+be added to a container on the MC bus in one of two ways: statically,
+through a Datapath Layout Binary file (DPL) that is parsed by MC at boot
+time; or created dynamically at runtime, via the DPAA2 objects APIs.
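+
+For example, assuming the restool userspace tool is available, a DPSW with
+four interfaces might be created and plugged dynamically along these lines
+(the object and container names below are only illustrative):
+
+  $ restool dpsw create --num-ifs=4
+  $ restool dprc assign dprc.1 --object=dpsw.0 --plugged=1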
+
+Features
+========
+The driver configures the DPSW to perform hardware switching offload of
+unicast/multicast/broadcast (VLAN-tagged or untagged) traffic between its
+ports.
+
+It allows configuration of hardware learning, flooding, multicast groups,
+per-port VLAN membership and STP state.
+
+Static entries can be added/removed from the FDB.
+
+Hardware statistics for each port are provided through the ethtool -S option.
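+
+As a sketch, once the switch port netdevices are visible in Linux (e.g. ethC
+and ethD in the diagram above), they can be bridged and configured with
+standard iproute2 and ethtool commands; the interface names, VLAN ID and MAC
+address below are only illustrative:
+
+  $ ip link add name br0 type bridge
+  $ ip link set dev ethC master br0
+  $ ip link set dev ethD master br0
+  $ bridge vlan add dev ethC vid 10 pvid untagged
+  $ bridge fdb add 00:11:22:33:44:55 dev ethC master static
+  $ ethtool -S ethC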
--
1.9.1
Introduce the DPAA2 Ethernet Switch driver, which manages Datapath Switch
(DPSW) objects discovered on the MC bus.
Suggested-by: Alexandru Marginean <[email protected]>
Signed-off-by: Razvan Stefanescu <[email protected]>
---
drivers/staging/fsl-dpaa2/ethsw/Makefile | 2 +-
drivers/staging/fsl-dpaa2/ethsw/ethsw.c | 1523 ++++++++++++++++++++++++++++++
drivers/staging/fsl-dpaa2/ethsw/ethsw.h | 88 ++
3 files changed, 1612 insertions(+), 1 deletion(-)
create mode 100644 drivers/staging/fsl-dpaa2/ethsw/ethsw.c
create mode 100644 drivers/staging/fsl-dpaa2/ethsw/ethsw.h
diff --git a/drivers/staging/fsl-dpaa2/ethsw/Makefile b/drivers/staging/fsl-dpaa2/ethsw/Makefile
index db137f7..a6d72d1 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/Makefile
+++ b/drivers/staging/fsl-dpaa2/ethsw/Makefile
@@ -4,4 +4,4 @@
obj-$(CONFIG_FSL_DPAA2_ETHSW) += dpaa2-ethsw.o
-dpaa2-ethsw-objs := dpsw.o
+dpaa2-ethsw-objs := ethsw.o dpsw.o
diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
new file mode 100644
index 0000000..ae86078
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
@@ -0,0 +1,1523 @@
+/* Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+
+#include <linux/interrupt.h>
+#include <linux/msi.h>
+#include <linux/kthread.h>
+#include <linux/workqueue.h>
+
+#include "../../fsl-mc/include/mc.h"
+
+#include "ethsw.h"
+
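+/* Ordered workqueue used to defer switchdev FDB add/del notifications */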
+static struct workqueue_struct *ethsw_owq;
+
+/* Minimal supported DPSW version */
+#define DPSW_MIN_VER_MAJOR 8
+#define DPSW_MIN_VER_MINOR 0
+
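+/* VLAN implicitly configured on every switch port out of reset */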
+#define DEFAULT_VLAN_ID 1
+
+static int ethsw_add_vlan(struct ethsw_core *ethsw, u16 vid)
+{
+ int err;
+
+ struct dpsw_vlan_cfg vcfg = {
+ .fdb_id = 0,
+ };
+
+ if (ethsw->vlans[vid]) {
+ dev_err(ethsw->dev, "VLAN already configured\n");
+ return -EEXIST;
+ }
+
+ err = dpsw_vlan_add(ethsw->mc_io, 0,
+ ethsw->dpsw_handle, vid, &vcfg);
+ if (err) {
+ dev_err(ethsw->dev, "dpsw_vlan_add err %d\n", err);
+ return err;
+ }
+ ethsw->vlans[vid] = ETHSW_VLAN_MEMBER;
+
+ return 0;
+}
+
+static int ethsw_port_add_vlan(struct ethsw_port_priv *port_priv,
+ u16 vid, u16 flags)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct net_device *netdev = port_priv->netdev;
+ struct dpsw_vlan_if_cfg vcfg;
+ bool is_oper;
+ int err, err2;
+
+ if (port_priv->vlans[vid]) {
+ netdev_warn(netdev, "VLAN %d already configured\n", vid);
+ return -EEXIST;
+ }
+
+ vcfg.num_ifs = 1;
+ vcfg.if_id[0] = port_priv->idx;
+ err = dpsw_vlan_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, vid, &vcfg);
+ if (err) {
+ netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err);
+ return err;
+ }
+
+ port_priv->vlans[vid] = ETHSW_VLAN_MEMBER;
+
+ if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
+ err = dpsw_vlan_add_if_untagged(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ vid, &vcfg);
+ if (err) {
+ netdev_err(netdev,
+ "dpsw_vlan_add_if_untagged err %d\n", err);
+ return err;
+ }
+ port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED;
+ }
+
+ if (flags & BRIDGE_VLAN_INFO_PVID) {
+ struct dpsw_tci_cfg tci_cfg = {
+ .pcp = 0,
+ .dei = 0,
+ .vlan_id = vid,
+ };
+
+ /* Interface needs to be down to change PVID */
+ is_oper = netif_oper_up(netdev);
+ if (is_oper) {
+ err = dpsw_if_disable(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ port_priv->idx);
+ if (err) {
+ netdev_err(netdev,
+ "dpsw_if_disable err %d\n", err);
+ return err;
+ }
+ }
+
+ err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ port_priv->idx, &tci_cfg);
+ if (!err) {
+ /* Delete previous PVID info and mark the new one */
+ if (port_priv->pvid)
+ port_priv->vlans[port_priv->pvid]
+ ^= ETHSW_VLAN_PVID;
+
+ port_priv->vlans[vid] |= ETHSW_VLAN_PVID;
+ port_priv->pvid = vid;
+ } else {
+ netdev_err(netdev, "dpsw_if_set_tci err %d\n", err);
+ }
+
+ if (is_oper) {
+ err2 = dpsw_if_enable(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ port_priv->idx);
+ if (err2) {
+ netdev_err(netdev,
+ "dpsw_if_enable err %d\n", err2);
+ return err2;
+ }
+ }
+ }
+
+ return err;
+}
+
+static int ethsw_set_learning(struct ethsw_core *ethsw, u8 flag)
+{
+ enum dpsw_fdb_learning_mode learn_mode;
+ int err;
+
+ if (flag)
+ learn_mode = DPSW_FDB_LEARNING_MODE_HW;
+ else
+ learn_mode = DPSW_FDB_LEARNING_MODE_DIS;
+
+ err = dpsw_fdb_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle, 0,
+ learn_mode);
+ if (err) {
+ dev_err(ethsw->dev, "dpsw_fdb_set_learning_mode err %d\n", err);
+ return err;
+ }
+ ethsw->learning = !!flag;
+
+ return 0;
+}
+
+static int ethsw_port_set_flood(struct ethsw_port_priv *port_priv, u8 flag)
+{
+ int err;
+
+ err = dpsw_if_set_flooding(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx, (int)flag);
+ if (err) {
+ netdev_err(port_priv->netdev,
+ "dpsw_fdb_set_learning_mode err %d\n", err);
+ return err;
+ }
+ port_priv->flood = !!flag;
+
+ return 0;
+}
+
+static int ethsw_port_set_stp_state(struct ethsw_port_priv *port_priv, u8 state)
+{
+ struct dpsw_stp_cfg stp_cfg = {
+ .vlan_id = DEFAULT_VLAN_ID,
+ .state = state,
+ };
+ int err;
+
+ if (!netif_oper_up(port_priv->netdev) || state == port_priv->stp_state)
+ return 0; /* Nothing to do */
+
+ err = dpsw_if_set_stp(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx, &stp_cfg);
+ if (err) {
+ netdev_err(port_priv->netdev,
+ "dpsw_if_set_stp err %d\n", err);
+ return err;
+ }
+
+ port_priv->stp_state = state;
+
+ return 0;
+}
+
+static int ethsw_dellink_switch(struct ethsw_core *ethsw, u16 vid)
+{
+ struct ethsw_port_priv *ppriv_local = NULL;
+ int i, err;
+
+ if (!ethsw->vlans[vid])
+ return -ENOENT;
+
+ err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, vid);
+ if (err) {
+ dev_err(ethsw->dev, "dpsw_vlan_remove err %d\n", err);
+ return err;
+ }
+ ethsw->vlans[vid] = 0;
+
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
+ ppriv_local = ethsw->ports[i];
+ ppriv_local->vlans[vid] = 0;
+ }
+
+ return 0;
+}
+
+static int ethsw_port_fdb_add_uc(struct ethsw_port_priv *port_priv,
+ const unsigned char *addr)
+{
+ struct dpsw_fdb_unicast_cfg entry = {0};
+ int err;
+
+ entry.if_egress = port_priv->idx;
+ entry.type = DPSW_FDB_ENTRY_STATIC;
+ ether_addr_copy(entry.mac_addr, addr);
+
+ err = dpsw_fdb_add_unicast(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ 0, &entry);
+ if (err)
+ netdev_err(port_priv->netdev,
+ "dpsw_fdb_add_unicast err %d\n", err);
+ return err;
+}
+
+static int ethsw_port_fdb_del_uc(struct ethsw_port_priv *port_priv,
+ const unsigned char *addr)
+{
+ struct dpsw_fdb_unicast_cfg entry = {0};
+ int err;
+
+ entry.if_egress = port_priv->idx;
+ entry.type = DPSW_FDB_ENTRY_STATIC;
+ ether_addr_copy(entry.mac_addr, addr);
+
+ err = dpsw_fdb_remove_unicast(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ 0, &entry);
+ if (err)
+ netdev_err(port_priv->netdev,
+ "dpsw_fdb_remove_unicast err %d\n", err);
+ return err;
+}
+
+static int ethsw_port_fdb_add_mc(struct ethsw_port_priv *port_priv,
+ const unsigned char *addr)
+{
+ struct dpsw_fdb_multicast_cfg entry = {0};
+ int err;
+
+ ether_addr_copy(entry.mac_addr, addr);
+ entry.type = DPSW_FDB_ENTRY_STATIC;
+ entry.num_ifs = 1;
+ entry.if_id[0] = port_priv->idx;
+
+ err = dpsw_fdb_add_multicast(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ 0, &entry);
+ if (err)
+ netdev_err(port_priv->netdev, "dpsw_fdb_add_multicast err %d\n",
+ err);
+ return err;
+}
+
+static int ethsw_port_fdb_del_mc(struct ethsw_port_priv *port_priv,
+ const unsigned char *addr)
+{
+ struct dpsw_fdb_multicast_cfg entry = {0};
+ int err;
+
+ ether_addr_copy(entry.mac_addr, addr);
+ entry.type = DPSW_FDB_ENTRY_STATIC;
+ entry.num_ifs = 1;
+ entry.if_id[0] = port_priv->idx;
+
+ err = dpsw_fdb_remove_multicast(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ 0, &entry);
+ if (err)
+ netdev_err(port_priv->netdev,
+ "dpsw_fdb_remove_multicast err %d\n", err);
+ return err;
+}
+
+static void port_get_stats(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ u64 tmp;
+ int err;
+
+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ DPSW_CNT_ING_FRAME, &stats->rx_packets);
+ if (err)
+ goto error;
+
+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ DPSW_CNT_EGR_FRAME, &stats->tx_packets);
+ if (err)
+ goto error;
+
+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ DPSW_CNT_ING_BYTE, &stats->rx_bytes);
+ if (err)
+ goto error;
+
+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ DPSW_CNT_EGR_BYTE, &stats->tx_bytes);
+ if (err)
+ goto error;
+
+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ DPSW_CNT_ING_FRAME_DISCARD,
+ &stats->rx_dropped);
+ if (err)
+ goto error;
+
+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ DPSW_CNT_ING_FLTR_FRAME,
+ &tmp);
+ if (err)
+ goto error;
+ stats->rx_dropped += tmp;
+
+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ DPSW_CNT_EGR_FRAME_DISCARD,
+ &stats->tx_dropped);
+ if (err)
+ goto error;
+
+ return;
+
+error:
+ netdev_err(netdev, "dpsw_if_get_counter err %d\n", err);
+}
+
+static bool port_has_offload_stats(const struct net_device *netdev,
+ int attr_id)
+{
+ return (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT);
+}
+
+static int port_get_offload_stats(int attr_id,
+ const struct net_device *netdev,
+ void *sp)
+{
+ switch (attr_id) {
+ case IFLA_OFFLOAD_XSTATS_CPU_HIT:
+ port_get_stats((struct net_device *)netdev, sp);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int port_change_mtu(struct net_device *netdev, int mtu)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ int err;
+
+ err = dpsw_if_set_max_frame_length(port_priv->ethsw_data->mc_io,
+ 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ (u16)ETHSW_L2_MAX_FRM(mtu));
+ if (err) {
+ netdev_err(netdev,
+ "dpsw_if_set_max_frame_length() err %d\n", err);
+ return err;
+ }
+
+ netdev->mtu = mtu;
+ return 0;
+}
+
+static int port_carrier_state_sync(struct net_device *netdev)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct dpsw_link_state state;
+ int err;
+
+ err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx, &state);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
+ return err;
+ }
+
+ WARN_ONCE(state.up > 1, "Garbage read into link_state");
+
+ if (state.up != port_priv->link_state) {
+ if (state.up)
+ netif_carrier_on(netdev);
+ else
+ netif_carrier_off(netdev);
+ port_priv->link_state = state.up;
+ }
+ return 0;
+}
+
+static int port_open(struct net_device *netdev)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ int err;
+
+ /* No need to allow Tx as control interface is disabled */
+ netif_tx_stop_all_queues(netdev);
+
+ err = dpsw_if_enable(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_enable err %d\n", err);
+ return err;
+ }
+
+ /* sync carrier state */
+ err = port_carrier_state_sync(netdev);
+ if (err) {
+ netdev_err(netdev,
+ "port_carrier_state_sync err %d\n", err);
+ goto err_carrier_sync;
+ }
+
+ return 0;
+
+err_carrier_sync:
+ dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx);
+ return err;
+}
+
+static int port_stop(struct net_device *netdev)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ int err;
+
+ err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_disable err %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static netdev_tx_t port_dropframe(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ /* we don't support I/O for now, drop the frame */
+ dev_kfree_skb_any(skb);
+
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops ethsw_port_ops = {
+ .ndo_open = port_open,
+ .ndo_stop = port_stop,
+
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_change_mtu = port_change_mtu,
+ .ndo_has_offload_stats = port_has_offload_stats,
+ .ndo_get_offload_stats = port_get_offload_stats,
+
+ .ndo_start_xmit = port_dropframe,
+};
+
+static void ethsw_links_state_update(struct ethsw_core *ethsw)
+{
+ int i;
+
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
+ port_carrier_state_sync(ethsw->ports[i]->netdev);
+}
+
+static irqreturn_t ethsw_irq0_handler(int irq_num, void *arg)
+{
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t ethsw_irq0_handler_thread(int irq_num, void *arg)
+{
+ struct device *dev = (struct device *)arg;
+ struct ethsw_core *ethsw = dev_get_drvdata(dev);
+
+ /* Mask the events and the if_id reserved bits to be cleared on read */
+ u32 status = DPSW_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000;
+ int err;
+
+ err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, &status);
+ if (err) {
+ dev_err(dev, "Can't get irq status (err %d)", err);
+
+ err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, 0xFFFFFFFF);
+ if (err)
+ dev_err(dev, "Can't clear irq status (err %d)", err);
+ goto out;
+ }
+
+ if (status & DPSW_IRQ_EVENT_LINK_CHANGED)
+ ethsw_links_state_update(ethsw);
+
+out:
+ return IRQ_HANDLED;
+}
+
+static int ethsw_setup_irqs(struct fsl_mc_device *sw_dev)
+{
+ struct device *dev = &sw_dev->dev;
+ struct ethsw_core *ethsw = dev_get_drvdata(dev);
+ u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED;
+ struct fsl_mc_device_irq *irq;
+ int err;
+
+ err = fsl_mc_allocate_irqs(sw_dev);
+ if (err) {
+ dev_err(dev, "MC irqs allocation failed\n");
+ return err;
+ }
+
+ if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_IRQ_NUM)) {
+ err = -EINVAL;
+ goto free_irq;
+ }
+
+ err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, 0);
+ if (err) {
+ dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
+ goto free_irq;
+ }
+
+ irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF];
+
+ err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
+ ethsw_irq0_handler,
+ ethsw_irq0_handler_thread,
+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ dev_name(dev), dev);
+ if (err) {
+ dev_err(dev, "devm_request_threaded_irq(): %d", err);
+ goto free_irq;
+ }
+
+ err = dpsw_set_irq_mask(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, mask);
+ if (err) {
+ dev_err(dev, "dpsw_set_irq_mask(): %d", err);
+ goto free_devm_irq;
+ }
+
+ err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, 1);
+ if (err) {
+ dev_err(dev, "dpsw_set_irq_enable(): %d", err);
+ goto free_devm_irq;
+ }
+
+ return 0;
+
+free_devm_irq:
+ devm_free_irq(dev, irq->msi_desc->irq, dev);
+free_irq:
+ fsl_mc_free_irqs(sw_dev);
+ return err;
+}
+
+static void ethsw_teardown_irqs(struct fsl_mc_device *sw_dev)
+{
+ struct device *dev = &sw_dev->dev;
+ struct ethsw_core *ethsw = dev_get_drvdata(dev);
+ struct fsl_mc_device_irq *irq;
+
+ irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF];
+ dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, 0);
+ fsl_mc_free_irqs(sw_dev);
+}
+
+static int swdev_port_attr_get(struct net_device *netdev,
+ struct switchdev_attr *attr)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
+ attr->u.ppid.id_len = 1;
+ attr->u.ppid.id[0] = port_priv->ethsw_data->dev_id;
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ attr->u.brport_flags =
+ (port_priv->ethsw_data->learning ? BR_LEARNING : 0) |
+ (port_priv->flood ? BR_FLOOD : 0);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
+ attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int port_attr_stp_state_set(struct net_device *netdev,
+ struct switchdev_trans *trans,
+ u8 state)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ return ethsw_port_set_stp_state(port_priv, state);
+}
+
+static int port_attr_br_flags_set(struct net_device *netdev,
+ struct switchdev_trans *trans,
+ unsigned long flags)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ int err = 0;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ /* Learning is enabled per switch */
+ err = ethsw_set_learning(port_priv->ethsw_data, flags & BR_LEARNING);
+ if (err)
+ goto exit;
+
+ err = ethsw_port_set_flood(port_priv, flags & BR_FLOOD);
+
+exit:
+ return err;
+}
+
+static int swdev_port_attr_set(struct net_device *netdev,
+ const struct switchdev_attr *attr,
+ struct switchdev_trans *trans)
+{
+ int err = 0;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ err = port_attr_stp_state_set(netdev, trans,
+ attr->u.stp_state);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ err = port_attr_br_flags_set(netdev, trans,
+ attr->u.brport_flags);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
+ /* VLANs are supported by default */
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int port_vlans_add(struct net_device *netdev,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ int vid, err = 0;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ if (!port_priv->ethsw_data->vlans[vid]) {
+ /* this is a new VLAN */
+ err = ethsw_add_vlan(port_priv->ethsw_data, vid);
+ if (err)
+ return err;
+
+ port_priv->ethsw_data->vlans[vid] |= ETHSW_VLAN_GLOBAL;
+ }
+ err = ethsw_port_add_vlan(port_priv, vid, vlan->flags);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static int port_lookup_address(struct net_device *netdev, int is_uc,
+ const unsigned char *addr)
+{
+ struct netdev_hw_addr_list *list = (is_uc) ? &netdev->uc : &netdev->mc;
+ struct netdev_hw_addr *ha;
+
+ netif_addr_lock_bh(netdev);
+ list_for_each_entry(ha, &list->list, list) {
+ if (ether_addr_equal(ha->addr, addr)) {
+ netif_addr_unlock_bh(netdev);
+ return 1;
+ }
+ }
+ netif_addr_unlock_bh(netdev);
+ return 0;
+}
+
+static int port_mdb_add(struct net_device *netdev,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct switchdev_trans *trans)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ int err;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ /* Check if address is already set on this port */
+ if (port_lookup_address(netdev, 0, mdb->addr))
+ return -EEXIST;
+
+ err = ethsw_port_fdb_add_mc(port_priv, mdb->addr);
+ if (err)
+ return err;
+
+ err = dev_mc_add(netdev, mdb->addr);
+ if (err)
+ netdev_err(netdev, "dev_mc_add err %d\n", err);
+
+ return err;
+}
+
+static int swdev_port_obj_add(struct net_device *netdev,
+ const struct switchdev_obj *obj,
+ struct switchdev_trans *trans)
+{
+ int err;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = port_vlans_add(netdev,
+ SWITCHDEV_OBJ_PORT_VLAN(obj),
+ trans);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ err = port_mdb_add(netdev,
+ SWITCHDEV_OBJ_PORT_MDB(obj),
+ trans);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int ethsw_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid)
+{
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct net_device *netdev = port_priv->netdev;
+ struct dpsw_vlan_if_cfg vcfg;
+ int i, err, err2;
+ bool is_oper;
+
+ if (!port_priv->vlans[vid])
+ return -ENOENT;
+
+ if (port_priv->vlans[vid] & ETHSW_VLAN_PVID) {
+ struct dpsw_tci_cfg tci_cfg = { 0 };
+ /* Interface needs to be down to change PVID */
+ is_oper = netif_oper_up(netdev);
+
+ if (is_oper) {
+ err = dpsw_if_disable(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ port_priv->idx);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_disable err %d\n",
+ err);
+ goto exit_err;
+ }
+ }
+
+ err = dpsw_if_set_tci(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ port_priv->idx, &tci_cfg);
+ if (!err) {
+ port_priv->vlans[vid] &= ~ETHSW_VLAN_PVID;
+ port_priv->pvid = 0;
+ } else {
+ netdev_err(netdev, "dpsw_if_set_tci err %d\n", err);
+ }
+
+ if (is_oper) {
+ err2 = dpsw_if_enable(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ port_priv->idx);
+ if (err2) {
+ netdev_err(netdev, "dpsw_if_enable err %d\n",
+ err2);
+ return err2;
+ }
+ }
+
+ if (err)
+ goto exit_err;
+ }
+
+ vcfg.num_ifs = 1;
+ vcfg.if_id[0] = port_priv->idx;
+ if (port_priv->vlans[vid] & ETHSW_VLAN_UNTAGGED) {
+ err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0,
+ ethsw->dpsw_handle,
+ vid, &vcfg);
+ if (err) {
+ netdev_err(netdev,
+ "dpsw_vlan_remove_if_untagged err %d\n",
+ err);
+ }
+ port_priv->vlans[vid] &= ~ETHSW_VLAN_UNTAGGED;
+ }
+
+ if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
+ err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ vid, &vcfg);
+ if (err) {
+ netdev_err(netdev,
+ "dpsw_vlan_remove_if err %d\n", err);
+ return err;
+ }
+ port_priv->vlans[vid] &= ~ETHSW_VLAN_MEMBER;
+
+ /* Delete VLAN from switch if it is no longer configured on
+ * any port
+ */
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
+ if (ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER)
+ return 0; /* Found a port member in VID */
+
+ ethsw->vlans[vid] &= ~ETHSW_VLAN_GLOBAL;
+
+ err = ethsw_dellink_switch(ethsw, vid);
+ if (err)
+ goto exit_err;
+ }
+
+ return 0;
+exit_err:
+ return err;
+}
+
+static int port_vlans_del(struct net_device *netdev,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ int vid, err = 0;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ err = ethsw_port_del_vlan(port_priv, vid);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static int port_mdb_del(struct net_device *netdev,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ int err;
+
+ if (!port_lookup_address(netdev, 0, mdb->addr))
+ return -ENOENT;
+
+ err = ethsw_port_fdb_del_mc(port_priv, mdb->addr);
+ if (err)
+ return err;
+
+ err = dev_mc_del(netdev, mdb->addr);
+ if (err) {
+ netdev_err(netdev, "dev_mc_del err %d\n", err);
+ return err;
+ }
+
+ return err;
+}
+
+static int swdev_port_obj_del(struct net_device *netdev,
+ const struct switchdev_obj *obj)
+{
+ int err;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = port_vlans_del(netdev, SWITCHDEV_OBJ_PORT_VLAN(obj));
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ err = port_mdb_del(netdev, SWITCHDEV_OBJ_PORT_MDB(obj));
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+ return err;
+}
+
+static const struct switchdev_ops ethsw_port_switchdev_ops = {
+ .switchdev_port_attr_get = swdev_port_attr_get,
+ .switchdev_port_attr_set = swdev_port_attr_set,
+ .switchdev_port_obj_add = swdev_port_obj_add,
+ .switchdev_port_obj_del = swdev_port_obj_del,
+};
+
+/* For the moment, only flood setting needs to be updated */
+static int port_bridge_join(struct net_device *netdev)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+
+ /* Enable flooding */
+ return ethsw_port_set_flood(port_priv, 1);
+}
+
+static int port_bridge_leave(struct net_device *netdev)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+
+ /* Disable flooding */
+ return ethsw_port_set_flood(port_priv, 0);
+}
+
+static int port_netdevice_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct net_device *upper_dev;
+ int err = 0;
+
+ if (netdev->netdev_ops != &ethsw_port_ops)
+ return NOTIFY_DONE;
+
+ /* Handle just upper dev link/unlink for the moment */
+ if (event == NETDEV_CHANGEUPPER) {
+ upper_dev = info->upper_dev;
+ if (netif_is_bridge_master(upper_dev)) {
+ if (info->linking)
+ err = port_bridge_join(netdev);
+ else
+ err = port_bridge_leave(netdev);
+ }
+ }
+
+ return notifier_from_errno(err);
+}
+
+static struct notifier_block port_nb __read_mostly = {
+ .notifier_call = port_netdevice_event,
+};
+
+struct ethsw_switchdev_event_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct net_device *dev;
+ unsigned long event;
+};
+
+static void ethsw_switchdev_event_work(struct work_struct *work)
+{
+ struct ethsw_switchdev_event_work *switchdev_work =
+ container_of(work, struct ethsw_switchdev_event_work, work);
+ struct net_device *dev = switchdev_work->dev;
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct ethsw_port_priv *port_priv;
+
+ rtnl_lock();
+ port_priv = netdev_priv(dev);
+ fdb_info = &switchdev_work->fdb_info;
+
+ switch (switchdev_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ ethsw_port_fdb_add_uc(port_priv, fdb_info->addr);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ ethsw_port_fdb_del_uc(port_priv, fdb_info->addr);
+ break;
+ }
+
+ rtnl_unlock();
+ kfree(switchdev_work->fdb_info.addr);
+ kfree(switchdev_work);
+ dev_put(dev);
+}
+
+/* Called under rcu_read_lock() */
+static int port_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ struct ethsw_switchdev_event_work *switchdev_work;
+ struct switchdev_notifier_fdb_info *fdb_info = ptr;
+
+ switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
+ if (!switchdev_work)
+ return NOTIFY_BAD;
+
+ INIT_WORK(&switchdev_work->work, ethsw_switchdev_event_work);
+ switchdev_work->dev = dev;
+ switchdev_work->event = event;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ memcpy(&switchdev_work->fdb_info, ptr,
+ sizeof(switchdev_work->fdb_info));
+ switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ if (!switchdev_work->fdb_info.addr)
+ goto err_addr_alloc;
+
+ ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
+ fdb_info->addr);
+
+ /* Take a reference on the device to avoid being freed. */
+ dev_hold(dev);
+ break;
+ default:
+ kfree(switchdev_work);
+ return NOTIFY_DONE;
+ }
+
+ queue_work(ethsw_owq, &switchdev_work->work);
+
+ return NOTIFY_DONE;
+
+err_addr_alloc:
+ kfree(switchdev_work);
+ return NOTIFY_BAD;
+}
+
+static struct notifier_block port_switchdev_nb = {
+ .notifier_call = port_switchdev_event,
+};
+
+static int ethsw_register_notifier(struct device *dev)
+{
+ int err;
+
+ err = register_netdevice_notifier(&port_nb);
+ if (err) {
+ dev_err(dev, "Failed to register netdev notifier\n");
+ return err;
+ }
+
+ err = register_switchdev_notifier(&port_switchdev_nb);
+ if (err) {
+ dev_err(dev, "Failed to register switchdev notifier\n");
+ goto err_switchdev_nb;
+ }
+
+ return 0;
+
+err_switchdev_nb:
+ unregister_netdevice_notifier(&port_nb);
+ return err;
+}
+
+static int ethsw_open(struct ethsw_core *ethsw)
+{
+ struct ethsw_port_priv *port_priv = NULL;
+ int i, err;
+
+ err = dpsw_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
+ if (err) {
+ dev_err(ethsw->dev, "dpsw_enable err %d\n", err);
+ return err;
+ }
+
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
+ port_priv = ethsw->ports[i];
+ err = dev_open(port_priv->netdev);
+ if (err) {
+ netdev_err(port_priv->netdev, "dev_open err %d\n", err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int ethsw_stop(struct ethsw_core *ethsw)
+{
+ struct ethsw_port_priv *port_priv = NULL;
+ int i, err;
+
+ destroy_workqueue(ethsw_owq);
+
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
+ port_priv = ethsw->ports[i];
+ dev_close(port_priv->netdev);
+ }
+
+ err = dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
+ if (err) {
+ dev_err(ethsw->dev, "dpsw_disable err %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int ethsw_init(struct fsl_mc_device *sw_dev)
+{
+ struct device *dev = &sw_dev->dev;
+ struct ethsw_core *ethsw = dev_get_drvdata(dev);
+ u16 version_major, version_minor, i;
+ struct dpsw_stp_cfg stp_cfg;
+ int err;
+
+ ethsw->dev_id = sw_dev->obj_desc.id;
+
+ err = dpsw_open(ethsw->mc_io, 0, ethsw->dev_id, &ethsw->dpsw_handle);
+ if (err) {
+ dev_err(dev, "dpsw_open err %d\n", err);
+ return err;
+ }
+
+ err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ &ethsw->sw_attr);
+ if (err) {
+ dev_err(dev, "dpsw_get_attributes err %d\n", err);
+ goto err_close;
+ }
+
+ err = dpsw_get_api_version(ethsw->mc_io, 0,
+ &version_major,
+ &version_minor);
+ if (err) {
+ dev_err(dev, "dpsw_get_api_version err %d\n", err);
+ goto err_close;
+ }
+
+ /* Minimum supported DPSW version check */
+ if (version_major < DPSW_MIN_VER_MAJOR ||
+ (version_major == DPSW_MIN_VER_MAJOR &&
+ version_minor < DPSW_MIN_VER_MINOR)) {
+ dev_err(dev, "DPSW version %d:%d not supported. Use %d.%d or greater.\n",
+ version_major,
+ version_minor,
+ DPSW_MIN_VER_MAJOR, DPSW_MIN_VER_MINOR);
+ err = -ENOTSUPP;
+ goto err_close;
+ }
+
+ err = dpsw_reset(ethsw->mc_io, 0, ethsw->dpsw_handle);
+ if (err) {
+ dev_err(dev, "dpsw_reset err %d\n", err);
+ goto err_close;
+ }
+
+ err = dpsw_fdb_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle, 0,
+ DPSW_FDB_LEARNING_MODE_HW);
+ if (err) {
+ dev_err(dev, "dpsw_fdb_set_learning_mode err %d\n", err);
+ goto err_close;
+ }
+
+ stp_cfg.vlan_id = DEFAULT_VLAN_ID;
+ stp_cfg.state = DPSW_STP_STATE_FORWARDING;
+
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
+ err = dpsw_if_set_stp(ethsw->mc_io, 0, ethsw->dpsw_handle, i,
+ &stp_cfg);
+ if (err) {
+ dev_err(dev, "dpsw_if_set_stp err %d for port %d\n",
+ err, i);
+ goto err_close;
+ }
+
+ err = dpsw_if_set_broadcast(ethsw->mc_io, 0,
+ ethsw->dpsw_handle, i, 1);
+ if (err) {
+ dev_err(dev,
+ "dpsw_if_set_broadcast err %d for port %d\n",
+ err, i);
+ goto err_close;
+ }
+ }
+
+ ethsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM,
+ "ethsw");
+ if (!ethsw_owq) {
+ err = -ENOMEM;
+ goto err_close;
+ }
+
+ err = ethsw_register_notifier(dev);
+ if (err)
+ goto err_destroy_ordered_workqueue;
+
+ return 0;
+
+err_destroy_ordered_workqueue:
+ destroy_workqueue(ethsw_owq);
+
+err_close:
+ dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
+ return err;
+}
+
+static int ethsw_port_init(struct ethsw_port_priv *port_priv, u16 port)
+{
+ const char def_mcast[ETH_ALEN] = {0x01, 0x00, 0x5e, 0x00, 0x00, 0x01};
+ struct net_device *netdev = port_priv->netdev;
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ struct dpsw_tci_cfg tci_cfg = {0};
+ struct dpsw_vlan_if_cfg vcfg;
+ int err;
+
+ /* Switch starts with all ports configured to VLAN 1. Need to
+ * remove this setting to allow configuration at bridge join
+ */
+ vcfg.num_ifs = 1;
+ vcfg.if_id[0] = port_priv->idx;
+
+ err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DEFAULT_VLAN_ID, &vcfg);
+ if (err) {
+ netdev_err(netdev, "dpsw_vlan_remove_if_untagged err %d\n",
+ err);
+ return err;
+ }
+
+ err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ port_priv->idx, &tci_cfg);
+ if (err) {
+ netdev_err(netdev, "dpsw_if_set_tci err %d\n", err);
+ return err;
+ }
+
+ err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DEFAULT_VLAN_ID, &vcfg);
+ if (err) {
+ netdev_err(netdev, "dpsw_vlan_remove_if err %d\n", err);
+ return err;
+ }
+
+ err = ethsw_port_fdb_add_mc(port_priv, def_mcast);
+
+ return err;
+}
+
+static void ethsw_takedown(struct fsl_mc_device *sw_dev)
+{
+ struct device *dev = &sw_dev->dev;
+ struct ethsw_core *ethsw = dev_get_drvdata(dev);
+ int err;
+
+ err = unregister_switchdev_notifier(&port_switchdev_nb);
+ if (err)
+ dev_err(dev,
+ "Failed to unregister switchdev notifier (%d)\n", err);
+
+ err = unregister_netdevice_notifier(&port_nb);
+ if (err)
+ dev_err(dev,
+ "Failed to unregister netdev notifier (%d)\n", err);
+
+ err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
+ if (err)
+ dev_warn(dev, "dpsw_close err %d\n", err);
+}
+
+static int ethsw_remove(struct fsl_mc_device *sw_dev)
+{
+ struct ethsw_port_priv *port_priv;
+ struct ethsw_core *ethsw;
+ struct device *dev;
+ int i;
+
+ dev = &sw_dev->dev;
+ ethsw = dev_get_drvdata(dev);
+
+ ethsw_teardown_irqs(sw_dev);
+
+ rtnl_lock();
+ ethsw_stop(ethsw);
+ rtnl_unlock();
+
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
+ port_priv = ethsw->ports[i];
+ unregister_netdev(port_priv->netdev);
+ free_netdev(port_priv->netdev);
+ }
+ kfree(ethsw->ports);
+
+ ethsw_takedown(sw_dev);
+ fsl_mc_portal_free(ethsw->mc_io);
+
+ kfree(ethsw);
+
+ dev_set_drvdata(dev, NULL);
+
+ return 0;
+}
+
+static int ethsw_probe_port(struct ethsw_core *ethsw, u16 port_idx)
+{
+ struct ethsw_port_priv *port_priv;
+ struct device *dev = ethsw->dev;
+ struct net_device *port_netdev;
+ int err;
+
+ port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv));
+ if (!port_netdev) {
+ dev_err(dev, "alloc_etherdev error\n");
+ return -ENOMEM;
+ }
+
+ port_priv = netdev_priv(port_netdev);
+ port_priv->netdev = port_netdev;
+ port_priv->ethsw_data = ethsw;
+
+ port_priv->idx = port_idx;
+ port_priv->stp_state = BR_STATE_FORWARDING;
+
+ /* Flooding is implicitly enabled */
+ port_priv->flood = true;
+
+ SET_NETDEV_DEV(port_netdev, dev);
+ port_netdev->netdev_ops = &ethsw_port_ops;
+ port_netdev->switchdev_ops = &ethsw_port_switchdev_ops;
+
+ /* Set MTU limits */
+ port_netdev->min_mtu = ETH_MIN_MTU;
+ port_netdev->max_mtu = ETHSW_MAX_FRAME_LENGTH;
+
+ err = register_netdev(port_netdev);
+ if (err < 0) {
+ dev_err(dev, "register_netdev error %d\n", err);
+ free_netdev(port_netdev);
+ return err;
+ }
+
+ ethsw->ports[port_idx] = port_priv;
+
+ return ethsw_port_init(port_priv, port_idx);
+}
+
+static int ethsw_probe(struct fsl_mc_device *sw_dev)
+{
+ struct device *dev = &sw_dev->dev;
+ struct ethsw_core *ethsw;
+ int err;
+ u16 i, j;
+
+ /* Allocate switch core */
+ ethsw = kzalloc(sizeof(*ethsw), GFP_KERNEL);
+
+ if (!ethsw)
+ return -ENOMEM;
+
+ ethsw->dev = dev;
+ dev_set_drvdata(dev, ethsw);
+
+ err = fsl_mc_portal_allocate(sw_dev, 0, &ethsw->mc_io);
+ if (err) {
+ dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
+ goto err_free_drvdata;
+ }
+
+ err = ethsw_init(sw_dev);
+ if (err)
+ goto err_free_cmdport;
+
+ /* DEFAULT_VLAN_ID is implicitly configured on the switch */
+ ethsw->vlans[DEFAULT_VLAN_ID] = ETHSW_VLAN_MEMBER;
+
+ /* Learning is implicitly enabled */
+ ethsw->learning = true;
+
+ ethsw->ports = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->ports),
+ GFP_KERNEL);
+ if (!(ethsw->ports)) {
+ err = -ENOMEM;
+ goto err_takedown;
+ }
+
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
+ err = ethsw_probe_port(ethsw, i);
+ if (err) {
+ /* Cleanup previous ports only */
+ for (j = 0; j < i; j++) {
+ unregister_netdev(ethsw->ports[j]->netdev);
+ free_netdev(ethsw->ports[j]->netdev);
+ }
+ goto err_takedown;
+ }
+ }
+
+ /* Switch starts up enabled */
+ rtnl_lock();
+ err = ethsw_open(ethsw);
+ rtnl_unlock();
+ if (err)
+ goto err_free_ports;
+
+ /* Setup IRQs */
+ err = ethsw_setup_irqs(sw_dev);
+ if (err)
+ goto err_stop;
+
+ dev_info(dev, "probed %d port switch\n", ethsw->sw_attr.num_ifs);
+ return 0;
+
+err_stop:
+ rtnl_lock();
+ ethsw_stop(ethsw);
+ rtnl_unlock();
+
+err_free_ports:
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
+ unregister_netdev(ethsw->ports[i]->netdev);
+ free_netdev(ethsw->ports[i]->netdev);
+ }
+ kfree(ethsw->ports);
+
+err_takedown:
+ ethsw_takedown(sw_dev);
+
+err_free_cmdport:
+ fsl_mc_portal_free(ethsw->mc_io);
+
+err_free_drvdata:
+ kfree(ethsw);
+ dev_set_drvdata(dev, NULL);
+
+ return err;
+}
+
+static const struct fsl_mc_device_id ethsw_match_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpsw",
+ },
+ { .vendor = 0x0 }
+};
+MODULE_DEVICE_TABLE(fslmc, ethsw_match_id_table);
+
+static struct fsl_mc_driver eth_sw_drv = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = ethsw_probe,
+ .remove = ethsw_remove,
+ .match_id_table = ethsw_match_id_table
+};
+
+module_fsl_mc_driver(eth_sw_drv);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver");
diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.h b/drivers/staging/fsl-dpaa2/ethsw/ethsw.h
new file mode 100644
index 0000000..8c1d645
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.h
@@ -0,0 +1,88 @@
+/* Copyright 2014-2017 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __ETHSW_H
+#define __ETHSW_H
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_vlan.h>
+#include <uapi/linux/if_bridge.h>
+#include <net/switchdev.h>
+#include <linux/if_bridge.h>
+
+#include "dpsw.h"
+
+/* Number of IRQs supported */
+#define DPSW_IRQ_NUM 2
+
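+/* Per-VLAN state flags, ORed into the vlans[] arrays indexed by VLAN ID */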
+#define ETHSW_VLAN_MEMBER 1
+#define ETHSW_VLAN_UNTAGGED 2
+#define ETHSW_VLAN_PVID 4
+#define ETHSW_VLAN_GLOBAL 8
+
+/* Maximum Frame Length supported by HW (currently 10k) */
+#define DPAA2_MFL (10 * 1024)
+#define ETHSW_MAX_FRAME_LENGTH (DPAA2_MFL - VLAN_ETH_HLEN - ETH_FCS_LEN)
+#define ETHSW_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN + ETH_FCS_LEN)
+
+struct ethsw_core;
+
+/* Per port private data */
+struct ethsw_port_priv {
+ struct net_device *netdev;
+ u16 idx;
+ struct ethsw_core *ethsw_data;
+ u8 link_state;
+ u8 stp_state;
+ bool flood;
+
+ u8 vlans[VLAN_VID_MASK + 1];
+ u16 pvid;
+};
+
+/* Switch data */
+struct ethsw_core {
+ struct device *dev;
+ struct fsl_mc_io *mc_io;
+ u16 dpsw_handle;
+ struct dpsw_attr sw_attr;
+ int dev_id;
+ struct ethsw_port_priv **ports;
+
+ u8 vlans[VLAN_VID_MASK + 1];
+ bool learning;
+};
+
+#endif /* __ETHSW_H */
--
1.9.1
Add driver information and link details reporting via ethtool, and expose
per-port hardware statistics through 'ethtool -S'.
Signed-off-by: Razvan Stefanescu <[email protected]>
---
drivers/staging/fsl-dpaa2/ethsw/Makefile | 2 +-
drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h | 13 ++
drivers/staging/fsl-dpaa2/ethsw/dpsw.c | 32 ++++
drivers/staging/fsl-dpaa2/ethsw/dpsw.h | 32 ++++
drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c | 207 ++++++++++++++++++++++++
drivers/staging/fsl-dpaa2/ethsw/ethsw.c | 3 +
drivers/staging/fsl-dpaa2/ethsw/ethsw.h | 3 +
7 files changed, 291 insertions(+), 1 deletion(-)
create mode 100644 drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c
diff --git a/drivers/staging/fsl-dpaa2/ethsw/Makefile b/drivers/staging/fsl-dpaa2/ethsw/Makefile
index a6d72d1..de92cd9 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/Makefile
+++ b/drivers/staging/fsl-dpaa2/ethsw/Makefile
@@ -4,4 +4,4 @@
obj-$(CONFIG_FSL_DPAA2_ETHSW) += dpaa2-ethsw.o
-dpaa2-ethsw-objs := ethsw.o dpsw.o
+dpaa2-ethsw-objs := ethsw.o ethsw-ethtool.o dpsw.o
diff --git a/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h b/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
index ddfd820..06b71122 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
+++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw-cmd.h
@@ -74,6 +74,8 @@
#define DPSW_CMDID_IF_SET_FLOODING DPSW_CMD_ID(0x047)
#define DPSW_CMDID_IF_SET_BROADCAST DPSW_CMD_ID(0x048)
+#define DPSW_CMDID_IF_SET_LINK_CFG DPSW_CMD_ID(0x04C)
+
#define DPSW_CMDID_VLAN_ADD DPSW_CMD_ID(0x060)
#define DPSW_CMDID_VLAN_ADD_IF DPSW_CMD_ID(0x061)
#define DPSW_CMDID_VLAN_ADD_IF_UNTAGGED DPSW_CMD_ID(0x062)
@@ -262,6 +264,17 @@ struct dpsw_cmd_if_set_max_frame_length {
__le16 frame_length;
};
+struct dpsw_cmd_if_set_link_cfg {
+ /* cmd word 0 */
+ __le16 if_id;
+ u8 pad[6];
+ /* cmd word 1 */
+ __le32 rate;
+ __le32 pad1;
+ /* cmd word 2 */
+ __le64 options;
+};
+
struct dpsw_cmd_if_get_link_state {
__le16 if_id;
};
diff --git a/drivers/staging/fsl-dpaa2/ethsw/dpsw.c b/drivers/staging/fsl-dpaa2/ethsw/dpsw.c
index f36b92b..601172a 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/dpsw.c
+++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw.c
@@ -383,6 +383,38 @@ int dpsw_get_attributes(struct fsl_mc_io *mc_io,
}
/**
+ * dpsw_if_set_link_cfg() - Set the link configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSW object
+ * @if_id: Interface id
+ * @cfg: Link configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
+ struct dpsw_link_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+ struct dpsw_cmd_if_set_link_cfg *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_SET_LINK_CFG,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpsw_cmd_if_set_link_cfg *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+ cmd_params->rate = cpu_to_le32(cfg->rate);
+ cmd_params->options = cpu_to_le64(cfg->options);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
* dpsw_if_get_link_state - Return the link state
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
diff --git a/drivers/staging/fsl-dpaa2/ethsw/dpsw.h b/drivers/staging/fsl-dpaa2/ethsw/dpsw.h
index e9c2906..5feadc2 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/dpsw.h
+++ b/drivers/staging/fsl-dpaa2/ethsw/dpsw.h
@@ -245,6 +245,38 @@ enum dpsw_action {
};
/**
+ * Enable auto-negotiation
+ */
+#define DPSW_LINK_OPT_AUTONEG 0x0000000000000001ULL
+/**
+ * Enable half-duplex mode
+ */
+#define DPSW_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
+/**
+ * Enable pause frames
+ */
+#define DPSW_LINK_OPT_PAUSE 0x0000000000000004ULL
+/**
+ * Enable asymmetric pause frames
+ */
+#define DPSW_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
+
+/**
+ * struct dpsw_link_cfg - Structure representing DPSW link configuration
+ * @rate: Rate
+ * @options: Mask of available options; use 'DPSW_LINK_OPT_<X>' values
+ */
+struct dpsw_link_cfg {
+ u32 rate;
+ u64 options;
+};
+
+int dpsw_if_set_link_cfg(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
+ struct dpsw_link_cfg *cfg);
+/**
* struct dpsw_link_state - Structure representing DPSW link state
* @rate: Rate
* @options: Mask of available options; use 'DPSW_LINK_OPT_<X>' values
diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c b/drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c
new file mode 100644
index 0000000..dcd49d2
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c
@@ -0,0 +1,207 @@
+/* Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ethsw.h"
+
+static struct {
+ enum dpsw_counter id;
+ char name[ETH_GSTRING_LEN];
+} ethsw_ethtool_counters[] = {
+ {DPSW_CNT_ING_FRAME, "rx frames"},
+ {DPSW_CNT_ING_BYTE, "rx bytes"},
+ {DPSW_CNT_ING_FLTR_FRAME, "rx filtered frames"},
+ {DPSW_CNT_ING_FRAME_DISCARD, "rx discarded frames"},
+ {DPSW_CNT_ING_BCAST_FRAME, "rx b-cast frames"},
+ {DPSW_CNT_ING_BCAST_BYTES, "rx b-cast bytes"},
+ {DPSW_CNT_ING_MCAST_FRAME, "rx m-cast frames"},
+ {DPSW_CNT_ING_MCAST_BYTE, "rx m-cast bytes"},
+ {DPSW_CNT_EGR_FRAME, "tx frames"},
+ {DPSW_CNT_EGR_BYTE, "tx bytes"},
+ {DPSW_CNT_EGR_FRAME_DISCARD, "tx discarded frames"},
+
+};
+
+#define ETHSW_NUM_COUNTERS ARRAY_SIZE(ethsw_ethtool_counters)
+
+static void ethsw_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ u16 version_major, version_minor;
+ int err;
+
+ strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, ethsw_drv_version, sizeof(drvinfo->version));
+
+ err = dpsw_get_api_version(port_priv->ethsw_data->mc_io, 0,
+ &version_major,
+ &version_minor);
+ if (err)
+ strlcpy(drvinfo->fw_version, "N/A",
+ sizeof(drvinfo->fw_version));
+ else
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%u.%u", version_major, version_minor);
+
+ strlcpy(drvinfo->bus_info, dev_name(netdev->dev.parent->parent),
+ sizeof(drvinfo->bus_info));
+}
+
+static int
+ethsw_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *link_ksettings)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct dpsw_link_state state = {0};
+ int err = 0;
+
+ err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ &state);
+ if (err) {
+ netdev_err(netdev, "ERROR %d getting link state", err);
+ goto out;
+ }
+
+ /* At the moment, we have no way of interrogating the DPMAC
+ * from the DPSW side or there may not exist a DPMAC at all.
+ * Report only autoneg state, duplexity and speed.
+ */
+ if (state.options & DPSW_LINK_OPT_AUTONEG)
+ link_ksettings->base.autoneg = AUTONEG_ENABLE;
+ if (!(state.options & DPSW_LINK_OPT_HALF_DUPLEX))
+ link_ksettings->base.duplex = DUPLEX_FULL;
+ link_ksettings->base.speed = state.rate;
+
+out:
+ return err;
+}
+
+static int
+ethsw_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *link_ksettings)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct dpsw_link_cfg cfg = {0};
+ int err = 0;
+
+ netdev_dbg(netdev, "Setting link parameters...");
+
+ /* Due to a temporary MC limitation, the DPSW port must be down
+ * in order to be able to change link settings. Taking steps to let
+ * the user know that.
+ */
+ if (netif_running(netdev)) {
+ netdev_info(netdev, "Sorry, interface must be brought down first.\n");
+ return -EACCES;
+ }
+
+ cfg.rate = link_ksettings->base.speed;
+ if (link_ksettings->base.autoneg == AUTONEG_ENABLE)
+ cfg.options |= DPSW_LINK_OPT_AUTONEG;
+ else
+ cfg.options &= ~DPSW_LINK_OPT_AUTONEG;
+ if (link_ksettings->base.duplex == DUPLEX_HALF)
+ cfg.options |= DPSW_LINK_OPT_HALF_DUPLEX;
+ else
+ cfg.options &= ~DPSW_LINK_OPT_HALF_DUPLEX;
+
+ err = dpsw_if_set_link_cfg(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ &cfg);
+ if (err)
+ /* ethtool will be loud enough if we return an error; no point
+ * in putting our own error message on the console by default
+ */
+ netdev_dbg(netdev, "ERROR %d setting link cfg", err);
+
+ return err;
+}
+
+static int ethsw_ethtool_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ETHSW_NUM_COUNTERS;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void ethsw_ethtool_get_strings(struct net_device *netdev,
+ u32 stringset, u8 *data)
+{
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ETHSW_NUM_COUNTERS; i++)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ ethsw_ethtool_counters[i].name, ETH_GSTRING_LEN);
+ break;
+ }
+}
+
+static void ethsw_ethtool_get_stats(struct net_device *netdev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ int i, err;
+
+ memset(data, 0,
+ sizeof(u64) * ETHSW_NUM_COUNTERS);
+
+ for (i = 0; i < ETHSW_NUM_COUNTERS; i++) {
+ err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx,
+ ethsw_ethtool_counters[i].id,
+ &data[i]);
+ if (err)
+ netdev_err(netdev, "dpsw_if_get_counter[%s] err %d\n",
+ ethsw_ethtool_counters[i].name, err);
+ }
+}
+
+const struct ethtool_ops ethsw_port_ethtool_ops = {
+ .get_drvinfo = ethsw_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = ethsw_get_link_ksettings,
+ .set_link_ksettings = ethsw_set_link_ksettings,
+ .get_strings = ethsw_ethtool_get_strings,
+ .get_ethtool_stats = ethsw_ethtool_get_stats,
+ .get_sset_count = ethsw_ethtool_get_sset_count,
+};
diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
index ae86078..12bf88a 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
+++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
@@ -44,6 +44,8 @@
static struct workqueue_struct *ethsw_owq;
+const char ethsw_drv_version[] = "0.1";
+
/* Minimal supported DPSW version */
#define DPSW_MIN_VER_MAJOR 8
#define DPSW_MIN_VER_MINOR 0
@@ -1389,6 +1391,7 @@ static int ethsw_probe_port(struct ethsw_core *ethsw, u16 port_idx)
SET_NETDEV_DEV(port_netdev, dev);
port_netdev->netdev_ops = ðsw_port_ops;
+ port_netdev->ethtool_ops = ðsw_port_ethtool_ops;
port_netdev->switchdev_ops = ðsw_port_switchdev_ops;
/* Set MTU limits */
diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.h b/drivers/staging/fsl-dpaa2/ethsw/ethsw.h
index 8c1d645..ba53cc5 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/ethsw.h
+++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.h
@@ -57,6 +57,9 @@
#define ETHSW_MAX_FRAME_LENGTH (DPAA2_MFL - VLAN_ETH_HLEN - ETH_FCS_LEN)
#define ETHSW_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN + ETH_FCS_LEN)
+extern const char ethsw_drv_version[];
+extern const struct ethtool_ops ethsw_port_ethtool_ops;
+
struct ethsw_core;
/* Per port private data */
--
1.9.1
> Introduce the DPAA2 Ethernet Switch driver, which manages Datapath Switch
> (DPSW) objects discovered on the MC bus.
>
> Suggested-by: Alexandru Marginean <[email protected]>
> Signed-off-by: Razvan Stefanescu <[email protected]>
> ---
> drivers/staging/fsl-dpaa2/ethsw/Makefile | 2 +-
> drivers/staging/fsl-dpaa2/ethsw/ethsw.c | 1523 ++++++++++++++++++++++++++++++
> drivers/staging/fsl-dpaa2/ethsw/ethsw.h | 88 ++
> 3 files changed, 1612 insertions(+), 1 deletion(-)
> create mode 100644 drivers/staging/fsl-dpaa2/ethsw/ethsw.c
> create mode 100644 drivers/staging/fsl-dpaa2/ethsw/ethsw.h
>
> diff --git a/drivers/staging/fsl-dpaa2/ethsw/Makefile b/drivers/staging/fsl-dpaa2/ethsw/Makefile
> index db137f7..a6d72d1 100644
> --- a/drivers/staging/fsl-dpaa2/ethsw/Makefile
> +++ b/drivers/staging/fsl-dpaa2/ethsw/Makefile
> @@ -4,4 +4,4 @@
>
> obj-$(CONFIG_FSL_DPAA2_ETHSW) += dpaa2-ethsw.o
>
> -dpaa2-ethsw-objs := dpsw.o
> +dpaa2-ethsw-objs := ethsw.o dpsw.o
> diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
> new file mode 100644
> index 0000000..ae86078
> --- /dev/null
> +++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
> @@ -0,0 +1,1523 @@
> +/* Copyright 2014-2016 Freescale Semiconductor Inc.
> + * Copyright 2017 NXP
> + *
> + * Redistribution and use in source and binary forms, with or without
> + * modification, are permitted provided that the following conditions are met:
> + * * Redistributions of source code must retain the above copyright
> + * notice, this list of conditions and the following disclaimer.
> + * * Redistributions in binary form must reproduce the above copyright
> + * notice, this list of conditions and the following disclaimer in the
> + * documentation and/or other materials provided with the distribution.
> + * * Neither the name of the above-listed copyright holders nor the
> + * names of any contributors may be used to endorse or promote products
> + * derived from this software without specific prior written permission.
> + *
> + *
> + * ALTERNATIVELY, this software may be distributed under the terms of the
> + * GNU General Public License ("GPL") as published by the Free Software
> + * Foundation, either version 2 of that License or (at your option) any
> + * later version.
> + *
> + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
> + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
> + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
> + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
> + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
> + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
> + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
> + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
> + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
> + * POSSIBILITY OF SUCH DAMAGE.
> + */
> +
> +#include <linux/module.h>
> +
> +#include <linux/interrupt.h>
> +#include <linux/msi.h>
> +#include <linux/kthread.h>
> +#include <linux/workqueue.h>
> +
> +#include "../../fsl-mc/include/mc.h"
> +
> +#include "ethsw.h"
> +
> +static struct workqueue_struct *ethsw_owq;
> +
> +/* Minimal supported DPSW version */
> +#define DPSW_MIN_VER_MAJOR 8
> +#define DPSW_MIN_VER_MINOR 0
> +
> +#define DEFAULT_VLAN_ID 1
> +
> +static int ethsw_add_vlan(struct ethsw_core *ethsw, u16 vid)
> +{
> + int err;
> +
> + struct dpsw_vlan_cfg vcfg = {
> + .fdb_id = 0,
> + };
> +
> + if (ethsw->vlans[vid]) {
> + dev_err(ethsw->dev, "VLAN already configured\n");
> + return -EEXIST;
> + }
> +
> + err = dpsw_vlan_add(ethsw->mc_io, 0,
> + ethsw->dpsw_handle, vid, &vcfg);
> + if (err) {
> + dev_err(ethsw->dev, "dpsw_vlan_add err %d\n", err);
> + return err;
> + }
> + ethsw->vlans[vid] = ETHSW_VLAN_MEMBER;
> +
> + return 0;
> +}
> +
> +static int ethsw_port_add_vlan(struct ethsw_port_priv *port_priv,
> + u16 vid, u16 flags)
> +{
> + struct ethsw_core *ethsw = port_priv->ethsw_data;
> + struct net_device *netdev = port_priv->netdev;
> + struct dpsw_vlan_if_cfg vcfg;
> + bool is_oper;
> + int err, err2;
Mild suggestion - s/err2/ret/, just because it sounds better, at least to me (same for similar situations in the rest of the file).
> +
> + if (port_priv->vlans[vid]) {
> + netdev_warn(netdev, "VLAN %d already configured\n", vid);
> + return -EEXIST;
> + }
> +
> + vcfg.num_ifs = 1;
> + vcfg.if_id[0] = port_priv->idx;
> + err = dpsw_vlan_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, vid, &vcfg);
> + if (err) {
> + netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err);
> + return err;
> + }
> +
> + port_priv->vlans[vid] = ETHSW_VLAN_MEMBER;
> +
> + if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
> + err = dpsw_vlan_add_if_untagged(ethsw->mc_io, 0,
> + ethsw->dpsw_handle,
> + vid, &vcfg);
> + if (err) {
> + netdev_err(netdev,
> + "dpsw_vlan_add_if_untagged err %d\n", err);
> + return err;
> + }
> + port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED;
> + }
> +
> + if (flags & BRIDGE_VLAN_INFO_PVID) {
> + struct dpsw_tci_cfg tci_cfg = {
> + .pcp = 0,
> + .dei = 0,
> + .vlan_id = vid,
> + };
> +
> + /* Interface needs to be down to change PVID */
> + is_oper = netif_oper_up(netdev);
> + if (is_oper) {
> + err = dpsw_if_disable(ethsw->mc_io, 0,
> + ethsw->dpsw_handle,
> + port_priv->idx);
> + if (err) {
> + netdev_err(netdev,
> + "dpsw_if_disable err %d\n", err);
> + return err;
> + }
> + }
> +
> + err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
> + port_priv->idx, &tci_cfg);
> + if (!err) {
> + /* Delete previous PVID info and mark the new one */
> + if (port_priv->pvid)
> + port_priv->vlans[port_priv->pvid]
> + ^= ETHSW_VLAN_PVID;
Can it be "&= ~ETHSW_VLAN_PVID"? Are there other implications?
> +
> + port_priv->vlans[vid] |= ETHSW_VLAN_PVID;
> + port_priv->pvid = vid;
> + } else {
> + netdev_err(netdev, "dpsw_if_set_tci err %d\n", err);
> + }
> +
> + if (is_oper) {
> + err2 = dpsw_if_enable(ethsw->mc_io, 0,
> + ethsw->dpsw_handle,
> + port_priv->idx);
> + if (err2) {
> + netdev_err(netdev,
> + "dpsw_if_enable err %d\n", err2);
> + return err2;
> + }
> + }
> + }
> +
> + return err;
> +}
> +
> +static int ethsw_set_learning(struct ethsw_core *ethsw, u8 flag)
> +{
> + enum dpsw_fdb_learning_mode learn_mode;
> + int err;
> +
> + if (flag)
> + learn_mode = DPSW_FDB_LEARNING_MODE_HW;
> + else
> + learn_mode = DPSW_FDB_LEARNING_MODE_DIS;
> +
> + err = dpsw_fdb_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle, 0,
> + learn_mode);
> + if (err) {
> + dev_err(ethsw->dev, "dpsw_fdb_set_learning_mode err %d\n", err);
> + return err;
> + }
> + ethsw->learning = !!flag;
> +
> + return 0;
> +}
> +
> +static int ethsw_port_set_flood(struct ethsw_port_priv *port_priv, u8 flag)
> +{
> + int err;
> +
> + err = dpsw_if_set_flooding(port_priv->ethsw_data->mc_io, 0,
> + port_priv->ethsw_data->dpsw_handle,
> + port_priv->idx, (int)flag);
Why is this cast necessary? Can't the API be reworked to use u8 (or, better yet, bool) instead of int?
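For example (just a sketch, assuming the command layout stays the same), the prototype could become:

	int dpsw_if_set_flooding(struct fsl_mc_io *mc_io,
				 u32 cmd_flags,
				 u16 token,
				 u16 if_id,
				 u8 en);

so the call site can pass the flag (or a bool) directly, without the cast.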
> + if (err) {
> + netdev_err(port_priv->netdev,
> + "dpsw_fdb_set_learning_mode err %d\n", err);
> + return err;
> + }
> + port_priv->flood = !!flag;
> +
> + return 0;
> +}
> +
> +static int ethsw_port_set_stp_state(struct ethsw_port_priv *port_priv, u8 state)
> +{
> + struct dpsw_stp_cfg stp_cfg = {
> + .vlan_id = DEFAULT_VLAN_ID,
> + .state = state,
> + };
> + int err;
> +
> + if (!netif_oper_up(port_priv->netdev) || state == port_priv->stp_state)
> + return 0; /* Nothing to do */
> +
> + err = dpsw_if_set_stp(port_priv->ethsw_data->mc_io, 0,
> + port_priv->ethsw_data->dpsw_handle,
> + port_priv->idx, &stp_cfg);
> + if (err) {
> + netdev_err(port_priv->netdev,
> + "dpsw_if_set_stp err %d\n", err);
> + return err;
> + }
> +
> + port_priv->stp_state = state;
> +
> + return 0;
> +}
> +
> +static int ethsw_dellink_switch(struct ethsw_core *ethsw, u16 vid)
> +{
> + struct ethsw_port_priv *ppriv_local = NULL;
> + int i, err;
> +
> + if (!ethsw->vlans[vid])
> + return -ENOENT;
> +
> + err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, vid);
> + if (err) {
> + dev_err(ethsw->dev, "dpsw_vlan_remove err %d\n", err);
> + return err;
> + }
> + ethsw->vlans[vid] = 0;
> +
> + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
> + ppriv_local = ethsw->ports[i];
> + ppriv_local->vlans[vid] = 0;
> + }
> +
> + return 0;
> +}
> +
> +static int ethsw_port_fdb_add_uc(struct ethsw_port_priv *port_priv,
> + const unsigned char *addr)
> +{
> + struct dpsw_fdb_unicast_cfg entry = {0};
> + int err;
> +
> + entry.if_egress = port_priv->idx;
> + entry.type = DPSW_FDB_ENTRY_STATIC;
> + ether_addr_copy(entry.mac_addr, addr);
> +
> + err = dpsw_fdb_add_unicast(port_priv->ethsw_data->mc_io, 0,
> + port_priv->ethsw_data->dpsw_handle,
> + 0, &entry);
> + if (err)
> + netdev_err(port_priv->netdev,
> + "dpsw_fdb_add_unicast err %d\n", err);
> + return err;
> +}
> +
> +static int ethsw_port_fdb_del_uc(struct ethsw_port_priv *port_priv,
> + const unsigned char *addr)
> +{
> + struct dpsw_fdb_unicast_cfg entry = {0};
> + int err;
> +
> + entry.if_egress = port_priv->idx;
> + entry.type = DPSW_FDB_ENTRY_STATIC;
> + ether_addr_copy(entry.mac_addr, addr);
> +
> + err = dpsw_fdb_remove_unicast(port_priv->ethsw_data->mc_io, 0,
> + port_priv->ethsw_data->dpsw_handle,
> + 0, &entry);
> + if (err)
> + netdev_err(port_priv->netdev,
> + "dpsw_fdb_remove_unicast err %d\n", err);
> + return err;
> +}
> +
> +static int ethsw_port_fdb_add_mc(struct ethsw_port_priv *port_priv,
> + const unsigned char *addr)
> +{
> + struct dpsw_fdb_multicast_cfg entry = {0};
> + int err;
> +
> + ether_addr_copy(entry.mac_addr, addr);
> + entry.type = DPSW_FDB_ENTRY_STATIC;
> + entry.num_ifs = 1;
> + entry.if_id[0] = port_priv->idx;
> +
> + err = dpsw_fdb_add_multicast(port_priv->ethsw_data->mc_io, 0,
> + port_priv->ethsw_data->dpsw_handle,
> + 0, &entry);
> + if (err)
> + netdev_err(port_priv->netdev, "dpsw_fdb_add_multicast err %d\n",
> + err);
> + return err;
> +}
> +
> +static int ethsw_port_fdb_del_mc(struct ethsw_port_priv *port_priv,
> + const unsigned char *addr)
> +{
> + struct dpsw_fdb_multicast_cfg entry = {0};
> + int err;
> +
> + ether_addr_copy(entry.mac_addr, addr);
> + entry.type = DPSW_FDB_ENTRY_STATIC;
> + entry.num_ifs = 1;
> + entry.if_id[0] = port_priv->idx;
> +
> + err = dpsw_fdb_remove_multicast(port_priv->ethsw_data->mc_io, 0,
> + port_priv->ethsw_data->dpsw_handle,
> + 0, &entry);
> + if (err)
> + netdev_err(port_priv->netdev,
> + "dpsw_fdb_remove_multicast err %d\n", err);
> + return err;
> +}
> +
> +static void port_get_stats(struct net_device *netdev,
> + struct rtnl_link_stats64 *stats)
> +{
> + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
> + u64 tmp;
> + int err;
> +
> + err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
> + port_priv->ethsw_data->dpsw_handle,
> + port_priv->idx,
> + DPSW_CNT_ING_FRAME, &stats->rx_packets);
> + if (err)
> + goto error;
> +
> + err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
> + port_priv->ethsw_data->dpsw_handle,
> + port_priv->idx,
> + DPSW_CNT_EGR_FRAME, &stats->tx_packets);
> + if (err)
> + goto error;
> +
> + err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
> + port_priv->ethsw_data->dpsw_handle,
> + port_priv->idx,
> + DPSW_CNT_ING_BYTE, &stats->rx_bytes);
> + if (err)
> + goto error;
> +
> + err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
> + port_priv->ethsw_data->dpsw_handle,
> + port_priv->idx,
> + DPSW_CNT_EGR_BYTE, &stats->tx_bytes);
> + if (err)
> + goto error;
> +
> + err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
> + port_priv->ethsw_data->dpsw_handle,
> + port_priv->idx,
> + DPSW_CNT_ING_FRAME_DISCARD,
> + &stats->rx_dropped);
> + if (err)
> + goto error;
> +
> + err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
> + port_priv->ethsw_data->dpsw_handle,
> + port_priv->idx,
> + DPSW_CNT_ING_FLTR_FRAME,
> + &tmp);
> + if (err)
> + goto error;
> + stats->rx_dropped += tmp;
> +
> + err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
> + port_priv->ethsw_data->dpsw_handle,
> + port_priv->idx,
> + DPSW_CNT_EGR_FRAME_DISCARD,
> + &stats->tx_dropped);
> + if (err)
> + goto error;
> +
> + return;
> +
> +error:
> + netdev_err(netdev, "dpsw_if_get_counter err %d\n", err);
> +}
> +
> +static bool port_has_offload_stats(const struct net_device *netdev,
> + int attr_id)
> +{
> + return (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT);
> +}
> +
> +static int port_get_offload_stats(int attr_id,
> + const struct net_device *netdev,
> + void *sp)
> +{
> + switch (attr_id) {
> + case IFLA_OFFLOAD_XSTATS_CPU_HIT:
> + port_get_stats((struct net_device *)netdev, sp);
> + return 0;
> + }
> +
> + return -EINVAL;
> +}
> +
> +static int port_change_mtu(struct net_device *netdev, int mtu)
> +{
> + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
> + int err;
> +
> + err = dpsw_if_set_max_frame_length(port_priv->ethsw_data->mc_io,
> + 0,
> + port_priv->ethsw_data->dpsw_handle,
> + port_priv->idx,
> + (u16)ETHSW_L2_MAX_FRM(mtu));
> + if (err) {
> + netdev_err(netdev,
> + "dpsw_if_set_max_frame_length() err %d\n", err);
> + return err;
> + }
> +
> + netdev->mtu = mtu;
> + return 0;
> +}
> +
> +static int port_carrier_state_sync(struct net_device *netdev)
> +{
> + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
> + struct dpsw_link_state state;
> + int err;
> +
> + err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
> + port_priv->ethsw_data->dpsw_handle,
> + port_priv->idx, &state);
> + if (err) {
> + netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
> + return err;
> + }
> +
> + WARN_ONCE(state.up > 1, "Garbage read into link_state");
> +
> + if (state.up != port_priv->link_state) {
> + if (state.up)
> + netif_carrier_on(netdev);
> + else
> + netif_carrier_off(netdev);
> + port_priv->link_state = state.up;
> + }
> + return 0;
> +}
> +
> +static int port_open(struct net_device *netdev)
> +{
> + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
> + int err;
> +
> + /* No need to allow Tx as control interface is disabled */
> + netif_tx_stop_all_queues(netdev);
> +
> + err = dpsw_if_enable(port_priv->ethsw_data->mc_io, 0,
> + port_priv->ethsw_data->dpsw_handle,
> + port_priv->idx);
> + if (err) {
> + netdev_err(netdev, "dpsw_if_enable err %d\n", err);
> + return err;
> + }
> +
> + /* sync carrier state */
> + err = port_carrier_state_sync(netdev);
> + if (err) {
> + netdev_err(netdev,
> + "port_carrier_state_sync err %d\n", err);
> + goto err_carrier_sync;
> + }
> +
> + return 0;
> +
> +err_carrier_sync:
> + dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
> + port_priv->ethsw_data->dpsw_handle,
> + port_priv->idx);
> + return err;
> +}
> +
> +static int port_stop(struct net_device *netdev)
> +{
> + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
> + int err;
> +
> + err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
> + port_priv->ethsw_data->dpsw_handle,
> + port_priv->idx);
> + if (err) {
> + netdev_err(netdev, "dpsw_if_disable err %d\n", err);
> + return err;
> + }
> +
> + return 0;
> +}
> +
> +static netdev_tx_t port_dropframe(struct sk_buff *skb,
> + struct net_device *netdev)
> +{
> + /* we don't support I/O for now, drop the frame */
> + dev_kfree_skb_any(skb);
> +
> + return NETDEV_TX_OK;
> +}
> +
> +static const struct net_device_ops ethsw_port_ops = {
> + .ndo_open = port_open,
> + .ndo_stop = port_stop,
> +
> + .ndo_set_mac_address = eth_mac_addr,
> + .ndo_change_mtu = port_change_mtu,
> + .ndo_has_offload_stats = port_has_offload_stats,
> + .ndo_get_offload_stats = port_get_offload_stats,
> +
> + .ndo_start_xmit = port_dropframe,
> +};
> +
> +static void ethsw_links_state_update(struct ethsw_core *ethsw)
> +{
> + int i;
> +
> + for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
> + port_carrier_state_sync(ethsw->ports[i]->netdev);
> +}
> +
> +static irqreturn_t ethsw_irq0_handler(int irq_num, void *arg)
> +{
> + return IRQ_WAKE_THREAD;
> +}
> +
> +static irqreturn_t ethsw_irq0_handler_thread(int irq_num, void *arg)
> +{
> + struct device *dev = (struct device *)arg;
> + struct ethsw_core *ethsw = dev_get_drvdata(dev);
> +
> + /* Mask the events and the if_id reserved bits to be cleared on read */
> + u32 status = DPSW_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000;
> + int err;
> +
> + err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
> + DPSW_IRQ_INDEX_IF, &status);
> + if (err) {
> + dev_err(dev, "Can't get irq status (err %d)", err);
> +
> + err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
> + DPSW_IRQ_INDEX_IF, 0xFFFFFFFF);
> + if (err)
> + dev_err(dev, "Can't clear irq status (err %d)", err);
> + goto out;
> + }
> +
> + if (status & DPSW_IRQ_EVENT_LINK_CHANGED)
> + ethsw_links_state_update(ethsw);
> +
> +out:
> + return IRQ_HANDLED;
> +}
> +
> +static int ethsw_setup_irqs(struct fsl_mc_device *sw_dev)
> +{
> + struct device *dev = &sw_dev->dev;
> + struct ethsw_core *ethsw = dev_get_drvdata(dev);
> + u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED;
> + struct fsl_mc_device_irq *irq;
> + int err;
> +
> + err = fsl_mc_allocate_irqs(sw_dev);
> + if (err) {
> + dev_err(dev, "MC irqs allocation failed\n");
> + return err;
> + }
> +
> + if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_IRQ_NUM)) {
> + err = -EINVAL;
> + goto free_irq;
> + }
> +
> + err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
> + DPSW_IRQ_INDEX_IF, 0);
> + if (err) {
> + dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
> + goto free_irq;
> + }
> +
> + irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF];
> +
> + err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
> + ethsw_irq0_handler,
> + ethsw_irq0_handler_thread,
> + IRQF_NO_SUSPEND | IRQF_ONESHOT,
> + dev_name(dev), dev);
> + if (err) {
> + dev_err(dev, "devm_request_threaded_irq(): %d", err);
> + goto free_irq;
> + }
> +
> + err = dpsw_set_irq_mask(ethsw->mc_io, 0, ethsw->dpsw_handle,
> + DPSW_IRQ_INDEX_IF, mask);
> + if (err) {
> + dev_err(dev, "dpsw_set_irq_mask(): %d", err);
> + goto free_devm_irq;
> + }
> +
> + err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
> + DPSW_IRQ_INDEX_IF, 1);
> + if (err) {
> + dev_err(dev, "dpsw_set_irq_enable(): %d", err);
> + goto free_devm_irq;
> + }
> +
> + return 0;
> +
> +free_devm_irq:
> + devm_free_irq(dev, irq->msi_desc->irq, dev);
> +free_irq:
> + fsl_mc_free_irqs(sw_dev);
> + return err;
> +}
> +
> +static void ethsw_teardown_irqs(struct fsl_mc_device *sw_dev)
> +{
> + struct device *dev = &sw_dev->dev;
> + struct ethsw_core *ethsw = dev_get_drvdata(dev);
> + struct fsl_mc_device_irq *irq;
> +
> + irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF];
> + dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
> + DPSW_IRQ_INDEX_IF, 0);
You can still print an error message here, in case something goes wrong.
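Something along these lines (untested sketch, with a local err added):

	err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  DPSW_IRQ_INDEX_IF, 0);
	if (err)
		dev_err(dev, "dpsw_set_irq_enable err %d\n", err);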
> + fsl_mc_free_irqs(sw_dev);
> +}
> +
> +static int swdev_port_attr_get(struct net_device *netdev,
> + struct switchdev_attr *attr)
> +{
> + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
> +
> + switch (attr->id) {
> + case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
> + attr->u.ppid.id_len = 1;
> + attr->u.ppid.id[0] = port_priv->ethsw_data->dev_id;
> + break;
> + case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
> + attr->u.brport_flags =
> + (port_priv->ethsw_data->learning ? BR_LEARNING : 0) |
> + (port_priv->flood ? BR_FLOOD : 0);
> + break;
> + case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
> + attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD;
> + break;
> + default:
> + return -EOPNOTSUPP;
> + }
> +
> + return 0;
> +}
> +
> +static int port_attr_stp_state_set(struct net_device *netdev,
> + struct switchdev_trans *trans,
> + u8 state)
> +{
> + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
> +
> + if (switchdev_trans_ph_prepare(trans))
> + return 0;
> +
> + return ethsw_port_set_stp_state(port_priv, state);
> +}
> +
> +static int port_attr_br_flags_set(struct net_device *netdev,
> + struct switchdev_trans *trans,
> + unsigned long flags)
> +{
> + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
> + int err = 0;
> +
> + if (switchdev_trans_ph_prepare(trans))
> + return 0;
> +
> + /* Learning is enabled per switch */
> + err = ethsw_set_learning(port_priv->ethsw_data, flags & BR_LEARNING);
> + if (err)
> + goto exit;
> +
> + err = ethsw_port_set_flood(port_priv, flags & BR_FLOOD);
> +
> +exit:
> + return err;
> +}
> +
> +static int swdev_port_attr_set(struct net_device *netdev,
> + const struct switchdev_attr *attr,
> + struct switchdev_trans *trans)
> +{
> + int err = 0;
> +
> + switch (attr->id) {
> + case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
> + err = port_attr_stp_state_set(netdev, trans,
> + attr->u.stp_state);
> + break;
> + case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
> + err = port_attr_br_flags_set(netdev, trans,
> + attr->u.brport_flags);
> + break;
> + case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
> + /* VLANs are supported by default */
> + break;
> + default:
> + err = -EOPNOTSUPP;
> + break;
> + }
> +
> + return err;
> +}
> +
> +static int port_vlans_add(struct net_device *netdev,
> + const struct switchdev_obj_port_vlan *vlan,
> + struct switchdev_trans *trans)
> +{
> + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
> + int vid, err;
> +
> + if (switchdev_trans_ph_prepare(trans))
> + return 0;
> +
> + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
> + if (!port_priv->ethsw_data->vlans[vid]) {
> + /* this is a new VLAN */
> + err = ethsw_add_vlan(port_priv->ethsw_data, vid);
> + if (err)
> + return err;
> +
> + port_priv->ethsw_data->vlans[vid] |= ETHSW_VLAN_GLOBAL;
> + }
> + err = ethsw_port_add_vlan(port_priv, vid, vlan->flags);
> + if (err)
> + break;
> + }
> +
> + return err;
> +}
> +
> +static int port_lookup_address(struct net_device *netdev, int is_uc,
> + const unsigned char *addr)
> +{
> + struct netdev_hw_addr_list *list = (is_uc) ? &netdev->uc : &netdev->mc;
> + struct netdev_hw_addr *ha;
> +
> + netif_addr_lock_bh(netdev);
> + list_for_each_entry(ha, &list->list, list) {
> + if (ether_addr_equal(ha->addr, addr)) {
> + netif_addr_unlock_bh(netdev);
> + return 1;
> + }
> + }
> + netif_addr_unlock_bh(netdev);
> + return 0;
> +}
> +
> +static int port_mdb_add(struct net_device *netdev,
> + const struct switchdev_obj_port_mdb *mdb,
> + struct switchdev_trans *trans)
> +{
> + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
> + int err;
> +
> + if (switchdev_trans_ph_prepare(trans))
> + return 0;
> +
> + /* Check if address is already set on this port */
> + if (port_lookup_address(netdev, 0, mdb->addr))
> + return -EEXIST;
> +
> + err = ethsw_port_fdb_add_mc(port_priv, mdb->addr);
> + if (err)
> + return err;
> +
> + err = dev_mc_add(netdev, mdb->addr);
> + if (err)
> + netdev_err(netdev, "dev_mc_add err %d\n", err);
In the error case, shouldn't there be an ethsw_port_fdb_del_mc() call to undo the FDB entry added above?
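Roughly (untested sketch):

	err = dev_mc_add(netdev, mdb->addr);
	if (err) {
		netdev_err(netdev, "dev_mc_add err %d\n", err);
		ethsw_port_fdb_del_mc(port_priv, mdb->addr);
	}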
> +
> + return err;
> +}
> +
> +static int swdev_port_obj_add(struct net_device *netdev,
> + const struct switchdev_obj *obj,
> + struct switchdev_trans *trans)
> +{
> + int err;
> +
> + switch (obj->id) {
> + case SWITCHDEV_OBJ_ID_PORT_VLAN:
> + err = port_vlans_add(netdev,
> + SWITCHDEV_OBJ_PORT_VLAN(obj),
> + trans);
> + break;
> + case SWITCHDEV_OBJ_ID_PORT_MDB:
> + err = port_mdb_add(netdev,
> + SWITCHDEV_OBJ_PORT_MDB(obj),
> + trans);
> + break;
> + default:
> + err = -EOPNOTSUPP;
> + break;
> + }
> +
> + return err;
> +}
> +
> +static int ethsw_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid)
> +{
> + struct ethsw_core *ethsw = port_priv->ethsw_data;
> + struct net_device *netdev = port_priv->netdev;
> + struct dpsw_vlan_if_cfg vcfg;
> + int i, err, err2;
> + bool is_oper;
> +
> + if (!port_priv->vlans[vid])
> + return -ENOENT;
> +
> + if (port_priv->vlans[vid] & ETHSW_VLAN_PVID) {
> + struct dpsw_tci_cfg tci_cfg = { 0 };
> + /* Interface needs to be down to change PVID */
> + is_oper = netif_oper_up(netdev);
> +
> + if (is_oper) {
> + err = dpsw_if_disable(ethsw->mc_io, 0,
> + ethsw->dpsw_handle,
> + port_priv->idx);
> + if (err) {
> + netdev_err(netdev, "dpsw_if_disable err %d\n",
> + err);
> + goto exit_err;
> + }
> + }
> +
> + err = dpsw_if_set_tci(ethsw->mc_io, 0,
> + ethsw->dpsw_handle,
> + port_priv->idx, &tci_cfg);
> + if (!err) {
> + port_priv->vlans[vid] &= ~ETHSW_VLAN_PVID;
> + port_priv->pvid = 0;
> + } else {
> + netdev_err(netdev, "dpsw_if_set_tci err %d\n", err);
> + }
> +
> + if (is_oper) {
> + err2 = dpsw_if_enable(ethsw->mc_io, 0,
> + ethsw->dpsw_handle,
> + port_priv->idx);
> + if (err2) {
> + netdev_err(netdev, "dpsw_if_enable err %d\n",
> + err2);
> + return err2;
> + }
> + }
> +
> + if (err)
> + goto exit_err;
> + }
> +
> + vcfg.num_ifs = 1;
> + vcfg.if_id[0] = port_priv->idx;
> + if (port_priv->vlans[vid] & ETHSW_VLAN_UNTAGGED) {
> + err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0,
> + ethsw->dpsw_handle,
> + vid, &vcfg);
> + if (err) {
> + netdev_err(netdev,
> + "dpsw_vlan_remove_if_untagged err %d\n",
> + err);
> + }
> + port_priv->vlans[vid] &= ~ETHSW_VLAN_UNTAGGED;
> + }
> +
> + if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
> + err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
> + vid, &vcfg);
> + if (err) {
> + netdev_err(netdev,
> + "dpsw_vlan_remove_if err %d\n", err);
> + return err;
> + }
> + port_priv->vlans[vid] &= ~ETHSW_VLAN_MEMBER;
> +
> + /* Delete VLAN from switch if it is no longer configured on
> + * any port
> + */
> + for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
> + if (ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER)
> + return 0; /* Found a port member in VID */
> +
> + ethsw->vlans[vid] &= ~ETHSW_VLAN_GLOBAL;
> +
> + err = ethsw_dellink_switch(ethsw, vid);
> + if (err)
> + goto exit_err;
> + }
> +
> + return 0;
> +exit_err:
> + return err;
> +}
> +
> +static int port_vlans_del(struct net_device *netdev,
> + const struct switchdev_obj_port_vlan *vlan)
> +{
> + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
> + int vid, err;
> +
> + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
> + err = ethsw_port_del_vlan(port_priv, vid);
> + if (err)
> + break;
> + }
> +
> + return err;
> +}
> +
> +static int port_mdb_del(struct net_device *netdev,
> + const struct switchdev_obj_port_mdb *mdb)
> +{
> + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
> + int err;
> +
> + if (!port_lookup_address(netdev, 0, mdb->addr))
> + return -ENOENT;
> +
> + err = ethsw_port_fdb_del_mc(port_priv, mdb->addr);
> + if (err)
> + return err;
> +
> + err = dev_mc_del(netdev, mdb->addr);
> + if (err) {
> + netdev_err(netdev, "dev_mc_del err %d\n", err);
> + return err;
> + }
> +
> + return err;
> +}
> +
> +static int swdev_port_obj_del(struct net_device *netdev,
> + const struct switchdev_obj *obj)
> +{
> + int err;
> +
> + switch (obj->id) {
> + case SWITCHDEV_OBJ_ID_PORT_VLAN:
> + err = port_vlans_del(netdev, SWITCHDEV_OBJ_PORT_VLAN(obj));
> + break;
> + case SWITCHDEV_OBJ_ID_PORT_MDB:
> + err = port_mdb_del(netdev, SWITCHDEV_OBJ_PORT_MDB(obj));
> + break;
> + default:
> + err = -EOPNOTSUPP;
> + break;
> + }
> + return err;
> +}
> +
> +static const struct switchdev_ops ethsw_port_switchdev_ops = {
> + .switchdev_port_attr_get = swdev_port_attr_get,
> + .switchdev_port_attr_set = swdev_port_attr_set,
> + .switchdev_port_obj_add = swdev_port_obj_add,
> + .switchdev_port_obj_del = swdev_port_obj_del,
> +};
> +
> +/* For the moment, only flood setting needs to be updated */
> +static int port_bridge_join(struct net_device *netdev)
> +{
> + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
> +
> + /* Enable flooding */
> + return ethsw_port_set_flood(port_priv, 1);
> +}
> +
> +static int port_bridge_leave(struct net_device *netdev)
> +{
> + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
> +
> + /* Disable flooding */
> + return ethsw_port_set_flood(port_priv, 0);
> +}
> +
> +static int port_netdevice_event(struct notifier_block *unused,
> + unsigned long event, void *ptr)
> +{
> + struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
> + struct netdev_notifier_changeupper_info *info = ptr;
> + struct net_device *upper_dev;
> + int err = 0;
> +
> + if (netdev->netdev_ops != ðsw_port_ops)
> + return NOTIFY_DONE;
> +
> + /* Handle just upper dev link/unlink for the moment */
> + if (event == NETDEV_CHANGEUPPER) {
> + upper_dev = info->upper_dev;
> + if (netif_is_bridge_master(upper_dev)) {
> + if (info->linking)
> + err = port_bridge_join(netdev);
> + else
> + err = port_bridge_leave(netdev);
> + }
> + }
> +
> + return notifier_from_errno(err);
> +}
> +
> +static struct notifier_block port_nb __read_mostly = {
> + .notifier_call = port_netdevice_event,
> +};
> +
> +struct ethsw_switchdev_event_work {
> + struct work_struct work;
> + struct switchdev_notifier_fdb_info fdb_info;
> + struct net_device *dev;
> + unsigned long event;
> +};
> +
> +static void ethsw_switchdev_event_work(struct work_struct *work)
> +{
> + struct ethsw_switchdev_event_work *switchdev_work =
> + container_of(work, struct ethsw_switchdev_event_work, work);
> + struct net_device *dev = switchdev_work->dev;
> + struct switchdev_notifier_fdb_info *fdb_info;
> + struct ethsw_port_priv *port_priv;
> +
> + rtnl_lock();
> + port_priv = netdev_priv(dev);
> + fdb_info = &switchdev_work->fdb_info;
> +
> + switch (switchdev_work->event) {
> + case SWITCHDEV_FDB_ADD_TO_DEVICE:
> + ethsw_port_fdb_add_uc(netdev_priv(dev), fdb_info->addr);
> + break;
> + case SWITCHDEV_FDB_DEL_TO_DEVICE:
> + ethsw_port_fdb_del_uc(netdev_priv(dev), fdb_info->addr);
> + break;
> + }
> +
> + rtnl_unlock();
> + kfree(switchdev_work->fdb_info.addr);
> + kfree(switchdev_work);
> + dev_put(dev);
> +}
> +
> +/* Called under rcu_read_lock() */
> +static int port_switchdev_event(struct notifier_block *unused,
> + unsigned long event, void *ptr)
> +{
> + struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
> + struct ethsw_switchdev_event_work *switchdev_work;
> + struct switchdev_notifier_fdb_info *fdb_info = ptr;
> +
> + switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
> + if (!switchdev_work)
> + return NOTIFY_BAD;
> +
> + INIT_WORK(&switchdev_work->work, ethsw_switchdev_event_work);
> + switchdev_work->dev = dev;
> + switchdev_work->event = event;
> +
> + switch (event) {
> + case SWITCHDEV_FDB_ADD_TO_DEVICE:
> + case SWITCHDEV_FDB_DEL_TO_DEVICE:
> + memcpy(&switchdev_work->fdb_info, ptr,
> + sizeof(switchdev_work->fdb_info));
> + switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
> + if (!switchdev_work->fdb_info.addr)
> + goto err_addr_alloc;
> +
> + ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
> + fdb_info->addr);
> +
> + /* Take a reference on the device to avoid being freed. */
> + dev_hold(dev);
> + break;
> + default:
> + return NOTIFY_DONE;
> + }
> +
> + queue_work(ethsw_owq, &switchdev_work->work);
> +
> + return NOTIFY_DONE;
> +
> +err_addr_alloc:
> + kfree(switchdev_work);
> + return NOTIFY_BAD;
> +}
> +
> +static struct notifier_block port_switchdev_nb = {
> + .notifier_call = port_switchdev_event,
> +};
> +
> +static int ethsw_register_notifier(struct device *dev)
> +{
> + int err;
> +
> + err = register_netdevice_notifier(&port_nb);
> + if (err) {
> + dev_err(dev, "Failed to register netdev notifier\n");
> + return err;
> + }
> +
> + err = register_switchdev_notifier(&port_switchdev_nb);
> + if (err) {
> + dev_err(dev, "Failed to register switchdev notifier\n");
> + goto err_switchdev_nb;
> + }
> +
> + return 0;
> +
> +err_switchdev_nb:
> + unregister_netdevice_notifier(&port_nb);
> + return err;
> +}
> +
> +static int ethsw_open(struct ethsw_core *ethsw)
Minor formatting error, tab in function signature - see following function as well.
> +{
> + struct ethsw_port_priv *port_priv = NULL;
> + int i, err;
> +
> + err = dpsw_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
> + if (err) {
> + dev_err(ethsw->dev, "dpsw_enable err %d\n", err);
> + return err;
> + }
> +
> + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
> + port_priv = ethsw->ports[i];
> + err = dev_open(port_priv->netdev);
> + if (err) {
> + netdev_err(port_priv->netdev, "dev_open err %d\n", err);
> + return err;
> + }
> + }
> +
> + return 0;
> +}
> +
> +static int ethsw_stop(struct ethsw_core *ethsw)
> +{
> + struct ethsw_port_priv *port_priv = NULL;
> + int i, err;
> +
> + destroy_workqueue(ethsw_owq);
If workqueue is destroyed here, shouldn't it be alloc'd in ethsw_open?
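I.e. something like this in ethsw_open(), so the two functions stay symmetric (sketch only):

	ethsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM,
					    "ethsw");
	if (!ethsw_owq)
		return -ENOMEM;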
> +
> + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
> + port_priv = ethsw->ports[i];
> + dev_close(port_priv->netdev);
> + }
> +
> + err = dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
> + if (err) {
> + dev_err(ethsw->dev, "dpsw_disable err %d\n", err);
> + return err;
> + }
> +
> + return 0;
> +}
> +
> +static int ethsw_init(struct fsl_mc_device *sw_dev)
> +{
> + struct device *dev = &sw_dev->dev;
> + struct ethsw_core *ethsw = dev_get_drvdata(dev);
> + u16 version_major, version_minor, i;
> + struct dpsw_stp_cfg stp_cfg;
> + int err;
> +
> + ethsw->dev_id = sw_dev->obj_desc.id;
> +
> + err = dpsw_open(ethsw->mc_io, 0, ethsw->dev_id, ðsw->dpsw_handle);
> + if (err) {
> + dev_err(dev, "dpsw_open err %d\n", err);
> + return err;
> + }
> +
> + err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
> + ðsw->sw_attr);
> + if (err) {
> + dev_err(dev, "dpsw_get_attributes err %d\n", err);
> + goto err_close;
> + }
> +
> + err = dpsw_get_api_version(ethsw->mc_io, 0,
> + &version_major,
> + &version_minor);
> + if (err) {
> + dev_err(dev, "dpsw_get_api_version err %d\n", err);
> + goto err_close;
> + }
> +
> + /* Minimum supported DPSW version check */
> + if (version_major < DPSW_MIN_VER_MAJOR ||
> + (version_major == DPSW_MIN_VER_MAJOR &&
> + version_minor < DPSW_MIN_VER_MINOR)) {
> + dev_err(dev, "DPSW version %d:%d not supported. Use %d.%d or greater.\n",
> + version_major,
> + version_minor,
> + DPSW_MIN_VER_MAJOR, DPSW_MIN_VER_MINOR);
> + err = -ENOTSUPP;
> + goto err_close;
> + }
> +
> + err = dpsw_reset(ethsw->mc_io, 0, ethsw->dpsw_handle);
> + if (err) {
> + dev_err(dev, "dpsw_reset err %d\n", err);
> + goto err_close;
> + }
> +
> + err = dpsw_fdb_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle, 0,
> + DPSW_FDB_LEARNING_MODE_HW);
> + if (err) {
> + dev_err(dev, "dpsw_fdb_set_learning_mode err %d\n", err);
> + goto err_close;
> + }
> +
> + stp_cfg.vlan_id = DEFAULT_VLAN_ID;
> + stp_cfg.state = DPSW_STP_STATE_FORWARDING;
> +
> + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
> + err = dpsw_if_set_stp(ethsw->mc_io, 0, ethsw->dpsw_handle, i,
> + &stp_cfg);
> + if (err) {
> + dev_err(dev, "dpsw_if_set_stp err %d for port %d\n",
> + err, i);
> + goto err_close;
> + }
> +
> + err = dpsw_if_set_broadcast(ethsw->mc_io, 0,
> + ethsw->dpsw_handle, i, 1);
> + if (err) {
> + dev_err(dev,
> + "dpsw_if_set_broadcast err %d for port %d\n",
> + err, i);
> + goto err_close;
> + }
> + }
> +
> + ethsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM,
> + "ethsw");
> + if (!ethsw_owq) {
> + err = -ENOMEM;
> + goto err_close;
> + }
> +
> + err = ethsw_register_notifier(dev);
> + if (err)
> + goto err_destroy_ordered_workqueue;
> +
> + return 0;
> +
> +err_destroy_ordered_workqueue:
> + destroy_workqueue(ethsw_owq);
> +
> +err_close:
> + dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
> + return err;
> +}
> +
> +static int ethsw_port_init(struct ethsw_port_priv *port_priv, u16 port)
> +{
> + const char def_mcast[ETH_ALEN] = {0x01, 0x00, 0x5e, 0x00, 0x00, 0x01};
> + struct net_device *netdev = port_priv->netdev;
> + struct ethsw_core *ethsw = port_priv->ethsw_data;
> + struct dpsw_tci_cfg tci_cfg = {0};
> + struct dpsw_vlan_if_cfg vcfg;
> + int err;
> +
> + /* Switch starts with all ports configured to VLAN 1. Need to
> + * remove this setting to allow configuration at bridge join
> + */
> + vcfg.num_ifs = 1;
> + vcfg.if_id[0] = port_priv->idx;
> +
> + err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0, ethsw->dpsw_handle,
> + DEFAULT_VLAN_ID, &vcfg);
> + if (err) {
> + netdev_err(netdev, "dpsw_vlan_remove_if_untagged err %d\n",
> + err);
> + return err;
> + }
> +
> + err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
> + port_priv->idx, &tci_cfg);
> + if (err) {
> + netdev_err(netdev, "dpsw_if_set_tci err %d\n", err);
> + return err;
> + }
> +
> + err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
> + DEFAULT_VLAN_ID, &vcfg);
> + if (err) {
> + netdev_err(netdev, "dpsw_vlan_remove_if err %d\n", err);
> + return err;
> + }
> +
> + err = ethsw_port_fdb_add_mc(port_priv, def_mcast);
> +
> + return err;
> +}
> +
> +static void ethsw_takedown(struct fsl_mc_device *sw_dev)
> +{
> + struct device *dev = &sw_dev->dev;
> + struct ethsw_core *ethsw = dev_get_drvdata(dev);
> + int err;
> +
> + err = unregister_switchdev_notifier(&port_switchdev_nb);
> + if (err)
> + dev_err(dev,
> + "Failed to unregister switchdev notifier (%d)\n", err);
> +
> + err = unregister_netdevice_notifier(&port_nb);
> + if (err)
> + dev_err(dev,
> + "Failed to unregister netdev notifier (%d)\n", err);
The above two calls can be grouped into an ethsw_unregister_notifier() helper, mirroring ethsw_register_notifier().
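E.g. (the name is just a suggestion):

	static void ethsw_unregister_notifier(struct device *dev)
	{
		int err;

		err = unregister_switchdev_notifier(&port_switchdev_nb);
		if (err)
			dev_err(dev,
				"Failed to unregister switchdev notifier (%d)\n", err);

		err = unregister_netdevice_notifier(&port_nb);
		if (err)
			dev_err(dev,
				"Failed to unregister netdev notifier (%d)\n", err);
	}

and ethsw_takedown() would just call it before dpsw_close().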
> +
> + err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
> + if (err)
> + dev_warn(dev, "dpsw_close err %d\n", err);
> +}
> +
> +static int ethsw_remove(struct fsl_mc_device *sw_dev)
> +{
> + struct ethsw_port_priv *port_priv;
> + struct ethsw_core *ethsw;
> + struct device *dev;
> + int i;
> +
> + dev = &sw_dev->dev;
> + ethsw = dev_get_drvdata(dev);
> +
> + ethsw_teardown_irqs(sw_dev);
> +
> + rtnl_lock();
> + ethsw_stop(ethsw);
> + rtnl_unlock();
> +
> + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
> + port_priv = ethsw->ports[i];
> + unregister_netdev(port_priv->netdev);
> + free_netdev(port_priv->netdev);
> + }
> + kfree(ethsw->ports);
> +
> + ethsw_takedown(sw_dev);
> + fsl_mc_portal_free(ethsw->mc_io);
> +
> + kfree(ethsw);
> +
> + dev_set_drvdata(dev, NULL);
> +
> + return 0;
> +}
> +
> +static int ethsw_probe_port(struct ethsw_core *ethsw, u16 port_idx)
> +{
> + struct ethsw_port_priv *port_priv;
> + struct device *dev = ethsw->dev;
> + struct net_device *port_netdev;
> + int err;
> +
> + port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv));
> + if (!port_netdev) {
> + dev_err(dev, "alloc_etherdev error\n");
> + return -ENOMEM;
> + }
> +
> + port_priv = netdev_priv(port_netdev);
> + port_priv->netdev = port_netdev;
> + port_priv->ethsw_data = ethsw;
> +
> + port_priv->idx = port_idx;
> + port_priv->stp_state = BR_STATE_FORWARDING;
> +
> + /* Flooding is implicitly enabled */
> + port_priv->flood = true;
> +
> + SET_NETDEV_DEV(port_netdev, dev);
> + port_netdev->netdev_ops = ðsw_port_ops;
> + port_netdev->switchdev_ops = ðsw_port_switchdev_ops;
> +
> + /* Set MTU limits */
> + port_netdev->min_mtu = ETH_MIN_MTU;
> + port_netdev->max_mtu = ETHSW_MAX_FRAME_LENGTH;
> +
> + err = register_netdev(port_netdev);
> + if (err < 0) {
> + dev_err(dev, "register_netdev error %d\n", err);
> + free_netdev(port_netdev);
> + return err;
> + }
> +
> + ethsw->ports[port_idx] = port_priv;
> +
> + return ethsw_port_init(port_priv, port_idx);
> +}
> +
> +static int ethsw_probe(struct fsl_mc_device *sw_dev)
> +{
> + struct device *dev = &sw_dev->dev;
> + struct ethsw_core *ethsw;
> + int err;
> + u16 i, j;
> +
> + /* Allocate switch core*/
> + ethsw = kzalloc(sizeof(*ethsw), GFP_KERNEL);
> +
> + if (!ethsw)
> + return -ENOMEM;
> +
> + ethsw->dev = dev;
> + dev_set_drvdata(dev, ethsw);
> +
> + err = fsl_mc_portal_allocate(sw_dev, 0, ðsw->mc_io);
> + if (err) {
> + dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
> + goto err_free_drvdata;
> + }
> +
> + err = ethsw_init(sw_dev);
> + if (err)
> + goto err_free_cmdport;
> +
> + /* DEFAULT_VLAN_ID is implicitly configured on the switch */
> + ethsw->vlans[DEFAULT_VLAN_ID] = ETHSW_VLAN_MEMBER;
> +
> + /* Learning is implicitly enabled */
> + ethsw->learning = true;
> +
> + ethsw->ports = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->ports),
> + GFP_KERNEL);
> + if (!(ethsw->ports)) {
> + err = -ENOMEM;
> + goto err_takedown;
> + }
> +
> + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
> + err = ethsw_probe_port(ethsw, i);
> + if (err) {
> + /* Cleanup previous ports only */
> + for (j = 0; j < i; j++) {
I think you can go with

	for (i--; i >= 0; i--)

or, better yet, just

	goto err_free_ports;

and refactor the err_free_ports label so it unwinds only the ports probed so far. Note that i is a u16 here, so it would need to become a signed int for the decrementing loop to terminate.
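A rough sketch of the unwind path I have in mind (with i changed to a signed int):

err_free_ports:
	for (i--; i >= 0; i--) {
		unregister_netdev(ethsw->ports[i]->netdev);
		free_netdev(ethsw->ports[i]->netdev);
	}
	kfree(ethsw->ports);

This way the probe loop and the later error paths can share the same label.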
Best regards,
Bogdan P.
> + unregister_netdev(ethsw->ports[j]->netdev);
> + free_netdev(ethsw->ports[j]->netdev);
> + }
> + goto err_takedown;
> + }
> + }
> +
> + /* Switch starts up enabled */
> + rtnl_lock();
> + err = ethsw_open(ethsw);
> + rtnl_unlock();
> + if (err)
> + goto err_free_ports;
> +
> + /* Setup IRQs */
> + err = ethsw_setup_irqs(sw_dev);
> + if (err)
> + goto err_stop;
> +
> + dev_info(dev, "probed %d port switch\n", ethsw->sw_attr.num_ifs);
> + return 0;
> +
> +err_stop:
> + rtnl_lock();
> + ethsw_stop(ethsw);
> + rtnl_unlock();
> +
> +err_free_ports:
> + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
> + unregister_netdev(ethsw->ports[i]->netdev);
> + free_netdev(ethsw->ports[i]->netdev);
> + }
> + kfree(ethsw->ports);
> +
> +err_takedown:
> + ethsw_takedown(sw_dev);
> +
> +err_free_cmdport:
> + fsl_mc_portal_free(ethsw->mc_io);
> +
> +err_free_drvdata:
> + kfree(ethsw);
> + dev_set_drvdata(dev, NULL);
> +
> + return err;
> +}
> +
> +static const struct fsl_mc_device_id ethsw_match_id_table[] = {
> + {
> + .vendor = FSL_MC_VENDOR_FREESCALE,
> + .obj_type = "dpsw",
> + },
> + { .vendor = 0x0 }
> +};
> +MODULE_DEVICE_TABLE(fslmc, ethsw_match_id_table);
> +
> +static struct fsl_mc_driver eth_sw_drv = {
> + .driver = {
> + .name = KBUILD_MODNAME,
> + .owner = THIS_MODULE,
> + },
> + .probe = ethsw_probe,
> + .remove = ethsw_remove,
> + .match_id_table = ethsw_match_id_table
> +};
> +
> +module_fsl_mc_driver(eth_sw_drv);
> +
> +MODULE_LICENSE("Dual BSD/GPL");
> +MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver");
> diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.h b/drivers/staging/fsl-dpaa2/ethsw/ethsw.h
> new file mode 100644
> index 0000000..8c1d645
> --- /dev/null
> +++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.h
> @@ -0,0 +1,88 @@
> +/* Copyright 2014-2017 Freescale Semiconductor Inc.
> + * Copyright 2017 NXP
> + *
> + * Redistribution and use in source and binary forms, with or without
> + * modification, are permitted provided that the following conditions are met:
> + * * Redistributions of source code must retain the above copyright
> + * notice, this list of conditions and the following disclaimer.
> + * * Redistributions in binary form must reproduce the above copyright
> + * notice, this list of conditions and the following disclaimer in the
> + * documentation and/or other materials provided with the distribution.
> + * * Neither the name of the above-listed copyright holders nor the
> + * names of any contributors may be used to endorse or promote products
> + * derived from this software without specific prior written permission.
> + *
> + *
> + * ALTERNATIVELY, this software may be distributed under the terms of the
> + * GNU General Public License ("GPL") as published by the Free Software
> + * Foundation, either version 2 of that License or (at your option) any
> + * later version.
> + *
> + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
> + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
> + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
> + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
> + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
> + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
> + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
> + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
> + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
> + * POSSIBILITY OF SUCH DAMAGE.
> + */
> +
> +#ifndef __ETHSW_H
> +#define __ETHSW_H
> +
> +#include <linux/netdevice.h>
> +#include <linux/etherdevice.h>
> +#include <linux/rtnetlink.h>
> +#include <linux/if_vlan.h>
> +#include <uapi/linux/if_bridge.h>
> +#include <net/switchdev.h>
> +#include <linux/if_bridge.h>
> +
> +#include "dpsw.h"
> +
> +/* Number of IRQs supported */
> +#define DPSW_IRQ_NUM 2
> +
> +#define ETHSW_VLAN_MEMBER 1
> +#define ETHSW_VLAN_UNTAGGED 2
> +#define ETHSW_VLAN_PVID 4
> +#define ETHSW_VLAN_GLOBAL 8
> +
> +/* Maximum Frame Length supported by HW (currently 10k) */
> +#define DPAA2_MFL (10 * 1024)
> +#define ETHSW_MAX_FRAME_LENGTH (DPAA2_MFL - VLAN_ETH_HLEN - ETH_FCS_LEN)
> +#define ETHSW_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN + ETH_FCS_LEN)
> +
> +struct ethsw_core;
> +
> +/* Per port private data */
> +struct ethsw_port_priv {
> + struct net_device *netdev;
> + u16 idx;
> + struct ethsw_core *ethsw_data;
> + u8 link_state;
> + u8 stp_state;
> + bool flood;
> +
> + u8 vlans[VLAN_VID_MASK + 1];
> + u16 pvid;
> +};
> +
> +/* Switch data */
> +struct ethsw_core {
> + struct device *dev;
> + struct fsl_mc_io *mc_io;
> + u16 dpsw_handle;
> + struct dpsw_attr sw_attr;
> + int dev_id;
> + struct ethsw_port_priv **ports;
> +
> + u8 vlans[VLAN_VID_MASK + 1];
> + bool learning;
> +};
> +
> +#endif /* __ETHSW_H */
> --
> 1.9.1
> -----Original Message-----
> From: Bogdan Purcareata
> Sent: Friday, September 29, 2017 16:36
> To: Razvan Stefanescu <[email protected]>;
> [email protected]
> Cc: [email protected]; [email protected];
> [email protected]; [email protected]; [email protected]; Alexandru Marginean
> <[email protected]>; Ruxandra Ioana Radulescu
> <[email protected]>; Laurentiu Tudor <[email protected]>;
> [email protected]
> Subject: RE: [RESEND PATCH 2/6] staging: fsl-dpaa2/ethsw: Add Freescale DPAA2
> Ethernet Switch driver
>
> > Introduce the DPAA2 Ethernet Switch driver, which manages Datapath Switch
> > (DPSW) objects discovered on the MC bus.
> >
> > Suggested-by: Alexandru Marginean <[email protected]>
> > Signed-off-by: Razvan Stefanescu <[email protected]>
> > ---
> > drivers/staging/fsl-dpaa2/ethsw/Makefile | 2 +-
> > drivers/staging/fsl-dpaa2/ethsw/ethsw.c | 1523 ++++++++++++++++++++++++++++++
> > drivers/staging/fsl-dpaa2/ethsw/ethsw.h | 88 ++
> > 3 files changed, 1612 insertions(+), 1 deletion(-)
> > create mode 100644 drivers/staging/fsl-dpaa2/ethsw/ethsw.c
> > create mode 100644 drivers/staging/fsl-dpaa2/ethsw/ethsw.h
> >
> > diff --git a/drivers/staging/fsl-dpaa2/ethsw/Makefile b/drivers/staging/fsl-dpaa2/ethsw/Makefile
> > index db137f7..a6d72d1 100644
> > --- a/drivers/staging/fsl-dpaa2/ethsw/Makefile
> > +++ b/drivers/staging/fsl-dpaa2/ethsw/Makefile
> > @@ -4,4 +4,4 @@
> >
> > obj-$(CONFIG_FSL_DPAA2_ETHSW) += dpaa2-ethsw.o
> >
> > -dpaa2-ethsw-objs := dpsw.o
> > +dpaa2-ethsw-objs := ethsw.o dpsw.o
> > diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
> > new file mode 100644
> > index 0000000..ae86078
> > --- /dev/null
> > +++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
> > @@ -0,0 +1,1523 @@
> > +/* Copyright 2014-2016 Freescale Semiconductor Inc.
> > + * Copyright 2017 NXP
> > + *
> > + * Redistribution and use in source and binary forms, with or without
> > + * modification, are permitted provided that the following conditions are met:
> > + * * Redistributions of source code must retain the above copyright
> > + * notice, this list of conditions and the following disclaimer.
> > + * * Redistributions in binary form must reproduce the above copyright
> > + * notice, this list of conditions and the following disclaimer in the
> > + * documentation and/or other materials provided with the distribution.
> > + * * Neither the name of the above-listed copyright holders nor the
> > + * names of any contributors may be used to endorse or promote products
> > + * derived from this software without specific prior written permission.
> > + *
> > + *
> > + * ALTERNATIVELY, this software may be distributed under the terms of the
> > + * GNU General Public License ("GPL") as published by the Free Software
> > + * Foundation, either version 2 of that License or (at your option) any
> > + * later version.
> > + *
> > + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
> > + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> > + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
> > + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
> > + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
> > + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
> > + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
> > + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
> > + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
> > + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
> > + * POSSIBILITY OF SUCH DAMAGE.
> > + */
> > +
> > +#include <linux/module.h>
> > +
> > +#include <linux/interrupt.h>
> > +#include <linux/msi.h>
> > +#include <linux/kthread.h>
> > +#include <linux/workqueue.h>
> > +
> > +#include "../../fsl-mc/include/mc.h"
> > +
> > +#include "ethsw.h"
> > +
> > +static struct workqueue_struct *ethsw_owq;
> > +
> > +/* Minimal supported DPSW version */
> > +#define DPSW_MIN_VER_MAJOR 8
> > +#define DPSW_MIN_VER_MINOR 0
> > +
> > +#define DEFAULT_VLAN_ID 1
> > +
> > +static int ethsw_add_vlan(struct ethsw_core *ethsw, u16 vid)
> > +{
> > + int err;
> > +
> > + struct dpsw_vlan_cfg vcfg = {
> > + .fdb_id = 0,
> > + };
> > +
> > + if (ethsw->vlans[vid]) {
> > + dev_err(ethsw->dev, "VLAN already configured\n");
> > + return -EEXIST;
> > + }
> > +
> > + err = dpsw_vlan_add(ethsw->mc_io, 0,
> > + ethsw->dpsw_handle, vid, &vcfg);
> > + if (err) {
> > + dev_err(ethsw->dev, "dpsw_vlan_add err %d\n", err);
> > + return err;
> > + }
> > +	ethsw->vlans[vid] = ETHSW_VLAN_MEMBER;
> > +
> > + return 0;
> > +}
> > +
> > +static int ethsw_port_add_vlan(struct ethsw_port_priv *port_priv,
> > + u16 vid, u16 flags)
> > +{
> > + struct ethsw_core *ethsw = port_priv->ethsw_data;
> > + struct net_device *netdev = port_priv->netdev;
> > + struct dpsw_vlan_if_cfg vcfg;
> > + bool is_oper;
> > + int err, err2;
>
> Mild suggestion - s/err2/ret/, just because it sounds better, at least to me (same
> for similar situations in the rest of the file).
>
Thank you for your suggestion. I will make the change and send v2.
> > +
> > + if (port_priv->vlans[vid]) {
> > + netdev_warn(netdev, "VLAN %d already configured\n", vid);
> > + return -EEXIST;
> > + }
> > +
> > + vcfg.num_ifs = 1;
> > + vcfg.if_id[0] = port_priv->idx;
> > +	err = dpsw_vlan_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, vid, &vcfg);
> > + if (err) {
> > + netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err);
> > + return err;
> > + }
> > +
> > + port_priv->vlans[vid] = ETHSW_VLAN_MEMBER;
> > +
> > + if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
> > + err = dpsw_vlan_add_if_untagged(ethsw->mc_io, 0,
> > + ethsw->dpsw_handle,
> > + vid, &vcfg);
> > + if (err) {
> > + netdev_err(netdev,
> > + "dpsw_vlan_add_if_untagged err %d\n", err);
> > + return err;
> > + }
> > + port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED;
> > + }
> > +
> > + if (flags & BRIDGE_VLAN_INFO_PVID) {
> > + struct dpsw_tci_cfg tci_cfg = {
> > + .pcp = 0,
> > + .dei = 0,
> > + .vlan_id = vid,
> > + };
> > +
> > + /* Interface needs to be down to change PVID */
> > + is_oper = netif_oper_up(netdev);
> > + if (is_oper) {
> > + err = dpsw_if_disable(ethsw->mc_io, 0,
> > + ethsw->dpsw_handle,
> > + port_priv->idx);
> > + if (err) {
> > + netdev_err(netdev,
> > + "dpsw_if_disable err %d\n", err);
> > + return err;
> > + }
> > + }
> > +
> > + err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
> > + port_priv->idx, &tci_cfg);
> > + if (!err) {
> > + /* Delete previous PVID info and mark the new one */
> > + if (port_priv->pvid)
> > + port_priv->vlans[port_priv->pvid]
> > + ^= ETHSW_VLAN_PVID;
>
> Can it be "&= ~ETHSW_VLAN_PVID"? Are there other implications?
>
You are right, the flag must be unset. I will update this in v2, along with the other observations.
Thank you,
Razvan S.
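
For reference, a minimal sketch of the difference between the two operators on the existing flag values: "^=" toggles the bit, so it only clears the flag when it happens to be set, while "&= ~" clears it unconditionally.

    u8 vlan_flags = ETHSW_VLAN_MEMBER | ETHSW_VLAN_PVID;

    vlan_flags ^= ETHSW_VLAN_PVID;  /* clears PVID only because it was set */
    vlan_flags ^= ETHSW_VLAN_PVID;  /* toggled again, PVID is back on */

    vlan_flags &= ~ETHSW_VLAN_PVID; /* always clears PVID, regardless of state */
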
> > +
> > + port_priv->vlans[vid] |= ETHSW_VLAN_PVID;
> > + port_priv->pvid = vid;
> > + } else {
> > + netdev_err(netdev, "dpsw_if_set_tci err %d\n", err);
> > + }
> > +
> > + if (is_oper) {
> > + err2 = dpsw_if_enable(ethsw->mc_io, 0,
> > + ethsw->dpsw_handle,
> > + port_priv->idx);
> > + if (err2) {
> > + netdev_err(netdev,
> > + "dpsw_if_enable err %d\n", err2);
> > + return err2;
> > + }
> > + }
> > + }
> > +
> > + return err;
> > +}
> > +
> > +static int ethsw_set_learning(struct ethsw_core *ethsw, u8 flag)
> > +{
> > + enum dpsw_fdb_learning_mode learn_mode;
> > + int err;
> > +
> > + if (flag)
> > + learn_mode = DPSW_FDB_LEARNING_MODE_HW;
> > + else
> > + learn_mode = DPSW_FDB_LEARNING_MODE_DIS;
> > +
> > +	err = dpsw_fdb_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle, 0,
> > + learn_mode);
> > + if (err) {
> > +		dev_err(ethsw->dev, "dpsw_fdb_set_learning_mode err %d\n", err);
> > + return err;
> > + }
> > + ethsw->learning = !!flag;
> > +
> > + return 0;
> > +}
> > +
> > +static int ethsw_port_set_flood(struct ethsw_port_priv *port_priv, u8 flag)
> > +{
> > + int err;
> > +
> > + err = dpsw_if_set_flooding(port_priv->ethsw_data->mc_io, 0,
> > + port_priv->ethsw_data->dpsw_handle,
> > + port_priv->idx, (int)flag);
>
> Why is this cast necessary? Can't the API be reworked to use u8 (or, better yet,
> bool) instead of int?
>
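As a sketch only, a reworked prototype taking a bool could look like this (a hypothetical change to the dpsw_if_set_flooding() signature, not the current API):

    int dpsw_if_set_flooding(struct fsl_mc_io *mc_io, u32 cmd_flags,
                             u16 token, u16 if_id, bool en);

    /* the caller then needs no cast */
    err = dpsw_if_set_flooding(port_priv->ethsw_data->mc_io, 0,
                               port_priv->ethsw_data->dpsw_handle,
                               port_priv->idx, !!flag);
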
> > + if (err) {
> > + netdev_err(port_priv->netdev,
> > + "dpsw_fdb_set_learning_mode err %d\n", err);
> > + return err;
> > + }
> > + port_priv->flood = !!flag;
> > +
> > + return 0;
> > +}
> > +
> > +static int ethsw_port_set_stp_state(struct ethsw_port_priv *port_priv, u8 state)
> > +{
> > + struct dpsw_stp_cfg stp_cfg = {
> > + .vlan_id = DEFAULT_VLAN_ID,
> > + .state = state,
> > + };
> > + int err;
> > +
> > + if (!netif_oper_up(port_priv->netdev) || state == port_priv->stp_state)
> > + return 0; /* Nothing to do */
> > +
> > + err = dpsw_if_set_stp(port_priv->ethsw_data->mc_io, 0,
> > + port_priv->ethsw_data->dpsw_handle,
> > + port_priv->idx, &stp_cfg);
> > + if (err) {
> > + netdev_err(port_priv->netdev,
> > + "dpsw_if_set_stp err %d\n", err);
> > + return err;
> > + }
> > +
> > + port_priv->stp_state = state;
> > +
> > + return 0;
> > +}
> > +
> > +static int ethsw_dellink_switch(struct ethsw_core *ethsw, u16 vid)
> > +{
> > + struct ethsw_port_priv *ppriv_local = NULL;
> > + int i, err;
> > +
> > + if (!ethsw->vlans[vid])
> > + return -ENOENT;
> > +
> > + err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, vid);
> > + if (err) {
> > + dev_err(ethsw->dev, "dpsw_vlan_remove err %d\n", err);
> > + return err;
> > + }
> > + ethsw->vlans[vid] = 0;
> > +
> > + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
> > + ppriv_local = ethsw->ports[i];
> > + ppriv_local->vlans[vid] = 0;
> > + }
> > +
> > + return 0;
> > +}
> > +
> > +static int ethsw_port_fdb_add_uc(struct ethsw_port_priv *port_priv,
> > + const unsigned char *addr)
> > +{
> > + struct dpsw_fdb_unicast_cfg entry = {0};
> > + int err;
> > +
> > + entry.if_egress = port_priv->idx;
> > + entry.type = DPSW_FDB_ENTRY_STATIC;
> > + ether_addr_copy(entry.mac_addr, addr);
> > +
> > + err = dpsw_fdb_add_unicast(port_priv->ethsw_data->mc_io, 0,
> > + port_priv->ethsw_data->dpsw_handle,
> > + 0, &entry);
> > + if (err)
> > + netdev_err(port_priv->netdev,
> > + "dpsw_fdb_add_unicast err %d\n", err);
> > + return err;
> > +}
> > +
> > +static int ethsw_port_fdb_del_uc(struct ethsw_port_priv *port_priv,
> > + const unsigned char *addr)
> > +{
> > + struct dpsw_fdb_unicast_cfg entry = {0};
> > + int err;
> > +
> > + entry.if_egress = port_priv->idx;
> > + entry.type = DPSW_FDB_ENTRY_STATIC;
> > + ether_addr_copy(entry.mac_addr, addr);
> > +
> > + err = dpsw_fdb_remove_unicast(port_priv->ethsw_data->mc_io, 0,
> > + port_priv->ethsw_data->dpsw_handle,
> > + 0, &entry);
> > + if (err)
> > + netdev_err(port_priv->netdev,
> > + "dpsw_fdb_remove_unicast err %d\n", err);
> > + return err;
> > +}
> > +
> > +static int ethsw_port_fdb_add_mc(struct ethsw_port_priv *port_priv,
> > + const unsigned char *addr)
> > +{
> > + struct dpsw_fdb_multicast_cfg entry = {0};
> > + int err;
> > +
> > + ether_addr_copy(entry.mac_addr, addr);
> > + entry.type = DPSW_FDB_ENTRY_STATIC;
> > + entry.num_ifs = 1;
> > + entry.if_id[0] = port_priv->idx;
> > +
> > + err = dpsw_fdb_add_multicast(port_priv->ethsw_data->mc_io, 0,
> > + port_priv->ethsw_data->dpsw_handle,
> > + 0, &entry);
> > + if (err)
> > +		netdev_err(port_priv->netdev, "dpsw_fdb_add_multicast err %d\n",
> > + err);
> > + return err;
> > +}
> > +
> > +static int ethsw_port_fdb_del_mc(struct ethsw_port_priv *port_priv,
> > + const unsigned char *addr)
> > +{
> > + struct dpsw_fdb_multicast_cfg entry = {0};
> > + int err;
> > +
> > + ether_addr_copy(entry.mac_addr, addr);
> > + entry.type = DPSW_FDB_ENTRY_STATIC;
> > + entry.num_ifs = 1;
> > + entry.if_id[0] = port_priv->idx;
> > +
> > + err = dpsw_fdb_remove_multicast(port_priv->ethsw_data->mc_io, 0,
> > + port_priv->ethsw_data->dpsw_handle,
> > + 0, &entry);
> > + if (err)
> > + netdev_err(port_priv->netdev,
> > + "dpsw_fdb_remove_multicast err %d\n", err);
> > + return err;
> > +}
> > +
> > +static void port_get_stats(struct net_device *netdev,
> > + struct rtnl_link_stats64 *stats)
> > +{
> > + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
> > + u64 tmp;
> > + int err;
> > +
> > + err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
> > + port_priv->ethsw_data->dpsw_handle,
> > + port_priv->idx,
> > + DPSW_CNT_ING_FRAME, &stats->rx_packets);
> > + if (err)
> > + goto error;
> > +
> > + err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
> > + port_priv->ethsw_data->dpsw_handle,
> > + port_priv->idx,
> > + DPSW_CNT_EGR_FRAME, &stats->tx_packets);
> > + if (err)
> > + goto error;
> > +
> > + err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
> > + port_priv->ethsw_data->dpsw_handle,
> > + port_priv->idx,
> > + DPSW_CNT_ING_BYTE, &stats->rx_bytes);
> > + if (err)
> > + goto error;
> > +
> > + err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
> > + port_priv->ethsw_data->dpsw_handle,
> > + port_priv->idx,
> > + DPSW_CNT_EGR_BYTE, &stats->tx_bytes);
> > + if (err)
> > + goto error;
> > +
> > + err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
> > + port_priv->ethsw_data->dpsw_handle,
> > + port_priv->idx,
> > + DPSW_CNT_ING_FRAME_DISCARD,
> > + &stats->rx_dropped);
> > + if (err)
> > + goto error;
> > +
> > + err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
> > + port_priv->ethsw_data->dpsw_handle,
> > + port_priv->idx,
> > + DPSW_CNT_ING_FLTR_FRAME,
> > + &tmp);
> > + if (err)
> > + goto error;
> > + stats->rx_dropped += tmp;
> > +
> > + err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
> > + port_priv->ethsw_data->dpsw_handle,
> > + port_priv->idx,
> > + DPSW_CNT_EGR_FRAME_DISCARD,
> > + &stats->tx_dropped);
> > + if (err)
> > + goto error;
> > +
> > + return;
> > +
> > +error:
> > + netdev_err(netdev, "dpsw_if_get_counter err %d\n", err);
> > +}
> > +
> > +static bool port_has_offload_stats(const struct net_device *netdev,
> > + int attr_id)
> > +{
> > + return (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT);
> > +}
> > +
> > +static int port_get_offload_stats(int attr_id,
> > + const struct net_device *netdev,
> > + void *sp)
> > +{
> > + switch (attr_id) {
> > + case IFLA_OFFLOAD_XSTATS_CPU_HIT:
> > + port_get_stats((struct net_device *)netdev, sp);
> > + return 0;
> > + }
> > +
> > + return -EINVAL;
> > +}
> > +
> > +static int port_change_mtu(struct net_device *netdev, int mtu)
> > +{
> > + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
> > + int err;
> > +
> > + err = dpsw_if_set_max_frame_length(port_priv->ethsw_data->mc_io,
> > + 0,
> > + port_priv->ethsw_data->dpsw_handle,
> > + port_priv->idx,
> > + (u16)ETHSW_L2_MAX_FRM(mtu));
> > + if (err) {
> > + netdev_err(netdev,
> > + "dpsw_if_set_max_frame_length() err %d\n", err);
> > + return err;
> > + }
> > +
> > + netdev->mtu = mtu;
> > + return 0;
> > +}
> > +
> > +static int port_carrier_state_sync(struct net_device *netdev)
> > +{
> > + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
> > + struct dpsw_link_state state;
> > + int err;
> > +
> > + err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
> > + port_priv->ethsw_data->dpsw_handle,
> > + port_priv->idx, &state);
> > + if (err) {
> > + netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
> > + return err;
> > + }
> > +
> > + WARN_ONCE(state.up > 1, "Garbage read into link_state");
> > +
> > + if (state.up != port_priv->link_state) {
> > + if (state.up)
> > + netif_carrier_on(netdev);
> > + else
> > + netif_carrier_off(netdev);
> > + port_priv->link_state = state.up;
> > + }
> > + return 0;
> > +}
> > +
> > +static int port_open(struct net_device *netdev)
> > +{
> > + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
> > + int err;
> > +
> > + /* No need to allow Tx as control interface is disabled */
> > + netif_tx_stop_all_queues(netdev);
> > +
> > + err = dpsw_if_enable(port_priv->ethsw_data->mc_io, 0,
> > + port_priv->ethsw_data->dpsw_handle,
> > + port_priv->idx);
> > + if (err) {
> > + netdev_err(netdev, "dpsw_if_enable err %d\n", err);
> > + return err;
> > + }
> > +
> > + /* sync carrier state */
> > + err = port_carrier_state_sync(netdev);
> > + if (err) {
> > + netdev_err(netdev,
> > + "port_carrier_state_sync err %d\n", err);
> > + goto err_carrier_sync;
> > + }
> > +
> > + return 0;
> > +
> > +err_carrier_sync:
> > + dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
> > + port_priv->ethsw_data->dpsw_handle,
> > + port_priv->idx);
> > + return err;
> > +}
> > +
> > +static int port_stop(struct net_device *netdev)
> > +{
> > + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
> > + int err;
> > +
> > + err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
> > + port_priv->ethsw_data->dpsw_handle,
> > + port_priv->idx);
> > + if (err) {
> > + netdev_err(netdev, "dpsw_if_disable err %d\n", err);
> > + return err;
> > + }
> > +
> > + return 0;
> > +}
> > +
> > +static netdev_tx_t port_dropframe(struct sk_buff *skb,
> > + struct net_device *netdev)
> > +{
> > + /* we don't support I/O for now, drop the frame */
> > + dev_kfree_skb_any(skb);
> > +
> > + return NETDEV_TX_OK;
> > +}
> > +
> > +static const struct net_device_ops ethsw_port_ops = {
> > + .ndo_open = port_open,
> > + .ndo_stop = port_stop,
> > +
> > + .ndo_set_mac_address = eth_mac_addr,
> > + .ndo_change_mtu = port_change_mtu,
> > + .ndo_has_offload_stats = port_has_offload_stats,
> > + .ndo_get_offload_stats = port_get_offload_stats,
> > +
> > + .ndo_start_xmit = port_dropframe,
> > +};
> > +
> > +static void ethsw_links_state_update(struct ethsw_core *ethsw)
> > +{
> > + int i;
> > +
> > + for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
> > + port_carrier_state_sync(ethsw->ports[i]->netdev);
> > +}
> > +
> > +static irqreturn_t ethsw_irq0_handler(int irq_num, void *arg)
> > +{
> > + return IRQ_WAKE_THREAD;
> > +}
> > +
> > +static irqreturn_t ethsw_irq0_handler_thread(int irq_num, void *arg)
> > +{
> > + struct device *dev = (struct device *)arg;
> > + struct ethsw_core *ethsw = dev_get_drvdata(dev);
> > +
> > + /* Mask the events and the if_id reserved bits to be cleared on read */
> > + u32 status = DPSW_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000;
> > + int err;
> > +
> > + err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
> > + DPSW_IRQ_INDEX_IF, &status);
> > + if (err) {
> > + dev_err(dev, "Can't get irq status (err %d)", err);
> > +
> > +		err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
> > + DPSW_IRQ_INDEX_IF, 0xFFFFFFFF);
> > + if (err)
> > + dev_err(dev, "Can't clear irq status (err %d)", err);
> > + goto out;
> > + }
> > +
> > + if (status & DPSW_IRQ_EVENT_LINK_CHANGED)
> > + ethsw_links_state_update(ethsw);
> > +
> > +out:
> > + return IRQ_HANDLED;
> > +}
> > +
> > +static int ethsw_setup_irqs(struct fsl_mc_device *sw_dev)
> > +{
> > + struct device *dev = &sw_dev->dev;
> > + struct ethsw_core *ethsw = dev_get_drvdata(dev);
> > + u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED;
> > + struct fsl_mc_device_irq *irq;
> > + int err;
> > +
> > + err = fsl_mc_allocate_irqs(sw_dev);
> > + if (err) {
> > + dev_err(dev, "MC irqs allocation failed\n");
> > + return err;
> > + }
> > +
> > + if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_IRQ_NUM)) {
> > + err = -EINVAL;
> > + goto free_irq;
> > + }
> > +
> > + err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
> > + DPSW_IRQ_INDEX_IF, 0);
> > + if (err) {
> > + dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
> > + goto free_irq;
> > + }
> > +
> > + irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF];
> > +
> > + err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
> > + ethsw_irq0_handler,
> > + ethsw_irq0_handler_thread,
> > + IRQF_NO_SUSPEND | IRQF_ONESHOT,
> > + dev_name(dev), dev);
> > + if (err) {
> > + dev_err(dev, "devm_request_threaded_irq(): %d", err);
> > + goto free_irq;
> > + }
> > +
> > + err = dpsw_set_irq_mask(ethsw->mc_io, 0, ethsw->dpsw_handle,
> > + DPSW_IRQ_INDEX_IF, mask);
> > + if (err) {
> > + dev_err(dev, "dpsw_set_irq_mask(): %d", err);
> > + goto free_devm_irq;
> > + }
> > +
> > + err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
> > + DPSW_IRQ_INDEX_IF, 1);
> > + if (err) {
> > + dev_err(dev, "dpsw_set_irq_enable(): %d", err);
> > + goto free_devm_irq;
> > + }
> > +
> > + return 0;
> > +
> > +free_devm_irq:
> > + devm_free_irq(dev, irq->msi_desc->irq, dev);
> > +free_irq:
> > + fsl_mc_free_irqs(sw_dev);
> > + return err;
> > +}
> > +
> > +static void ethsw_teardown_irqs(struct fsl_mc_device *sw_dev)
> > +{
> > + struct device *dev = &sw_dev->dev;
> > + struct ethsw_core *ethsw = dev_get_drvdata(dev);
> > + struct fsl_mc_device_irq *irq;
> > +
> > + irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF];
> > + dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
> > + DPSW_IRQ_INDEX_IF, 0);
>
> You can still print an error message here, in case something goes wrong.
>
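A minimal sketch of the suggested change, logging (but not propagating) a failure on the teardown path, assuming an int err local is added to ethsw_teardown_irqs():

    err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
                              DPSW_IRQ_INDEX_IF, 0);
    if (err)
        dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
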
> > + fsl_mc_free_irqs(sw_dev);
> > +}
> > +
> > +static int swdev_port_attr_get(struct net_device *netdev,
> > + struct switchdev_attr *attr)
> > +{
> > + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
> > +
> > + switch (attr->id) {
> > + case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
> > + attr->u.ppid.id_len = 1;
> > + attr->u.ppid.id[0] = port_priv->ethsw_data->dev_id;
> > + break;
> > + case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
> > + attr->u.brport_flags =
> > + (port_priv->ethsw_data->learning ? BR_LEARNING : 0) |
> > + (port_priv->flood ? BR_FLOOD : 0);
> > + break;
> > + case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
> > + attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD;
> > + break;
> > + default:
> > + return -EOPNOTSUPP;
> > + }
> > +
> > + return 0;
> > +}
> > +
> > +static int port_attr_stp_state_set(struct net_device *netdev,
> > + struct switchdev_trans *trans,
> > + u8 state)
> > +{
> > + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
> > +
> > + if (switchdev_trans_ph_prepare(trans))
> > + return 0;
> > +
> > + return ethsw_port_set_stp_state(port_priv, state);
> > +}
> > +
> > +static int port_attr_br_flags_set(struct net_device *netdev,
> > + struct switchdev_trans *trans,
> > + unsigned long flags)
> > +{
> > + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
> > + int err = 0;
> > +
> > + if (switchdev_trans_ph_prepare(trans))
> > + return 0;
> > +
> > + /* Learning is enabled per switch */
> > +	err = ethsw_set_learning(port_priv->ethsw_data, flags & BR_LEARNING);
> > + if (err)
> > + goto exit;
> > +
> > + err = ethsw_port_set_flood(port_priv, flags & BR_FLOOD);
> > +
> > +exit:
> > + return err;
> > +}
> > +
> > +static int swdev_port_attr_set(struct net_device *netdev,
> > + const struct switchdev_attr *attr,
> > + struct switchdev_trans *trans)
> > +{
> > + int err = 0;
> > +
> > + switch (attr->id) {
> > + case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
> > + err = port_attr_stp_state_set(netdev, trans,
> > + attr->u.stp_state);
> > + break;
> > + case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
> > + err = port_attr_br_flags_set(netdev, trans,
> > + attr->u.brport_flags);
> > + break;
> > + case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
> > + /* VLANs are supported by default */
> > + break;
> > + default:
> > + err = -EOPNOTSUPP;
> > + break;
> > + }
> > +
> > + return err;
> > +}
> > +
> > +static int port_vlans_add(struct net_device *netdev,
> > + const struct switchdev_obj_port_vlan *vlan,
> > + struct switchdev_trans *trans)
> > +{
> > + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
> > + int vid, err;
> > +
> > + if (switchdev_trans_ph_prepare(trans))
> > + return 0;
> > +
> > + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
> > + if (!port_priv->ethsw_data->vlans[vid]) {
> > + /* this is a new VLAN */
> > + err = ethsw_add_vlan(port_priv->ethsw_data, vid);
> > + if (err)
> > + return err;
> > +
> > +			port_priv->ethsw_data->vlans[vid] |= ETHSW_VLAN_GLOBAL;
> > + }
> > + err = ethsw_port_add_vlan(port_priv, vid, vlan->flags);
> > + if (err)
> > + break;
> > + }
> > +
> > + return err;
> > +}
> > +
> > +static int port_lookup_address(struct net_device *netdev, int is_uc,
> > + const unsigned char *addr)
> > +{
> > + struct netdev_hw_addr_list *list = (is_uc) ? &netdev->uc : &netdev->mc;
> > + struct netdev_hw_addr *ha;
> > +
> > + netif_addr_lock_bh(netdev);
> > + list_for_each_entry(ha, &list->list, list) {
> > + if (ether_addr_equal(ha->addr, addr)) {
> > + netif_addr_unlock_bh(netdev);
> > + return 1;
> > + }
> > + }
> > + netif_addr_unlock_bh(netdev);
> > + return 0;
> > +}
> > +
> > +static int port_mdb_add(struct net_device *netdev,
> > + const struct switchdev_obj_port_mdb *mdb,
> > + struct switchdev_trans *trans)
> > +{
> > + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
> > + int err;
> > +
> > + if (switchdev_trans_ph_prepare(trans))
> > + return 0;
> > +
> > + /* Check if address is already set on this port */
> > + if (port_lookup_address(netdev, 0, mdb->addr))
> > + return -EEXIST;
> > +
> > + err = ethsw_port_fdb_add_mc(port_priv, mdb->addr);
> > + if (err)
> > + return err;
> > +
> > + err = dev_mc_add(netdev, mdb->addr);
> > + if (err)
> > + netdev_err(netdev, "dev_mc_add err %d\n", err);
>
> In the error case, shouldn't there be an "ethsw_port_fdb_del_mc"?
>
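A sketch of the rollback being suggested here, removing the FDB entry again if dev_mc_add() fails:

    err = dev_mc_add(netdev, mdb->addr);
    if (err) {
        netdev_err(netdev, "dev_mc_add err %d\n", err);
        ethsw_port_fdb_del_mc(port_priv, mdb->addr);
    }

    return err;
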
> > +
> > + return err;
> > +}
> > +
> > +static int swdev_port_obj_add(struct net_device *netdev,
> > + const struct switchdev_obj *obj,
> > + struct switchdev_trans *trans)
> > +{
> > + int err;
> > +
> > + switch (obj->id) {
> > + case SWITCHDEV_OBJ_ID_PORT_VLAN:
> > + err = port_vlans_add(netdev,
> > + SWITCHDEV_OBJ_PORT_VLAN(obj),
> > + trans);
> > + break;
> > + case SWITCHDEV_OBJ_ID_PORT_MDB:
> > + err = port_mdb_add(netdev,
> > + SWITCHDEV_OBJ_PORT_MDB(obj),
> > + trans);
> > + break;
> > + default:
> > + err = -EOPNOTSUPP;
> > + break;
> > + }
> > +
> > + return err;
> > +}
> > +
> > +static int ethsw_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid)
> > +{
> > + struct ethsw_core *ethsw = port_priv->ethsw_data;
> > + struct net_device *netdev = port_priv->netdev;
> > + struct dpsw_vlan_if_cfg vcfg;
> > + int i, err, err2;
> > + bool is_oper;
> > +
> > + if (!port_priv->vlans[vid])
> > + return -ENOENT;
> > +
> > + if (port_priv->vlans[vid] & ETHSW_VLAN_PVID) {
> > + struct dpsw_tci_cfg tci_cfg = { 0 };
> > + /* Interface needs to be down to change PVID */
> > + is_oper = netif_oper_up(netdev);
> > +
> > + if (is_oper) {
> > + err = dpsw_if_disable(ethsw->mc_io, 0,
> > + ethsw->dpsw_handle,
> > + port_priv->idx);
> > + if (err) {
> > + netdev_err(netdev, "dpsw_if_disable err %d\n",
> > + err);
> > + goto exit_err;
> > + }
> > + }
> > +
> > + err = dpsw_if_set_tci(ethsw->mc_io, 0,
> > + ethsw->dpsw_handle,
> > + port_priv->idx, &tci_cfg);
> > + if (!err) {
> > + port_priv->vlans[vid] &= ~ETHSW_VLAN_PVID;
> > + port_priv->pvid = 0;
> > + } else {
> > + netdev_err(netdev, "dpsw_if_set_tci err %d\n", err);
> > + }
> > +
> > + if (is_oper) {
> > + err2 = dpsw_if_enable(ethsw->mc_io, 0,
> > + ethsw->dpsw_handle,
> > + port_priv->idx);
> > + if (err2) {
> > + netdev_err(netdev, "dpsw_if_enable err %d\n",
> > + err2);
> > + return err2;
> > + }
> > + }
> > +
> > + if (err)
> > + goto exit_err;
> > + }
> > +
> > + vcfg.num_ifs = 1;
> > + vcfg.if_id[0] = port_priv->idx;
> > + if (port_priv->vlans[vid] & ETHSW_VLAN_UNTAGGED) {
> > + err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0,
> > + ethsw->dpsw_handle,
> > + vid, &vcfg);
> > + if (err) {
> > + netdev_err(netdev,
> > + "dpsw_vlan_remove_if_untagged err %d\n",
> > + err);
> > + }
> > + port_priv->vlans[vid] &= ~ETHSW_VLAN_UNTAGGED;
> > + }
> > +
> > + if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
> > +		err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
> > + vid, &vcfg);
> > + if (err) {
> > + netdev_err(netdev,
> > + "dpsw_vlan_remove_if err %d\n", err);
> > + return err;
> > + }
> > + port_priv->vlans[vid] &= ~ETHSW_VLAN_MEMBER;
> > +
> > + /* Delete VLAN from switch if it is no longer configured on
> > + * any port
> > + */
> > + for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
> > +			if (ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER)
> > + return 0; /* Found a port member in VID */
> > +
> > + ethsw->vlans[vid] &= ~ETHSW_VLAN_GLOBAL;
> > +
> > + err = ethsw_dellink_switch(ethsw, vid);
> > + if (err)
> > + goto exit_err;
> > + }
> > +
> > + return 0;
> > +exit_err:
> > + return err;
> > +}
> > +
> > +static int port_vlans_del(struct net_device *netdev,
> > + const struct switchdev_obj_port_vlan *vlan)
> > +{
> > + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
> > + int vid, err;
> > +
> > + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
> > + err = ethsw_port_del_vlan(port_priv, vid);
> > + if (err)
> > + break;
> > + }
> > +
> > + return err;
> > +}
> > +
> > +static int port_mdb_del(struct net_device *netdev,
> > + const struct switchdev_obj_port_mdb *mdb)
> > +{
> > + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
> > + int err;
> > +
> > + if (!port_lookup_address(netdev, 0, mdb->addr))
> > + return -ENOENT;
> > +
> > + err = ethsw_port_fdb_del_mc(port_priv, mdb->addr);
> > + if (err)
> > + return err;
> > +
> > + err = dev_mc_del(netdev, mdb->addr);
> > + if (err) {
> > + netdev_err(netdev, "dev_mc_del err %d\n", err);
> > + return err;
> > + }
> > +
> > + return err;
> > +}
> > +
> > +static int swdev_port_obj_del(struct net_device *netdev,
> > + const struct switchdev_obj *obj)
> > +{
> > + int err;
> > +
> > + switch (obj->id) {
> > + case SWITCHDEV_OBJ_ID_PORT_VLAN:
> > +		err = port_vlans_del(netdev, SWITCHDEV_OBJ_PORT_VLAN(obj));
> > + break;
> > + case SWITCHDEV_OBJ_ID_PORT_MDB:
> > + err = port_mdb_del(netdev, SWITCHDEV_OBJ_PORT_MDB(obj));
> > + break;
> > + default:
> > + err = -EOPNOTSUPP;
> > + break;
> > + }
> > + return err;
> > +}
> > +
> > +static const struct switchdev_ops ethsw_port_switchdev_ops = {
> > + .switchdev_port_attr_get = swdev_port_attr_get,
> > + .switchdev_port_attr_set = swdev_port_attr_set,
> > + .switchdev_port_obj_add = swdev_port_obj_add,
> > + .switchdev_port_obj_del = swdev_port_obj_del,
> > +};
> > +
> > +/* For the moment, only flood setting needs to be updated */
> > +static int port_bridge_join(struct net_device *netdev)
> > +{
> > + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
> > +
> > + /* Enable flooding */
> > + return ethsw_port_set_flood(port_priv, 1);
> > +}
> > +
> > +static int port_bridge_leave(struct net_device *netdev)
> > +{
> > + struct ethsw_port_priv *port_priv = netdev_priv(netdev);
> > +
> > + /* Disable flooding */
> > + return ethsw_port_set_flood(port_priv, 0);
> > +}
> > +
> > +static int port_netdevice_event(struct notifier_block *unused,
> > + unsigned long event, void *ptr)
> > +{
> > + struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
> > + struct netdev_notifier_changeupper_info *info = ptr;
> > + struct net_device *upper_dev;
> > + int err = 0;
> > +
> > +	if (netdev->netdev_ops != &ethsw_port_ops)
> > + return NOTIFY_DONE;
> > +
> > + /* Handle just upper dev link/unlink for the moment */
> > + if (event == NETDEV_CHANGEUPPER) {
> > + upper_dev = info->upper_dev;
> > + if (netif_is_bridge_master(upper_dev)) {
> > + if (info->linking)
> > + err = port_bridge_join(netdev);
> > + else
> > + err = port_bridge_leave(netdev);
> > + }
> > + }
> > +
> > + return notifier_from_errno(err);
> > +}
> > +
> > +static struct notifier_block port_nb __read_mostly = {
> > + .notifier_call = port_netdevice_event,
> > +};
> > +
> > +struct ethsw_switchdev_event_work {
> > + struct work_struct work;
> > + struct switchdev_notifier_fdb_info fdb_info;
> > + struct net_device *dev;
> > + unsigned long event;
> > +};
> > +
> > +static void ethsw_switchdev_event_work(struct work_struct *work)
> > +{
> > + struct ethsw_switchdev_event_work *switchdev_work =
> > + container_of(work, struct ethsw_switchdev_event_work, work);
> > + struct net_device *dev = switchdev_work->dev;
> > + struct switchdev_notifier_fdb_info *fdb_info;
> > + struct ethsw_port_priv *port_priv;
> > +
> > + rtnl_lock();
> > + port_priv = netdev_priv(dev);
> > + fdb_info = &switchdev_work->fdb_info;
> > +
> > + switch (switchdev_work->event) {
> > + case SWITCHDEV_FDB_ADD_TO_DEVICE:
> > + ethsw_port_fdb_add_uc(netdev_priv(dev), fdb_info->addr);
> > + break;
> > + case SWITCHDEV_FDB_DEL_TO_DEVICE:
> > + ethsw_port_fdb_del_uc(netdev_priv(dev), fdb_info->addr);
> > + break;
> > + }
> > +
> > + rtnl_unlock();
> > + kfree(switchdev_work->fdb_info.addr);
> > + kfree(switchdev_work);
> > + dev_put(dev);
> > +}
> > +
> > +/* Called under rcu_read_lock() */
> > +static int port_switchdev_event(struct notifier_block *unused,
> > + unsigned long event, void *ptr)
> > +{
> > + struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
> > + struct ethsw_switchdev_event_work *switchdev_work;
> > + struct switchdev_notifier_fdb_info *fdb_info = ptr;
> > +
> > + switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
> > + if (!switchdev_work)
> > + return NOTIFY_BAD;
> > +
> > + INIT_WORK(&switchdev_work->work, ethsw_switchdev_event_work);
> > + switchdev_work->dev = dev;
> > + switchdev_work->event = event;
> > +
> > + switch (event) {
> > + case SWITCHDEV_FDB_ADD_TO_DEVICE:
> > + case SWITCHDEV_FDB_DEL_TO_DEVICE:
> > + memcpy(&switchdev_work->fdb_info, ptr,
> > + sizeof(switchdev_work->fdb_info));
> > +		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
> > + if (!switchdev_work->fdb_info.addr)
> > + goto err_addr_alloc;
> > +
> > + ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
> > + fdb_info->addr);
> > +
> > + /* Take a reference on the device to avoid being freed. */
> > + dev_hold(dev);
> > + break;
> > + default:
> > + return NOTIFY_DONE;
> > + }
> > +
> > + queue_work(ethsw_owq, &switchdev_work->work);
> > +
> > + return NOTIFY_DONE;
> > +
> > +err_addr_alloc:
> > + kfree(switchdev_work);
> > + return NOTIFY_BAD;
> > +}
> > +
> > +static struct notifier_block port_switchdev_nb = {
> > + .notifier_call = port_switchdev_event,
> > +};
> > +
> > +static int ethsw_register_notifier(struct device *dev)
> > +{
> > + int err;
> > +
> > + err = register_netdevice_notifier(&port_nb);
> > + if (err) {
> > + dev_err(dev, "Failed to register netdev notifier\n");
> > + return err;
> > + }
> > +
> > + err = register_switchdev_notifier(&port_switchdev_nb);
> > + if (err) {
> > + dev_err(dev, "Failed to register switchdev notifier\n");
> > + goto err_switchdev_nb;
> > + }
> > +
> > + return 0;
> > +
> > +err_switchdev_nb:
> > + unregister_netdevice_notifier(&port_nb);
> > + return err;
> > +}
> > +
> > +static int ethsw_open(struct ethsw_core *ethsw)
>
> Minor formatting error, tab in function signature - see following function as well.
>
> > +{
> > + struct ethsw_port_priv *port_priv = NULL;
> > + int i, err;
> > +
> > + err = dpsw_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
> > + if (err) {
> > + dev_err(ethsw->dev, "dpsw_enable err %d\n", err);
> > + return err;
> > + }
> > +
> > + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
> > + port_priv = ethsw->ports[i];
> > + err = dev_open(port_priv->netdev);
> > + if (err) {
> > +			netdev_err(port_priv->netdev, "dev_open err %d\n", err);
> > + return err;
> > + }
> > + }
> > +
> > + return 0;
> > +}
> > +
> > +static int ethsw_stop(struct ethsw_core *ethsw)
> > +{
> > + struct ethsw_port_priv *port_priv = NULL;
> > + int i, err;
> > +
> > + destroy_workqueue(ethsw_owq);
>
> If workqueue is destroyed here, shouldn't it be alloc'd in ethsw_open?
>
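One possible arrangement that keeps allocation and destruction symmetric (a sketch only; the per-port dev_open() loop from the original function is elided):

    static int ethsw_open(struct ethsw_core *ethsw)
    {
        int err;

        ethsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM,
                                            "ethsw");
        if (!ethsw_owq)
            return -ENOMEM;

        err = dpsw_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
        if (err) {
            dev_err(ethsw->dev, "dpsw_enable err %d\n", err);
            destroy_workqueue(ethsw_owq);
            return err;
        }

        /* dev_open() each port netdev, as in the original function */
        return 0;
    }

Alternatively, the destroy_workqueue() call could move out of ethsw_stop() into the remove path, so that it pairs with the allocation done in ethsw_init().
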
> > +
> > + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
> > + port_priv = ethsw->ports[i];
> > + dev_close(port_priv->netdev);
> > + }
> > +
> > + err = dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
> > + if (err) {
> > + dev_err(ethsw->dev, "dpsw_disable err %d\n", err);
> > + return err;
> > + }
> > +
> > + return 0;
> > +}
> > +
> > +static int ethsw_init(struct fsl_mc_device *sw_dev)
> > +{
> > + struct device *dev = &sw_dev->dev;
> > + struct ethsw_core *ethsw = dev_get_drvdata(dev);
> > + u16 version_major, version_minor, i;
> > + struct dpsw_stp_cfg stp_cfg;
> > + int err;
> > +
> > + ethsw->dev_id = sw_dev->obj_desc.id;
> > +
> > +	err = dpsw_open(ethsw->mc_io, 0, ethsw->dev_id, &ethsw->dpsw_handle);
> > + if (err) {
> > + dev_err(dev, "dpsw_open err %d\n", err);
> > + return err;
> > + }
> > +
> > + err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
> > +				  &ethsw->sw_attr);
> > + if (err) {
> > + dev_err(dev, "dpsw_get_attributes err %d\n", err);
> > + goto err_close;
> > + }
> > +
> > + err = dpsw_get_api_version(ethsw->mc_io, 0,
> > + &version_major,
> > + &version_minor);
> > + if (err) {
> > + dev_err(dev, "dpsw_get_api_version err %d\n", err);
> > + goto err_close;
> > + }
> > +
> > + /* Minimum supported DPSW version check */
> > + if (version_major < DPSW_MIN_VER_MAJOR ||
> > + (version_major == DPSW_MIN_VER_MAJOR &&
> > + version_minor < DPSW_MIN_VER_MINOR)) {
> > +		dev_err(dev, "DPSW version %d:%d not supported. Use %d.%d or greater.\n",
> > + version_major,
> > + version_minor,
> > + DPSW_MIN_VER_MAJOR, DPSW_MIN_VER_MINOR);
> > + err = -ENOTSUPP;
> > + goto err_close;
> > + }
> > +
> > + err = dpsw_reset(ethsw->mc_io, 0, ethsw->dpsw_handle);
> > + if (err) {
> > + dev_err(dev, "dpsw_reset err %d\n", err);
> > + goto err_close;
> > + }
> > +
> > +	err = dpsw_fdb_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle, 0,
> > + DPSW_FDB_LEARNING_MODE_HW);
> > + if (err) {
> > + dev_err(dev, "dpsw_fdb_set_learning_mode err %d\n", err);
> > + goto err_close;
> > + }
> > +
> > + stp_cfg.vlan_id = DEFAULT_VLAN_ID;
> > + stp_cfg.state = DPSW_STP_STATE_FORWARDING;
> > +
> > + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
> > + err = dpsw_if_set_stp(ethsw->mc_io, 0, ethsw->dpsw_handle, i,
> > + &stp_cfg);
> > + if (err) {
> > + dev_err(dev, "dpsw_if_set_stp err %d for port %d\n",
> > + err, i);
> > + goto err_close;
> > + }
> > +
> > + err = dpsw_if_set_broadcast(ethsw->mc_io, 0,
> > + ethsw->dpsw_handle, i, 1);
> > + if (err) {
> > + dev_err(dev,
> > + "dpsw_if_set_broadcast err %d for port %d\n",
> > + err, i);
> > + goto err_close;
> > + }
> > + }
> > +
> > +	ethsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM,
> > + "ethsw");
> > + if (!ethsw_owq) {
> > + err = -ENOMEM;
> > + goto err_close;
> > + }
> > +
> > + err = ethsw_register_notifier(dev);
> > + if (err)
> > + goto err_destroy_ordered_workqueue;
> > +
> > + return 0;
> > +
> > +err_destroy_ordered_workqueue:
> > + destroy_workqueue(ethsw_owq);
> > +
> > +err_close:
> > + dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
> > + return err;
> > +}
> > +
> > +static int ethsw_port_init(struct ethsw_port_priv *port_priv, u16 port)
> > +{
> > + const char def_mcast[ETH_ALEN] = {0x01, 0x00, 0x5e, 0x00, 0x00, 0x01};
> > + struct net_device *netdev = port_priv->netdev;
> > + struct ethsw_core *ethsw = port_priv->ethsw_data;
> > + struct dpsw_tci_cfg tci_cfg = {0};
> > + struct dpsw_vlan_if_cfg vcfg;
> > + int err;
> > +
> > + /* Switch starts with all ports configured to VLAN 1. Need to
> > + * remove this setting to allow configuration at bridge join
> > + */
> > + vcfg.num_ifs = 1;
> > + vcfg.if_id[0] = port_priv->idx;
> > +
> > +	err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0, ethsw->dpsw_handle,
> > + DEFAULT_VLAN_ID, &vcfg);
> > + if (err) {
> > +		netdev_err(netdev, "dpsw_vlan_remove_if_untagged err %d\n",
> > + err);
> > + return err;
> > + }
> > +
> > + err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
> > + port_priv->idx, &tci_cfg);
> > + if (err) {
> > + netdev_err(netdev, "dpsw_if_set_tci err %d\n", err);
> > + return err;
> > + }
> > +
> > + err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
> > + DEFAULT_VLAN_ID, &vcfg);
> > + if (err) {
> > + netdev_err(netdev, "dpsw_vlan_remove_if err %d\n", err);
> > + return err;
> > + }
> > +
> > + err = ethsw_port_fdb_add_mc(port_priv, def_mcast);
> > +
> > + return err;
> > +}
> > +
> > +static void ethsw_takedown(struct fsl_mc_device *sw_dev)
> > +{
> > + struct device *dev = &sw_dev->dev;
> > + struct ethsw_core *ethsw = dev_get_drvdata(dev);
> > + int err;
> > +
> > + err = unregister_switchdev_notifier(&port_switchdev_nb);
> > + if (err)
> > + dev_err(dev,
> > + "Failed to unregister switchdev notifier (%d)\n", err);
> > +
> > + err = unregister_netdevice_notifier(&port_nb);
> > + if (err)
> > + dev_err(dev,
> > + "Failed to unregister netdev notifier (%d)\n", err);
>
> Above 2 can be grouped into ethsw_unregister_notifier.
>
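A sketch of the helper this suggests, mirroring ethsw_register_notifier():

    static void ethsw_unregister_notifier(struct device *dev)
    {
        int err;

        err = unregister_switchdev_notifier(&port_switchdev_nb);
        if (err)
            dev_err(dev,
                    "Failed to unregister switchdev notifier (%d)\n", err);

        err = unregister_netdevice_notifier(&port_nb);
        if (err)
            dev_err(dev,
                    "Failed to unregister netdev notifier (%d)\n", err);
    }

ethsw_takedown() would then call ethsw_unregister_notifier(dev) before dpsw_close().
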
> > +
> > + err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
> > + if (err)
> > + dev_warn(dev, "dpsw_close err %d\n", err);
> > +}
> > +
> > +static int ethsw_remove(struct fsl_mc_device *sw_dev)
> > +{
> > + struct ethsw_port_priv *port_priv;
> > + struct ethsw_core *ethsw;
> > + struct device *dev;
> > + int i;
> > +
> > + dev = &sw_dev->dev;
> > + ethsw = dev_get_drvdata(dev);
> > +
> > + ethsw_teardown_irqs(sw_dev);
> > +
> > + rtnl_lock();
> > + ethsw_stop(ethsw);
> > + rtnl_unlock();
> > +
> > + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
> > + port_priv = ethsw->ports[i];
> > + unregister_netdev(port_priv->netdev);
> > + free_netdev(port_priv->netdev);
> > + }
> > + kfree(ethsw->ports);
> > +
> > + ethsw_takedown(sw_dev);
> > + fsl_mc_portal_free(ethsw->mc_io);
> > +
> > + kfree(ethsw);
> > +
> > + dev_set_drvdata(dev, NULL);
> > +
> > + return 0;
> > +}
> > +
> > +static int ethsw_probe_port(struct ethsw_core *ethsw, u16 port_idx)
> > +{
> > + struct ethsw_port_priv *port_priv;
> > + struct device *dev = ethsw->dev;
> > + struct net_device *port_netdev;
> > + int err;
> > +
> > + port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv));
> > + if (!port_netdev) {
> > + dev_err(dev, "alloc_etherdev error\n");
> > + return -ENOMEM;
> > + }
> > +
> > + port_priv = netdev_priv(port_netdev);
> > + port_priv->netdev = port_netdev;
> > + port_priv->ethsw_data = ethsw;
> > +
> > + port_priv->idx = port_idx;
> > + port_priv->stp_state = BR_STATE_FORWARDING;
> > +
> > + /* Flooding is implicitly enabled */
> > + port_priv->flood = true;
> > +
> > + SET_NETDEV_DEV(port_netdev, dev);
> > +	port_netdev->netdev_ops = &ethsw_port_ops;
> > +	port_netdev->switchdev_ops = &ethsw_port_switchdev_ops;
> > +
> > + /* Set MTU limits */
> > + port_netdev->min_mtu = ETH_MIN_MTU;
> > + port_netdev->max_mtu = ETHSW_MAX_FRAME_LENGTH;
> > +
> > + err = register_netdev(port_netdev);
> > + if (err < 0) {
> > + dev_err(dev, "register_netdev error %d\n", err);
> > + free_netdev(port_netdev);
> > + return err;
> > + }
> > +
> > + ethsw->ports[port_idx] = port_priv;
> > +
> > + return ethsw_port_init(port_priv, port_idx);
> > +}
> > +
> > +static int ethsw_probe(struct fsl_mc_device *sw_dev)
> > +{
> > + struct device *dev = &sw_dev->dev;
> > + struct ethsw_core *ethsw;
> > + int err;
> > + u16 i, j;
> > +
> > + /* Allocate switch core*/
> > + ethsw = kzalloc(sizeof(*ethsw), GFP_KERNEL);
> > +
> > + if (!ethsw)
> > + return -ENOMEM;
> > +
> > + ethsw->dev = dev;
> > + dev_set_drvdata(dev, ethsw);
> > +
> > +	err = fsl_mc_portal_allocate(sw_dev, 0, &ethsw->mc_io);
> > + if (err) {
> > + dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
> > + goto err_free_drvdata;
> > + }
> > +
> > + err = ethsw_init(sw_dev);
> > + if (err)
> > + goto err_free_cmdport;
> > +
> > + /* DEFAULT_VLAN_ID is implicitly configured on the switch */
> > + ethsw->vlans[DEFAULT_VLAN_ID] = ETHSW_VLAN_MEMBER;
> > +
> > + /* Learning is implicitly enabled */
> > + ethsw->learning = true;
> > +
> > + ethsw->ports = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->ports),
> > + GFP_KERNEL);
> > + if (!(ethsw->ports)) {
> > + err = -ENOMEM;
> > + goto err_takedown;
> > + }
> > +
> > + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
> > + err = ethsw_probe_port(ethsw, i);
> > + if (err) {
> > + /* Cleanup previous ports only */
> > + for (j = 0; j < i; j++) {
>
> I think you can go with
> for (i--; i >= 0; i--)
>
> or better yet:
> goto err_free_ports
> and refactor err_free_ports to look like:
> for (i--; i >= 0; i--) {
> ...
> }
>
> Best regards,
> Bogdan P.
>
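A sketch of that refactoring; note the port index would have to become a signed int for the countdown to terminate (it is currently declared u16, for which i >= 0 is always true):

    int i;  /* signed, so the reverse loop below can reach -1 and stop */

    for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
        err = ethsw_probe_port(ethsw, i);
        if (err)
            goto err_free_ports;
    }
    ...

    err_free_ports:
        for (i--; i >= 0; i--) {
            unregister_netdev(ethsw->ports[i]->netdev);
            free_netdev(ethsw->ports[i]->netdev);
        }
        kfree(ethsw->ports);
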
> > + unregister_netdev(ethsw->ports[j]->netdev);
> > + free_netdev(ethsw->ports[j]->netdev);
> > + }
> > + goto err_takedown;
> > + }
> > + }
> > +
> > + /* Switch starts up enabled */
> > + rtnl_lock();
> > + err = ethsw_open(ethsw);
> > + rtnl_unlock();
> > + if (err)
> > + goto err_free_ports;
> > +
> > + /* Setup IRQs */
> > + err = ethsw_setup_irqs(sw_dev);
> > + if (err)
> > + goto err_stop;
> > +
> > + dev_info(dev, "probed %d port switch\n", ethsw->sw_attr.num_ifs);
> > + return 0;
> > +
> > +err_stop:
> > + rtnl_lock();
> > + ethsw_stop(ethsw);
> > + rtnl_unlock();
> > +
> > +err_free_ports:
> > + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
> > + unregister_netdev(ethsw->ports[i]->netdev);
> > + free_netdev(ethsw->ports[i]->netdev);
> > + }
> > + kfree(ethsw->ports);
> > +
> > +err_takedown:
> > + ethsw_takedown(sw_dev);
> > +
> > +err_free_cmdport:
> > + fsl_mc_portal_free(ethsw->mc_io);
> > +
> > +err_free_drvdata:
> > + kfree(ethsw);
> > + dev_set_drvdata(dev, NULL);
> > +
> > + return err;
> > +}
> > +
> > +static const struct fsl_mc_device_id ethsw_match_id_table[] = {
> > + {
> > + .vendor = FSL_MC_VENDOR_FREESCALE,
> > + .obj_type = "dpsw",
> > + },
> > + { .vendor = 0x0 }
> > +};
> > +MODULE_DEVICE_TABLE(fslmc, ethsw_match_id_table);
> > +
> > +static struct fsl_mc_driver eth_sw_drv = {
> > + .driver = {
> > + .name = KBUILD_MODNAME,
> > + .owner = THIS_MODULE,
> > + },
> > + .probe = ethsw_probe,
> > + .remove = ethsw_remove,
> > + .match_id_table = ethsw_match_id_table
> > +};
> > +
> > +module_fsl_mc_driver(eth_sw_drv);
> > +
> > +MODULE_LICENSE("Dual BSD/GPL");
> > +MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver");
> > diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.h b/drivers/staging/fsl-dpaa2/ethsw/ethsw.h
> > new file mode 100644
> > index 0000000..8c1d645
> > --- /dev/null
> > +++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.h
> > @@ -0,0 +1,88 @@
> > +/* Copyright 2014-2017 Freescale Semiconductor Inc.
> > + * Copyright 2017 NXP
> > + *
> > + * Redistribution and use in source and binary forms, with or without
> > + * modification, are permitted provided that the following conditions are met:
> > + * * Redistributions of source code must retain the above copyright
> > + * notice, this list of conditions and the following disclaimer.
> > + * * Redistributions in binary form must reproduce the above copyright
> > + * notice, this list of conditions and the following disclaimer in the
> > + * documentation and/or other materials provided with the distribution.
> > + * * Neither the name of the above-listed copyright holders nor the
> > + * names of any contributors may be used to endorse or promote products
> > + * derived from this software without specific prior written permission.
> > + *
> > + *
> > + * ALTERNATIVELY, this software may be distributed under the terms of the
> > + * GNU General Public License ("GPL") as published by the Free Software
> > + * Foundation, either version 2 of that License or (at your option) any
> > + * later version.
> > + *
> > + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
> > + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
> > + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
> > + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
> > + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
> > + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
> > + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
> > + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
> > + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
> > + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
> > + * POSSIBILITY OF SUCH DAMAGE.
> > + */
> > +
> > +#ifndef __ETHSW_H
> > +#define __ETHSW_H
> > +
> > +#include <linux/netdevice.h>
> > +#include <linux/etherdevice.h>
> > +#include <linux/rtnetlink.h>
> > +#include <linux/if_vlan.h>
> > +#include <uapi/linux/if_bridge.h>
> > +#include <net/switchdev.h>
> > +#include <linux/if_bridge.h>
> > +
> > +#include "dpsw.h"
> > +
> > +/* Number of IRQs supported */
> > +#define DPSW_IRQ_NUM 2
> > +
> > +#define ETHSW_VLAN_MEMBER 1
> > +#define ETHSW_VLAN_UNTAGGED 2
> > +#define ETHSW_VLAN_PVID 4
> > +#define ETHSW_VLAN_GLOBAL 8
> > +
> > +/* Maximum Frame Length supported by HW (currently 10k) */
> > +#define DPAA2_MFL (10 * 1024)
> > +#define ETHSW_MAX_FRAME_LENGTH (DPAA2_MFL - VLAN_ETH_HLEN - ETH_FCS_LEN)
> > +#define ETHSW_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN + ETH_FCS_LEN)
> > +
> > +struct ethsw_core;
> > +
> > +/* Per port private data */
> > +struct ethsw_port_priv {
> > + struct net_device *netdev;
> > + u16 idx;
> > + struct ethsw_core *ethsw_data;
> > + u8 link_state;
> > + u8 stp_state;
> > + bool flood;
> > +
> > + u8 vlans[VLAN_VID_MASK + 1];
> > + u16 pvid;
> > +};
> > +
> > +/* Switch data */
> > +struct ethsw_core {
> > + struct device *dev;
> > + struct fsl_mc_io *mc_io;
> > + u16 dpsw_handle;
> > + struct dpsw_attr sw_attr;
> > + int dev_id;
> > + struct ethsw_port_priv **ports;
> > +
> > + u8 vlans[VLAN_VID_MASK + 1];
> > + bool learning;
> > +};
> > +
> > +#endif /* __ETHSW_H */
> > --
> > 1.9.1
On September 29, 2017 6:59:18 AM PDT, Razvan Stefanescu <[email protected]> wrote:
>
>
>> -----Original Message-----
>> From: Bogdan Purcareata
>> Sent: Friday, September 29, 2017 16:36
>> To: Razvan Stefanescu <[email protected]>;
>> [email protected]
>> Cc: [email protected]; [email protected];
>> [email protected]; [email protected]; [email protected]; Alexandru
>Marginean
>> <[email protected]>; Ruxandra Ioana Radulescu
>> <[email protected]>; Laurentiu Tudor
><[email protected]>;
>> [email protected]
>> Subject: RE: [RESEND PATCH 2/6] staging: fsl-dpaa2/ethsw: Add
>Freescale DPAA2
>> Ethernet Switch driver
>>
>> > Introduce the DPAA2 Ethernet Switch driver, which manages Datapath
>Switch
>> > (DPSW) objects discovered on the MC bus.
>> >
>> > Suggested-by: Alexandru Marginean <[email protected]>
>> > Signed-off-by: Razvan Stefanescu <[email protected]>
This looks pretty good for a new switchdev driver. Is there a reason you can't target drivers/net/ethernet instead of staging? Is it because the MC bus code is still in staging (AFAICT)?
--
Florian