2023-06-30 13:20:46

by Zeng, Xin

Subject: [RFC 0/5] crypto: qat - enable SRIOV VF live migration

This set enables live migration for Intel QAT GEN4 SRIOV Virtual
Functions (VFs). It is composed of 5 patches.
The first is a pre-requisite. It adds logic to the QAT PF driver that
allows the state of a bank to be saved and restored (a virtual function
is a wrapper around a set of banks) and a ring pair to be drained. The
second patch adds to the QAT PF driver a set of interfaces for saving
and restoring the state of a VF; these will be called by the module
qat_vfio_pci, which is introduced in the last patch. The third adds
HZ_PER_GHZ, which is required by the fourth. The fourth implements the
interfaces defined in the second patch. The last one adds a
QAT-specific vfio-pci extension that intercepts the vfio device
operations for a QAT VF to enable live migration.

Here are the steps required to test the live migration of a QAT GEN4 VF:
1. Bind one or more QAT GEN4 VF devices to the module qat_vfio_pci.ko
2. Assign the VFs to the virtual machine and enable device live
migration
3. Run a workload using a QAT VF inside the VM, for example using qatlib
(https://github.com/intel/qatlib)
4. Migrate the VM from the source node to a destination node

Support for P2P states and AER will be implemented in the final
version. Any feedback is appreciated!

Andy Shevchenko (1):
units: Add HZ_PER_GHZ

Siming Wan (1):
crypto: qat - add bank save/restore and RP drain

Xin Zeng (3):
crypto: qat - add interface for live migration
crypto: qat - implement interface for live migration
vfio/qat: Add vfio_pci driver for Intel QAT VF devices

.../intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 9 +-
.../intel/qat/qat_4xxx/adf_4xxx_hw_data.h | 3 +-
.../intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c | 2 +-
.../qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c | 2 +-
.../intel/qat/qat_c62x/adf_c62x_hw_data.c | 2 +-
.../intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c | 2 +-
drivers/crypto/intel/qat/qat_common/Makefile | 4 +-
.../intel/qat/qat_common/adf_accel_devices.h | 78 ++-
.../intel/qat/qat_common/adf_gen2_hw_data.c | 17 +-
.../intel/qat/qat_common/adf_gen2_hw_data.h | 10 +-
.../intel/qat/qat_common/adf_gen4_hw_data.c | 415 +++++++++++-
.../intel/qat/qat_common/adf_gen4_hw_data.h | 152 ++++-
.../intel/qat/qat_common/adf_gen4_pfvf.c | 7 +-
.../intel/qat/qat_common/adf_gen4_pfvf.h | 7 +
.../intel/qat/qat_common/adf_gen4_vf_mig.c | 609 ++++++++++++++++++
.../intel/qat/qat_common/adf_mstate_mgr.c | 267 ++++++++
.../intel/qat/qat_common/adf_mstate_mgr.h | 99 +++
.../intel/qat/qat_common/adf_transport.c | 11 +-
.../crypto/intel/qat/qat_common/adf_vf_isr.c | 2 +-
.../crypto/intel/qat/qat_common/qat_vf_mig.c | 106 +++
.../qat/qat_dh895xcc/adf_dh895xcc_hw_data.c | 2 +-
.../qat_dh895xccvf/adf_dh895xccvf_hw_data.c | 2 +-
drivers/vfio/pci/Kconfig | 2 +
drivers/vfio/pci/Makefile | 1 +
drivers/vfio/pci/qat/Kconfig | 13 +
drivers/vfio/pci/qat/Makefile | 4 +
drivers/vfio/pci/qat/qat_vfio_pci_main.c | 518 +++++++++++++++
include/linux/qat/qat_vf_mig.h | 15 +
include/linux/units.h | 6 +-
29 files changed, 2332 insertions(+), 35 deletions(-)
create mode 100644 drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c
create mode 100644 drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.c
create mode 100644 drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.h
create mode 100644 drivers/crypto/intel/qat/qat_common/qat_vf_mig.c
create mode 100644 drivers/vfio/pci/qat/Kconfig
create mode 100644 drivers/vfio/pci/qat/Makefile
create mode 100644 drivers/vfio/pci/qat/qat_vfio_pci_main.c
create mode 100644 include/linux/qat/qat_vf_mig.h

--
2.18.2



2023-06-30 13:21:00

by Zeng, Xin

Subject: [RFC 2/5] crypto: qat - add interface for live migration

Extend the QAT PF driver with a new interface to be used for VF live
migration. This will be called by the QAT extension in vfio-pci
(qat_vfio_pci), which is introduced later in this series.
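
Below is a minimal usage sketch (an illustration, not part of this
patch) of how the vfio side is expected to drive this interface when
stopping a VF and collecting its state. The function name, the buffer
handling and the error paths are assumptions; pf_dev is assumed to be
the pci_dev of the parent PF:

#include <linux/qat/qat_vf_mig.h>

static int example_stop_and_save(struct pci_dev *pf_dev, u32 vf_nr,
				 u8 *buf, u64 buf_sz)
{
	int ret;

	/* Quiesce the VF before snapshotting its state */
	ret = qat_vfmig_suspend_device(pf_dev, vf_nr);
	if (ret)
		return ret;

	/* Serialize the VF state into the caller-provided buffer */
	return qat_vfmig_save_state(pf_dev, vf_nr, buf, buf_sz);
}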

Co-developed-by: Yahui Cao <[email protected]>
Signed-off-by: Yahui Cao <[email protected]>
Signed-off-by: Xin Zeng <[email protected]>
---
drivers/crypto/intel/qat/qat_common/Makefile | 2 +-
.../intel/qat/qat_common/adf_accel_devices.h | 13 +++
.../crypto/intel/qat/qat_common/qat_vf_mig.c | 106 ++++++++++++++++++
include/linux/qat/qat_vf_mig.h | 15 +++
4 files changed, 135 insertions(+), 1 deletion(-)
create mode 100644 drivers/crypto/intel/qat/qat_common/qat_vf_mig.c
create mode 100644 include/linux/qat/qat_vf_mig.h

diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile
index 38de3aba6e8c..3855f2fa5733 100644
--- a/drivers/crypto/intel/qat/qat_common/Makefile
+++ b/drivers/crypto/intel/qat/qat_common/Makefile
@@ -33,4 +33,4 @@ intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o \
intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \
adf_pfvf_pf_msg.o adf_pfvf_pf_proto.o \
adf_pfvf_vf_msg.o adf_pfvf_vf_proto.o \
- adf_gen2_pfvf.o adf_gen4_pfvf.o
+ adf_gen2_pfvf.o adf_gen4_pfvf.o qat_vf_mig.o
diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
index 7fc2fd042916..adda2cac6af1 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
@@ -217,6 +217,17 @@ struct adf_dc_ops {
void (*build_deflate_ctx)(void *ctx);
};

+struct adf_vfmig_ops {
+ int (*init_device)(struct adf_accel_dev *accel_dev, u32 vf_nr);
+ void (*shutdown_device)(struct adf_accel_dev *accel_dev, u32 vf_nr);
+ int (*save_state)(struct adf_accel_dev *accel_dev, u32 vf_nr,
+ u8 *buf, u64 buf_sz);
+ int (*load_state)(struct adf_accel_dev *accel_dev, u32 vf_nr,
+ u8 *buf, u64 buf_sz);
+ int (*suspend_device)(struct adf_accel_dev *accel_dev, u32 vf_nr);
+ int (*resume_device)(struct adf_accel_dev *accel_dev, u32 vf_nr);
+};
+
struct adf_hw_device_data {
struct adf_hw_device_class *dev_class;
u32 (*get_accel_mask)(struct adf_hw_device_data *self);
@@ -263,6 +274,7 @@ struct adf_hw_device_data {
struct adf_hw_csr_info csr_info;
struct adf_pfvf_ops pfvf_ops;
struct adf_dc_ops dc_ops;
+ struct adf_vfmig_ops vfmig_ops;
const char *fw_name;
const char *fw_mmp_name;
u32 fuses;
@@ -309,6 +321,7 @@ struct adf_hw_device_data {
#define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_info.csr_ops)
#define GET_PFVF_OPS(accel_dev) (&(accel_dev)->hw_device->pfvf_ops)
#define GET_DC_OPS(accel_dev) (&(accel_dev)->hw_device->dc_ops)
+#define GET_VFMIG_OPS(accel_dev) (&(accel_dev)->hw_device->vfmig_ops)
#define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev

struct adf_admin_comms;
diff --git a/drivers/crypto/intel/qat/qat_common/qat_vf_mig.c b/drivers/crypto/intel/qat/qat_common/qat_vf_mig.c
new file mode 100644
index 000000000000..1fb86952c9ac
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/qat_vf_mig.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+
+#include <linux/bug.h>
+#include <linux/dev_printk.h>
+#include <linux/export.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/qat/qat_vf_mig.h>
+#include "adf_common_drv.h"
+
+int qat_vfmig_init_device(struct pci_dev *pdev, u32 vf_nr)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ if (!accel_dev) {
+ dev_err(&pdev->dev, "Failed to find accel_dev\n");
+ return -ENODEV;
+ }
+
+ if (WARN_ON(!GET_VFMIG_OPS(accel_dev)->init_device))
+ return -EINVAL;
+
+ return GET_VFMIG_OPS(accel_dev)->init_device(accel_dev, vf_nr);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_init_device);
+
+void qat_vfmig_shutdown_device(struct pci_dev *pdev, u32 vf_nr)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ if (!accel_dev) {
+ dev_err(&pdev->dev, "Failed to find accel_dev\n");
+ return;
+ }
+
+ if (WARN_ON(!GET_VFMIG_OPS(accel_dev)->shutdown_device))
+ return;
+
+ GET_VFMIG_OPS(accel_dev)->shutdown_device(accel_dev, vf_nr);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_shutdown_device);
+
+int qat_vfmig_suspend_device(struct pci_dev *pdev, u32 vf_nr)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ if (!accel_dev) {
+ dev_err(&pdev->dev, "Failed to find accel_dev\n");
+ return -ENODEV;
+ }
+
+ if (WARN_ON(!GET_VFMIG_OPS(accel_dev)->suspend_device))
+ return -EINVAL;
+
+ return GET_VFMIG_OPS(accel_dev)->suspend_device(accel_dev, vf_nr);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_suspend_device);
+
+int qat_vfmig_resume_device(struct pci_dev *pdev, u32 vf_nr)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ if (!accel_dev) {
+ dev_err(&pdev->dev, "Failed to find accel_dev\n");
+ return -ENODEV;
+ }
+
+ if (WARN_ON(!GET_VFMIG_OPS(accel_dev)->resume_device))
+ return -EINVAL;
+
+ return GET_VFMIG_OPS(accel_dev)->resume_device(accel_dev, vf_nr);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_resume_device);
+
+int qat_vfmig_save_state(struct pci_dev *pdev, u32 vf_nr, u8 *buf, u64 buf_sz)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ if (!accel_dev) {
+ dev_err(&pdev->dev, "Failed to find accel_dev\n");
+ return -ENODEV;
+ }
+
+ if (WARN_ON(!GET_VFMIG_OPS(accel_dev)->save_state))
+ return -EINVAL;
+
+ return GET_VFMIG_OPS(accel_dev)->save_state(accel_dev, vf_nr, buf, buf_sz);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_save_state);
+
+int qat_vfmig_load_state(struct pci_dev *pdev, u32 vf_nr, u8 *buf, u64 buf_sz)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ if (!accel_dev) {
+ dev_err(&pdev->dev, "Failed to find accel_dev\n");
+ return -ENODEV;
+ }
+
+ if (WARN_ON(!GET_VFMIG_OPS(accel_dev)->load_state))
+ return -EINVAL;
+
+ return GET_VFMIG_OPS(accel_dev)->load_state(accel_dev, vf_nr, buf, buf_sz);
+}
+EXPORT_SYMBOL_GPL(qat_vfmig_load_state);
diff --git a/include/linux/qat/qat_vf_mig.h b/include/linux/qat/qat_vf_mig.h
new file mode 100644
index 000000000000..09101be800ce
--- /dev/null
+++ b/include/linux/qat/qat_vf_mig.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+
+#ifndef QAT_VF_MIG_H_
+#define QAT_VF_MIG_H_
+
+struct pci_dev;
+
+int qat_vfmig_init_device(struct pci_dev *pdev, u32 vf_nr);
+void qat_vfmig_shutdown_device(struct pci_dev *pdev, u32 vf_nr);
+int qat_vfmig_save_state(struct pci_dev *pdev, u32 vf_nr, u8 *buf, u64 buf_sz);
+int qat_vfmig_load_state(struct pci_dev *pdev, u32 vf_nr, u8 *buf, u64 buf_sz);
+int qat_vfmig_suspend_device(struct pci_dev *pdev, u32 vf_nr);
+int qat_vfmig_resume_device(struct pci_dev *pdev, u32 vf_nr);
+#endif /*QAT_VF_MIG_H_*/
--
2.18.2


2023-06-30 13:21:00

by Zeng, Xin

Subject: [RFC 3/5] units: Add HZ_PER_GHZ

From: Andy Shevchenko <[email protected]>

There is going to be a new user of the HZ_PER_GHZ definition besides
any possibly existing ones. Add the definition to the header.

While at it, split Hz and kHz groups of the multipliers for better
maintenance and readability.
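
For reference, the new user added later in this series defines the
accelerator engine clock frequency in terms of it:

#define ADF_4XXX_AE_FREQ	(1 * HZ_PER_GHZ)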

Signed-off-by: Andy Shevchenko <[email protected]>
Reviewed-by: Giovanni Cabiddu <[email protected]>
Signed-off-by: Xin Zeng <[email protected]>
---
include/linux/units.h | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/include/linux/units.h b/include/linux/units.h
index 2793a41e73a2..8d1dd5609906 100644
--- a/include/linux/units.h
+++ b/include/linux/units.h
@@ -20,12 +20,16 @@
#define PICO 1000000000000ULL
#define FEMTO 1000000000000000ULL

+/* Hz based multipliers */
#define NANOHZ_PER_HZ 1000000000UL
#define MICROHZ_PER_HZ 1000000UL
#define MILLIHZ_PER_HZ 1000UL
#define HZ_PER_KHZ 1000UL
-#define KHZ_PER_MHZ 1000UL
#define HZ_PER_MHZ 1000000UL
+#define HZ_PER_GHZ 1000000000UL
+
+/* kHz based multipliers */
+#define KHZ_PER_MHZ 1000UL

#define MILLIWATT_PER_WATT 1000UL
#define MICROWATT_PER_MILLIWATT 1000UL
--
2.18.2


2023-06-30 13:21:01

by Zeng, Xin

Subject: [RFC 1/5] crypto: qat - add bank save/restore and RP drain

From: Siming Wan <[email protected]>

Extend the CSR ops and add logic to save and restore the state of a
bank and to drain a ring pair. This will be used to implement live
migration.

This is implemented only for QAT GEN4 devices.
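
Below is a minimal sketch (not part of this patch) of how a caller is
expected to combine the new ops to quiesce and snapshot a single bank.
The helper name and the timeout value are illustrative only:

static int example_quiesce_and_save(struct adf_accel_dev *accel_dev,
				    u32 bank_nr, struct bank_state *state)
{
	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	int ret;

	/* Drain in-flight requests on the ring pair first */
	ret = hw_data->ring_pair_drain(accel_dev, bank_nr,
				       100 * USEC_PER_MSEC);
	if (ret)
		return ret;

	/* Then snapshot the bank CSRs and the per-ring state */
	return hw_data->bank_state_save(accel_dev, bank_nr, state);
}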

Co-developed-by: Svyatoslav Pankratov <[email protected]>
Signed-off-by: Svyatoslav Pankratov <[email protected]>
Signed-off-by: Siming Wan <[email protected]>
Signed-off-by: Xin Zeng <[email protected]>
---
.../intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 5 +-
.../intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c | 2 +-
.../qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c | 2 +-
.../intel/qat/qat_c62x/adf_c62x_hw_data.c | 2 +-
.../intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c | 2 +-
.../intel/qat/qat_common/adf_accel_devices.h | 60 ++-
.../intel/qat/qat_common/adf_gen2_hw_data.c | 17 +-
.../intel/qat/qat_common/adf_gen2_hw_data.h | 10 +-
.../intel/qat/qat_common/adf_gen4_hw_data.c | 362 +++++++++++++++++-
.../intel/qat/qat_common/adf_gen4_hw_data.h | 131 ++++++-
.../intel/qat/qat_common/adf_transport.c | 11 +-
.../crypto/intel/qat/qat_common/adf_vf_isr.c | 2 +-
.../qat/qat_dh895xcc/adf_dh895xcc_hw_data.c | 2 +-
.../qat_dh895xccvf/adf_dh895xccvf_hw_data.c | 2 +-
14 files changed, 584 insertions(+), 26 deletions(-)

diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
index e543a9e24a06..22fe4e6834c1 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -505,11 +505,14 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
hw_data->disable_iov = adf_disable_sriov;
hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
+ hw_data->ring_pair_drain = adf_gen4_ring_pair_drain;
+ hw_data->bank_state_save = adf_gen4_bank_state_save;
+ hw_data->bank_state_restore = adf_gen4_bank_state_restore;
hw_data->enable_pm = adf_gen4_enable_pm;
hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
hw_data->dev_config = adf_gen4_dev_config;

- adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
+ adf_gen4_init_hw_csr_ops(&hw_data->csr_info);
adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops);
adf_gen4_init_dc_ops(&hw_data->dc_ops);
}
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
index 475643654e64..04a833affb29 100644
--- a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
@@ -129,7 +129,7 @@ void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
hw_data->dev_config = adf_gen2_dev_config;

adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
- adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
+ adf_gen2_init_hw_csr_ops(&hw_data->csr_info);
adf_gen2_init_dc_ops(&hw_data->dc_ops);
}

diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
index 84d9486e04de..5cb46b2cd278 100644
--- a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
@@ -91,7 +91,7 @@ void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
hw_data->dev_config = adf_gen2_dev_config;
adf_devmgr_update_class_index(hw_data);
adf_gen2_init_vf_pfvf_ops(&hw_data->pfvf_ops);
- adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
+ adf_gen2_init_hw_csr_ops(&hw_data->csr_info);
adf_gen2_init_dc_ops(&hw_data->dc_ops);
}

diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
index e14270703670..fd6f18c5f8e2 100644
--- a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
@@ -131,7 +131,7 @@ void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
hw_data->dev_config = adf_gen2_dev_config;

adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
- adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
+ adf_gen2_init_hw_csr_ops(&hw_data->csr_info);
adf_gen2_init_dc_ops(&hw_data->dc_ops);
}

diff --git a/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c
index 751d7aa57fc7..ca3e0cea056f 100644
--- a/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c
@@ -91,7 +91,7 @@ void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
hw_data->dev_config = adf_gen2_dev_config;
adf_devmgr_update_class_index(hw_data);
adf_gen2_init_vf_pfvf_ops(&hw_data->pfvf_ops);
- adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
+ adf_gen2_init_hw_csr_ops(&hw_data->csr_info);
adf_gen2_init_dc_ops(&hw_data->dc_ops);
}

diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
index 0399417b91fc..7fc2fd042916 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
@@ -119,6 +119,41 @@ struct admin_info {
u32 mailbox_offset;
};

+struct ring_config {
+ u32 config;
+ u64 base;
+ u32 head;
+ u32 tail;
+};
+
+struct bank_state {
+ u32 reservd0;
+ u32 reservd1;
+ u32 num_rings;
+ u32 ringstat0;
+ u32 ringstat1;
+ u32 ringuostat;
+ u32 ringestat;
+ u32 ringnestat;
+ u32 ringnfstat;
+ u32 ringfstat;
+ u32 ringcstat0;
+ u32 ringcstat1;
+ u32 ringcstat2;
+ u32 ringcstat3;
+ u32 iaintflagen;
+ u32 iaintflagreg;
+ u32 iaintflagsrcsel0;
+ u32 iaintflagsrcsel1;
+ u32 iaintcolen;
+ u32 iaintcolctl;
+ u32 iaintflagandcolen;
+ u32 ringexpstat;
+ u32 ringexpintenable;
+ u32 ringsrvarben;
+ struct ring_config rings[ADF_ETR_MAX_RINGS_PER_BANK];
+};
+
struct adf_hw_csr_ops {
u64 (*build_csr_ring_base_addr)(dma_addr_t addr, u32 size);
u32 (*read_csr_ring_head)(void __iomem *csr_base_addr, u32 bank,
@@ -136,15 +171,28 @@ struct adf_hw_csr_ops {
u32 ring, dma_addr_t addr);
void (*write_csr_int_flag)(void __iomem *csr_base_addr, u32 bank,
u32 value);
- void (*write_csr_int_srcsel)(void __iomem *csr_base_addr, u32 bank);
+ u32 (*read_csr_int_srcsel)(void __iomem *csr_base_addr, u32 bank, u32 idx);
+ void (*write_csr_int_srcsel)(void __iomem *csr_base_addr, u32 bank,
+ u32 idx, u32 value);
+ u32 (*read_csr_int_col_en)(void __iomem *csr_base_addr, u32 bank);
void (*write_csr_int_col_en)(void __iomem *csr_base_addr, u32 bank,
u32 value);
+ u32 (*read_csr_int_col_ctl)(void __iomem *csr_base_addr, u32 bank);
void (*write_csr_int_col_ctl)(void __iomem *csr_base_addr, u32 bank,
u32 value);
void (*write_csr_int_flag_and_col)(void __iomem *csr_base_addr,
u32 bank, u32 value);
+ u32 (*read_csr_ring_srv_arb_en)(void __iomem *csr_base_addr, u32 bank);
void (*write_csr_ring_srv_arb_en)(void __iomem *csr_base_addr, u32 bank,
u32 value);
+ u32 (*get_src_sel_mask)(void);
+ u32 (*get_int_col_ctl_enable_mask)(void);
+};
+
+struct adf_hw_csr_info {
+ struct adf_hw_csr_ops csr_ops;
+ u32 num_rings_per_int_srcsel;
+ u32 arb_enable_mask;
};

struct adf_cfg_device_data;
@@ -200,14 +248,20 @@ struct adf_hw_device_data {
void (*enable_ints)(struct adf_accel_dev *accel_dev);
void (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev);
int (*ring_pair_reset)(struct adf_accel_dev *accel_dev, u32 bank_nr);
+ int (*ring_pair_drain)(struct adf_accel_dev *accel_dev, u32 bank_nr,
+ int timeout_us);
+ int (*bank_state_save)(struct adf_accel_dev *accel_dev, u32 bank_number,
+ struct bank_state *state);
+ int (*bank_state_restore)(struct adf_accel_dev *accel_dev,
+ u32 bank_number, struct bank_state *state);
void (*reset_device)(struct adf_accel_dev *accel_dev);
void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
const char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num);
u32 (*uof_get_num_objs)(void);
u32 (*uof_get_ae_mask)(struct adf_accel_dev *accel_dev, u32 obj_num);
int (*dev_config)(struct adf_accel_dev *accel_dev);
+ struct adf_hw_csr_info csr_info;
struct adf_pfvf_ops pfvf_ops;
- struct adf_hw_csr_ops csr_ops;
struct adf_dc_ops dc_ops;
const char *fw_name;
const char *fw_mmp_name;
@@ -252,7 +306,7 @@ struct adf_hw_device_data {
(((GET_HW_DATA(accel_dev)->ring_to_svc_map) >> (ADF_SRV_TYPE_BIT_LEN * (idx))) \
& ADF_SRV_TYPE_MASK)
#define GET_MAX_ACCELENGINES(accel_dev) (GET_HW_DATA(accel_dev)->num_engines)
-#define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_ops)
+#define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_info.csr_ops)
#define GET_PFVF_OPS(accel_dev) (&(accel_dev)->hw_device->pfvf_ops)
#define GET_DC_OPS(accel_dev) (&(accel_dev)->hw_device->dc_ops)
#define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c
index d1884547b5a1..d956910f3228 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c
@@ -160,9 +160,10 @@ static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank, u32 value)
WRITE_CSR_INT_FLAG(csr_base_addr, bank, value);
}

-static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank)
+static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank,
+ u32 idx, u32 value)
{
- WRITE_CSR_INT_SRCSEL(csr_base_addr, bank);
+ WRITE_CSR_INT_SRCSEL(csr_base_addr, bank, idx, value);
}

static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank,
@@ -189,8 +190,17 @@ static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank,
WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value);
}

-void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
+static u32 get_src_sel_mask(void)
{
+ return ADF_BANK_INT_SRC_SEL_MASK_X;
+}
+
+void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_info *csr_info)
+{
+ struct adf_hw_csr_ops *csr_ops = &csr_info->csr_ops;
+
+ csr_info->num_rings_per_int_srcsel = ADF_RINGS_PER_INT_SRCSEL;
+
csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr;
csr_ops->read_csr_ring_head = read_csr_ring_head;
csr_ops->write_csr_ring_head = write_csr_ring_head;
@@ -205,6 +215,7 @@ void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl;
csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col;
csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en;
+ csr_ops->get_src_sel_mask = get_src_sel_mask;
}
EXPORT_SYMBOL_GPL(adf_gen2_init_hw_csr_ops);

diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h
index e4bc07529be4..631eb2e2f334 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h
@@ -7,8 +7,10 @@
#include "adf_cfg_common.h"

/* Transport access */
+#define ADF_RINGS_PER_INT_SRCSEL BIT(3)
#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL
#define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL
+#define ADF_RING_SRV_ARB_EN_MASK GENMASK(7, 0)
#define ADF_RING_CSR_RING_CONFIG 0x000
#define ADF_RING_CSR_RING_LBASE 0x040
#define ADF_RING_CSR_RING_UBASE 0x080
@@ -25,6 +27,7 @@
#define ADF_RING_BUNDLE_SIZE 0x1000
#define ADF_GEN2_RX_RINGS_OFFSET 8
#define ADF_GEN2_TX_RINGS_MASK 0xFF
+#define ADF_RING_CSR_NEXT_INT_SRCSEL BIT(2)

#define BUILD_RING_BASE_ADDR(addr, size) \
(((addr) >> 6) & (GENMASK_ULL(63, 0) << (size)))
@@ -60,7 +63,10 @@ do { \
#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \
ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
ADF_RING_CSR_INT_FLAG, value)
-#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
+#define READ_CSR_INT_SRCSEL(csr_base_addr, bank, idx) \
+ ADF_CSR_RD(csr_base_addr, ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_INT_SRCSEL + (idx) * ADF_RING_CSR_NEXT_INT_SRCSEL)
+#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank, idx, value) \
do { \
ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \
ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0); \
@@ -155,7 +161,7 @@ u32 adf_gen2_get_num_aes(struct adf_hw_device_data *self);
void adf_gen2_enable_error_correction(struct adf_accel_dev *accel_dev);
void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable,
int num_a_regs, int num_b_regs);
-void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
+void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_info *csr_info);
void adf_gen2_get_admin_info(struct admin_info *admin_csrs_info);
void adf_gen2_get_arb_info(struct arb_info *arb_info);
void adf_gen2_enable_ints(struct adf_accel_dev *accel_dev);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
index 3148a62938fd..924d51ebd3c3 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
@@ -32,32 +32,113 @@ static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring,
WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value);
}

+static u32 read_csr_stat(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_STAT(csr_base_addr, bank);
+}
+
+static u32 read_csr_uo_stat(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_UO_STAT(csr_base_addr, bank);
+}
+
static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank)
{
return READ_CSR_E_STAT(csr_base_addr, bank);
}

+static u32 read_csr_ne_stat(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_NE_STAT(csr_base_addr, bank);
+}
+
+static u32 read_csr_nf_stat(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_NF_STAT(csr_base_addr, bank);
+}
+
+static u32 read_csr_f_stat(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_F_STAT(csr_base_addr, bank);
+}
+
+static u32 read_csr_c_stat(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_C_STAT(csr_base_addr, bank);
+}
+
+static u32 read_csr_exp_stat(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_EXP_STAT(csr_base_addr, bank);
+}
+
+static u32 read_csr_exp_int_en(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_EXP_INT_EN(csr_base_addr, bank);
+}
+
+static void write_csr_exp_int_en(void __iomem *csr_base_addr, u32 bank, u32 value)
+{
+ WRITE_CSR_EXP_INT_EN(csr_base_addr, bank, value);
+}
+
+static u32 read_csr_ring_config(void __iomem *csr_base_addr, u32 bank, u32 ring)
+{
+ return READ_CSR_RING_CONFIG(csr_base_addr, bank, ring);
+}
+
static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank, u32 ring,
u32 value)
{
WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value);
}

+static dma_addr_t read_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring)
+{
+ return READ_CSR_RING_BASE(csr_base_addr, bank, ring);
+}
+
static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring,
dma_addr_t addr)
{
WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr);
}

+static u32 read_csr_int_en(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_INT_EN(csr_base_addr, bank);
+}
+
+static void write_csr_int_en(void __iomem *csr_base_addr, u32 bank, u32 value)
+{
+ WRITE_CSR_INT_EN(csr_base_addr, bank, value);
+}
+
+static u32 read_csr_int_flag(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_INT_FLAG(csr_base_addr, bank);
+}
+
static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank,
u32 value)
{
WRITE_CSR_INT_FLAG(csr_base_addr, bank, value);
}

-static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank)
+static u32 read_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank, u32 idx)
{
- WRITE_CSR_INT_SRCSEL(csr_base_addr, bank);
+ return READ_CSR_INT_SRCSEL(csr_base_addr, bank, idx);
+}
+
+static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank,
+ u32 idx, u32 value)
+{
+ WRITE_CSR_INT_SRCSEL(csr_base_addr, bank, idx, value);
+}
+
+static u32 read_csr_int_col_en(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_INT_COL_EN(csr_base_addr, bank);
}

static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank, u32 value)
@@ -65,26 +146,56 @@ static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank, u32 valu
WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value);
}

+static u32 read_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_INT_COL_CTL(csr_base_addr, bank);
+}
+
static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank,
u32 value)
{
WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value);
}

+static u32 read_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_INT_FLAG_AND_COL(csr_base_addr, bank);
+}
+
static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank,
u32 value)
{
WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value);
}

+static u32 read_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank)
+{
+ return READ_CSR_RING_SRV_ARB_EN(csr_base_addr, bank);
+}
+
static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank,
u32 value)
{
WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value);
}

-void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
+static u32 get_src_sel_mask(void)
+{
+ return ADF_BANK_INT_SRC_SEL_MASK;
+}
+
+static u32 get_int_col_ctl_enable_mask(void)
+{
+ return ADF_RING_CSR_INT_COL_CTL_ENABLE;
+}
+
+void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_info *csr_info)
{
+ struct adf_hw_csr_ops *csr_ops = &csr_info->csr_ops;
+
+ csr_info->num_rings_per_int_srcsel = ADF_RINGS_PER_INT_SRCSEL;
+ csr_info->arb_enable_mask = ADF_RING_SRV_ARB_EN_MASK;
+
csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr;
csr_ops->read_csr_ring_head = read_csr_ring_head;
csr_ops->write_csr_ring_head = write_csr_ring_head;
@@ -94,11 +205,17 @@ void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
csr_ops->write_csr_ring_config = write_csr_ring_config;
csr_ops->write_csr_ring_base = write_csr_ring_base;
csr_ops->write_csr_int_flag = write_csr_int_flag;
+ csr_ops->read_csr_int_srcsel = read_csr_int_srcsel;
csr_ops->write_csr_int_srcsel = write_csr_int_srcsel;
+ csr_ops->read_csr_int_col_en = read_csr_int_col_en;
csr_ops->write_csr_int_col_en = write_csr_int_col_en;
+ csr_ops->read_csr_int_col_ctl = read_csr_int_col_ctl;
csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl;
csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col;
+ csr_ops->read_csr_ring_srv_arb_en = read_csr_ring_srv_arb_en;
csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en;
+ csr_ops->get_src_sel_mask = get_src_sel_mask;
+ csr_ops->get_int_col_ctl_enable_mask = get_int_col_ctl_enable_mask;
}
EXPORT_SYMBOL_GPL(adf_gen4_init_hw_csr_ops);

@@ -192,3 +309,242 @@ int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number)
return ret;
}
EXPORT_SYMBOL_GPL(adf_gen4_ring_pair_reset);
+
+static int drain_ring_pair(void __iomem *csr, u32 bank_number, int timeout_us)
+{
+ u32 status;
+ int ret;
+
+ ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETCTL(bank_number),
+ ADF_WQM_CSR_RPRESETCTL_DRAIN);
+
+ ret = read_poll_timeout(ADF_CSR_RD, status,
+ status & ADF_WQM_CSR_RPRESETSTS_STATUS,
+ ADF_RPRESET_POLL_DELAY_US, timeout_us, true, csr,
+ ADF_WQM_CSR_RPRESETSTS(bank_number));
+ if (ret)
+ return ret;
+
+ ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETSTS(bank_number),
+ ADF_WQM_CSR_RPRESETSTS_STATUS);
+
+ return 0;
+}
+
+int adf_gen4_ring_pair_drain(struct adf_accel_dev *accel_dev, u32 bank_number,
+ int timeout_us)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ struct adf_bar *etr_bar;
+ void __iomem *csr;
+ int ret;
+
+ if (bank_number >= hw_data->num_banks || timeout_us < 0)
+ return -EINVAL;
+
+ etr_bar = &GET_BARS(accel_dev)[hw_data->get_etr_bar_id(hw_data)];
+ csr = etr_bar->virt_addr;
+
+ dev_dbg(&GET_DEV(accel_dev), "ring pair drain for bank:%d\n", bank_number);
+
+ ret = drain_ring_pair(csr, bank_number, timeout_us);
+ if (ret)
+ dev_err(&GET_DEV(accel_dev), "ring pair drain failure (timeout)\n");
+ else
+ dev_dbg(&GET_DEV(accel_dev), "ring pair drained successfully\n");
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_ring_pair_drain);
+
+static void gen4_bank_state_save(void __iomem *csr, u32 bank_number,
+ struct bank_state *state, u32 num_rings)
+{
+ u32 i;
+
+ state->ringstat0 = read_csr_stat(csr, bank_number);
+ state->ringuostat = read_csr_uo_stat(csr, bank_number);
+ state->ringestat = read_csr_e_stat(csr, bank_number);
+ state->ringnestat = read_csr_ne_stat(csr, bank_number);
+ state->ringnfstat = read_csr_nf_stat(csr, bank_number);
+ state->ringfstat = read_csr_f_stat(csr, bank_number);
+ state->ringcstat0 = read_csr_c_stat(csr, bank_number);
+ state->iaintflagen = read_csr_int_en(csr, bank_number);
+ state->iaintflagreg = read_csr_int_flag(csr, bank_number);
+ state->iaintflagsrcsel0 = read_csr_int_srcsel(csr, bank_number, 0);
+ state->iaintcolen = read_csr_int_col_en(csr, bank_number);
+ state->iaintcolctl = read_csr_int_col_ctl(csr, bank_number);
+ state->iaintflagandcolen = read_csr_int_flag_and_col(csr, bank_number);
+ state->ringexpstat = read_csr_exp_stat(csr, bank_number);
+ state->ringexpintenable = read_csr_exp_int_en(csr, bank_number);
+ state->ringsrvarben = read_csr_ring_srv_arb_en(csr, bank_number);
+ state->num_rings = num_rings;
+
+ for (i = 0; i < num_rings; i++) {
+ state->rings[i].head = read_csr_ring_head(csr, bank_number, i);
+ state->rings[i].tail = read_csr_ring_tail(csr, bank_number, i);
+ state->rings[i].config = read_csr_ring_config(csr, bank_number, i);
+ state->rings[i].base = read_csr_ring_base(csr, bank_number, i);
+ }
+}
+
+#define ADF_RP_INT_SRC_SEL_F_RISE_MASK BIT(2)
+#define ADF_RP_INT_SRC_SEL_F_FALL_MASK GENMASK(2, 0)
+static int gen4_bank_state_restore(void __iomem *csr, u32 bank_number,
+ struct bank_state *state, u32 num_rings,
+ int tx_rx_gap)
+{
+ u32 val, tmp_val, i;
+
+ write_csr_ring_srv_arb_en(csr, bank_number, 0);
+
+ for (i = 0; i < num_rings; i++)
+ write_csr_ring_base(csr, bank_number, i, state->rings[i].base);
+
+ for (i = 0; i < num_rings; i++)
+ write_csr_ring_config(csr, bank_number, i, state->rings[i].config);
+
+ for (i = 0; i < num_rings / 2; i++) {
+ int tx = i * (tx_rx_gap + 1);
+ int rx = tx + tx_rx_gap;
+ u32 tx_idx = tx / ADF_RINGS_PER_INT_SRCSEL;
+ u32 rx_idx = rx / ADF_RINGS_PER_INT_SRCSEL;
+
+ write_csr_ring_head(csr, bank_number, tx, state->rings[tx].head);
+
+ write_csr_ring_tail(csr, bank_number, tx, state->rings[tx].tail);
+
+ if (state->ringestat & (BIT(tx))) {
+ val = read_csr_int_srcsel(csr, bank_number, tx_idx);
+ val |= (ADF_RP_INT_SRC_SEL_F_RISE_MASK << (8 * tx));
+ write_csr_int_srcsel(csr, bank_number, tx_idx, val);
+ write_csr_ring_head(csr, bank_number, tx, state->rings[tx].head);
+ }
+
+ write_csr_ring_tail(csr, bank_number, rx, state->rings[rx].tail);
+
+ val = read_csr_int_srcsel(csr, bank_number, rx_idx);
+ val |= (ADF_RP_INT_SRC_SEL_F_RISE_MASK << (8 * rx));
+ write_csr_int_srcsel(csr, bank_number, rx_idx, val);
+
+ write_csr_ring_head(csr, bank_number, rx, state->rings[rx].head);
+
+ val = read_csr_int_srcsel(csr, bank_number, rx_idx);
+ val |= (ADF_RP_INT_SRC_SEL_F_FALL_MASK << (8 * rx));
+ write_csr_int_srcsel(csr, bank_number, rx_idx, val);
+
+ if (state->ringfstat & BIT(rx))
+ write_csr_ring_tail(csr, bank_number, rx, state->rings[rx].tail);
+ }
+
+ write_csr_int_flag_and_col(csr, bank_number, state->iaintflagandcolen);
+ write_csr_int_en(csr, bank_number, state->iaintflagen);
+ write_csr_int_col_en(csr, bank_number, state->iaintcolen);
+ write_csr_int_srcsel(csr, bank_number, 0, state->iaintflagsrcsel0);
+ write_csr_exp_int_en(csr, bank_number, state->ringexpintenable);
+ write_csr_int_col_ctl(csr, bank_number, state->iaintcolctl);
+
+ /* Check that all ring statuses match the saved state. */
+ tmp_val = read_csr_stat(csr, bank_number);
+ val = state->ringstat0;
+ if (tmp_val != val) {
+ pr_err("Fail to restore ringstat register. Expected 0x%x, but actual is 0x%x\n",
+ tmp_val, val);
+ return -EINVAL;
+ }
+
+ tmp_val = read_csr_e_stat(csr, bank_number);
+ val = state->ringestat;
+ if (tmp_val != val) {
+ pr_err("Fail to restore ringestat register. Expected 0x%x, but actual is 0x%x\n",
+ tmp_val, val);
+ return -EINVAL;
+ }
+
+ tmp_val = read_csr_ne_stat(csr, bank_number);
+ val = state->ringnestat;
+ if (tmp_val != val) {
+ pr_err("Fail to restore ringnestat register. Expected 0x%x, but actual is 0x%x\n",
+ tmp_val, val);
+ return -EINVAL;
+ }
+
+ tmp_val = read_csr_nf_stat(csr, bank_number);
+ val = state->ringnfstat;
+ if (tmp_val != val) {
+ pr_err("Fail to restore ringnfstat register. Expected 0x%x, but actual is 0x%x\n",
+ tmp_val, val);
+ return -EINVAL;
+ }
+
+ tmp_val = read_csr_f_stat(csr, bank_number);
+ val = state->ringfstat;
+ if (tmp_val != val) {
+ pr_err("Fail to restore ringfstat register. Expected 0x%x, but actual is 0x%x\n",
+ tmp_val, val);
+ return -EINVAL;
+ }
+
+ tmp_val = read_csr_c_stat(csr, bank_number);
+ val = state->ringcstat0;
+ if (tmp_val != val) {
+ pr_err("Fail to restore ringcstat register. Expected 0x%x, but actual is 0x%x\n",
+ tmp_val, val);
+ return -EINVAL;
+ }
+
+ tmp_val = read_csr_exp_stat(csr, bank_number);
+ val = state->ringexpstat;
+ if (tmp_val && !val) {
+ pr_err("Bank was restored with exception: 0x%x\n", val);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int adf_gen4_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number,
+ struct bank_state *state)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_bar *etr_bar;
+ void __iomem *csr;
+
+ if (bank_number >= hw_data->num_banks || !state)
+ return -EINVAL;
+
+ etr_bar = &GET_BARS(accel_dev)[hw_data->get_etr_bar_id(hw_data)];
+ csr = etr_bar->virt_addr;
+
+ dev_dbg(&GET_DEV(accel_dev), "Saving state of bank: %d\n", bank_number);
+
+ gen4_bank_state_save(csr, bank_number, state, hw_data->num_rings_per_bank);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_bank_state_save);
+
+int adf_gen4_bank_state_restore(struct adf_accel_dev *accel_dev,
+ u32 bank_number, struct bank_state *state)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_bar *etr_bar;
+ void __iomem *csr;
+ int ret;
+
+ if (bank_number >= hw_data->num_banks)
+ return -EINVAL;
+
+ etr_bar = &GET_BARS(accel_dev)[hw_data->get_etr_bar_id(hw_data)];
+ csr = etr_bar->virt_addr;
+
+ dev_dbg(&GET_DEV(accel_dev), "Restoring state of bank: %d\n", bank_number);
+
+ ret = gen4_bank_state_restore(csr, bank_number, state,
+ hw_data->num_rings_per_bank, hw_data->tx_rx_gap);
+ if (ret)
+ dev_err(&GET_DEV(accel_dev), "Unable to restore state of bank %d\n", bank_number);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_bank_state_restore);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
index 4fb4b3df5a18..d2a4192aaa6d 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
@@ -3,23 +3,36 @@
#ifndef ADF_GEN4_HW_CSR_DATA_H_
#define ADF_GEN4_HW_CSR_DATA_H_

+#include <linux/bitfield.h>
#include "adf_accel_devices.h"
#include "adf_cfg_common.h"

/* Transport access */
+#define ADF_RINGS_PER_INT_SRCSEL BIT(1)
#define ADF_BANK_INT_SRC_SEL_MASK 0x44UL
+#define ADF_RING_SRV_ARB_EN_MASK BIT(0)
#define ADF_RING_CSR_RING_CONFIG 0x1000
#define ADF_RING_CSR_RING_LBASE 0x1040
#define ADF_RING_CSR_RING_UBASE 0x1080
#define ADF_RING_CSR_RING_HEAD 0x0C0
#define ADF_RING_CSR_RING_TAIL 0x100
+#define ADF_RING_CSR_STAT 0x140
+#define ADF_RING_CSR_UO_STAT 0x148
#define ADF_RING_CSR_E_STAT 0x14C
+#define ADF_RING_CSR_NE_STAT 0x150
+#define ADF_RING_CSR_NF_STAT 0x154
+#define ADF_RING_CSR_F_STAT 0x158
+#define ADF_RING_CSR_C_STAT 0x15C
+#define ADF_RING_CSR_INT_FLAG_EN 0x16C
#define ADF_RING_CSR_INT_FLAG 0x170
#define ADF_RING_CSR_INT_SRCSEL 0x174
+#define ADF_RING_CSR_INT_COL_EN 0x17C
#define ADF_RING_CSR_INT_COL_CTL 0x180
#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184
+#define ADF_RING_CSR_EXP_STAT 0x188
+#define ADF_RING_CSR_EXP_INT_EN 0x18C
#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000
-#define ADF_RING_CSR_INT_COL_EN 0x17C
+#define ADF_RING_CSR_NEXT_INT_SRCSEL BIT(2)
#define ADF_RING_CSR_ADDR_OFFSET 0x100000
#define ADF_RING_BUNDLE_SIZE 0x2000

@@ -33,9 +46,49 @@
ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
ADF_RING_BUNDLE_SIZE * (bank) + \
ADF_RING_CSR_RING_TAIL + ((ring) << 2))
+#define READ_CSR_STAT(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_STAT)
+#define READ_CSR_UO_STAT(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_UO_STAT)
#define READ_CSR_E_STAT(csr_base_addr, bank) \
ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_E_STAT)
+#define READ_CSR_NE_STAT(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_NE_STAT)
+#define READ_CSR_NF_STAT(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_NF_STAT)
+#define READ_CSR_F_STAT(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_F_STAT)
+#define READ_CSR_C_STAT(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_C_STAT)
+#define READ_CSR_EXP_STAT(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_EXP_STAT)
+#define READ_CSR_EXP_INT_EN(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_EXP_INT_EN)
+#define WRITE_CSR_EXP_INT_EN(csr_base_addr, bank, value) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_EXP_INT_EN, value)
+#define READ_CSR_RING_CONFIG(csr_base_addr, bank, ring) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_RING_CONFIG + ((ring) << 2))
#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
ADF_RING_BUNDLE_SIZE * (bank) + \
@@ -57,6 +110,32 @@ do { \
ADF_RING_CSR_RING_UBASE + ((_ring) << 2), u_base); \
} while (0)

+static inline u64 read_base(void __iomem *csr_base_addr,
+ u32 bank,
+ u32 ring)
+{
+ u32 l_base, u_base;
+ u64 addr;
+
+ /*
+ * Use a special IO wrapper for the ring base as LBASE and UBASE
+ * are not physically contiguous
+ */
+ l_base = ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) +
+ ADF_RING_CSR_RING_LBASE + (ring << 2));
+ u_base = ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) +
+ ADF_RING_CSR_RING_UBASE + (ring << 2));
+
+ addr = FIELD_GET(GENMASK_ULL(31, 0), (u64)l_base);
+ addr |= FIELD_GET(GENMASK_ULL(31, 0), (u64)u_base) << 32;
+
+ return addr;
+}
+
+#define READ_CSR_RING_BASE(csr_base_addr, bank, ring) \
+ read_base(((csr_base_addr) + \
+ ADF_RING_CSR_ADDR_OFFSET), (bank), (ring))
+
#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
ADF_RING_BUNDLE_SIZE * (bank) + \
@@ -65,23 +144,52 @@ do { \
ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
ADF_RING_BUNDLE_SIZE * (bank) + \
ADF_RING_CSR_RING_TAIL + ((ring) << 2), value)
+#define READ_CSR_INT_EN(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_INT_FLAG_EN)
+#define WRITE_CSR_INT_EN(csr_base_addr, bank, value) \
+ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_INT_FLAG_EN, (value))
+#define READ_CSR_INT_FLAG(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_INT_FLAG)
#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \
ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
ADF_RING_BUNDLE_SIZE * (bank) + \
ADF_RING_CSR_INT_FLAG, (value))
-#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
+#define READ_CSR_INT_SRCSEL(csr_base_addr, bank, idx) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_INT_SRCSEL + (idx) * ADF_RING_CSR_NEXT_INT_SRCSEL)
+#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank, idx, value) \
ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
- ADF_RING_BUNDLE_SIZE * (bank) + \
- ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK)
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_INT_SRCSEL + ((idx) * ADF_RING_CSR_NEXT_INT_SRCSEL), \
+ (value))
+#define READ_CSR_INT_COL_EN(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_INT_COL_EN)
#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \
ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
ADF_RING_BUNDLE_SIZE * (bank) + \
ADF_RING_CSR_INT_COL_EN, (value))
+#define READ_CSR_INT_COL_CTL(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_INT_COL_CTL)
#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \
ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
ADF_RING_BUNDLE_SIZE * (bank) + \
ADF_RING_CSR_INT_COL_CTL, \
ADF_RING_CSR_INT_COL_CTL_ENABLE | (value))
+#define READ_CSR_INT_FLAG_AND_COL(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_INT_FLAG_AND_COL)
#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
ADF_RING_BUNDLE_SIZE * (bank) + \
@@ -90,6 +198,10 @@ do { \
/* Arbiter configuration */
#define ADF_RING_CSR_RING_SRV_ARB_EN 0x19C

+#define READ_CSR_RING_SRV_ARB_EN(csr_base_addr, bank) \
+ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
+ ADF_RING_BUNDLE_SIZE * (bank) + \
+ ADF_RING_CSR_RING_SRV_ARB_EN)
#define WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value) \
ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \
ADF_RING_BUNDLE_SIZE * (bank) + \
@@ -122,6 +234,9 @@ do { \
#define ADF_WQM_CSR_RPRESETSTS_STATUS BIT(0)
#define ADF_WQM_CSR_RPRESETSTS(bank) (ADF_WQM_CSR_RPRESETCTL(bank) + 4)

+/* Ring drain */
+#define ADF_WQM_CSR_RPRESETCTL_DRAIN BIT(2)
+
/* Error source registers */
#define ADF_GEN4_ERRSOU0 (0x41A200)
#define ADF_GEN4_ERRSOU1 (0x41A204)
@@ -137,6 +252,12 @@ do { \
#define ADF_GEN4_VFLNOTIFY BIT(7)

void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
-void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
+void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_info *csr_info);
int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number);
+int adf_gen4_ring_pair_drain(struct adf_accel_dev *accel_dev, u32 bank_number,
+ int timeout_us);
+int adf_gen4_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number,
+ struct bank_state *state);
+int adf_gen4_bank_state_restore(struct adf_accel_dev *accel_dev,
+ u32 bank_number, struct bank_state *state);
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_transport.c b/drivers/crypto/intel/qat/qat_common/adf_transport.c
index 630d0483c4e0..ce71ac17e617 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_transport.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_transport.c
@@ -387,10 +387,12 @@ static int adf_init_bank(struct adf_accel_dev *accel_dev,
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
u8 num_rings_per_bank = hw_data->num_rings_per_bank;
- struct adf_hw_csr_ops *csr_ops = &hw_data->csr_ops;
+ struct adf_hw_csr_info *csr_info = &hw_data->csr_info;
+ struct adf_hw_csr_ops *csr_ops = &csr_info->csr_ops;
u32 irq_mask = BIT(num_rings_per_bank) - 1;
struct adf_etr_ring_data *ring;
struct adf_etr_ring_data *tx_ring;
+ u32 num_rings_per_int_srcsel;
u32 i, coalesc_enabled = 0;
unsigned long ring_mask;
int size;
@@ -447,7 +449,12 @@ static int adf_init_bank(struct adf_accel_dev *accel_dev,
}

csr_ops->write_csr_int_flag(csr_addr, bank_num, irq_mask);
- csr_ops->write_csr_int_srcsel(csr_addr, bank_num);
+
+ num_rings_per_int_srcsel = csr_info->num_rings_per_int_srcsel;
+
+ for (i = 0; i < num_rings_per_bank / num_rings_per_int_srcsel; i++)
+ csr_ops->write_csr_int_srcsel(csr_addr, bank_num, i,
+ csr_ops->get_src_sel_mask());

return 0;
err:
diff --git a/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
index b05c3957a160..eab42026df8e 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
@@ -131,7 +131,7 @@ static irqreturn_t adf_isr(int irq, void *privdata)
{
struct adf_accel_dev *accel_dev = privdata;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- struct adf_hw_csr_ops *csr_ops = &hw_data->csr_ops;
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
struct adf_bar *pmisc =
&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
void __iomem *pmisc_bar_addr = pmisc->virt_addr;
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
index 1ebe0b351fae..2a01985ca7f5 100644
--- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -242,7 +242,7 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
hw_data->pfvf_ops.enable_vf2pf_interrupts = enable_vf2pf_interrupts;
hw_data->pfvf_ops.disable_all_vf2pf_interrupts = disable_all_vf2pf_interrupts;
hw_data->pfvf_ops.disable_pending_vf2pf_interrupts = disable_pending_vf2pf_interrupts;
- adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
+ adf_gen2_init_hw_csr_ops(&hw_data->csr_info);
adf_gen2_init_dc_ops(&hw_data->dc_ops);
}

diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
index 70e56cc16ece..ac668c038fc6 100644
--- a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
@@ -91,7 +91,7 @@ void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
hw_data->dev_config = adf_gen2_dev_config;
adf_devmgr_update_class_index(hw_data);
adf_gen2_init_vf_pfvf_ops(&hw_data->pfvf_ops);
- adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
+ adf_gen2_init_hw_csr_ops(&hw_data->csr_info);
adf_gen2_init_dc_ops(&hw_data->dc_ops);
}

--
2.18.2


2023-06-30 13:40:52

by Zeng, Xin

Subject: [RFC 4/5] crypto: qat - implement interface for live migration

Add the logic that implements the interface for live migration of QAT
GEN4 Virtual Functions (VFs).
This introduces a migration data manager which is used to hold the
device state during migration.

The VF state is organized in a section hierarchy, as reported below:
 preamble | general state section | leaf state
          | MISC bar state section | leaf state
          | ETR bar state section  | bank0 state section | leaf state
          |                        | bank1 state section | leaf state
          |                        | bank2 state section | leaf state
          |                        | bank3 state section | leaf state
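
Conceptually, each node in the hierarchy is a tagged, sized section, so
the restore side can walk the tree without knowing the leaf sizes in
advance. The layout below is an illustrative sketch, not the actual
adf_mstate_mgr definitions:

struct example_mstate_section {
	char id[8];	/* section tag, e.g. one of the bank sections */
	u32 size;	/* bytes of payload that follow */
	u8 state[];	/* leaf state or nested subsections */
};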

Co-developed-by: Siming Wan <[email protected]>
Signed-off-by: Siming Wan <[email protected]>
Signed-off-by: Xin Zeng <[email protected]>
---
.../intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 4 +-
.../intel/qat/qat_4xxx/adf_4xxx_hw_data.h | 3 +-
drivers/crypto/intel/qat/qat_common/Makefile | 2 +
.../intel/qat/qat_common/adf_accel_devices.h | 5 +
.../intel/qat/qat_common/adf_gen4_hw_data.c | 53 ++
.../intel/qat/qat_common/adf_gen4_hw_data.h | 21 +
.../intel/qat/qat_common/adf_gen4_pfvf.c | 7 +-
.../intel/qat/qat_common/adf_gen4_pfvf.h | 7 +
.../intel/qat/qat_common/adf_gen4_vf_mig.c | 609 ++++++++++++++++++
.../intel/qat/qat_common/adf_mstate_mgr.c | 267 ++++++++
.../intel/qat/qat_common/adf_mstate_mgr.h | 99 +++
11 files changed, 1070 insertions(+), 7 deletions(-)
create mode 100644 drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c
create mode 100644 drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.c
create mode 100644 drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.h

diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
index 22fe4e6834c1..e859350bdfb0 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -455,7 +455,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->dev_class = &adf_4xxx_class;
hw_data->instance_id = adf_4xxx_class.instances++;
hw_data->num_banks = ADF_4XXX_ETR_MAX_BANKS;
- hw_data->num_banks_per_vf = ADF_4XXX_NUM_BANKS_PER_VF;
+ hw_data->num_banks_per_vf = ADF_GEN4_NUM_BANKS_PER_VF;
hw_data->num_rings_per_bank = ADF_4XXX_NUM_RINGS_PER_BANK;
hw_data->num_accel = ADF_4XXX_MAX_ACCELERATORS;
hw_data->num_engines = ADF_4XXX_MAX_ACCELENGINES;
@@ -487,6 +487,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->init_device = adf_init_device;
hw_data->reset_device = adf_reset_flr;
hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK;
+ hw_data->clock_frequency = ADF_4XXX_AE_FREQ;
switch (dev_id) {
case ADF_402XX_PCI_DEVICE_ID:
hw_data->fw_name = ADF_402XX_FW;
@@ -515,6 +516,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
adf_gen4_init_hw_csr_ops(&hw_data->csr_info);
adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops);
adf_gen4_init_dc_ops(&hw_data->dc_ops);
+ adf_gen4_init_vf_mig_ops(&hw_data->vfmig_ops);
}

void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data)
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h
index e5b314d2b60e..1f96c7f8ca6f 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h
@@ -3,6 +3,7 @@
#ifndef ADF_4XXX_HW_DATA_H_
#define ADF_4XXX_HW_DATA_H_

+#include <linux/units.h>
#include <adf_accel_devices.h>

/* PCIe configuration space */
@@ -26,6 +27,7 @@
#define ADF_4XXX_ACCELERATORS_MASK (0x1)
#define ADF_4XXX_ACCELENGINES_MASK (0x1FF)
#define ADF_4XXX_ADMIN_AE_MASK (0x100)
+#define ADF_4XXX_AE_FREQ (1 * HZ_PER_GHZ)

#define ADF_4XXX_ETR_MAX_BANKS 64

@@ -37,7 +39,6 @@

/* Bank and ring configuration */
#define ADF_4XXX_NUM_RINGS_PER_BANK 2
-#define ADF_4XXX_NUM_BANKS_PER_VF 4

/* Arbiter configuration */
#define ADF_4XXX_ARB_CONFIG (BIT(31) | BIT(6) | BIT(0))
diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile
index 3855f2fa5733..e0de2d0901f9 100644
--- a/drivers/crypto/intel/qat/qat_common/Makefile
+++ b/drivers/crypto/intel/qat/qat_common/Makefile
@@ -14,9 +14,11 @@ intel_qat-objs := adf_cfg.o \
adf_gen2_hw_data.o \
adf_gen2_config.o \
adf_gen4_hw_data.o \
+ adf_gen4_vf_mig.o \
adf_gen4_pm.o \
adf_gen2_dc.o \
adf_gen4_dc.o \
+ adf_mstate_mgr.o \
qat_crypto.o \
qat_compression.o \
qat_comp_algs.o \
diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
index adda2cac6af1..b21a38e776a7 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
@@ -339,6 +339,11 @@ struct adf_accel_vf_info {
u32 vf_nr;
bool init;
u8 vf_compat_ver;
+ /*
+ * Private area used for device migration.
+ * Memory allocation and free is managed by migration driver.
+ */
+ void *mig_priv;
};

struct adf_dc_data {
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
index 924d51ebd3c3..8b4d17d8b178 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2020 Intel Corporation */
#include <linux/iopoll.h>
+#include <asm/div64.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_gen4_hw_data.h"
@@ -310,6 +311,58 @@ int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number)
}
EXPORT_SYMBOL_GPL(adf_gen4_ring_pair_reset);

+int adf_gen4_rp_quiesce_coal_timer(struct adf_accel_dev *accel_dev,
+ u32 bank_idx, int timeout_ms)
+{
+ u32 int_col_ctl, int_col_mask, int_col_en;
+ struct adf_hw_device_data *hw_data;
+ struct adf_bar *etr_bar, *misc_bar;
+ void __iomem *csr_etr, *csr_misc;
+ struct adf_hw_csr_ops *csr_ops;
+ u32 e_stat, intsrc;
+ u64 wait_us;
+ int ret;
+
+ if (timeout_ms < 0)
+ return -EINVAL;
+
+ hw_data = GET_HW_DATA(accel_dev);
+ csr_ops = GET_CSR_OPS(accel_dev);
+ etr_bar = &GET_BARS(accel_dev)[hw_data->get_etr_bar_id(hw_data)];
+ misc_bar = &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+ csr_etr = etr_bar->virt_addr;
+ csr_misc = misc_bar->virt_addr;
+
+ int_col_ctl = csr_ops->read_csr_int_col_ctl(csr_etr, bank_idx);
+ int_col_mask = csr_ops->get_int_col_ctl_enable_mask();
+ if (!(int_col_ctl & int_col_mask))
+ return 0;
+
+ int_col_en = csr_ops->read_csr_int_col_en(csr_etr, bank_idx);
+ int_col_en &= BIT(ADF_WQM_CSR_RP_IDX_RX);
+ e_stat = csr_ops->read_csr_e_stat(csr_etr, bank_idx);
+ if (!(~e_stat & int_col_en))
+ return 0;
+
+ wait_us = 2 * ((int_col_ctl & ~int_col_mask) << 8) * USEC_PER_SEC;
+ do_div(wait_us, hw_data->clock_frequency);
+ wait_us = min(wait_us, (u64)timeout_ms * USEC_PER_MSEC);
+ dev_dbg(&GET_DEV(accel_dev),
+ "wait for bank %d coalesced timer expiration in %llu us. (max=%u ms estat=0x%x intcolen=0x%x)\n",
+ bank_idx, wait_us, timeout_ms, e_stat, int_col_en);
+
+ ret = read_poll_timeout(ADF_CSR_RD, intsrc, intsrc,
+ ADF_COALESCED_POLL_DELAY_US, wait_us, true,
+ csr_misc, ADF_WQM_CSR_RPINTSTS(bank_idx));
+ if (ret)
+ dev_warn(&GET_DEV(accel_dev),
+ "waits(%llu us) for bank %d coalesced timer ran out\n",
+ wait_us, bank_idx);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_rp_quiesce_coal_timer);
+
static int drain_ring_pair(void __iomem *csr, u32 bank_number, int timeout_us)
{
u32 status;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
index d2a4192aaa6d..29774841af39 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
@@ -7,6 +7,9 @@
#include "adf_accel_devices.h"
#include "adf_cfg_common.h"

+/* Bank configuration */
+#define ADF_GEN4_NUM_BANKS_PER_VF 4
+
/* Transport access */
#define ADF_RINGS_PER_INT_SRCSEL BIT(1)
#define ADF_BANK_INT_SRC_SEL_MASK 0x44UL
@@ -237,6 +240,13 @@ static inline u64 read_base(void __iomem *csr_base_addr,
/* Ring drain */
#define ADF_WQM_CSR_RPRESETCTL_DRAIN BIT(2)

+/* Ring interrupt */
+#define ADF_COALESCED_POLL_TIMEOUT_US (1 * USEC_PER_SEC)
+#define ADF_COALESCED_POLL_DELAY_US 1000
+#define ADF_WQM_CSR_RPINTSTS(bank) (0x200000 + ((bank) << 12))
+
+#define ADF_WQM_CSR_RP_IDX_RX 1
+
/* Error source registers */
#define ADF_GEN4_ERRSOU0 (0x41A200)
#define ADF_GEN4_ERRSOU1 (0x41A204)
@@ -251,13 +261,24 @@ static inline u64 read_base(void __iomem *csr_base_addr,

#define ADF_GEN4_VFLNOTIFY BIT(7)

+/* Number of heartbeat counter pairs */
+#define ADF_NUM_HB_CNT_PER_AE ADF_NUM_THREADS_PER_AE
+
+struct adf_gen4_vfmig {
+ u32 ringsrvarben[ADF_GEN4_NUM_BANKS_PER_VF];
+ void *mstate_mgr;
+};
+
void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_info *csr_info);
int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number);
int adf_gen4_ring_pair_drain(struct adf_accel_dev *accel_dev, u32 bank_number,
int timeout_us);
+int adf_gen4_rp_quiesce_coal_timer(struct adf_accel_dev *accel_dev,
+ u32 bank_idx, int timeout_ms);
int adf_gen4_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number,
struct bank_state *state);
int adf_gen4_bank_state_restore(struct adf_accel_dev *accel_dev,
u32 bank_number, struct bank_state *state);
+void adf_gen4_init_vf_mig_ops(struct adf_vfmig_ops *vfmig_ops);
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c
index 8e8efe93f3ee..fe202ab3bc9d 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c
@@ -9,9 +9,6 @@
#include "adf_pfvf_pf_proto.h"
#include "adf_pfvf_utils.h"

-#define ADF_4XXX_PF2VM_OFFSET(i) (0x40B010 + ((i) * 0x20))
-#define ADF_4XXX_VM2PF_OFFSET(i) (0x40B014 + ((i) * 0x20))
-
/* VF2PF interrupt source registers */
#define ADF_4XXX_VM2PF_SOU 0x41A180
#define ADF_4XXX_VM2PF_MSK 0x41A1C0
@@ -29,12 +26,12 @@ static const struct pfvf_csr_format csr_gen4_fmt = {

static u32 adf_gen4_pf_get_pf2vf_offset(u32 i)
{
- return ADF_4XXX_PF2VM_OFFSET(i);
+ return ADF_GEN4_PF2VM_OFFSET(i);
}

static u32 adf_gen4_pf_get_vf2pf_offset(u32 i)
{
- return ADF_4XXX_VM2PF_OFFSET(i);
+ return ADF_GEN4_VM2PF_OFFSET(i);
}

static void adf_gen4_enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h
index 17d1b774d4a8..38edf02dbf8d 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.h
@@ -5,6 +5,13 @@

#include "adf_accel_devices.h"

+#define ADF_GEN4_PF2VM_OFFSET(i) (0x40B010 + (i) * 0x20)
+#define ADF_GEN4_VM2PF_OFFSET(i) (0x40B014 + (i) * 0x20)
+#define ADF_GEN4_VINTMSKPF2VM_OFFSET(i) (0x40B00C + (i) * 0x20)
+#define ADF_GEN4_VINTSOUPF2VM_OFFSET(i) (0x40B008 + (i) * 0x20)
+#define ADF_GEN4_VINTMSK_OFFSET(i) (0x40B004 + (i) * 0x20)
+#define ADF_GEN4_VINTSOU_OFFSET(i) (0x40B000 + (i) * 0x20)
+
#ifdef CONFIG_PCI_IOV
void adf_gen4_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops);
#else
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c
new file mode 100644
index 000000000000..48ac192d3b53
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c
@@ -0,0 +1,609 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+#include <linux/dev_printk.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <asm/errno.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_gen4_hw_data.h"
+#include "adf_gen4_pfvf.h"
+#include "adf_mstate_mgr.h"
+
+static int adf_gen4_vfmig_init_device(struct adf_accel_dev *accel_dev, u32 vf_nr)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+ struct adf_gen4_vfmig *vfmig;
+
+ vfmig = kzalloc(sizeof(*vfmig), GFP_KERNEL);
+ if (!vfmig)
+ return -ENOMEM;
+
+ vfmig->mstate_mgr = adf_mstate_mgr_new(NULL, 0);
+ if (!vfmig->mstate_mgr)
+ return -ENOMEM;
+
+ vf_info->mig_priv = vfmig;
+
+ return 0;
+}
+
+static void adf_gen4_vfmig_shutdown_device(struct adf_accel_dev *accel_dev, u32 vf_nr)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+ struct adf_gen4_vfmig *vfmig;
+
+ if (vf_info->mig_priv) {
+ vfmig = vf_info->mig_priv;
+ adf_mstate_mgr_destroy(vfmig->mstate_mgr);
+ kfree(vfmig);
+ vf_info->mig_priv = NULL;
+ }
+}
+
+static int adf_gen4_vfmig_suspend_device(struct adf_accel_dev *accel_dev, u32 vf_nr)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
+ int ret, i;
+
+ /* Drain all inflight jobs */
+ for (i = 0; i < hw_data->num_banks_per_vf; i++) {
+ struct adf_hw_csr_ops *csr_ops = &hw_data->csr_info.csr_ops;
+ u32 etr_bar_id = hw_data->get_etr_bar_id(hw_data);
+ void __iomem *csr = (&GET_BARS(accel_dev)[etr_bar_id])->virt_addr;
+ u32 pf_bank_number = i + vf_nr * hw_data->num_banks_per_vf;
+ u32 arben;
+
+ arben = csr_ops->read_csr_ring_srv_arb_en(csr, pf_bank_number) &
+ hw_data->csr_info.arb_enable_mask;
+ if (arben)
+ csr_ops->write_csr_ring_srv_arb_en(csr, pf_bank_number, 0);
+
+ vfmig->ringsrvarben[i] = arben;
+
+ ret = hw_data->ring_pair_drain(accel_dev, pf_bank_number,
+ ADF_RPRESET_POLL_TIMEOUT_US);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev), "Ring pair drain for VF%d failure\n",
+ vf_nr);
+ return ret;
+ }
+
+ adf_gen4_rp_quiesce_coal_timer(accel_dev, pf_bank_number,
+ ADF_COALESCED_POLL_TIMEOUT_US);
+ }
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_resume_device(struct adf_accel_dev *accel_dev, u32 vf_nr)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
+ int i;
+
+ /* Restore ringsrvarben to resume device */
+ for (i = 0; i < hw_data->num_banks_per_vf; i++) {
+ u32 etr_bar_id = hw_data->get_etr_bar_id(hw_data);
+ void __iomem *csr = (&GET_BARS(accel_dev)[etr_bar_id])->virt_addr;
+ struct adf_hw_csr_ops *csr_ops = &hw_data->csr_info.csr_ops;
+ u32 pf_bank_number = i + vf_nr * hw_data->num_banks_per_vf;
+
+ csr_ops->write_csr_ring_srv_arb_en(csr, pf_bank_number,
+ vfmig->ringsrvarben[i]);
+ }
+
+ return 0;
+}
+
+struct adf_vf_bank_info {
+ struct adf_accel_dev *accel_dev;
+ u32 vf_nr;
+ u32 bank_nr;
+};
+
+static inline int adf_mstate_cap_check_size(u32 src_size, u32 dst_size, u32 max_size)
+{
+ if (src_size > max_size || dst_size > max_size)
+ return -EINVAL;
+
+ if (src_size != dst_size) {
+ /*
+ * If the target capability mask is longer than the source one,
+ * it implicitly means the target mask can represent all the
+ * capabilities the source mask represents. Allow this, but a
+ * further check on the contents is still needed.
+ */
+ pr_warn("Mismatched state size: %u vs. %u\n", src_size, dst_size);
+ if (src_size > dst_size)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * adf_mstate_capmask_compare() - compare QAT device capability mask
+ * @sinfo: Pointer to source capability info
+ * @dinfo: Pointer to target capability info
+ *
+ * This function compares the capability mask between the source VF and target VF.
+ *
+ * Return: 0 if target capability mask is identical to source capability mask,
+ * 1 if target mask can represent all the capabilities represented by source mask,
+ * -1 if target mask can't represent all the capabilities represented by source
+ * mask.
+ */
+static int adf_mstate_capmask_compare(struct adf_mstate_vreginfo *sinfo,
+ struct adf_mstate_vreginfo *dinfo)
+{
+ u64 src = 0, dst = 0;
+
+ if (adf_mstate_cap_check_size(sinfo->size, dinfo->size, sizeof(u64)) < 0) {
+ pr_err("Mismatched length of cap %u %u %lu\n",
+ sinfo->size, dinfo->size, sizeof(u64));
+ return -1;
+ }
+ memcpy(&src, sinfo->addr, sinfo->size);
+ memcpy(&dst, dinfo->addr, dinfo->size);
+ pr_debug("Check cap compatibility of cap %llu %llu\n", src, dst);
+
+ if (src == dst)
+ return 0;
+ if ((src | dst) == dst)
+ return 1;
+ return -1;
+}
+
+static int adf_mstate_capmask_superset(void *sub_mgr, u8 *buf, u32 size, void *opa)
+{
+ struct adf_mstate_vreginfo sinfo = {buf, size};
+
+ if (adf_mstate_capmask_compare(&sinfo, opa) >= 0)
+ return 0;
+ return -EINVAL;
+}
+
+static int adf_mstate_capmask_equal(void *sub_mgr, u8 *buf, u32 size, void *opa)
+{
+ struct adf_mstate_vreginfo sinfo = {buf, size};
+
+ if (adf_mstate_capmask_compare(&sinfo, opa) == 0)
+ return 0;
+ return -EINVAL;
+}
+
+static int adf_gen4_vfmig_load_etr_regs(void *subs, u8 *state, u32 size, void *opa)
+{
+ struct adf_vf_bank_info *vf_bank_info = opa;
+ struct adf_accel_dev *accel_dev = vf_bank_info->accel_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ u32 pf_bank_nr;
+ int ret;
+
+ pf_bank_nr = vf_bank_info->bank_nr + vf_bank_info->vf_nr * hw_data->num_banks_per_vf;
+ ret = hw_data->bank_state_restore(accel_dev, pf_bank_nr, (struct bank_state *)state);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev), "Failed to load regs for vf%d bank%d\n",
+ vf_bank_info->vf_nr, vf_bank_info->bank_nr);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_load_etr_bank(struct adf_accel_dev *accel_dev, u32 vf_nr,
+ u32 bank_nr, void *mstate_mgr)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+ struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
+ struct adf_mstate_mgr sub_sects_mgr;
+ void *subsec, *l2_subsec;
+ struct adf_mstate_vreginfo info;
+ struct adf_vf_bank_info vf_bank_info = {accel_dev, vf_nr, bank_nr};
+ char bank_ids[8];
+
+ snprintf(bank_ids, sizeof(bank_ids), ADF_MSTATE_BANK_IDX_IDS "%d", bank_nr);
+ subsec = adf_mstate_sect_lookup(mstate_mgr, bank_ids, NULL, NULL);
+ if (!subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to lookup sec %s for vf%d bank%d\n",
+ bank_ids, vf_nr, bank_nr);
+ return -EINVAL;
+ }
+
+ adf_mstate_mgr_init_by_psect(&sub_sects_mgr, subsec);
+
+ info.addr = &vfmig->ringsrvarben[bank_nr];
+ info.size = sizeof(vfmig->ringsrvarben[bank_nr]);
+ l2_subsec = adf_mstate_sect_lookup(&sub_sects_mgr, ADF_MSTATE_ARBITER_IDS, NULL, &info);
+ if (!l2_subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to lookupd sec %s for vf%d bank%d\n",
+ ADF_MSTATE_ARBITER_IDS, vf_nr, bank_nr);
+ return -EINVAL;
+ }
+
+ l2_subsec = adf_mstate_sect_lookup(&sub_sects_mgr, ADF_MSTATE_ETR_REGS_IDS,
+ adf_gen4_vfmig_load_etr_regs, &vf_bank_info);
+ if (!l2_subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to add sec %s for vf%d bank%d\n",
+ ADF_MSTATE_ETR_REGS_IDS, vf_nr, bank_nr);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_load_etr(struct adf_accel_dev *accel_dev, u32 vf_nr)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
+ int ret, i;
+ void *mstate_mgr = vfmig->mstate_mgr;
+ struct adf_mstate_mgr sub_sects_mgr;
+ void *subsec;
+
+ subsec = adf_mstate_sect_lookup(mstate_mgr, ADF_MSTATE_ETRB_IDS, NULL, NULL);
+ if (!subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n", ADF_MSTATE_ETRB_IDS);
+ return -EINVAL;
+ }
+
+ adf_mstate_mgr_init_by_psect(&sub_sects_mgr, subsec);
+ for (i = 0; i < hw_data->num_banks_per_vf; i++) {
+ ret = adf_gen4_vfmig_load_etr_bank(accel_dev, vf_nr, i, &sub_sects_mgr);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_load_misc(struct adf_accel_dev *accel_dev, u32 vf_nr)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
+ u32 misc_bar_id = hw_data->get_misc_bar_id(hw_data);
+ void __iomem *csr = (&GET_BARS(accel_dev)[misc_bar_id])->virt_addr;
+ int i;
+ void *mstate_mgr = vfmig->mstate_mgr;
+ struct adf_mstate_mgr sub_sects_mgr;
+ void *subsec, *l2_subsec;
+ struct {
+ char *id;
+ u64 ofs;
+ } misc_states[] = {
+ {ADF_MSTATE_VINTMSK_IDS, ADF_GEN4_VINTMSK_OFFSET(vf_nr)},
+ {ADF_MSTATE_VINTMSK_PF2VM_IDS, ADF_GEN4_VINTMSKPF2VM_OFFSET(vf_nr)},
+ {ADF_MSTATE_PF2VM_IDS, ADF_GEN4_PF2VM_OFFSET(vf_nr)},
+ {ADF_MSTATE_VM2PF_IDS, ADF_GEN4_VM2PF_OFFSET(vf_nr)},
+ };
+
+ subsec = adf_mstate_sect_lookup(mstate_mgr, ADF_MSTATE_MISCB_IDS, NULL, NULL);
+ if (!subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n", ADF_MSTATE_MISCB_IDS);
+ return -EINVAL;
+ }
+
+ adf_mstate_mgr_init_by_psect(&sub_sects_mgr, subsec);
+ for (i = 0; i < ARRAY_SIZE(misc_states); i++) {
+ struct adf_mstate_vreginfo info;
+ u32 regv;
+
+ info.addr = &regv;
+ info.size = sizeof(regv);
+ l2_subsec = adf_mstate_sect_lookup(&sub_sects_mgr, misc_states[i].id,
+ NULL, &info);
+ if (!l2_subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n", misc_states[i].id);
+ return -EINVAL;
+ }
+ ADF_CSR_WR(csr, misc_states[i].ofs, regv);
+ }
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_load_generic(struct adf_accel_dev *accel_dev, u32 vf_nr)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
+ int i;
+ void *mstate_mgr = vfmig->mstate_mgr;
+ struct adf_mstate_mgr sub_sects_mgr;
+ void *subsec, *l2_subsec;
+ struct {
+ char *id;
+ int (*action)(void *sub_mgr, u8 *buf, u32 size, void *opa);
+ struct adf_mstate_vreginfo info;
+ } gen_states[] = {
+ {ADF_MSTATE_GEN_CAP_IDS, adf_mstate_capmask_superset,
+ {&hw_data->accel_capabilities_mask, sizeof(hw_data->accel_capabilities_mask)}},
+ {ADF_MSTATE_GEN_SVCMAP_IDS, adf_mstate_capmask_equal,
+ {&hw_data->ring_to_svc_map, sizeof(hw_data->ring_to_svc_map)}},
+ {ADF_MSTATE_GEN_EXTDC_IDS, adf_mstate_capmask_superset,
+ {&hw_data->extended_dc_capabilities, sizeof(hw_data->extended_dc_capabilities)}},
+ };
+
+ subsec = adf_mstate_sect_lookup(mstate_mgr, ADF_MSTATE_GEN_IDS, NULL, NULL);
+ if (!subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n", ADF_MSTATE_GEN_IDS);
+ return -EINVAL;
+ }
+
+ adf_mstate_mgr_init_by_psect(&sub_sects_mgr, subsec);
+ for (i = 0; i < ARRAY_SIZE(gen_states); i++) {
+ l2_subsec = adf_mstate_sect_lookup(&sub_sects_mgr, gen_states[i].id,
+ gen_states[i].action, &gen_states[i].info);
+ if (!l2_subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n", gen_states[i].id);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_save_etr_regs(void *subs, u8 *state, u32 size, void *opa)
+{
+ struct adf_vf_bank_info *vf_bank_info = opa;
+ struct adf_accel_dev *accel_dev = vf_bank_info->accel_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ u32 pf_bank_nr;
+ int ret;
+
+ pf_bank_nr = vf_bank_info->bank_nr + vf_bank_info->vf_nr * hw_data->num_banks_per_vf;
+ ret = hw_data->bank_state_save(accel_dev, pf_bank_nr, (struct bank_state *)state);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev), "Failed to save regs for vf%d bank%d\n",
+ vf_bank_info->vf_nr, vf_bank_info->bank_nr);
+ return ret;
+ }
+
+ return sizeof(struct bank_state);
+}
+
+static int adf_gen4_vfmig_save_etr_bank(struct adf_accel_dev *accel_dev, u32 vf_nr,
+ u32 bank_nr, void *mstate_mgr)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+ struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
+ struct adf_mstate_mgr sub_sects_mgr;
+ void *subsec, *l2_subsec;
+ struct adf_mstate_vreginfo info;
+ struct adf_vf_bank_info vf_bank_info;
+ char bank_ids[8];
+
+ snprintf(bank_ids, sizeof(bank_ids), ADF_MSTATE_BANK_IDX_IDS "%d", bank_nr);
+ subsec = adf_mstate_sect_add(mstate_mgr, bank_ids, NULL, NULL);
+ if (!subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to add sec %s for vf%d bank%d\n",
+ bank_ids, vf_nr, bank_nr);
+ return -EINVAL;
+ }
+ adf_mstate_mgr_init_by_parent(&sub_sects_mgr, mstate_mgr);
+
+ info.addr = &vfmig->ringsrvarben[bank_nr];
+ info.size = sizeof(vfmig->ringsrvarben[bank_nr]);
+ l2_subsec = adf_mstate_sect_add(&sub_sects_mgr, ADF_MSTATE_ARBITER_IDS,
+ NULL, &info);
+ if (!l2_subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to add sec %s for vf%d bank%d\n",
+ ADF_MSTATE_ARBITER_IDS, vf_nr, bank_nr);
+ return -EINVAL;
+ }
+
+ vf_bank_info.accel_dev = accel_dev;
+ vf_bank_info.vf_nr = vf_nr;
+ vf_bank_info.bank_nr = bank_nr;
+ l2_subsec = adf_mstate_sect_add(&sub_sects_mgr, ADF_MSTATE_ETR_REGS_IDS,
+ adf_gen4_vfmig_save_etr_regs, &vf_bank_info);
+ if (!l2_subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to add sec %s for vf%d bank%d\n",
+ ADF_MSTATE_ETR_REGS_IDS, vf_nr, bank_nr);
+ return -EINVAL;
+ }
+ adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec);
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_save_etr(struct adf_accel_dev *accel_dev, u32 vf_nr)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
+ int i;
+ void *mstate_mgr = vfmig->mstate_mgr;
+ struct adf_mstate_mgr sub_sects_mgr;
+ void *subsec;
+
+ subsec = adf_mstate_sect_add(mstate_mgr, ADF_MSTATE_ETRB_IDS, NULL, NULL);
+ if (!subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n", ADF_MSTATE_GEN_IDS);
+ return -EINVAL;
+ }
+
+ adf_mstate_mgr_init_by_parent(&sub_sects_mgr, mstate_mgr);
+ for (i = 0; i < hw_data->num_banks_per_vf; i++) {
+ if (adf_gen4_vfmig_save_etr_bank(accel_dev, vf_nr, i, &sub_sects_mgr) < 0)
+ return -EINVAL;
+ }
+ adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec);
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_save_misc(struct adf_accel_dev *accel_dev, u32 vf_nr)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
+ int i;
+ void *mstate_mgr = vfmig->mstate_mgr;
+ struct adf_mstate_mgr sub_sects_mgr;
+ void *subsec, *l2_subsec;
+ u32 regv;
+ u32 misc_bar_id = hw_data->get_misc_bar_id(hw_data);
+ void __iomem *csr = (&GET_BARS(accel_dev)[misc_bar_id])->virt_addr;
+ struct {
+ char *id;
+ u64 ofs;
+ } misc_states[] = {
+ {ADF_MSTATE_VINTSRC_IDS, ADF_GEN4_VINTSOU_OFFSET(vf_nr)},
+ {ADF_MSTATE_VINTMSK_IDS, ADF_GEN4_VINTMSK_OFFSET(vf_nr)},
+ {ADF_MSTATE_VINTSRC_PF2VM_IDS, ADF_GEN4_VINTSOUPF2VM_OFFSET(vf_nr)},
+ {ADF_MSTATE_VINTMSK_PF2VM_IDS, ADF_GEN4_VINTMSKPF2VM_OFFSET(vf_nr)},
+ {ADF_MSTATE_PF2VM_IDS, ADF_GEN4_PF2VM_OFFSET(vf_nr)},
+ {ADF_MSTATE_VM2PF_IDS, ADF_GEN4_VM2PF_OFFSET(vf_nr)},
+ };
+
+ subsec = adf_mstate_sect_add(mstate_mgr, ADF_MSTATE_MISCB_IDS, NULL, NULL);
+ if (!subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n", ADF_MSTATE_GEN_IDS);
+ return -EINVAL;
+ }
+
+ adf_mstate_mgr_init_by_parent(&sub_sects_mgr, mstate_mgr);
+ for (i = 0; i < ARRAY_SIZE(misc_states); i++) {
+ struct adf_mstate_vreginfo info;
+
+ info.addr = &regv;
+ info.size = sizeof(regv);
+ regv = ADF_CSR_RD(csr, misc_states[i].ofs);
+ l2_subsec = adf_mstate_sect_add(&sub_sects_mgr, misc_states[i].id,
+ NULL, &info);
+ if (!l2_subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n", misc_states[i].id);
+ return -EINVAL;
+ }
+ }
+ adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec);
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_save_generic(struct adf_accel_dev *accel_dev, u32 vf_nr)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
+ int i;
+ void *mstate_mgr = vfmig->mstate_mgr;
+ struct adf_mstate_mgr sub_sects_mgr;
+ void *subsec, *l2_subsec;
+ struct {
+ char *id;
+ struct adf_mstate_vreginfo info;
+ } gen_states[] = {
+ {ADF_MSTATE_GEN_CAP_IDS,
+ {&hw_data->accel_capabilities_mask, sizeof(hw_data->accel_capabilities_mask)}},
+ {ADF_MSTATE_GEN_SVCMAP_IDS,
+ {&hw_data->ring_to_svc_map, sizeof(hw_data->ring_to_svc_map)}},
+ {ADF_MSTATE_GEN_EXTDC_IDS,
+ {&hw_data->extended_dc_capabilities, sizeof(hw_data->extended_dc_capabilities)}},
+ };
+
+ subsec = adf_mstate_sect_add(mstate_mgr, ADF_MSTATE_GEN_IDS, NULL, NULL);
+ if (!subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n", ADF_MSTATE_GEN_IDS);
+ return -EINVAL;
+ }
+
+ adf_mstate_mgr_init_by_parent(&sub_sects_mgr, mstate_mgr);
+ for (i = 0; i < ARRAY_SIZE(gen_states); i++) {
+ l2_subsec = adf_mstate_sect_add(&sub_sects_mgr, gen_states[i].id,
+ NULL, &gen_states[i].info);
+ if (!l2_subsec) {
+ dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n", gen_states[i].id);
+ return -EINVAL;
+ }
+ }
+ adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec);
+
+ return 0;
+}
+
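+/*
+ * Layout of the saved VF state assembled below (a description of this
+ * patch's save helpers, not an external format spec): a preamble
+ * header followed by three top-level sections - "general" (capability
+ * masks), "MISCBAR" (interrupt source/mask and PF2VM/VM2PF CSRs) and
+ * "ETRBAR" (one "bnk<N>" sub-section per bank holding the arbiter
+ * enable word and the full bank CSR state).
+ */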
+static int adf_gen4_vfmig_save_state(struct adf_accel_dev *accel_dev, u32 vf_nr,
+ u8 *buf, u64 buf_sz)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+ struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
+ struct adf_mstate_preh *pre;
+ int ret;
+
+ adf_mstate_mgr_init(vfmig->mstate_mgr, buf, buf_sz);
+ pre = adf_mstate_preamble_add(vfmig->mstate_mgr);
+ ret = adf_gen4_vfmig_save_generic(accel_dev, vf_nr);
+ if (ret < 0) {
+ dev_err(&GET_DEV(accel_dev), "Failed to save generic state for vf_nr:%d\n", vf_nr);
+ return ret;
+ }
+
+ ret = adf_gen4_vfmig_save_misc(accel_dev, vf_nr);
+ if (ret < 0) {
+ dev_err(&GET_DEV(accel_dev), "Failed to save misc bar state for vf_nr:%d\n", vf_nr);
+ return ret;
+ }
+
+ ret = adf_gen4_vfmig_save_etr(accel_dev, vf_nr);
+ if (ret < 0) {
+ dev_err(&GET_DEV(accel_dev), "Failed to save etr bar state for vf_nr:%d\n", vf_nr);
+ return ret;
+ }
+
+ adf_mstate_preamble_update(vfmig->mstate_mgr, pre);
+
+ return 0;
+}
+
+static int adf_gen4_vfmig_load_state(struct adf_accel_dev *accel_dev, u32 vf_nr,
+ u8 *buf, u64 buf_sz)
+{
+ struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];
+ struct adf_gen4_vfmig *vfmig = vf_info->mig_priv;
+ int ret;
+
+ adf_mstate_mgr_init(vfmig->mstate_mgr, buf, buf_sz);
+ ret = adf_mstate_mgr_scan(vfmig->mstate_mgr, adf_mstate_preamble_checker, NULL);
+ if (ret < 0) {
+ dev_err(&GET_DEV(accel_dev), "Invalid state for vf_nr:%d\n", vf_nr);
+ return ret;
+ }
+ ret = adf_gen4_vfmig_load_generic(accel_dev, vf_nr);
+ if (ret < 0) {
+ dev_err(&GET_DEV(accel_dev), "Failed to load gerneal state for vf_nr:%d\n", vf_nr);
+ return ret;
+ }
+ ret = adf_gen4_vfmig_load_misc(accel_dev, vf_nr);
+ if (ret < 0) {
+ dev_err(&GET_DEV(accel_dev), "Failed to load misc bar state for vf_nr:%d\n", vf_nr);
+ return ret;
+ }
+ ret = adf_gen4_vfmig_load_etr(accel_dev, vf_nr);
+ if (ret < 0) {
+ dev_err(&GET_DEV(accel_dev), "Failed to load etr bar state for vf_nr:%d\n", vf_nr);
+ return ret;
+ }
+
+ return 0;
+}
+
+void adf_gen4_init_vf_mig_ops(struct adf_vfmig_ops *vfmig_ops)
+{
+ vfmig_ops->init_device = adf_gen4_vfmig_init_device;
+ vfmig_ops->shutdown_device = adf_gen4_vfmig_shutdown_device;
+ vfmig_ops->suspend_device = adf_gen4_vfmig_suspend_device;
+ vfmig_ops->resume_device = adf_gen4_vfmig_resume_device;
+ vfmig_ops->save_state = adf_gen4_vfmig_save_state;
+ vfmig_ops->load_state = adf_gen4_vfmig_load_state;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_init_vf_mig_ops);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.c b/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.c
new file mode 100644
index 000000000000..8bafff229929
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.c
@@ -0,0 +1,267 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+
+#include <linux/slab.h>
+#include <linux/types.h>
+#include "adf_mstate_mgr.h"
+
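+/*
+ * On-buffer section layout (as implemented below): an 8-byte
+ * NUL-terminated id, the size of the state that follows, the number
+ * of nested sub-sections, then the state bytes themselves. For a
+ * container section the state bytes are the headers and state of its
+ * sub-sections.
+ */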
+struct adf_mstate_sect_h {
+ u8 id[8];
+ u32 size;
+ u32 sub_sects;
+ u8 state[];
+};
+
+void adf_mstate_mgr_reset(void *m)
+{
+ struct adf_mstate_mgr *mgr = m;
+
+ mgr->state = mgr->buf;
+ mgr->n_sects = 0;
+}
+
+static void __adf_mstate_mgr_reinit(void *m, void *buf, u32 size)
+{
+ struct adf_mstate_mgr *mgr = m;
+
+ mgr->buf = buf;
+ mgr->state = buf;
+ mgr->size = size;
+ mgr->n_sects = 0;
+}
+
+void *adf_mstate_mgr_new(u8 *buf, u32 size)
+{
+ struct adf_mstate_mgr *mgr;
+
+ mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+ if (!mgr)
+ return NULL;
+ __adf_mstate_mgr_reinit(mgr, buf, size);
+
+ return mgr;
+}
+
+void adf_mstate_mgr_destroy(void *mgr)
+{
+ kfree(mgr);
+}
+
+static inline
+u32 _adf_mstate_state_size(struct adf_mstate_mgr *mgr)
+{
+ return mgr->state - mgr->buf;
+}
+
+int adf_mstate_state_size(void *mgr)
+{
+ return _adf_mstate_state_size(mgr);
+}
+
+void adf_mstate_mgr_init_by_parent(void *mgr, void *p_mgr_)
+{
+ struct adf_mstate_mgr *p_mgr = p_mgr_;
+
+ __adf_mstate_mgr_reinit(mgr, p_mgr->state, p_mgr->size - _adf_mstate_state_size(p_mgr));
+}
+
+void adf_mstate_mgr_init_by_psect(void *mgr_, void *p_sect_)
+{
+ struct adf_mstate_sect_h *p_sect = p_sect_;
+ struct adf_mstate_mgr *mgr = mgr_;
+
+ __adf_mstate_mgr_reinit(mgr, p_sect->state, p_sect->size);
+ mgr->n_sects = p_sect->sub_sects;
+}
+
+void adf_mstate_mgr_init(void *mgr, u8 *buf, u32 size)
+{
+ return __adf_mstate_mgr_reinit(mgr, buf, size);
+}
+
+struct adf_mstate_preh *adf_mstate_preamble_add(void *mgr_)
+{
+ struct adf_mstate_mgr *mgr = mgr_;
+ struct adf_mstate_preh *pre = (struct adf_mstate_preh *)(mgr->buf);
+
+ adf_mstate_preamble_init(pre);
+ mgr->state += pre->preh_len;
+
+ return pre;
+}
+
+int adf_mstate_preamble_update(void *mgr_, struct adf_mstate_preh *preamble)
+{
+ struct adf_mstate_mgr *mgr = mgr_;
+
+ preamble->size = _adf_mstate_state_size(mgr) - preamble->preh_len;
+ preamble->n_sects = mgr->n_sects;
+
+ return 0;
+}
+
+static void adf_mstate_dump_sect(struct adf_mstate_sect_h *sect, const char *prefix)
+{
+ pr_debug("%s QAT state section %s\n", prefix, sect->id);
+ print_hex_dump_debug("h-", DUMP_PREFIX_OFFSET, 16, 2,
+ sect, sizeof(*sect), true);
+ print_hex_dump_debug("s-", DUMP_PREFIX_OFFSET, 16, 2,
+ sect->state, sect->size, true);
+}
+
+int adf_mstate_sect_update(void *p_mgr_, void *curr_mgr_, void *sect_)
+{
+ struct adf_mstate_sect_h *sect = sect_;
+ struct adf_mstate_mgr *curr_mgr = curr_mgr_;
+ struct adf_mstate_mgr *p_mgr = p_mgr_;
+
+ sect->size += _adf_mstate_state_size(curr_mgr);
+ sect->sub_sects += curr_mgr->n_sects;
+ p_mgr->state += sect->size;
+
+ adf_mstate_dump_sect(sect, "Update");
+
+ return 0;
+}
+
+void *adf_mstate_sect_add(void *mgr_,
+ const char *id,
+ int (*populate)(void *sub_mgr, u8 *state, u32 size, void *opaque),
+ void *opaque)
+{
+ struct adf_mstate_mgr *mgr = mgr_;
+ u8 *end = mgr->buf + mgr->size;
+ struct adf_mstate_sect_h *sect;
+ int remaining;
+ int ret;
+
+ if ((u64)mgr->state + sizeof(*sect) < (u64)mgr->state ||
+ ((u64)mgr->state + sizeof(*sect) > (u64)end)) {
+ pr_err("Not enough space to hold QAT state header of sect %s! 0x%lx bytes left\n",
+ id, end - mgr->state);
+ return NULL;
+ }
+
+ sect = (struct adf_mstate_sect_h *)(mgr->state);
+ remaining = mgr->size - _adf_mstate_state_size(mgr) - sizeof(*sect);
+ if (populate) {
+ struct adf_mstate_mgr sub_sects_mgr;
+
+ __adf_mstate_mgr_reinit(&sub_sects_mgr, sect->state, remaining);
+ ret = (*populate)(&sub_sects_mgr, sect->state, remaining, opaque);
+ if (ret < 0)
+ return NULL;
+ ret += _adf_mstate_state_size(&sub_sects_mgr);
+ sect->sub_sects = sub_sects_mgr.n_sects;
+ } else if (opaque) {
+ /* Use default function */
+ struct adf_mstate_vreginfo *info = opaque;
+
+ if (info->size > remaining) {
+ pr_err("Not enough space for QAT state sect %s! has %u, need %u\n",
+ id, remaining, info->size);
+ return NULL;
+ }
+ memcpy(sect->state, info->addr, info->size);
+ ret = info->size;
+ sect->sub_sects = 0;
+ } else {
+ ret = 0;
+ }
+
+ strncpy(sect->id, id, sizeof(sect->id));
+ sect->id[sizeof(sect->id) - 1] = 0;
+ sect->size = ret;
+ ret += sizeof(*sect);
+ mgr->state += ret;
+ mgr->n_sects++;
+
+ adf_mstate_dump_sect(sect, "Add");
+
+ return sect;
+}
+
+static int adf_mstate_sect_scan(struct adf_mstate_mgr *mgr, int n_sects)
+{
+ struct adf_mstate_sect_h *start = (struct adf_mstate_sect_h *)(mgr->state);
+ struct adf_mstate_sect_h *sect = start;
+ int i;
+ u64 end;
+
+ end = (u64)mgr->buf + mgr->size;
+ for (i = 0; i < n_sects; i++) {
+ u64 s_start = (u64)sect->state;
+ u64 s_end = s_start + sect->size;
+
+ if (s_end < s_start || s_end > end) {
+ pr_err("Corrupted state section(index=%u,max size %u,got size %u)\n",
+ i, mgr->size, sect->size);
+ return -EINVAL;
+ }
+ sect = (struct adf_mstate_sect_h *)s_end;
+ }
+ mgr->n_sects = n_sects;
+ pr_debug("Scanned section (eldest child is %s), calculated size=%lu, mgr_size=%u sub_secs=%u\n",
+ start->id, sizeof(struct adf_mstate_sect_h) * (sect - start),
+ mgr->size, mgr->n_sects);
+
+ return 0;
+}
+
+int adf_mstate_mgr_scan(void *mgr_,
+ int (*pre_checker)(struct adf_mstate_preh *, void *),
+ void *opaque)
+{
+ struct adf_mstate_mgr *mgr = mgr_;
+ struct adf_mstate_preh *pre = (struct adf_mstate_preh *)(mgr->buf);
+
+ pr_debug("Found QAT state preambles\n");
+ print_hex_dump_debug("", DUMP_PREFIX_OFFSET, 16, 2, pre, pre->preh_len, 0);
+
+ if (pre_checker && pre_checker(pre, opaque) < 0)
+ return -EINVAL;
+ mgr->state = mgr->buf + pre->preh_len;
+
+ return adf_mstate_sect_scan(mgr, pre->n_sects);
+}
+
+void *adf_mstate_sect_lookup(void *mgr_,
+ const char *id,
+ int (*action)(void *, u8 *, u32, void *),
+ void *opaque)
+{
+ struct adf_mstate_mgr sub_sects_mgr;
+ struct adf_mstate_mgr *mgr = mgr_;
+ struct adf_mstate_sect_h *sect = (struct adf_mstate_sect_h *)(mgr->state);
+ int i, ret;
+
+ for (i = 0; i < mgr->n_sects; i++) {
+ if (!strncmp(sect->id, id, sizeof(sect->id)))
+ goto found;
+ sect = (struct adf_mstate_sect_h *)(sect->state + sect->size);
+ }
+
+ return NULL;
+found:
+ adf_mstate_dump_sect(sect, "Found");
+ __adf_mstate_mgr_reinit(&sub_sects_mgr, sect->state, sect->size);
+ if (sect->sub_sects > 0 &&
+ adf_mstate_sect_scan(&sub_sects_mgr, sect->sub_sects) < 0)
+ return NULL;
+ if (action) {
+ ret = (*action)(&sub_sects_mgr, sect->state, sect->size, opaque);
+ if (ret < 0)
+ return NULL;
+ } else if (opaque) {
+ /* Use default function */
+ struct adf_mstate_vreginfo *info = opaque;
+
+ if (sect->size != info->size) {
+ pr_err("Mismatched QAT state sect %s, has %u, need %u\n",
+ id, sect->size, info->size);
+ return NULL;
+ }
+ memcpy(info->addr, sect->state, info->size);
+ }
+
+ return sect;
+}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.h b/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.h
new file mode 100644
index 000000000000..7489428331fa
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+
+#ifndef ADF_MSTATE_MGR_H
+#define ADF_MSTATE_MGR_H
+
+#define ADF_MSTATE_MAGIC 0xADF5CAEA
+#define ADF_MSTATE_VERSION 0x1
+
+#define ADF_MSTATE_ETRB_IDS "ETRBAR"
+#define ADF_MSTATE_MISCB_IDS "MISCBAR"
+#define ADF_MSTATE_EXTB_IDS "EXTBAR"
+#define ADF_MSTATE_GEN_IDS "general"
+#define ADF_MSTATE_SECTION_NUM 4
+
+#define ADF_MSTATE_BANK_IDX_IDS "bnk"
+
+#define ADF_MSTATE_ETR_REGS_IDS "m_regs"
+#define ADF_MSTATE_ARBITER_IDS "arb_en"
+#define ADF_MSTATE_VINTSRC_IDS "vintsrc"
+#define ADF_MSTATE_VINTMSK_IDS "vintmsk"
+#define ADF_MSTATE_IOV_INIT_IDS "iovinit"
+#define ADF_MSTATE_COMPAT_VER_IDS "compver"
+#define ADF_MSTATE_SVM_CAP_IDS "svmcap"
+#define ADF_MSTATE_GEN_CAP_IDS "gencap"
+#define ADF_MSTATE_GEN_SVCMAP_IDS "svcmap"
+#define ADF_MSTATE_GEN_EXTDC_IDS "extdc"
+#define ADF_MSTATE_VINTSRC_PF2VM_IDS "vintsvm"
+#define ADF_MSTATE_VINTMSK_PF2VM_IDS "vintmvm"
+#define ADF_MSTATE_VM2PF_IDS "vm2pf"
+#define ADF_MSTATE_PF2VM_IDS "pf2vm"
+
+struct adf_mstate_mgr {
+ u8 *buf;
+ u8 *state;
+ u32 size;
+ u32 n_sects;
+};
+
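+/*
+ * Preamble at the start of a saved-state image. @size covers the
+ * section data that follows the preamble; @n_sects counts the
+ * top-level sections.
+ */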
+struct adf_mstate_preh {
+ u32 magic;
+ u32 version;
+ u16 preh_len;
+ u16 n_sects;
+ u32 size;
+};
+
+struct adf_mstate_vreginfo {
+ void *addr;
+ u32 size;
+};
+
+static inline void adf_mstate_preamble_init(struct adf_mstate_preh *preamble)
+{
+ preamble->magic = ADF_MSTATE_MAGIC;
+ preamble->version = ADF_MSTATE_VERSION;
+ preamble->preh_len = sizeof(*preamble);
+ preamble->size = 0;
+ preamble->n_sects = 0;
+}
+
+/* Default preamble checker */
+static inline int adf_mstate_preamble_checker(struct adf_mstate_preh *preamble, void *opaque)
+{
+ if (preamble->magic != ADF_MSTATE_MAGIC ||
+ preamble->version > ADF_MSTATE_VERSION) {
+ pr_err("unrecognized vqat state, magic=0x%x,version=0x%x, hlen=%u\n",
+ preamble->magic, preamble->version, preamble->preh_len);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
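+/*
+ * Typical usage, following adf_gen4_vf_mig.c in this series (sketch):
+ *
+ * save:   adf_mstate_mgr_init(mgr, buf, size);
+ *         pre = adf_mstate_preamble_add(mgr);
+ *         ... adf_mstate_sect_add() per section ...
+ *         adf_mstate_preamble_update(mgr, pre);
+ *
+ * load:   adf_mstate_mgr_init(mgr, buf, size);
+ *         adf_mstate_mgr_scan(mgr, adf_mstate_preamble_checker, NULL);
+ *         ... adf_mstate_sect_lookup() per section ...
+ */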
+void *adf_mstate_mgr_new(u8 *buf, u32 size);
+void adf_mstate_mgr_reset(void *m);
+void adf_mstate_mgr_destroy(void *mgr);
+void adf_mstate_mgr_init(void *mgr, u8 *buf, u32 size);
+void adf_mstate_mgr_init_by_parent(void *mgr, void *p_mgr);
+void adf_mstate_mgr_init_by_psect(void *mgr, void *p_sect);
+struct adf_mstate_preh *adf_mstate_preamble_add(void *mgr);
+int adf_mstate_preamble_update(void *mgr, struct adf_mstate_preh *preamble);
+struct adf_mstate_preh *adf_mstate_preamble_get(void *mgr);
+int adf_mstate_sect_update(void *p_mgr, void *sub_mgr, void *sect);
+void *adf_mstate_sect_add(void *mgr,
+ const char *id,
+ int (*populate)(void *sub_sects_mgr, u8 *buf,
+ u32 size, void *opaque),
+ void *opaque);
+int adf_mstate_mgr_scan(void *mgr,
+ int (*pre_checker)(struct adf_mstate_preh *, void *),
+ void *opaque);
+void *adf_mstate_sect_lookup(void *mgr,
+ const char *id,
+ int (*action)(void *sub_sects_mgr, u8 *buf,
+ u32 size, void *opaque),
+ void *opaque);
+int adf_mstate_state_size(void *mgr);
+#endif
--
2.18.2


2023-06-30 13:43:10

by Zeng, Xin

[permalink] [raw]
Subject: [RFC 5/5] vfio/qat: Add vfio_pci driver for Intel QAT VF devices

Add vfio pci driver for Intel QAT VF devices.

This driver uses vfio_pci_core to register to the VFIO subsystem. It
acts as a vfio agent and interacts with the QAT PF driver to implement
VF live migration.

Co-developed-by: Yahui Cao <[email protected]>
Signed-off-by: Yahui Cao <[email protected]>
Signed-off-by: Xin Zeng <[email protected]>
---
drivers/vfio/pci/Kconfig | 2 +
drivers/vfio/pci/Makefile | 1 +
drivers/vfio/pci/qat/Kconfig | 13 +
drivers/vfio/pci/qat/Makefile | 4 +
drivers/vfio/pci/qat/qat_vfio_pci_main.c | 518 +++++++++++++++++++++++
5 files changed, 538 insertions(+)
create mode 100644 drivers/vfio/pci/qat/Kconfig
create mode 100644 drivers/vfio/pci/qat/Makefile
create mode 100644 drivers/vfio/pci/qat/qat_vfio_pci_main.c

diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig
index f9d0c908e738..47c9773cf0c7 100644
--- a/drivers/vfio/pci/Kconfig
+++ b/drivers/vfio/pci/Kconfig
@@ -59,4 +59,6 @@ source "drivers/vfio/pci/mlx5/Kconfig"

source "drivers/vfio/pci/hisilicon/Kconfig"

+source "drivers/vfio/pci/qat/Kconfig"
+
endif
diff --git a/drivers/vfio/pci/Makefile b/drivers/vfio/pci/Makefile
index 24c524224da5..dcc6366df8fa 100644
--- a/drivers/vfio/pci/Makefile
+++ b/drivers/vfio/pci/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_VFIO_PCI) += vfio-pci.o
obj-$(CONFIG_MLX5_VFIO_PCI) += mlx5/

obj-$(CONFIG_HISI_ACC_VFIO_PCI) += hisilicon/
+obj-$(CONFIG_QAT_VFIO_PCI) += qat/
diff --git a/drivers/vfio/pci/qat/Kconfig b/drivers/vfio/pci/qat/Kconfig
new file mode 100644
index 000000000000..38e5b4a0ca9c
--- /dev/null
+++ b/drivers/vfio/pci/qat/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config QAT_VFIO_PCI
+ tristate "VFIO support for QAT VF PCI devices"
+ depends on X86
+ depends on VFIO_PCI_CORE
+ depends on CRYPTO_DEV_QAT
+ help
+ This provides migration support for Intel(R) QAT Virtual Function
+ using the VFIO framework.
+
+ To compile this as a module, choose M here: the module
+ will be called qat_vfio_pci. If you don't know what to do here,
+ say N.
diff --git a/drivers/vfio/pci/qat/Makefile b/drivers/vfio/pci/qat/Makefile
new file mode 100644
index 000000000000..106791887b91
--- /dev/null
+++ b/drivers/vfio/pci/qat/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_QAT_VFIO_PCI) += qat_vfio_pci.o
+qat_vfio_pci-y := qat_vfio_pci_main.o
+
diff --git a/drivers/vfio/pci/qat/qat_vfio_pci_main.c b/drivers/vfio/pci/qat/qat_vfio_pci_main.c
new file mode 100644
index 000000000000..af971fd05fd2
--- /dev/null
+++ b/drivers/vfio/pci/qat/qat_vfio_pci_main.c
@@ -0,0 +1,518 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+#include <linux/anon_inodes.h>
+#include <linux/container_of.h>
+#include <linux/device.h>
+#include <linux/file.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/sizes.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/vfio_pci_core.h>
+#include <linux/qat/qat_vf_mig.h>
+
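+/* Opaque device state blob saved and restored by the QAT PF driver */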
+struct qat_vf_mig_data {
+ u8 state[SZ_4K];
+};
+
+struct qat_vf_migration_file {
+ struct file *filp;
+ struct mutex lock; /* protect migration region context */
+ bool disabled;
+
+ size_t total_length;
+ struct qat_vf_mig_data mig_data;
+};
+
+struct qat_vf_core_device {
+ struct vfio_pci_core_device core_device;
+ struct pci_dev *parent;
+ int vf_id;
+
+ struct mutex state_mutex; /* protect migration state */
+ enum vfio_device_mig_state mig_state;
+ struct qat_vf_migration_file *resuming_migf;
+ struct qat_vf_migration_file *saving_migf;
+};
+
+static int qat_vf_init(struct qat_vf_core_device *qat_vdev)
+{
+ return qat_vfmig_init_device(qat_vdev->parent, qat_vdev->vf_id);
+}
+
+static void qat_vf_cleanup(struct qat_vf_core_device *qat_vdev)
+{
+ qat_vfmig_shutdown_device(qat_vdev->parent, qat_vdev->vf_id);
+}
+
+static int qat_vf_pci_open_device(struct vfio_device *core_vdev)
+{
+ int ret;
+ struct qat_vf_core_device *qat_vdev =
+ container_of(core_vdev, struct qat_vf_core_device,
+ core_device.vdev);
+ struct vfio_pci_core_device *vdev = &qat_vdev->core_device;
+
+ ret = vfio_pci_core_enable(vdev);
+ if (ret)
+ return ret;
+
+ ret = qat_vf_init(qat_vdev);
+ if (ret) {
+ vfio_pci_core_disable(vdev);
+ return ret;
+ }
+ qat_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
+
+ vfio_pci_core_finish_enable(vdev);
+
+ return 0;
+}
+
+static void qat_vf_disable_fd(struct qat_vf_migration_file *migf)
+{
+ mutex_lock(&migf->lock);
+ migf->disabled = true;
+ migf->total_length = 0;
+ migf->filp->f_pos = 0;
+ mutex_unlock(&migf->lock);
+}
+
+static void qat_vf_disable_fds(struct qat_vf_core_device *qat_vdev)
+{
+ if (qat_vdev->resuming_migf) {
+ qat_vf_disable_fd(qat_vdev->resuming_migf);
+ fput(qat_vdev->resuming_migf->filp);
+ qat_vdev->resuming_migf = NULL;
+ }
+
+ if (qat_vdev->saving_migf) {
+ qat_vf_disable_fd(qat_vdev->saving_migf);
+ fput(qat_vdev->saving_migf->filp);
+ qat_vdev->saving_migf = NULL;
+ }
+}
+
+static void qat_vf_pci_close_device(struct vfio_device *core_vdev)
+{
+ struct qat_vf_core_device *qat_vdev = container_of(core_vdev,
+ struct qat_vf_core_device, core_device.vdev);
+
+ qat_vf_cleanup(qat_vdev);
+ qat_vf_disable_fds(qat_vdev);
+ vfio_pci_core_close_device(core_vdev);
+}
+
+static int qat_vf_stop_device(struct qat_vf_core_device *qat_vdev)
+{
+ return qat_vfmig_suspend_device(qat_vdev->parent, qat_vdev->vf_id);
+}
+
+static ssize_t qat_vf_save_read(struct file *filp, char __user *buf,
+ size_t len, loff_t *pos)
+{
+ struct qat_vf_migration_file *migf = filp->private_data;
+ ssize_t done = 0;
+ loff_t *offs;
+ int ret;
+
+ if (pos)
+ return -ESPIPE;
+ offs = &filp->f_pos;
+
+ mutex_lock(&migf->lock);
+ if (*offs > migf->total_length || *offs < 0) {
+ done = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (migf->disabled) {
+ done = -ENODEV;
+ goto out_unlock;
+ }
+
+ len = min_t(size_t, migf->total_length - *offs, len);
+ if (len) {
+ ret = copy_to_user(buf, (u8 *)&migf->mig_data + *offs, len);
+ if (ret) {
+ done = -EFAULT;
+ goto out_unlock;
+ }
+ *offs += len;
+ done = len;
+ }
+
+out_unlock:
+ mutex_unlock(&migf->lock);
+ return done;
+}
+
+static int qat_vf_release_file(struct inode *inode, struct file *filp)
+{
+ struct qat_vf_migration_file *migf = filp->private_data;
+
+ qat_vf_disable_fd(migf);
+ mutex_destroy(&migf->lock);
+ kfree(migf);
+
+ return 0;
+}
+
+static const struct file_operations qat_vf_save_fops = {
+ .owner = THIS_MODULE,
+ .read = qat_vf_save_read,
+ .release = qat_vf_release_file,
+ .llseek = no_llseek,
+};
+
+static int qat_vf_save_state(struct qat_vf_core_device *qat_vdev,
+ struct qat_vf_migration_file *migf)
+{
+ struct qat_vf_mig_data *mig_data = &migf->mig_data;
+ int ret;
+
+ ret = qat_vfmig_save_state(qat_vdev->parent, qat_vdev->vf_id,
+ mig_data->state,
+ sizeof(mig_data->state));
+ if (ret)
+ return ret;
+
+ migf->total_length = sizeof(struct qat_vf_mig_data);
+
+ return 0;
+}
+
+static struct qat_vf_migration_file *
+qat_vf_save_device_data(struct qat_vf_core_device *qat_vdev)
+{
+ struct qat_vf_migration_file *migf;
+ int ret;
+
+ migf = kzalloc(sizeof(*migf), GFP_KERNEL);
+ if (!migf)
+ return ERR_PTR(-ENOMEM);
+
+ migf->filp = anon_inode_getfile("qat_vf_mig", &qat_vf_save_fops,
+ migf, O_RDONLY);
+ ret = PTR_ERR_OR_ZERO(migf->filp);
+ if (ret) {
+ kfree(migf);
+ return ERR_PTR(ret);
+ }
+
+ stream_open(migf->filp->f_inode, migf->filp);
+ mutex_init(&migf->lock);
+
+ ret = qat_vf_save_state(qat_vdev, migf);
+ if (ret) {
+ fput(migf->filp);
+ kfree(migf);
+ return ERR_PTR(ret);
+ }
+
+ return migf;
+}
+
+static ssize_t qat_vf_resume_write(struct file *filp, const char __user *buf,
+ size_t len, loff_t *pos)
+{
+ struct qat_vf_migration_file *migf = filp->private_data;
+ loff_t requested_length;
+ ssize_t done = 0;
+ loff_t *offs;
+ int ret;
+
+ if (pos)
+ return -ESPIPE;
+ offs = &filp->f_pos;
+
+ if (*offs < 0 ||
+ check_add_overflow((loff_t)len, *offs, &requested_length))
+ return -EOVERFLOW;
+
+ if (requested_length > sizeof(struct qat_vf_mig_data))
+ return -ENOMEM;
+
+ mutex_lock(&migf->lock);
+ if (migf->disabled) {
+ done = -ENODEV;
+ goto out_unlock;
+ }
+
+ ret = copy_from_user((u8 *)&migf->mig_data + *offs, buf, len);
+ if (ret) {
+ done = -EFAULT;
+ goto out_unlock;
+ }
+ *offs += len;
+ done = len;
+ migf->total_length += len;
+
+out_unlock:
+ mutex_unlock(&migf->lock);
+ return done;
+}
+
+static const struct file_operations qat_vf_resume_fops = {
+ .owner = THIS_MODULE,
+ .write = qat_vf_resume_write,
+ .release = qat_vf_release_file,
+ .llseek = no_llseek,
+};
+
+static struct qat_vf_migration_file *
+qat_vf_resume_device_data(struct qat_vf_core_device *qat_vdev)
+{
+ struct qat_vf_migration_file *migf;
+ int ret;
+
+ migf = kzalloc(sizeof(*migf), GFP_KERNEL);
+ if (!migf)
+ return ERR_PTR(-ENOMEM);
+
+ migf->filp = anon_inode_getfile("qat_vf_mig", &qat_vf_resume_fops,
+ migf, O_WRONLY);
+ ret = PTR_ERR_OR_ZERO(migf->filp);
+ if (ret) {
+ kfree(migf);
+ return ERR_PTR(ret);
+ }
+ stream_open(migf->filp->f_inode, migf->filp);
+ mutex_init(&migf->lock);
+
+ return migf;
+}
+
+static int qat_vf_load_state(struct qat_vf_core_device *qat_vdev,
+ struct qat_vf_migration_file *migf)
+{
+ struct qat_vf_mig_data *mig_data = &migf->mig_data;
+
+ return qat_vfmig_load_state(qat_vdev->parent, qat_vdev->vf_id,
+ mig_data->state,
+ sizeof(mig_data->state));
+}
+
+static int qat_vf_load_device_data(struct qat_vf_core_device *qat_vdev)
+{
+ return qat_vf_load_state(qat_vdev, qat_vdev->resuming_migf);
+}
+
+static int qat_vf_start_device(struct qat_vf_core_device *qat_vdev)
+{
+ return qat_vfmig_resume_device(qat_vdev->parent, qat_vdev->vf_id);
+}
+
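+/*
+ * Handle one arc of the vfio migration state machine. Arcs covered:
+ * RUNNING->STOP (quiesce the VF), STOP->STOP_COPY (save state behind
+ * a read-only fd), STOP_COPY->STOP (release files), STOP->RESUMING
+ * (accept state on a write-only fd), RESUMING->STOP (load the written
+ * state) and STOP->RUNNING (resume the VF).
+ */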
+static struct file *qat_vf_pci_step_device_state(struct qat_vf_core_device *qat_vdev, u32 new)
+{
+ u32 cur = qat_vdev->mig_state;
+ int ret;
+
+ if (cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_STOP) {
+ ret = qat_vf_stop_device(qat_vdev);
+ if (ret)
+ return ERR_PTR(ret);
+ return NULL;
+ }
+
+ if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_STOP_COPY) {
+ struct qat_vf_migration_file *migf;
+
+ migf = qat_vf_save_device_data(qat_vdev);
+ if (IS_ERR(migf))
+ return ERR_CAST(migf);
+ get_file(migf->filp);
+ qat_vdev->saving_migf = migf;
+ return migf->filp;
+ }
+
+ if (cur == VFIO_DEVICE_STATE_STOP_COPY && new == VFIO_DEVICE_STATE_STOP) {
+ qat_vf_disable_fds(qat_vdev);
+ return NULL;
+ }
+
+ if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RESUMING) {
+ struct qat_vf_migration_file *migf;
+
+ migf = qat_vf_resume_device_data(qat_vdev);
+ if (IS_ERR(migf))
+ return ERR_CAST(migf);
+ get_file(migf->filp);
+ qat_vdev->resuming_migf = migf;
+ return migf->filp;
+ }
+
+ if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) {
+ ret = qat_vf_load_device_data(qat_vdev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ qat_vf_disable_fds(qat_vdev);
+ return NULL;
+ }
+
+ if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING) {
+ ret = qat_vf_start_device(qat_vdev);
+ if (ret)
+ return ERR_PTR(ret);
+ return NULL;
+ }
+
+ /* vfio_mig_get_next_state() does not use arcs other than the above */
+ WARN_ON(true);
+ return ERR_PTR(-EINVAL);
+}
+
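+/*
+ * Step the device through intermediate states computed by
+ * vfio_mig_get_next_state() until the requested state is reached;
+ * only the final arc may hand a file descriptor back to user space.
+ */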
+static struct file *qat_vf_pci_set_device_state(struct vfio_device *vdev,
+ enum vfio_device_mig_state new_state)
+{
+ struct qat_vf_core_device *qat_vdev = container_of(vdev,
+ struct qat_vf_core_device, core_device.vdev);
+ enum vfio_device_mig_state next_state;
+ struct file *res = NULL;
+ int ret;
+
+ mutex_lock(&qat_vdev->state_mutex);
+ while (new_state != qat_vdev->mig_state) {
+ ret = vfio_mig_get_next_state(vdev, qat_vdev->mig_state,
+ new_state, &next_state);
+ if (ret) {
+ res = ERR_PTR(-EINVAL);
+ break;
+ }
+
+ res = qat_vf_pci_step_device_state(qat_vdev, next_state);
+ if (IS_ERR(res))
+ break;
+ qat_vdev->mig_state = next_state;
+ if (WARN_ON(res && new_state != qat_vdev->mig_state)) {
+ fput(res);
+ res = ERR_PTR(-EINVAL);
+ break;
+ }
+ }
+ mutex_unlock(&qat_vdev->state_mutex);
+
+ return res;
+}
+
+static int qat_vf_pci_get_device_state(struct vfio_device *vdev,
+ enum vfio_device_mig_state *curr_state)
+{
+ struct qat_vf_core_device *qat_vdev = container_of(vdev,
+ struct qat_vf_core_device, core_device.vdev);
+
+ mutex_lock(&qat_vdev->state_mutex);
+ *curr_state = qat_vdev->mig_state;
+ mutex_unlock(&qat_vdev->state_mutex);
+
+ return 0;
+}
+
+static int qat_vf_pci_get_data_size(struct vfio_device *vdev,
+ unsigned long *stop_copy_length)
+{
+ *stop_copy_length = sizeof(struct qat_vf_mig_data);
+ return 0;
+}
+
+static const struct vfio_migration_ops qat_vf_pci_mig_ops = {
+ .migration_set_state = qat_vf_pci_set_device_state,
+ .migration_get_state = qat_vf_pci_get_device_state,
+ .migration_get_data_size = qat_vf_pci_get_data_size,
+};
+
+static int qat_vf_pci_init_dev(struct vfio_device *core_vdev)
+{
+ struct qat_vf_core_device *qat_vdev = container_of(core_vdev,
+ struct qat_vf_core_device, core_device.vdev);
+
+ mutex_init(&qat_vdev->state_mutex);
+
+ core_vdev->migration_flags = VFIO_MIGRATION_STOP_COPY;
+ core_vdev->mig_ops = &qat_vf_pci_mig_ops;
+
+ return vfio_pci_core_init_dev(core_vdev);
+}
+
+static const struct vfio_device_ops qat_vf_pci_ops = {
+ .name = "qat-vf-vfio-pci",
+ .init = qat_vf_pci_init_dev,
+ .release = vfio_pci_core_release_dev,
+ .open_device = qat_vf_pci_open_device,
+ .close_device = qat_vf_pci_close_device,
+ .ioctl = vfio_pci_core_ioctl,
+ .read = vfio_pci_core_read,
+ .write = vfio_pci_core_write,
+ .mmap = vfio_pci_core_mmap,
+ .request = vfio_pci_core_request,
+ .match = vfio_pci_core_match,
+ .bind_iommufd = vfio_iommufd_physical_bind,
+ .unbind_iommufd = vfio_iommufd_physical_unbind,
+ .attach_ioas = vfio_iommufd_physical_attach_ioas,
+};
+
+static int
+qat_vf_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct qat_vf_core_device *qat_vdev;
+ int ret;
+
+ qat_vdev = vfio_alloc_device(qat_vf_core_device, core_device.vdev, dev, &qat_vf_pci_ops);
+ if (IS_ERR(qat_vdev))
+ return PTR_ERR(qat_vdev);
+
+ qat_vdev->vf_id = pci_iov_vf_id(pdev);
+ qat_vdev->parent = pdev->physfn;
+ if (!qat_vdev->parent || qat_vdev->vf_id < 0) {
+ ret = -EINVAL;
+ goto out_put_device;
+ }
+
+ pci_set_drvdata(pdev, &qat_vdev->core_device);
+ ret = vfio_pci_core_register_device(&qat_vdev->core_device);
+ if (ret)
+ goto out_put_device;
+
+ return 0;
+
+out_put_device:
+ vfio_put_device(&qat_vdev->core_device.vdev);
+ return ret;
+}
+
+static struct qat_vf_core_device *qat_vf_drvdata(struct pci_dev *pdev)
+{
+ struct vfio_pci_core_device *core_device = pci_get_drvdata(pdev);
+
+ return container_of(core_device, struct qat_vf_core_device, core_device);
+}
+
+static void qat_vf_vfio_pci_remove(struct pci_dev *pdev)
+{
+ struct qat_vf_core_device *qat_vdev = qat_vf_drvdata(pdev);
+
+ vfio_pci_core_unregister_device(&qat_vdev->core_device);
+ vfio_put_device(&qat_vdev->core_device.vdev);
+}
+
+static const struct pci_device_id qat_vf_vfio_pci_table[] = {
+ /* Intel QAT GEN4 4xxx VF device */
+ { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_INTEL, 0x4941) },
+ {}
+};
+MODULE_DEVICE_TABLE(pci, qat_vf_vfio_pci_table);
+
+static struct pci_driver qat_vf_vfio_pci_driver = {
+ .name = "qat_vfio_pci",
+ .id_table = qat_vf_vfio_pci_table,
+ .probe = qat_vf_vfio_pci_probe,
+ .remove = qat_vf_vfio_pci_remove,
+ .driver_managed_dma = true,
+};
+module_pci_driver(qat_vf_vfio_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("QAT VFIO PCI - VFIO PCI driver with live migration support for Intel(R) QAT device family");
--
2.18.2


2023-07-26 20:12:42

by Alex Williamson

[permalink] [raw]
Subject: Re: [RFC 5/5] vfio/qat: Add vfio_pci driver for Intel QAT VF devices


Please Cc the vfio-pci variant driver reviewers listed in MAINTAINERS.
Also useful to put the subsystem maintainer on the cc rather than just
the list.

On Fri, 30 Jun 2023 21:13:04 +0800
Xin Zeng <[email protected]> wrote:

> Add vfio pci driver for Intel QAT VF devices.
>
> This driver uses vfio_pci_core to register to the VFIO subsystem. It
> acts as a vfio agent and interacts with the QAT PF driver to implement
> VF live migration.
>
> Co-developed-by: Yahui Cao <[email protected]>
> Signed-off-by: Yahui Cao <[email protected]>
> Signed-off-by: Xin Zeng <[email protected]>
> ---
> drivers/vfio/pci/Kconfig | 2 +
> drivers/vfio/pci/Makefile | 1 +
> drivers/vfio/pci/qat/Kconfig | 13 +
> drivers/vfio/pci/qat/Makefile | 4 +
> drivers/vfio/pci/qat/qat_vfio_pci_main.c | 518 +++++++++++++++++++++++

Rename to main.c.

> 5 files changed, 538 insertions(+)
> create mode 100644 drivers/vfio/pci/qat/Kconfig
> create mode 100644 drivers/vfio/pci/qat/Makefile
> create mode 100644 drivers/vfio/pci/qat/qat_vfio_pci_main.c
>
> diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig
> index f9d0c908e738..47c9773cf0c7 100644
> --- a/drivers/vfio/pci/Kconfig
> +++ b/drivers/vfio/pci/Kconfig
> @@ -59,4 +59,6 @@ source "drivers/vfio/pci/mlx5/Kconfig"
>
> source "drivers/vfio/pci/hisilicon/Kconfig"
>
> +source "drivers/vfio/pci/qat/Kconfig"
> +
> endif
> diff --git a/drivers/vfio/pci/Makefile b/drivers/vfio/pci/Makefile
> index 24c524224da5..dcc6366df8fa 100644
> --- a/drivers/vfio/pci/Makefile
> +++ b/drivers/vfio/pci/Makefile
> @@ -11,3 +11,4 @@ obj-$(CONFIG_VFIO_PCI) += vfio-pci.o
> obj-$(CONFIG_MLX5_VFIO_PCI) += mlx5/
>
> obj-$(CONFIG_HISI_ACC_VFIO_PCI) += hisilicon/
> +obj-$(CONFIG_QAT_VFIO_PCI) += qat/
> diff --git a/drivers/vfio/pci/qat/Kconfig b/drivers/vfio/pci/qat/Kconfig
> new file mode 100644
> index 000000000000..38e5b4a0ca9c
> --- /dev/null
> +++ b/drivers/vfio/pci/qat/Kconfig
> @@ -0,0 +1,13 @@
> +# SPDX-License-Identifier: GPL-2.0-only
> +config QAT_VFIO_PCI
> + tristate "VFIO support for QAT VF PCI devices"
> + depends on X86

What specific X86 dependency exists here? CRYPTO_DEV_QAT and the
various versions of the QAT driver don't seem to have an explicit arch
dependency, therefore this shouldn't either.

> + depends on VFIO_PCI_CORE

select VFIO_PCI_CORE; this was updated for all vfio-pci variant drivers
for v6.5.
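
i.e. something along these lines (a sketch only, assuming no real arch
dependency turns up):

  config QAT_VFIO_PCI
          tristate "VFIO support for QAT VF PCI devices"
          select VFIO_PCI_CORE
          depends on CRYPTO_DEV_QAT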

> + depends on CRYPTO_DEV_QAT
> + help
> + This provides migration support for Intel(R) QAT Virtual Function
> + using the VFIO framework.
> +
> + To compile this as a module, choose M here: the module
> + will be called qat_vfio_pci. If you don't know what to do here,
> + say N.
> diff --git a/drivers/vfio/pci/qat/Makefile b/drivers/vfio/pci/qat/Makefile
> new file mode 100644
> index 000000000000..106791887b91
> --- /dev/null
> +++ b/drivers/vfio/pci/qat/Makefile
> @@ -0,0 +1,4 @@
> +# SPDX-License-Identifier: GPL-2.0-only
> +obj-$(CONFIG_QAT_VFIO_PCI) += qat_vfio_pci.o
> +qat_vfio_pci-y := qat_vfio_pci_main.o
> +
> diff --git a/drivers/vfio/pci/qat/qat_vfio_pci_main.c b/drivers/vfio/pci/qat/qat_vfio_pci_main.c
> new file mode 100644
> index 000000000000..af971fd05fd2
> --- /dev/null
> +++ b/drivers/vfio/pci/qat/qat_vfio_pci_main.c
> @@ -0,0 +1,518 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +/* Copyright(c) 2023 Intel Corporation */
> +#include <linux/anon_inodes.h>
> +#include <linux/container_of.h>
> +#include <linux/device.h>
> +#include <linux/file.h>
> +#include <linux/init.h>
> +#include <linux/kernel.h>
> +#include <linux/module.h>
> +#include <linux/mutex.h>
> +#include <linux/pci.h>
> +#include <linux/sizes.h>
> +#include <linux/types.h>
> +#include <linux/uaccess.h>
> +#include <linux/vfio_pci_core.h>
> +#include <linux/qat/qat_vf_mig.h>
> +
> +struct qat_vf_mig_data {
> + u8 state[SZ_4K];
> +};
> +
> +struct qat_vf_migration_file {
> + struct file *filp;
> + struct mutex lock; /* protect migration region context */
> + bool disabled;
> +
> + size_t total_length;
> + struct qat_vf_mig_data mig_data;
> +};
> +
> +struct qat_vf_core_device {
> + struct vfio_pci_core_device core_device;
> + struct pci_dev *parent;
> + int vf_id;
> +
> + struct mutex state_mutex; /* protect migration state */
> + enum vfio_device_mig_state mig_state;
> + struct qat_vf_migration_file *resuming_migf;
> + struct qat_vf_migration_file *saving_migf;
> +};
> +
> +static int qat_vf_init(struct qat_vf_core_device *qat_vdev)
> +{
> + return qat_vfmig_init_device(qat_vdev->parent, qat_vdev->vf_id);
> +}
> +
> +static void qat_vf_cleanup(struct qat_vf_core_device *qat_vdev)
> +{
> + qat_vfmig_shutdown_device(qat_vdev->parent, qat_vdev->vf_id);
> +}
> +
> +static int qat_vf_pci_open_device(struct vfio_device *core_vdev)
> +{
> + int ret;
> + struct qat_vf_core_device *qat_vdev =
> + container_of(core_vdev, struct qat_vf_core_device,
> + core_device.vdev);
> + struct vfio_pci_core_device *vdev = &qat_vdev->core_device;
> +
> + ret = vfio_pci_core_enable(vdev);
> + if (ret)
> + return ret;
> +
> + ret = qat_vf_init(qat_vdev);
> + if (ret) {
> + vfio_pci_core_disable(vdev);
> + return ret;
> + }
> + qat_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
> +
> + vfio_pci_core_finish_enable(vdev);
> +
> + return 0;
> +}
> +
> +static void qat_vf_disable_fd(struct qat_vf_migration_file *migf)
> +{
> + mutex_lock(&migf->lock);
> + migf->disabled = true;
> + migf->total_length = 0;
> + migf->filp->f_pos = 0;
> + mutex_unlock(&migf->lock);
> +}
> +
> +static void qat_vf_disable_fds(struct qat_vf_core_device *qat_vdev)
> +{
> + if (qat_vdev->resuming_migf) {
> + qat_vf_disable_fd(qat_vdev->resuming_migf);
> + fput(qat_vdev->resuming_migf->filp);
> + qat_vdev->resuming_migf = NULL;
> + }
> +
> + if (qat_vdev->saving_migf) {
> + qat_vf_disable_fd(qat_vdev->saving_migf);
> + fput(qat_vdev->saving_migf->filp);
> + qat_vdev->saving_migf = NULL;
> + }
> +}
> +
> +static void qat_vf_pci_close_device(struct vfio_device *core_vdev)
> +{
> + struct qat_vf_core_device *qat_vdev = container_of(core_vdev,
> + struct qat_vf_core_device, core_device.vdev);
> +
> + qat_vf_cleanup(qat_vdev);
> + qat_vf_disable_fds(qat_vdev);
> + vfio_pci_core_close_device(core_vdev);
> +}
> +
> +static int qat_vf_stop_device(struct qat_vf_core_device *qat_vdev)
> +{
> + return qat_vfmig_suspend_device(qat_vdev->parent, qat_vdev->vf_id);
> +}
> +
> +static ssize_t qat_vf_save_read(struct file *filp, char __user *buf,
> + size_t len, loff_t *pos)
> +{
> + struct qat_vf_migration_file *migf = filp->private_data;
> + ssize_t done = 0;
> + loff_t *offs;
> + int ret;
> +
> + if (pos)
> + return -ESPIPE;
> + offs = &filp->f_pos;
> +
> + mutex_lock(&migf->lock);
> + if (*offs > migf->total_length || *offs < 0) {
> + done = -EINVAL;
> + goto out_unlock;
> + }
> +
> + if (migf->disabled) {
> + done = -ENODEV;
> + goto out_unlock;
> + }
> +
> + len = min_t(size_t, migf->total_length - *offs, len);
> + if (len) {
> + ret = copy_to_user(buf, &migf->mig_data + *offs, len);
> + if (ret) {
> + done = -EFAULT;
> + goto out_unlock;
> + }
> + *offs += len;
> + done = len;
> + }
> +
> +out_unlock:
> + mutex_unlock(&migf->lock);
> + return done;
> +}
> +
> +static int qat_vf_release_file(struct inode *inode, struct file *filp)
> +{
> + struct qat_vf_migration_file *migf = filp->private_data;
> +
> + qat_vf_disable_fd(migf);
> + mutex_destroy(&migf->lock);
> + kfree(migf);
> +
> + return 0;
> +}
> +
> +static const struct file_operations qat_vf_save_fops = {
> + .owner = THIS_MODULE,
> + .read = qat_vf_save_read,
> + .release = qat_vf_release_file,
> + .llseek = no_llseek,
> +};
> +
> +static int qat_vf_save_state(struct qat_vf_core_device *qat_vdev,
> + struct qat_vf_migration_file *migf)
> +{
> + struct qat_vf_mig_data *mig_data = &migf->mig_data;
> + int ret;
> +
> + ret = qat_vfmig_save_state(qat_vdev->parent, qat_vdev->vf_id,
> + mig_data->state,
> + sizeof(mig_data->state));
> + if (ret)
> + return ret;
> +
> + migf->total_length = sizeof(struct qat_vf_mig_data);
> +
> + return 0;
> +}
> +
> +static struct qat_vf_migration_file *
> +qat_vf_save_device_data(struct qat_vf_core_device *qat_vdev)
> +{
> + struct qat_vf_migration_file *migf;
> + int ret;
> +
> + migf = kzalloc(sizeof(*migf), GFP_KERNEL);
> + if (!migf)
> + return ERR_PTR(-ENOMEM);
> +
> + migf->filp = anon_inode_getfile("qat_vf_mig", &qat_vf_save_fops,
> + migf, O_RDONLY);
> + ret = PTR_ERR_OR_ZERO(migf->filp);
> + if (ret) {
> + kfree(migf);
> + return ERR_PTR(ret);
> + }
> +
> + stream_open(migf->filp->f_inode, migf->filp);
> + mutex_init(&migf->lock);
> +
> + ret = qat_vf_save_state(qat_vdev, migf);
> + if (ret) {
> + fput(migf->filp);
> + kfree(migf);
> + return ERR_PTR(ret);
> + }
> +
> + return migf;
> +}
> +
> +static ssize_t qat_vf_resume_write(struct file *filp, const char __user *buf,
> + size_t len, loff_t *pos)
> +{
> + struct qat_vf_migration_file *migf = filp->private_data;
> + loff_t requested_length;
> + ssize_t done = 0;
> + loff_t *offs;
> + int ret;
> +
> + if (pos)
> + return -ESPIPE;
> + offs = &filp->f_pos;
> +
> + if (*offs < 0 ||
> + check_add_overflow((loff_t)len, *offs, &requested_length))
> + return -EOVERFLOW;
> +
> + if (requested_length > sizeof(struct qat_vf_mig_data))
> + return -ENOMEM;
> +
> + mutex_lock(&migf->lock);
> + if (migf->disabled) {
> + done = -ENODEV;
> + goto out_unlock;
> + }
> +
> + ret = copy_from_user(&migf->mig_data + *offs, buf, len);
> + if (ret) {
> + done = -EFAULT;
> + goto out_unlock;
> + }
> + *offs += len;
> + done = len;
> + migf->total_length += len;
> +
> +out_unlock:
> + mutex_unlock(&migf->lock);
> + return done;
> +}
> +
> +static const struct file_operations qat_vf_resume_fops = {
> + .owner = THIS_MODULE,
> + .write = qat_vf_resume_write,
> + .release = qat_vf_release_file,
> + .llseek = no_llseek,
> +};
> +
> +static struct qat_vf_migration_file *
> +qat_vf_resume_device_data(struct qat_vf_core_device *qat_vdev)
> +{
> + struct qat_vf_migration_file *migf;
> + int ret;
> +
> + migf = kzalloc(sizeof(*migf), GFP_KERNEL);
> + if (!migf)
> + return ERR_PTR(-ENOMEM);
> +
> + migf->filp = anon_inode_getfile("qat_vf_mig", &qat_vf_resume_fops,
> + migf, O_WRONLY);
> + ret = PTR_ERR_OR_ZERO(migf->filp);
> + if (ret) {
> + kfree(migf);
> + return ERR_PTR(ret);
> + }
> + stream_open(migf->filp->f_inode, migf->filp);
> + mutex_init(&migf->lock);
> +
> + return migf;
> +}
> +
> +static int qat_vf_load_state(struct qat_vf_core_device *qat_vdev,
> + struct qat_vf_migration_file *migf)
> +{
> + struct qat_vf_mig_data *mig_data = &migf->mig_data;
> +
> + return qat_vfmig_load_state(qat_vdev->parent, qat_vdev->vf_id,
> + mig_data->state,
> + sizeof(mig_data->state));
> +}
> +
> +static int qat_vf_load_device_data(struct qat_vf_core_device *qat_vdev)
> +{
> + return qat_vf_load_state(qat_vdev, qat_vdev->resuming_migf);
> +}
> +
> +static int qat_vf_start_device(struct qat_vf_core_device *qat_vdev)
> +{
> + return qat_vfmig_resume_device(qat_vdev->parent, qat_vdev->vf_id);
> +}
> +
> +static struct file *qat_vf_pci_step_device_state(struct qat_vf_core_device *qat_vdev, u32 new)
> +{
> + u32 cur = qat_vdev->mig_state;
> + int ret;
> +
> + if (cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_STOP) {
> + ret = qat_vf_stop_device(qat_vdev);
> + if (ret)
> + return ERR_PTR(ret);
> + return NULL;
> + }
> +
> + if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_STOP_COPY) {
> + struct qat_vf_migration_file *migf;
> +
> + migf = qat_vf_save_device_data(qat_vdev);
> + if (IS_ERR(migf))
> + return ERR_CAST(migf);
> + get_file(migf->filp);
> + qat_vdev->saving_migf = migf;
> + return migf->filp;
> + }
> +
> + if (cur == VFIO_DEVICE_STATE_STOP_COPY && new == VFIO_DEVICE_STATE_STOP) {
> + qat_vf_disable_fds(qat_vdev);
> + return NULL;
> + }
> +
> + if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RESUMING) {
> + struct qat_vf_migration_file *migf;
> +
> + migf = qat_vf_resume_device_data(qat_vdev);
> + if (IS_ERR(migf))
> + return ERR_CAST(migf);
> + get_file(migf->filp);
> + qat_vdev->resuming_migf = migf;
> + return migf->filp;
> + }
> +
> + if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) {
> + ret = qat_vf_load_device_data(qat_vdev);
> + if (ret)
> + return ERR_PTR(ret);
> +
> + qat_vf_disable_fds(qat_vdev);
> + return NULL;
> + }
> +
> + if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING) {
> + qat_vf_start_device(qat_vdev);
> + return NULL;
> + }
> +
> + /* vfio_mig_get_next_state() does not use arcs other than the above */
> + WARN_ON(true);
> + return ERR_PTR(-EINVAL);
> +}
> +
> +static struct file *qat_vf_pci_set_device_state(struct vfio_device *vdev,
> + enum vfio_device_mig_state new_state)
> +{
> + struct qat_vf_core_device *qat_vdev = container_of(vdev,
> + struct qat_vf_core_device, core_device.vdev);
> + enum vfio_device_mig_state next_state;
> + struct file *res = NULL;
> + int ret;
> +
> + mutex_lock(&qat_vdev->state_mutex);
> + while (new_state != qat_vdev->mig_state) {
> + ret = vfio_mig_get_next_state(vdev, qat_vdev->mig_state,
> + new_state, &next_state);
> + if (ret) {
> + res = ERR_PTR(-EINVAL);
> + break;
> + }
> +
> + res = qat_vf_pci_step_device_state(qat_vdev, next_state);
> + if (IS_ERR(res))
> + break;
> + qat_vdev->mig_state = next_state;
> + if (WARN_ON(res && new_state != qat_vdev->mig_state)) {
> + fput(res);
> + res = ERR_PTR(-EINVAL);
> + break;
> + }
> + }
> + mutex_unlock(&qat_vdev->state_mutex);
> +
> + return res;
> +}
> +
> +static int qat_vf_pci_get_device_state(struct vfio_device *vdev,
> + enum vfio_device_mig_state *curr_state)
> +{
> + struct qat_vf_core_device *qat_vdev = container_of(vdev,
> + struct qat_vf_core_device, core_device.vdev);
> +
> + mutex_lock(&qat_vdev->state_mutex);
> + *curr_state = qat_vdev->mig_state;
> + mutex_unlock(&qat_vdev->state_mutex);
> +
> + return 0;
> +}
> +
> +static int qat_vf_pci_get_data_size(struct vfio_device *vdev,
> + unsigned long *stop_copy_length)
> +{
> + *stop_copy_length = sizeof(struct qat_vf_mig_data);
> + return 0;
> +}
> +
> +static const struct vfio_migration_ops qat_vf_pci_mig_ops = {
> + .migration_set_state = qat_vf_pci_set_device_state,
> + .migration_get_state = qat_vf_pci_get_device_state,
> + .migration_get_data_size = qat_vf_pci_get_data_size,
> +};
> +
> +static int qat_vf_pci_init_dev(struct vfio_device *core_vdev)
> +{
> + struct qat_vf_core_device *qat_vdev = container_of(core_vdev,
> + struct qat_vf_core_device, core_device.vdev);
> +
> + mutex_init(&qat_vdev->state_mutex);
> +
> + core_vdev->migration_flags = VFIO_MIGRATION_STOP_COPY;
> + core_vdev->mig_ops = &qat_vf_pci_mig_ops;
> +
> + return vfio_pci_core_init_dev(core_vdev);
> +}
> +
> +static const struct vfio_device_ops qat_vf_pci_ops = {
> + .name = "qat-vf-vfio-pci",
> + .init = qat_vf_pci_init_dev,
> + .release = vfio_pci_core_release_dev,
> + .open_device = qat_vf_pci_open_device,
> + .close_device = qat_vf_pci_close_device,
> + .ioctl = vfio_pci_core_ioctl,
> + .read = vfio_pci_core_read,
> + .write = vfio_pci_core_write,
> + .mmap = vfio_pci_core_mmap,
> + .request = vfio_pci_core_request,
> + .match = vfio_pci_core_match,
> + .bind_iommufd = vfio_iommufd_physical_bind,
> + .unbind_iommufd = vfio_iommufd_physical_unbind,
> + .attach_ioas = vfio_iommufd_physical_attach_ioas,
> +};
> +
> +static int
> +qat_vf_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
> +{
> + struct device *dev = &pdev->dev;
> + struct qat_vf_core_device *qat_vdev;
> + int ret;
> +
> + qat_vdev = vfio_alloc_device(qat_vf_core_device, core_device.vdev, dev, &qat_vf_pci_ops);
> + if (IS_ERR(qat_vdev))
> + return PTR_ERR(qat_vdev);
> +
> + qat_vdev->vf_id = pci_iov_vf_id(pdev);
> + qat_vdev->parent = pdev->physfn;
> + if (!qat_vdev->parent || qat_vdev->vf_id < 0)
> + return -EINVAL;
> +
> + pci_set_drvdata(pdev, &qat_vdev->core_device);
> + ret = vfio_pci_core_register_device(&qat_vdev->core_device);
> + if (ret)
> + goto out_put_device;
> +
> + return 0;
> +
> +out_put_device:
> + vfio_put_device(&qat_vdev->core_device.vdev);
> + return ret;
> +}
> +
> +static struct qat_vf_core_device *qat_vf_drvdata(struct pci_dev *pdev)
> +{
> + struct vfio_pci_core_device *core_device = pci_get_drvdata(pdev);
> +
> + return container_of(core_device, struct qat_vf_core_device, core_device);
> +}
> +
> +static void qat_vf_vfio_pci_remove(struct pci_dev *pdev)
> +{
> + struct qat_vf_core_device *qat_vdev = qat_vf_drvdata(pdev);
> +
> + vfio_pci_core_unregister_device(&qat_vdev->core_device);
> + vfio_put_device(&qat_vdev->core_device.vdev);
> +}
> +
> +static const struct pci_device_id qat_vf_vfio_pci_table[] = {
> + /* Intel QAT GEN4 4xxx VF device */
> + { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_INTEL, 0x4941) },

Should this driver depend on CRYPTO_DEV_QAT_4XXX if that's the only
supported PF driver?
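
If so, the dependency shown above would tighten to, e.g.:

	depends on CRYPTO_DEV_QAT_4XXX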

> + {}
> +};
> +MODULE_DEVICE_TABLE(pci, qat_vf_vfio_pci_table);
> +
> +static struct pci_driver qat_vf_vfio_pci_driver = {
> + .name = "qat_vfio_pci",
> + .id_table = qat_vf_vfio_pci_table,
> + .probe = qat_vf_vfio_pci_probe,
> + .remove = qat_vf_vfio_pci_remove,
> + .driver_managed_dma = true,
> +};
> +module_pci_driver(qat_vf_vfio_pci_driver);
> +
> +MODULE_LICENSE("GPL");
> +MODULE_AUTHOR("Intel Corporation");
> +MODULE_DESCRIPTION("QAT VFIO PCI - VFIO PCI driver with live migration support for Intel(R) QAT device family");

Or at least one version of the QAT device family ;) Thanks,

Alex


2023-08-04 08:30:20

by Tian, Kevin

[permalink] [raw]
Subject: RE: [RFC 4/5] crypto: qat - implement interface for live migration

> From: Xin Zeng <[email protected]>
> Sent: Friday, June 30, 2023 9:13 PM
>
> Add logic to implement interface for live migration for QAT GEN4 Virtual
> Functions (VFs).
> This introduces a migration data manager which is used to hold the
> device state during migration.
>
> The VF state is organized in a section hierarchy, as reported below:
> preamble | general state section | leaf state
>          | MISC bar state section| leaf state
>          | ETR bar state section | bank0 state section | leaf state
>          |                       | bank1 state section | leaf state
>          |                       | bank2 state section | leaf state
>          |                       | bank3 state section | leaf state
>
> Co-developed-by: Siming Wan <[email protected]>
> Signed-off-by: Siming Wan <[email protected]>
> Signed-off-by: Xin Zeng <[email protected]>

This is a big patch. It needs a Reviewed-by from the qat/crypto maintainers.
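
As a mental model, a self-describing section format along these lines
would match the hierarchy described above (a sketch only; the field
names here are hypothetical, and the real definitions live in
adf_mstate_mgr.h):

struct adf_mstate_sect {
	char id[8];	/* section identifier */
	u32 size;	/* payload size in bytes */
	u32 sub_sects;	/* number of nested sections, 0 for a leaf */
	u8 state[];	/* leaf state bytes, or nested sections */
};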

2023-08-04 08:41:34

by Tian, Kevin

[permalink] [raw]
Subject: RE: [RFC 1/5] crypto: qat - add bank save/restore and RP drain

> From: Xin Zeng <[email protected]>
> Sent: Friday, June 30, 2023 9:13 PM
> ---
> .../intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 5 +-
> .../intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c | 2 +-
> .../qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c | 2 +-
> .../intel/qat/qat_c62x/adf_c62x_hw_data.c | 2 +-
> .../intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c | 2 +-
> .../intel/qat/qat_common/adf_accel_devices.h | 60 ++-
> .../intel/qat/qat_common/adf_gen2_hw_data.c | 17 +-
> .../intel/qat/qat_common/adf_gen2_hw_data.h | 10 +-
> .../intel/qat/qat_common/adf_gen4_hw_data.c | 362 +++++++++++++++++-
> .../intel/qat/qat_common/adf_gen4_hw_data.h | 131 ++++++-
> .../intel/qat/qat_common/adf_transport.c | 11 +-
> .../crypto/intel/qat/qat_common/adf_vf_isr.c | 2 +-
> .../qat/qat_dh895xcc/adf_dh895xcc_hw_data.c | 2 +-
> .../qat_dh895xccvf/adf_dh895xccvf_hw_data.c | 2 +-
> 14 files changed, 584 insertions(+), 26 deletions(-)

This could be split into three patches.

The first moves from hw_data->csr_ops to hw_data->csr_info, applied to
all qat drivers.

The second adds the new csr_ops.

The last then covers bank save/restore.

> +
> +#define ADF_RP_INT_SRC_SEL_F_RISE_MASK BIT(2)
> +#define ADF_RP_INT_SRC_SEL_F_FALL_MASK GENMASK(2, 0)
> +static int gen4_bank_state_restore(void __iomem *csr, u32 bank_number,
> + struct bank_state *state, u32 num_rings,
> + int tx_rx_gap)
> +{

Restore is the trickiest part. It's worth adding some comments to help
understand the flow, e.g. what the tx/rx layout is, why there are
multiple ring tail writes, etc.
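
For instance, the eventual comment might read something like the
following. This is only a sketch inferred from the code below; the
hardware specifics need confirmation from the authors:

	/*
	 * Rings in a bank are organized as num_rings/2 tx/rx pairs:
	 * tx ring i sits at index i * (tx_rx_gap + 1) and its rx
	 * counterpart tx_rx_gap slots later.  Tails may be written
	 * twice: once while re-arming the rise/fall interrupt source
	 * in INT_SRCSEL, and once to commit the saved value, so that
	 * a ring restored as non-empty (ringestat) or full (ringfstat)
	 * still raises its pending notification on the destination.
	 */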

> + u32 val, tmp_val, i;
> +
> + write_csr_ring_srv_arb_en(csr, bank_number, 0);
> +
> + for (i = 0; i < num_rings; i++)
> + write_csr_ring_base(csr, bank_number, i, state->rings[i].base);
> +
> + for (i = 0; i < num_rings; i++)
> + write_csr_ring_config(csr, bank_number, i, state->rings[i].config);
> +
> + for (i = 0; i < num_rings / 2; i++) {
> + int tx = i * (tx_rx_gap + 1);
> + int rx = tx + tx_rx_gap;
> + u32 tx_idx = tx / ADF_RINGS_PER_INT_SRCSEL;
> + u32 rx_idx = rx / ADF_RINGS_PER_INT_SRCSEL;
> +
> + write_csr_ring_head(csr, bank_number, tx, state->rings[tx].head);
> +
> + write_csr_ring_tail(csr, bank_number, tx, state->rings[tx].tail);
> +
> + if (state->ringestat & (BIT(tx))) {
> + val = read_csr_int_srcsel(csr, bank_number, tx_idx);
> + val |= (ADF_RP_INT_SRC_SEL_F_RISE_MASK << (8 * tx));
> + write_csr_int_srcsel(csr, bank_number, tx_idx, val);
> + write_csr_ring_head(csr, bank_number, tx, state->rings[tx].head);
> + }
> +
> + write_csr_ring_tail(csr, bank_number, rx, state->rings[rx].tail);
> +
> + val = read_csr_int_srcsel(csr, bank_number, rx_idx);
> + val |= (ADF_RP_INT_SRC_SEL_F_RISE_MASK << (8 * rx));
> + write_csr_int_srcsel(csr, bank_number, rx_idx, val);
> +
> + write_csr_ring_head(csr, bank_number, rx, state->rings[rx].head);
> +
> + val = read_csr_int_srcsel(csr, bank_number, rx_idx);
> + val |= (ADF_RP_INT_SRC_SEL_F_FALL_MASK << (8 * rx));
> + write_csr_int_srcsel(csr, bank_number, rx_idx, val);
> +
> + if (state->ringfstat & BIT(rx))
> + write_csr_ring_tail(csr, bank_number, rx, state->rings[rx].tail);
> + }
> +
> + write_csr_int_flag_and_col(csr, bank_number, state->iaintflagandcolen);
> + write_csr_int_en(csr, bank_number, state->iaintflagen);
> + write_csr_int_col_en(csr, bank_number, state->iaintcolen);
> + write_csr_int_srcsel(csr, bank_number, 0, state->iaintflagsrcsel0);
> + write_csr_exp_int_en(csr, bank_number, state->ringexpintenable);
> + write_csr_int_col_ctl(csr, bank_number, state->iaintcolctl);
> +
> + /* Check that all ring statuses are restored into a saved state. */
> + tmp_val = read_csr_stat(csr, bank_number);
> + val = state->ringstat0;
> + if (tmp_val != val) {
> + pr_err("Fail to restore ringstat register. Expected 0x%x, but actual is 0x%x\n",
> + tmp_val, val);
> + return -EINVAL;
> + }
> +
> + tmp_val = read_csr_e_stat(csr, bank_number);
> + val = state->ringestat;
> + if (tmp_val != val) {
> + pr_err("Fail to restore ringestat register. Expected 0x%x, but actual is 0x%x\n",
> + tmp_val, val);
> + return -EINVAL;
> + }
> +
> + tmp_val = read_csr_ne_stat(csr, bank_number);
> + val = state->ringnestat;
> + if (tmp_val != val) {
> + pr_err("Fail to restore ringnestat register. Expected 0x%x, but actual is 0x%x\n",
> + tmp_val, val);
> + return -EINVAL;
> + }
> +
> + tmp_val = read_csr_nf_stat(csr, bank_number);
> + val = state->ringnfstat;
> + if (tmp_val != val) {
> + pr_err("Fail to restore ringnfstat register. Expected 0x%x, but actual is 0x%x\n",
> + tmp_val, val);
> + return -EINVAL;
> + }
> +
> + tmp_val = read_csr_f_stat(csr, bank_number);
> + val = state->ringfstat;
> + if (tmp_val != val) {
> + pr_err("Fail to restore ringfstat register. Expected 0x%x, but actual is 0x%x\n",
> + tmp_val, val);
> + return -EINVAL;
> + }
> +
> + tmp_val = read_csr_c_stat(csr, bank_number);
> + val = state->ringcstat0;
> + if (tmp_val != val) {
> + pr_err("Fail to restore ringcstat register. Expected 0x%x, but actual is 0x%x\n",
> + tmp_val, val);
> + return -EINVAL;
> + }
> +
> + tmp_val = read_csr_exp_stat(csr, bank_number);
> + val = state->ringexpstat;
> + if (tmp_val && !val) {
> + pr_err("Bank was restored with exception: 0x%x\n", val);
> + return -EINVAL;
> + }

The above checks could be wrapped in macros.


2023-08-04 08:47:06

by Tian, Kevin

[permalink] [raw]
Subject: RE: [RFC 5/5] vfio/qat: Add vfio_pci driver for Intel QAT VF devices

> From: Xin Zeng <[email protected]>
> Sent: Friday, June 30, 2023 9:13 PM
>
> Add vfio pci driver for Intel QAT VF devices.
>
> This driver uses vfio_pci_core to register to the VFIO subsystem. It
> acts as a vfio agent and interacts with the QAT PF driver to implement
> VF live migration.
>

This lacks P2P support and an .err_handler.
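
For reference, other vfio-pci variant drivers wire up AER roughly as
below. This is a sketch modeled on the existing variant drivers; the
reset_done hook name and its body are hypothetical for QAT, while
vfio_pci_core_aer_err_detected is the helper the core exports:

static void qat_vf_pci_aer_reset_done(struct pci_dev *pdev)
{
	/* hypothetical: re-sync migration state after an AER reset */
}

static const struct pci_error_handlers qat_vf_err_handlers = {
	.reset_done = qat_vf_pci_aer_reset_done,
	.error_detected = vfio_pci_core_aer_err_detected,
};

with .err_handler = &qat_vf_err_handlers set in the pci_driver.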


2023-08-04 08:57:01

by Tian, Kevin

[permalink] [raw]
Subject: RE: [RFC 2/5] crypto: qat - add interface for live migration

> From: Xin Zeng <[email protected]>
> Sent: Friday, June 30, 2023 9:13 PM
> +
> +int qat_vfmig_suspend_device(struct pci_dev *pdev, u32 vf_nr)
> +{
> + struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
> +
> + if (!accel_dev) {
> + dev_err(&pdev->dev, "Failed to find accel_dev\n");
> + return -ENODEV;
> + }
> +
> + if (WARN_ON(!GET_VFMIG_OPS(accel_dev)->suspend_device))
> + return -EINVAL;

This and the other WARNs should be done once, at device registration
time, instead of being triggerable by every user ioctl.
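
That is, something like the following at the point where the PF driver
registers its migration ops. A sketch only: every name other than
GET_VFMIG_OPS and suspend_device, which appear in the quoted code, is
an assumption:

	struct qat_migdev_ops *ops = GET_VFMIG_OPS(accel_dev);

	if (!ops || !ops->suspend_device || !ops->resume_device ||
	    !ops->save_state || !ops->load_state)
		return -EINVAL;

so that the per-ioctl paths can then call the ops unconditionally.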


2023-08-11 09:17:54

by Wan, Siming

[permalink] [raw]
Subject: RE: [RFC 1/5] crypto: qat - add bank save/restore and RP drain

-----Original Message-----
From: Tian, Kevin <[email protected]>
Sent: Friday, August 4, 2023 3:51 PM
To: Zeng, Xin <[email protected]>; [email protected]; [email protected]
Cc: Cabiddu, Giovanni <[email protected]>; [email protected]; Wan, Siming <[email protected]>; Pankratov, Svyatoslav <[email protected]>; Zeng, Xin <[email protected]>
Subject: RE: [RFC 1/5] crypto: qat - add bank save/restore and RP drain

> From: Xin Zeng <[email protected]>
> Sent: Friday, June 30, 2023 9:13 PM
> ---
> .../intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 5 +-
> .../intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c | 2 +-
> .../qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c | 2 +-
> .../intel/qat/qat_c62x/adf_c62x_hw_data.c | 2 +-
> .../intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c | 2 +-
> .../intel/qat/qat_common/adf_accel_devices.h | 60 ++-
> .../intel/qat/qat_common/adf_gen2_hw_data.c | 17 +-
> .../intel/qat/qat_common/adf_gen2_hw_data.h | 10 +-
> .../intel/qat/qat_common/adf_gen4_hw_data.c | 362 +++++++++++++++++-
> .../intel/qat/qat_common/adf_gen4_hw_data.h | 131 ++++++-
> .../intel/qat/qat_common/adf_transport.c | 11 +-
> .../crypto/intel/qat/qat_common/adf_vf_isr.c | 2 +-
> .../qat/qat_dh895xcc/adf_dh895xcc_hw_data.c | 2 +-
> .../qat_dh895xccvf/adf_dh895xccvf_hw_data.c | 2 +-
> 14 files changed, 584 insertions(+), 26 deletions(-)

this could be split into 3 patches.

one is moving from hw_data->csr_ops to hw_data->csr_info. apply to all qat drivers.

the 2nd is adding new csr_ops.

the last one then covers bank save/restore.
->Will split it, thanks.

> +
> +#define ADF_RP_INT_SRC_SEL_F_RISE_MASK BIT(2)
> +#define ADF_RP_INT_SRC_SEL_F_FALL_MASK GENMASK(2, 0)
> +static int gen4_bank_state_restore(void __iomem *csr, u32 bank_number,
> + struct bank_state *state, u32 num_rings,
> + int tx_rx_gap)
> +{

Restore is the trickiest part. It's worth adding some comments to help understand the flow, e.g. what the tx/rx layout is, why there are multiple ring tail writes, etc.
->Will add some comments to explain.

> [... restore body and ring status checks snipped; quoted in full above ...]

The above checks could be wrapped in macros.
->If we define the macro as below, checkpatch.pl emits "WARNING: Macros with flow control statements should be avoided". Do we still need to make this change?

#define CHECK_STAT(expect, actual, csr_name) \
do { \
	u32 _expect = expect; \
	u32 _actual = actual; \
	char *_csr_name = csr_name; \
	if (_expect != _actual) { \
		pr_err("Fail to restore %s register. Expected 0x%x, but actual is 0x%x\n", \
		       _csr_name, _expect, _actual); \
		return -EINVAL; \
	} \
} while (0)
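
One way to keep the deduplication without the checkpatch warning is a
small helper function instead of a macro. A sketch, using only calls
visible in the quoted code; note that the quoted pr_err calls appear to
pass the expected and actual values in swapped order relative to the
format string, which the helper below also straightens out:

static int gen4_check_ring_stat(u32 actual, u32 expect, const char *csr_name)
{
	if (actual != expect) {
		pr_err("Fail to restore %s register. Expected 0x%x, but actual is 0x%x\n",
		       csr_name, expect, actual);
		return -EINVAL;
	}
	return 0;
}

Call sites then become, e.g.:

	ret = gen4_check_ring_stat(read_csr_stat(csr, bank_number),
				   state->ringstat0, "ringstat");
	if (ret)
		return ret;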