From: Srinivasa Rao Mandadapu <[email protected]>
Update the regmap configuration to support headset playback, headset
capture and DMIC capture through the codec DMA interface.

Signed-off-by: Srinivasa Rao Mandadapu <[email protected]>
Co-developed-by: Venkata Prasad Potturu <[email protected]>
Signed-off-by: Venkata Prasad Potturu <[email protected]>
---
sound/soc/qcom/lpass-cpu.c | 185 +++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 185 insertions(+)
diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
index 81818f0..a5a46bc 100644
--- a/sound/soc/qcom/lpass-cpu.c
+++ b/sound/soc/qcom/lpass-cpu.c
@@ -28,6 +28,8 @@
#define LPASS_CPU_I2S_SD2_3_MASK GENMASK(3, 2)
#define LPASS_CPU_I2S_SD0_1_2_MASK GENMASK(2, 0)
#define LPASS_CPU_I2S_SD0_1_2_3_MASK GENMASK(3, 0)
+#define LPASS_REG_READ 1
+#define LPASS_REG_WRITE 0
/*
* Channel maps for Quad channel playbacks on MI2S Secondary
@@ -798,6 +800,189 @@ static struct regmap_config lpass_hdmi_regmap_config = {
.cache_type = REGCACHE_FLAT,
};
+static bool __lpass_rxtx_regmap_accessible(struct device *dev, unsigned int reg, bool rw)
+{
+ struct lpass_data *drvdata = dev_get_drvdata(dev);
+ struct lpass_variant *v = drvdata->variant;
+ int i;
+
+ for (i = 0; i < v->rxtx_irq_ports; ++i) {
+ if (reg == LPAIF_RXTX_IRQCLEAR_REG(v, i))
+ return true;
+ if (reg == LPAIF_RXTX_IRQEN_REG(v, i))
+ return true;
+ if (reg == LPAIF_RXTX_IRQSTAT_REG(v, i))
+ return true;
+ }
+
+ for (i = 0; i < v->rxtx_rdma_channels; ++i) {
+ if (reg == LPAIF_CDC_RXTX_RDMACTL_REG(v, i, LPASS_CDC_DMA_RX0))
+ return true;
+ if (reg == LPAIF_CDC_RXTX_RDMABASE_REG(v, i, LPASS_CDC_DMA_RX0))
+ return true;
+ if (reg == LPAIF_CDC_RXTX_RDMABUFF_REG(v, i, LPASS_CDC_DMA_RX0))
+ return true;
+ if (rw == LPASS_REG_READ) {
+ if (reg == LPAIF_CDC_RXTX_RDMACURR_REG(v, i, LPASS_CDC_DMA_RX0))
+ return true;
+ }
+ if (reg == LPAIF_CDC_RXTX_RDMAPER_REG(v, i, LPASS_CDC_DMA_RX0))
+ return true;
+ if (reg == LPAIF_CDC_RXTX_RDMA_INTF_REG(v, i, LPASS_CDC_DMA_RX0))
+ return true;
+ }
+
+ for (i = 0; i < v->rxtx_wrdma_channels; ++i) {
+ if (reg == LPAIF_CDC_RXTX_WRDMACTL_REG(v, i + v->rxtx_wrdma_channel_start,
+ LPASS_CDC_DMA_TX3))
+ return true;
+ if (reg == LPAIF_CDC_RXTX_WRDMABASE_REG(v, i + v->rxtx_wrdma_channel_start,
+ LPASS_CDC_DMA_TX3))
+ return true;
+ if (reg == LPAIF_CDC_RXTX_WRDMABUFF_REG(v, i + v->rxtx_wrdma_channel_start,
+ LPASS_CDC_DMA_TX3))
+ return true;
+ if (rw == LPASS_REG_READ) {
+ if (reg == LPAIF_CDC_RXTX_WRDMACURR_REG(v, i + v->rxtx_wrdma_channel_start,
+ LPASS_CDC_DMA_TX3))
+ return true;
+ }
+ if (reg == LPAIF_CDC_RXTX_WRDMAPER_REG(v, i + v->rxtx_wrdma_channel_start,
+ LPASS_CDC_DMA_TX3))
+ return true;
+ if (reg == LPAIF_CDC_RXTX_WRDMA_INTF_REG(v, i + v->rxtx_wrdma_channel_start,
+ LPASS_CDC_DMA_TX3))
+ return true;
+ }
+ return false;
+}
+
+static bool lpass_rxtx_regmap_writeable(struct device *dev, unsigned int reg)
+{
+ return __lpass_rxtx_regmap_accessible(dev, reg, LPASS_REG_WRITE);
+}
+
+static bool lpass_rxtx_regmap_readable(struct device *dev, unsigned int reg)
+{
+ return __lpass_rxtx_regmap_accessible(dev, reg, LPASS_REG_READ);
+}
+
+static bool lpass_rxtx_regmap_volatile(struct device *dev, unsigned int reg)
+{
+ struct lpass_data *drvdata = dev_get_drvdata(dev);
+ struct lpass_variant *v = drvdata->variant;
+ int i;
+
+ for (i = 0; i < v->rxtx_irq_ports; ++i) {
+ if (reg == LPAIF_RXTX_IRQCLEAR_REG(v, i))
+ return true;
+ if (reg == LPAIF_RXTX_IRQSTAT_REG(v, i))
+ return true;
+ }
+
+ for (i = 0; i < v->rxtx_rdma_channels; ++i)
+ if (reg == LPAIF_CDC_RXTX_RDMACURR_REG(v, i, LPASS_CDC_DMA_RX0))
+ return true;
+
+ for (i = 0; i < v->rxtx_wrdma_channels; ++i)
+ if (reg == LPAIF_CDC_RXTX_WRDMACURR_REG(v, i + v->rxtx_wrdma_channel_start,
+ LPASS_CDC_DMA_TX3))
+ return true;
+
+ return false;
+}
+
+static bool __lpass_va_regmap_accessible(struct device *dev, unsigned int reg, bool rw)
+{
+ struct lpass_data *drvdata = dev_get_drvdata(dev);
+ struct lpass_variant *v = drvdata->variant;
+ int i;
+
+ for (i = 0; i < v->va_irq_ports; ++i) {
+ if (reg == LPAIF_VA_IRQCLEAR_REG(v, i))
+ return true;
+ if (reg == LPAIF_VA_IRQEN_REG(v, i))
+ return true;
+ if (reg == LPAIF_VA_IRQSTAT_REG(v, i))
+ return true;
+ }
+
+ for (i = 0; i < v->va_wrdma_channels; ++i) {
+ if (reg == LPAIF_CDC_VA_WRDMACTL_REG(v, i + v->va_wrdma_channel_start,
+ LPASS_CDC_DMA_VA_TX0))
+ return true;
+ if (reg == LPAIF_CDC_VA_WRDMABASE_REG(v, i + v->va_wrdma_channel_start,
+ LPASS_CDC_DMA_VA_TX0))
+ return true;
+ if (reg == LPAIF_CDC_VA_WRDMABUFF_REG(v, i + v->va_wrdma_channel_start,
+ LPASS_CDC_DMA_VA_TX0))
+ return true;
+ if (rw == LPASS_REG_READ) {
+ if (reg == LPAIF_CDC_VA_WRDMACURR_REG(v, i + v->va_wrdma_channel_start,
+ LPASS_CDC_DMA_VA_TX0))
+ return true;
+ }
+ if (reg == LPAIF_CDC_VA_WRDMAPER_REG(v, i + v->va_wrdma_channel_start,
+ LPASS_CDC_DMA_VA_TX0))
+ return true;
+ if (reg == LPAIF_CDC_VA_WRDMA_INTF_REG(v, i + v->va_wrdma_channel_start,
+ LPASS_CDC_DMA_VA_TX0))
+ return true;
+ }
+ return false;
+}
+
+static bool lpass_va_regmap_writeable(struct device *dev, unsigned int reg)
+{
+ return __lpass_va_regmap_accessible(dev, reg, LPASS_REG_WRITE);
+}
+
+static bool lpass_va_regmap_readable(struct device *dev, unsigned int reg)
+{
+ return __lpass_va_regmap_accessible(dev, reg, LPASS_REG_READ);
+}
+
+static bool lpass_va_regmap_volatile(struct device *dev, unsigned int reg)
+{
+ struct lpass_data *drvdata = dev_get_drvdata(dev);
+ struct lpass_variant *v = drvdata->variant;
+ int i;
+
+ for (i = 0; i < v->va_irq_ports; ++i) {
+ if (reg == LPAIF_VA_IRQCLEAR_REG(v, i))
+ return true;
+ if (reg == LPAIF_VA_IRQSTAT_REG(v, i))
+ return true;
+ }
+
+ for (i = 0; i < v->va_wrdma_channels; ++i) {
+ if (reg == LPAIF_CDC_VA_WRDMACURR_REG(v, i + v->va_wrdma_channel_start,
+ LPASS_CDC_DMA_VA_TX0))
+ return true;
+ }
+
+ return false;
+}
+
+static struct regmap_config lpass_rxtx_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .writeable_reg = lpass_rxtx_regmap_writeable,
+ .readable_reg = lpass_rxtx_regmap_readable,
+ .volatile_reg = lpass_rxtx_regmap_volatile,
+ .cache_type = REGCACHE_FLAT,
+};
+
+static struct regmap_config lpass_va_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .writeable_reg = lpass_va_regmap_writeable,
+ .readable_reg = lpass_va_regmap_readable,
+ .volatile_reg = lpass_va_regmap_volatile,
+ .cache_type = REGCACHE_FLAT,
+};
+
static unsigned int of_lpass_cpu_parse_sd_lines(struct device *dev,
struct device_node *node,
const char *name)
--
Qualcomm India Private Limited, on behalf of Qualcomm Innovation Center, Inc.,
is a member of Code Aurora Forum, a Linux Foundation Collaborative Project.
On 02/12/2021 15:43, Srinivasa Rao Mandadapu wrote:
> From: Srinivasa Rao Mandadapu <[email protected]>
>
> Update the regmap configuration to support headset playback, headset
> capture and DMIC capture through the codec DMA interface.
>
> Signed-off-by: Srinivasa Rao Mandadapu <[email protected]>
> Co-developed-by: Venkata Prasad Potturu <[email protected]>
> Signed-off-by: Venkata Prasad Potturu <[email protected]>
LGTM,
Reviewed-by: Srinivas Kandagatla <[email protected]>
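
One note for anyone wiring this up on their platform: these configs only
describe which registers are readable, writeable and volatile; the RXTX and
VA LPAIF regions still have to be mapped and bound to them at probe time.
Below is a minimal sketch of that step, assuming the usual
devm_regmap_init_mmio() flow. The resource names ("lpass-rxtx-lpaif",
"lpass-va-lpaif") and the rxtx_lpaif_map/va_lpaif_map members of
struct lpass_data are assumptions for illustration, not something this
patch adds.

/* Sketch only: how lpass_rxtx_regmap_config and lpass_va_regmap_config
 * could be registered from lpass_cpu_platform_probe(). The resource
 * names and the *_lpaif_map members are assumed, not defined here.
 */
static int lpass_cdc_dma_regmap_init(struct platform_device *pdev)
{
	struct lpass_data *drvdata = platform_get_drvdata(pdev);
	struct lpass_variant *v = drvdata->variant;
	void __iomem *base;

	/* Map the RXTX LPAIF region and attach the new regmap config. */
	base = devm_platform_ioremap_resource_byname(pdev, "lpass-rxtx-lpaif");
	if (IS_ERR(base))
		return PTR_ERR(base);

	lpass_rxtx_regmap_config.max_register =
		LPAIF_CDC_RXTX_WRDMAPER_REG(v, v->rxtx_wrdma_channels +
					    v->rxtx_wrdma_channel_start,
					    LPASS_CDC_DMA_TX3);

	drvdata->rxtx_lpaif_map = devm_regmap_init_mmio(&pdev->dev, base,
							&lpass_rxtx_regmap_config);
	if (IS_ERR(drvdata->rxtx_lpaif_map))
		return PTR_ERR(drvdata->rxtx_lpaif_map);

	/* Same for the VA (DMIC) LPAIF region. */
	base = devm_platform_ioremap_resource_byname(pdev, "lpass-va-lpaif");
	if (IS_ERR(base))
		return PTR_ERR(base);

	lpass_va_regmap_config.max_register =
		LPAIF_CDC_VA_WRDMAPER_REG(v, v->va_wrdma_channels +
					  v->va_wrdma_channel_start,
					  LPASS_CDC_DMA_VA_TX0);

	drvdata->va_lpaif_map = devm_regmap_init_mmio(&pdev->dev, base,
						      &lpass_va_regmap_config);
	if (IS_ERR(drvdata->va_lpaif_map))
		return PTR_ERR(drvdata->va_lpaif_map);

	return 0;
}

Marking the RDMACURR/WRDMACURR and IRQSTAT registers volatile is what keeps
this safe with REGCACHE_FLAT: regmap will always go to the hardware for the
current DMA pointer and interrupt status instead of serving a cached value.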