2020-09-15 06:59:04

by Joakim Zhang

Subject: [PATCH] media: rc: gpio-ir-recv: add QoS support for cpuidle system

GPIO IR receive relies heavily on interrupt response; uneven interrupt
latency leads to incorrect timing, so the decoder fails to decode the
signal. The issue is particularly acute on systems that support
cpuidle, where dynamically disabling and enabling cpuidle solves the
problem to a great extent.

However, there is a downside to this approach: the measurement of the
header of the first frame may be incorrect. Testing on the i.MX8M series
shows that, with cpuidle enabled, interrupt latency can be around 500us.

With this patch:
1. there is no side effect on non-cpuidle systems.
2. latency is still much longer for the first GPIO interrupt on cpuidle
systems, so the first frame may not be decoded. Since a remote control
generally transmits multiple frames per key press, we can sacrifice the
first frame.

Signed-off-by: Joakim Zhang <[email protected]>
---
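Note for reviewers: below is a minimal, self-contained sketch of the
runtime PM autosuspend plus CPU latency QoS pattern that the diff
further down applies. It is not part of the patch; the foo_* names are
hypothetical and only mirror the structure of the real handler and
callbacks.

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>

struct foo_dev {
	struct device *dev;
	struct pm_qos_request qos;
};

/*
 * Runtime resume: request 0us CPU wakeup latency so deep idle states
 * are avoided while IR edges are being captured.
 */
static int foo_runtime_resume(struct device *dev)
{
	struct foo_dev *fd = dev_get_drvdata(dev);

	cpu_latency_qos_add_request(&fd->qos, 0);
	return 0;
}

/* Runtime suspend: drop the request so cpuidle behaves normally again. */
static int foo_runtime_suspend(struct device *dev)
{
	struct foo_dev *fd = dev_get_drvdata(dev);

	cpu_latency_qos_remove_request(&fd->qos);
	return 0;
}

static irqreturn_t foo_irq(int irq, void *dev_id)
{
	struct foo_dev *fd = dev_id;

	/*
	 * Asynchronous get is safe in hard IRQ context; the resume
	 * callback runs later from the PM workqueue.
	 */
	if (pm_runtime_get(fd->dev) < 0)
		return IRQ_NONE;

	/* ... record the edge, e.g. with ir_raw_event_store_edge() ... */

	/*
	 * Arm autosuspend; the QoS request is dropped only after the
	 * autosuspend delay expires with no further interrupts.
	 */
	pm_runtime_mark_last_busy(fd->dev);
	pm_runtime_put_autosuspend(fd->dev);

	return IRQ_HANDLED;
}

With this arrangement the QoS request exists only while the device is
runtime-active, so cpuidle is blocked just for the duration of an IR
burst plus the autosuspend delay; on systems without cpuidle the QoS
request has no practical effect.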
drivers/media/rc/gpio-ir-recv.c | 49 ++++++++++++++++++++++++++++++++-
1 file changed, 48 insertions(+), 1 deletion(-)

diff --git a/drivers/media/rc/gpio-ir-recv.c b/drivers/media/rc/gpio-ir-recv.c
index a20413008c3c..42c942ce98cd 100644
--- a/drivers/media/rc/gpio-ir-recv.c
+++ b/drivers/media/rc/gpio-ir-recv.c
@@ -11,6 +11,8 @@
 #include <linux/of.h>
 #include <linux/of_gpio.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_qos.h>
 #include <linux/irq.h>
 #include <media/rc-core.h>

@@ -20,17 +22,36 @@ struct gpio_rc_dev {
 	struct rc_dev *rcdev;
 	struct gpio_desc *gpiod;
 	int irq;
+	struct pm_qos_request qos;
 };
 
 static irqreturn_t gpio_ir_recv_irq(int irq, void *dev_id)
 {
-	int val;
+	int ret, val;
 	struct gpio_rc_dev *gpio_dev = dev_id;
+	struct device *dev = gpio_dev->rcdev->dev.parent;
+
+	/*
+	 * For cpuidle systems:
+	 * Responding to the interrupt takes extra latency when the CPU is
+	 * idle. Invoke an asynchronous pm runtime get from interrupt
+	 * context; this may add a delay of about a millisecond before the
+	 * resume callback, which disables cpuidle, runs.
+	 *
+	 * Both the interrupt response latency and the delay introduced by
+	 * the async API can cause the first frame to fail to decode.
+	 */
+	ret = pm_runtime_get(dev);
+	if (ret < 0)
+		return IRQ_NONE;
 
 	val = gpiod_get_value(gpio_dev->gpiod);
 	if (val >= 0)
 		ir_raw_event_store_edge(gpio_dev->rcdev, val == 1);
 
+	pm_runtime_mark_last_busy(dev);
+	pm_runtime_put_autosuspend(dev);
+
 	return IRQ_HANDLED;
 }

@@ -92,6 +113,12 @@ static int gpio_ir_recv_probe(struct platform_device *pdev)

 	platform_set_drvdata(pdev, gpio_dev);
 
+
+	pm_runtime_set_autosuspend_delay(dev, (rcdev->timeout / 1000 / 1000));
+	pm_runtime_use_autosuspend(dev);
+	pm_runtime_set_suspended(dev);
+	pm_runtime_enable(dev);
+
 	return devm_request_irq(dev, gpio_dev->irq, gpio_ir_recv_irq,
 				IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
 				"gpio-ir-recv-irq", gpio_dev);
@@ -122,9 +149,29 @@ static int gpio_ir_recv_resume(struct device *dev)
 	return 0;
 }
 
+static int gpio_ir_recv_runtime_suspend(struct device *dev)
+{
+	struct gpio_rc_dev *gpio_dev = dev_get_drvdata(dev);
+
+	cpu_latency_qos_remove_request(&gpio_dev->qos);
+
+	return 0;
+}
+
+static int gpio_ir_recv_runtime_resume(struct device *dev)
+{
+	struct gpio_rc_dev *gpio_dev = dev_get_drvdata(dev);
+
+	cpu_latency_qos_add_request(&gpio_dev->qos, 0);
+
+	return 0;
+}
+
 static const struct dev_pm_ops gpio_ir_recv_pm_ops = {
 	.suspend = gpio_ir_recv_suspend,
 	.resume = gpio_ir_recv_resume,
+	.runtime_suspend = gpio_ir_recv_runtime_suspend,
+	.runtime_resume = gpio_ir_recv_runtime_resume,
 };
 #endif

--
2.17.1