2022-06-30 00:08:20

by Lino Sanfilippo

[permalink] [raw]
Subject: [PATCH v7 07/10] tpm, tpm_tis: Implement usage counter for locality

From: Lino Sanfilippo <[email protected]>

Implement a usage counter for the (default) locality used by the TPM TIS
driver:
Request the locality from the TPM if it has not been claimed yet, otherwise
only increment the counter. Also release the locality if the counter is 0
otherwise only decrement the counter. Ensure thread-safety by protecting
the counter with a mutex.

This allows the locality to be requested and released from a thread and the
interrupt handler at the same time, without the danger of them interfering
with each other.

In the course of this, also rename the amended functions to use the proper
prefix.

Signed-off-by: Lino Sanfilippo <[email protected]>
Tested-by: Michael Niewöhner <[email protected]>
---
drivers/char/tpm/tpm_tis_core.c | 75 ++++++++++++++++++++++-----------
drivers/char/tpm/tpm_tis_core.h | 2 +
2 files changed, 53 insertions(+), 24 deletions(-)

diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index bd4eeb0b2192..e50a2c78de9f 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -165,16 +165,27 @@ static bool check_locality(struct tpm_chip *chip, int l)
return false;
}

-static int release_locality(struct tpm_chip *chip, int l)
+static int tpm_tis_release_locality_locked(struct tpm_tis_data *priv, int l)
+{
+ tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_ACTIVE_LOCALITY);
+
+ return 0;
+}
+
+static int tpm_tis_release_locality(struct tpm_chip *chip, int l)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);

- tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_ACTIVE_LOCALITY);
+ mutex_lock(&priv->locality_count_mutex);
+ priv->locality_count--;
+ if (priv->locality_count == 0)
+ tpm_tis_release_locality_locked(priv, l);
+ mutex_unlock(&priv->locality_count_mutex);

return 0;
}

-static int request_locality(struct tpm_chip *chip, int l)
+static int tpm_tis_request_locality_locked(struct tpm_chip *chip, int l)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
unsigned long stop, timeout;
@@ -215,6 +226,20 @@ static int request_locality(struct tpm_chip *chip, int l)
return -1;
}

+static int tpm_tis_request_locality(struct tpm_chip *chip, int l)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ int ret = 0;
+
+ mutex_lock(&priv->locality_count_mutex);
+ if (priv->locality_count == 0)
+ ret = tpm_tis_request_locality_locked(chip, l);
+ if (!ret)
+ priv->locality_count++;
+ mutex_unlock(&priv->locality_count_mutex);
+ return ret;
+}
+
static u8 tpm_tis_status(struct tpm_chip *chip)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
@@ -668,7 +693,7 @@ static int probe_itpm(struct tpm_chip *chip)
if (vendor != TPM_VID_INTEL)
return 0;

- if (request_locality(chip, 0) != 0)
+ if (tpm_tis_request_locality(chip, 0) != 0)
return -EBUSY;

rc = tpm_tis_send_data(chip, cmd_getticks, len);
@@ -689,7 +714,7 @@ static int probe_itpm(struct tpm_chip *chip)

out:
tpm_tis_ready(chip);
- release_locality(chip, priv->locality);
+ tpm_tis_release_locality(chip, priv->locality);

return rc;
}
@@ -751,7 +776,7 @@ static int tpm_tis_gen_interrupt(struct tpm_chip *chip)
cap_t cap;
int ret;

- ret = request_locality(chip, 0);
+ ret = tpm_tis_request_locality(chip, 0);
if (ret < 0)
return ret;

@@ -760,7 +785,7 @@ static int tpm_tis_gen_interrupt(struct tpm_chip *chip)
else
ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0);

- release_locality(chip, 0);
+ tpm_tis_release_locality(chip, 0);

return ret;
}
@@ -785,33 +810,33 @@ static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask,
}
priv->irq = irq;

- rc = request_locality(chip, 0);
+ rc = tpm_tis_request_locality(chip, 0);
if (rc < 0)
return rc;

rc = tpm_tis_read8(priv, TPM_INT_VECTOR(priv->locality),
&original_int_vec);
if (rc < 0) {
- release_locality(chip, priv->locality);
+ tpm_tis_release_locality(chip, priv->locality);
return rc;
}

rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), irq);
if (rc < 0) {
- release_locality(chip, priv->locality);
+ tpm_tis_release_locality(chip, priv->locality);
return rc;
}

rc = tpm_tis_read32(priv, TPM_INT_STATUS(priv->locality), &int_status);
if (rc < 0) {
- release_locality(chip, priv->locality);
+ tpm_tis_release_locality(chip, priv->locality);
return rc;
}

/* Clear all existing */
rc = tpm_tis_write32(priv, TPM_INT_STATUS(priv->locality), int_status);
if (rc < 0) {
- release_locality(chip, priv->locality);
+ tpm_tis_release_locality(chip, priv->locality);
return rc;
}

@@ -819,11 +844,11 @@ static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask,
rc = tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality),
intmask | TPM_GLOBAL_INT_ENABLE);
if (rc < 0) {
- release_locality(chip, priv->locality);
+ tpm_tis_release_locality(chip, priv->locality);
return rc;
}

- release_locality(chip, priv->locality);
+ tpm_tis_release_locality(chip, priv->locality);
clear_bit(TPM_TIS_IRQ_TESTED, &priv->flags);

/* Generate an interrupt by having the core call through to
@@ -959,8 +984,8 @@ static const struct tpm_class_ops tpm_tis = {
.req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
.req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
.req_canceled = tpm_tis_req_canceled,
- .request_locality = request_locality,
- .relinquish_locality = release_locality,
+ .request_locality = tpm_tis_request_locality,
+ .relinquish_locality = tpm_tis_release_locality,
.clk_enable = tpm_tis_clkrun_enable,
};

@@ -994,6 +1019,8 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
priv->timeout_min = TPM_TIMEOUT_USECS_MIN;
priv->timeout_max = TPM_TIMEOUT_USECS_MAX;
priv->phy_ops = phy_ops;
+ priv->locality_count = 0;
+ mutex_init(&priv->locality_count_mutex);

dev_set_drvdata(&chip->dev, priv);

@@ -1071,14 +1098,14 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,

intmask &= ~TPM_GLOBAL_INT_ENABLE;

- rc = request_locality(chip, 0);
+ rc = tpm_tis_request_locality(chip, 0);
if (rc < 0) {
rc = -ENODEV;
goto out_err;
}

tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask);
- release_locality(chip, 0);
+ tpm_tis_release_locality(chip, 0);

rc = tpm_chip_start(chip);
if (rc)
@@ -1112,13 +1139,13 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
* proper timeouts for the driver.
*/

- rc = request_locality(chip, 0);
+ rc = tpm_tis_request_locality(chip, 0);
if (rc < 0)
goto out_err;

rc = tpm_get_timeouts(chip);

- release_locality(chip, 0);
+ tpm_tis_release_locality(chip, 0);

if (rc) {
dev_err(dev, "Could not get TPM timeouts and durations\n");
@@ -1138,11 +1165,11 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
dev_err(&chip->dev, FW_BUG
"TPM interrupt not working, polling instead\n");

- rc = request_locality(chip, 0);
+ rc = tpm_tis_request_locality(chip, 0);
if (rc < 0)
goto out_err;
disable_interrupts(chip);
- release_locality(chip, 0);
+ tpm_tis_release_locality(chip, 0);
}
}

@@ -1209,13 +1236,13 @@ int tpm_tis_resume(struct device *dev)
* an error code but for unknown reason it isn't handled.
*/
if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
- ret = request_locality(chip, 0);
+ ret = tpm_tis_request_locality(chip, 0);
if (ret < 0)
return ret;

tpm1_do_selftest(chip);

- release_locality(chip, 0);
+ tpm_tis_release_locality(chip, 0);
}

return 0;
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
index e005eb99480e..7c6c14707e31 100644
--- a/drivers/char/tpm/tpm_tis_core.h
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -91,6 +91,8 @@ enum tpm_tis_flags {

struct tpm_tis_data {
u16 manufacturer_id;
+ struct mutex locality_count_mutex;
+ unsigned int locality_count;
int locality;
int irq;
unsigned int int_mask;
--
2.25.1


2022-06-30 23:58:50

by Jarkko Sakkinen

[permalink] [raw]
Subject: Re: [PATCH v7 07/10] tpm, tpm_tis: Implement usage counter for locality

On Thu, Jun 30, 2022 at 01:26:50AM +0200, Lino Sanfilippo wrote:
> From: Lino Sanfilippo <[email protected]>
>
> Implement a usage counter for the (default) locality used by the TPM TIS
> driver:
> Request the locality from the TPM if it has not been claimed yet, otherwise
> only increment the counter. Also release the locality if the counter is 0
> otherwise only decrement the counter. Ensure thread-safety by protecting
> the counter with a mutex.
>
> This allows to request and release the locality from a thread and the
> interrupt handler at the same time without the danger to interfere with
> each other.
>
> By doing this refactor the names of the amended functions to use the proper
> prefix.
>
> Signed-off-by: Lino Sanfilippo <[email protected]>
> Tested-by: Michael Niewöhner <[email protected]>
> ---
> drivers/char/tpm/tpm_tis_core.c | 75 ++++++++++++++++++++++-----------
> drivers/char/tpm/tpm_tis_core.h | 2 +
> 2 files changed, 53 insertions(+), 24 deletions(-)
>
> diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
> index bd4eeb0b2192..e50a2c78de9f 100644
> --- a/drivers/char/tpm/tpm_tis_core.c
> +++ b/drivers/char/tpm/tpm_tis_core.c
> @@ -165,16 +165,27 @@ static bool check_locality(struct tpm_chip *chip, int l)
> return false;
> }
>
> -static int release_locality(struct tpm_chip *chip, int l)
> +static int tpm_tis_release_locality_locked(struct tpm_tis_data *priv, int l)
> +{
> + tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_ACTIVE_LOCALITY);
> +
> + return 0;
> +}
> +
> +static int tpm_tis_release_locality(struct tpm_chip *chip, int l)
> {
> struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
>
> - tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_ACTIVE_LOCALITY);
> + mutex_lock(&priv->locality_count_mutex);
> + priv->locality_count--;
> + if (priv->locality_count == 0)
> + tpm_tis_release_locality_locked(priv, l);
> + mutex_unlock(&priv->locality_count_mutex);
>
> return 0;
> }
>
> -static int request_locality(struct tpm_chip *chip, int l)
> +static int tpm_tis_request_locality_locked(struct tpm_chip *chip, int l)
> {
> struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
> unsigned long stop, timeout;
> @@ -215,6 +226,20 @@ static int request_locality(struct tpm_chip *chip, int l)
> return -1;
> }
>
> +static int tpm_tis_request_locality(struct tpm_chip *chip, int l)
> +{
> + struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
> + int ret = 0;
> +
> + mutex_lock(&priv->locality_count_mutex);
> + if (priv->locality_count == 0)
> + ret = tpm_tis_request_locality_locked(chip, l);
> + if (!ret)
> + priv->locality_count++;
> + mutex_unlock(&priv->locality_count_mutex);
> + return ret;
> +}
> +
> static u8 tpm_tis_status(struct tpm_chip *chip)
> {
> struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
> @@ -668,7 +693,7 @@ static int probe_itpm(struct tpm_chip *chip)
> if (vendor != TPM_VID_INTEL)
> return 0;
>
> - if (request_locality(chip, 0) != 0)
> + if (tpm_tis_request_locality(chip, 0) != 0)
> return -EBUSY;
>
> rc = tpm_tis_send_data(chip, cmd_getticks, len);
> @@ -689,7 +714,7 @@ static int probe_itpm(struct tpm_chip *chip)
>
> out:
> tpm_tis_ready(chip);
> - release_locality(chip, priv->locality);
> + tpm_tis_release_locality(chip, priv->locality);
>
> return rc;
> }
> @@ -751,7 +776,7 @@ static int tpm_tis_gen_interrupt(struct tpm_chip *chip)
> cap_t cap;
> int ret;
>
> - ret = request_locality(chip, 0);
> + ret = tpm_tis_request_locality(chip, 0);
> if (ret < 0)
> return ret;
>
> @@ -760,7 +785,7 @@ static int tpm_tis_gen_interrupt(struct tpm_chip *chip)
> else
> ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0);
>
> - release_locality(chip, 0);
> + tpm_tis_release_locality(chip, 0);
>
> return ret;
> }
> @@ -785,33 +810,33 @@ static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask,
> }
> priv->irq = irq;
>
> - rc = request_locality(chip, 0);
> + rc = tpm_tis_request_locality(chip, 0);
> if (rc < 0)
> return rc;
>
> rc = tpm_tis_read8(priv, TPM_INT_VECTOR(priv->locality),
> &original_int_vec);
> if (rc < 0) {
> - release_locality(chip, priv->locality);
> + tpm_tis_release_locality(chip, priv->locality);
> return rc;
> }
>
> rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), irq);
> if (rc < 0) {
> - release_locality(chip, priv->locality);
> + tpm_tis_release_locality(chip, priv->locality);
> return rc;
> }
>
> rc = tpm_tis_read32(priv, TPM_INT_STATUS(priv->locality), &int_status);
> if (rc < 0) {
> - release_locality(chip, priv->locality);
> + tpm_tis_release_locality(chip, priv->locality);
> return rc;
> }
>
> /* Clear all existing */
> rc = tpm_tis_write32(priv, TPM_INT_STATUS(priv->locality), int_status);
> if (rc < 0) {
> - release_locality(chip, priv->locality);
> + tpm_tis_release_locality(chip, priv->locality);
> return rc;
> }
>
> @@ -819,11 +844,11 @@ static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask,
> rc = tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality),
> intmask | TPM_GLOBAL_INT_ENABLE);
> if (rc < 0) {
> - release_locality(chip, priv->locality);
> + tpm_tis_release_locality(chip, priv->locality);
> return rc;
> }
>
> - release_locality(chip, priv->locality);
> + tpm_tis_release_locality(chip, priv->locality);
> clear_bit(TPM_TIS_IRQ_TESTED, &priv->flags);
>
> /* Generate an interrupt by having the core call through to
> @@ -959,8 +984,8 @@ static const struct tpm_class_ops tpm_tis = {
> .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
> .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
> .req_canceled = tpm_tis_req_canceled,
> - .request_locality = request_locality,
> - .relinquish_locality = release_locality,
> + .request_locality = tpm_tis_request_locality,
> + .relinquish_locality = tpm_tis_release_locality,
> .clk_enable = tpm_tis_clkrun_enable,
> };
>
> @@ -994,6 +1019,8 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
> priv->timeout_min = TPM_TIMEOUT_USECS_MIN;
> priv->timeout_max = TPM_TIMEOUT_USECS_MAX;
> priv->phy_ops = phy_ops;
> + priv->locality_count = 0;
> + mutex_init(&priv->locality_count_mutex);
>
> dev_set_drvdata(&chip->dev, priv);
>
> @@ -1071,14 +1098,14 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
>
> intmask &= ~TPM_GLOBAL_INT_ENABLE;
>
> - rc = request_locality(chip, 0);
> + rc = tpm_tis_request_locality(chip, 0);
> if (rc < 0) {
> rc = -ENODEV;
> goto out_err;
> }
>
> tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask);
> - release_locality(chip, 0);
> + tpm_tis_release_locality(chip, 0);
>
> rc = tpm_chip_start(chip);
> if (rc)
> @@ -1112,13 +1139,13 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
> * proper timeouts for the driver.
> */
>
> - rc = request_locality(chip, 0);
> + rc = tpm_tis_request_locality(chip, 0);
> if (rc < 0)
> goto out_err;
>
> rc = tpm_get_timeouts(chip);
>
> - release_locality(chip, 0);
> + tpm_tis_release_locality(chip, 0);
>
> if (rc) {
> dev_err(dev, "Could not get TPM timeouts and durations\n");
> @@ -1138,11 +1165,11 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
> dev_err(&chip->dev, FW_BUG
> "TPM interrupt not working, polling instead\n");
>
> - rc = request_locality(chip, 0);
> + rc = tpm_tis_request_locality(chip, 0);
> if (rc < 0)
> goto out_err;
> disable_interrupts(chip);
> - release_locality(chip, 0);
> + tpm_tis_release_locality(chip, 0);
> }
> }
>
> @@ -1209,13 +1236,13 @@ int tpm_tis_resume(struct device *dev)
> * an error code but for unknown reason it isn't handled.
> */
> if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
> - ret = request_locality(chip, 0);
> + ret = tpm_tis_request_locality(chip, 0);
> if (ret < 0)
> return ret;
>
> tpm1_do_selftest(chip);
>
> - release_locality(chip, 0);
> + tpm_tis_release_locality(chip, 0);
> }
>
> return 0;
> diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
> index e005eb99480e..7c6c14707e31 100644
> --- a/drivers/char/tpm/tpm_tis_core.h
> +++ b/drivers/char/tpm/tpm_tis_core.h
> @@ -91,6 +91,8 @@ enum tpm_tis_flags {
>
> struct tpm_tis_data {
> u16 manufacturer_id;
> + struct mutex locality_count_mutex;
> + unsigned int locality_count;
> int locality;
> int irq;
> unsigned int int_mask;
> --
> 2.25.1
>

I'm kind of thinking that should tpm_tis_data have a lock for its
contents?

I kind of doubt that we would ever need more than one lock for it,
and it would give some more assurance against races, especially
when re-enabling interrupts; there this feels important to be "extra safe".

I looked at this commit, and did not see anything that would prevent
using a spin lock instead of mutex. With a spin lock priv can be
accessed also in the interrupt context.

So instead prepend this patch with a patch that adds:

struct spin_lock lock;

And something like:

static inline struct tpm_tis_data *tpm_tis_priv_get(struct tpm_chip *chip)
{
	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);

	spin_lock(&priv->lock);
	return priv;
}

static inline void tpm_tis_priv_put(struct tpm_tis_data *priv)
{
	spin_unlock(&priv->lock);
}

And change the sites where priv is used to acquire the instance with this.

BR, Jarkko

2022-07-01 00:01:57

by Jarkko Sakkinen

[permalink] [raw]
Subject: Re: [PATCH v7 07/10] tpm, tpm_tis: Implement usage counter for locality

On Fri, Jul 01, 2022 at 02:29:47AM +0300, Jarkko Sakkinen wrote:
> On Thu, Jun 30, 2022 at 01:26:50AM +0200, Lino Sanfilippo wrote:
> > From: Lino Sanfilippo <[email protected]>
> >
> > Implement a usage counter for the (default) locality used by the TPM TIS
> > driver:
> > Request the locality from the TPM if it has not been claimed yet, otherwise
> > only increment the counter. Also release the locality if the counter is 0
> > otherwise only decrement the counter. Ensure thread-safety by protecting
> > the counter with a mutex.
> >
> > This allows to request and release the locality from a thread and the
> > interrupt handler at the same time without the danger to interfere with
> > each other.
> >
> > By doing this refactor the names of the amended functions to use the proper
> > prefix.
> >
> > Signed-off-by: Lino Sanfilippo <[email protected]>
> > Tested-by: Michael Niewöhner <[email protected]>
> > ---
> > drivers/char/tpm/tpm_tis_core.c | 75 ++++++++++++++++++++++-----------
> > drivers/char/tpm/tpm_tis_core.h | 2 +
> > 2 files changed, 53 insertions(+), 24 deletions(-)
> >
> > diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
> > index bd4eeb0b2192..e50a2c78de9f 100644
> > --- a/drivers/char/tpm/tpm_tis_core.c
> > +++ b/drivers/char/tpm/tpm_tis_core.c
> > @@ -165,16 +165,27 @@ static bool check_locality(struct tpm_chip *chip, int l)
> > return false;
> > }
> >
> > -static int release_locality(struct tpm_chip *chip, int l)
> > +static int tpm_tis_release_locality_locked(struct tpm_tis_data *priv, int l)
> > +{
> > + tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_ACTIVE_LOCALITY);
> > +
> > + return 0;
> > +}
> > +
> > +static int tpm_tis_release_locality(struct tpm_chip *chip, int l)
> > {
> > struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
> >
> > - tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_ACTIVE_LOCALITY);
> > + mutex_lock(&priv->locality_count_mutex);
> > + priv->locality_count--;
> > + if (priv->locality_count == 0)
> > + tpm_tis_release_locality_locked(priv, l);
> > + mutex_unlock(&priv->locality_count_mutex);
> >
> > return 0;
> > }
> >
> > -static int request_locality(struct tpm_chip *chip, int l)
> > +static int tpm_tis_request_locality_locked(struct tpm_chip *chip, int l)
> > {
> > struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
> > unsigned long stop, timeout;
> > @@ -215,6 +226,20 @@ static int request_locality(struct tpm_chip *chip, int l)
> > return -1;
> > }
> >
> > +static int tpm_tis_request_locality(struct tpm_chip *chip, int l)
> > +{
> > + struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
> > + int ret = 0;
> > +
> > + mutex_lock(&priv->locality_count_mutex);
> > + if (priv->locality_count == 0)
> > + ret = tpm_tis_request_locality_locked(chip, l);
> > + if (!ret)
> > + priv->locality_count++;
> > + mutex_unlock(&priv->locality_count_mutex);
> > + return ret;
> > +}
> > +
> > static u8 tpm_tis_status(struct tpm_chip *chip)
> > {
> > struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
> > @@ -668,7 +693,7 @@ static int probe_itpm(struct tpm_chip *chip)
> > if (vendor != TPM_VID_INTEL)
> > return 0;
> >
> > - if (request_locality(chip, 0) != 0)
> > + if (tpm_tis_request_locality(chip, 0) != 0)
> > return -EBUSY;
> >
> > rc = tpm_tis_send_data(chip, cmd_getticks, len);
> > @@ -689,7 +714,7 @@ static int probe_itpm(struct tpm_chip *chip)
> >
> > out:
> > tpm_tis_ready(chip);
> > - release_locality(chip, priv->locality);
> > + tpm_tis_release_locality(chip, priv->locality);
> >
> > return rc;
> > }
> > @@ -751,7 +776,7 @@ static int tpm_tis_gen_interrupt(struct tpm_chip *chip)
> > cap_t cap;
> > int ret;
> >
> > - ret = request_locality(chip, 0);
> > + ret = tpm_tis_request_locality(chip, 0);
> > if (ret < 0)
> > return ret;
> >
> > @@ -760,7 +785,7 @@ static int tpm_tis_gen_interrupt(struct tpm_chip *chip)
> > else
> > ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0);
> >
> > - release_locality(chip, 0);
> > + tpm_tis_release_locality(chip, 0);
> >
> > return ret;
> > }
> > @@ -785,33 +810,33 @@ static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask,
> > }
> > priv->irq = irq;
> >
> > - rc = request_locality(chip, 0);
> > + rc = tpm_tis_request_locality(chip, 0);
> > if (rc < 0)
> > return rc;
> >
> > rc = tpm_tis_read8(priv, TPM_INT_VECTOR(priv->locality),
> > &original_int_vec);
> > if (rc < 0) {
> > - release_locality(chip, priv->locality);
> > + tpm_tis_release_locality(chip, priv->locality);
> > return rc;
> > }
> >
> > rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), irq);
> > if (rc < 0) {
> > - release_locality(chip, priv->locality);
> > + tpm_tis_release_locality(chip, priv->locality);
> > return rc;
> > }
> >
> > rc = tpm_tis_read32(priv, TPM_INT_STATUS(priv->locality), &int_status);
> > if (rc < 0) {
> > - release_locality(chip, priv->locality);
> > + tpm_tis_release_locality(chip, priv->locality);
> > return rc;
> > }
> >
> > /* Clear all existing */
> > rc = tpm_tis_write32(priv, TPM_INT_STATUS(priv->locality), int_status);
> > if (rc < 0) {
> > - release_locality(chip, priv->locality);
> > + tpm_tis_release_locality(chip, priv->locality);
> > return rc;
> > }
> >
> > @@ -819,11 +844,11 @@ static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask,
> > rc = tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality),
> > intmask | TPM_GLOBAL_INT_ENABLE);
> > if (rc < 0) {
> > - release_locality(chip, priv->locality);
> > + tpm_tis_release_locality(chip, priv->locality);
> > return rc;
> > }
> >
> > - release_locality(chip, priv->locality);
> > + tpm_tis_release_locality(chip, priv->locality);
> > clear_bit(TPM_TIS_IRQ_TESTED, &priv->flags);
> >
> > /* Generate an interrupt by having the core call through to
> > @@ -959,8 +984,8 @@ static const struct tpm_class_ops tpm_tis = {
> > .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
> > .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
> > .req_canceled = tpm_tis_req_canceled,
> > - .request_locality = request_locality,
> > - .relinquish_locality = release_locality,
> > + .request_locality = tpm_tis_request_locality,
> > + .relinquish_locality = tpm_tis_release_locality,
> > .clk_enable = tpm_tis_clkrun_enable,
> > };
> >
> > @@ -994,6 +1019,8 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
> > priv->timeout_min = TPM_TIMEOUT_USECS_MIN;
> > priv->timeout_max = TPM_TIMEOUT_USECS_MAX;
> > priv->phy_ops = phy_ops;
> > + priv->locality_count = 0;
> > + mutex_init(&priv->locality_count_mutex);
> >
> > dev_set_drvdata(&chip->dev, priv);
> >
> > @@ -1071,14 +1098,14 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
> >
> > intmask &= ~TPM_GLOBAL_INT_ENABLE;
> >
> > - rc = request_locality(chip, 0);
> > + rc = tpm_tis_request_locality(chip, 0);
> > if (rc < 0) {
> > rc = -ENODEV;
> > goto out_err;
> > }
> >
> > tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask);
> > - release_locality(chip, 0);
> > + tpm_tis_release_locality(chip, 0);
> >
> > rc = tpm_chip_start(chip);
> > if (rc)
> > @@ -1112,13 +1139,13 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
> > * proper timeouts for the driver.
> > */
> >
> > - rc = request_locality(chip, 0);
> > + rc = tpm_tis_request_locality(chip, 0);
> > if (rc < 0)
> > goto out_err;
> >
> > rc = tpm_get_timeouts(chip);
> >
> > - release_locality(chip, 0);
> > + tpm_tis_release_locality(chip, 0);
> >
> > if (rc) {
> > dev_err(dev, "Could not get TPM timeouts and durations\n");
> > @@ -1138,11 +1165,11 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
> > dev_err(&chip->dev, FW_BUG
> > "TPM interrupt not working, polling instead\n");
> >
> > - rc = request_locality(chip, 0);
> > + rc = tpm_tis_request_locality(chip, 0);
> > if (rc < 0)
> > goto out_err;
> > disable_interrupts(chip);
> > - release_locality(chip, 0);
> > + tpm_tis_release_locality(chip, 0);
> > }
> > }
> >
> > @@ -1209,13 +1236,13 @@ int tpm_tis_resume(struct device *dev)
> > * an error code but for unknown reason it isn't handled.
> > */
> > if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
> > - ret = request_locality(chip, 0);
> > + ret = tpm_tis_request_locality(chip, 0);
> > if (ret < 0)
> > return ret;
> >
> > tpm1_do_selftest(chip);
> >
> > - release_locality(chip, 0);
> > + tpm_tis_release_locality(chip, 0);
> > }
> >
> > return 0;
> > diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
> > index e005eb99480e..7c6c14707e31 100644
> > --- a/drivers/char/tpm/tpm_tis_core.h
> > +++ b/drivers/char/tpm/tpm_tis_core.h
> > @@ -91,6 +91,8 @@ enum tpm_tis_flags {
> >
> > struct tpm_tis_data {
> > u16 manufacturer_id;
> > + struct mutex locality_count_mutex;
> > + unsigned int locality_count;
> > int locality;
> > int irq;
> > unsigned int int_mask;
> > --
> > 2.25.1
> >
>
> I'm kind of thinking that should tpm_tis_data have a lock for its
> contents?
>
> I kind of doubt that we would ever need more than one lock for it,
> and it would give some more ensurance to not be race, especially
> when re-enabling interrupts this feels important to be "extra safe".
>
> I looked at this commit, and did not see anything that would prevent
> using a spin lock instead of mutex. With a spin lock priv can be
> accessed also in the interrupt context.
>
> So instead prepend this patch with a patch that adds:
>
> struct spin_lock lock;
>
> And something like:
>
> static inline struct tpm_tis_data *tpm_tis_priv_get(struct tpm_chip *chip)
> {
> struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
>
> spin_lock(&priv->lock);
> return priv;
> }
>
> static inline void tpm_tis_priv_put(struct tpm_tis_data *priv)
> {
> spin_unlock(&priv->lock);
> }
>
> And change the sites where priv is used to acquire the instance with this.

I.e.

struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);

becomes:

struct tpm_tis_data *priv = tpm_tis_priv_get(&chip);

In some sites most likely the acquisition must be done later, e.g.
because of locking order with chip's lock (perhaps).

BR, Jarkko

2022-07-04 17:47:12

by Lino Sanfilippo

[permalink] [raw]
Subject: Re: [PATCH v7 07/10] tmp, tmp_tis: Implement usage counter for locality



On 01.07.22 01:29, Jarkko Sakkinen wrote:

>
> I'm kind of thinking that should tpm_tis_data have a lock for its
> contents?

Most of the tpm_tis_data structure elements are set once during init and
then never changed but only read. So no need for locking for these. The
exceptions I see are

- flags
- locality_count
- locality


whereby "flags" is accessed by atomic bit manipulating functions and thus
does not need extra locking. "locality_count" is protected by the locality_count_mutex.
"locality" is only set in check_locality() which is called from tpm_tis_request_locality_locked()
which holds the locality_count_mutex. So check_locality() is also protected by the locality_count_mutex
(which for this reason should probably rather be called locality_mutex since it protects both the "locality_count"
and the "locality" variable).

There is one other place check_locality() is called from, namely the interrupt handler. This is also the only
place in which "locality" could be assigned another value than 0 (aka the default). In this case there
is no lock, so this could indeed be racy.

The solution I see for this is:
1. remove the entire loop that checks for the current locality, i.e. this code:

if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT)
for (i = 0; i < 5; i++)
if (check_locality(chip, i))
break;

So we avoid "locality" from being changed to something that is not the default.


2. grab the locality_count_mutex and protect "locality":

if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT)
mutex_lock(&priv->locality_count_mutex);
for (i = 0; i < 5; i++)
if (check_locality(chip, i))
break;
mutex_unlock(&priv->locality_count_mutex);


I dont see the reason why we should store which locality is the active one, since the only thing
that ever would change it from 0 (i.e. the default which we use) to something else is some external instance.

So I would vote for option 1.



>
> I kind of doubt that we would ever need more than one lock for it,
> and it would give some more assurance against races, especially
> when re-enabling interrupts this feels important to be "extra safe".
>
> I looked at this commit, and did not see anything that would prevent
> using a spin lock instead of mutex. With a spin lock priv can be
> accessed also in the interrupt context.
>
> So instead prepend this patch with a patch that adds:
>
> struct spin_lock lock;
>
> And something like:
>
> static inline struct tpm_tis_data *tpm_tis_priv_get(struct tpm_chip *chip)
> {
> struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
>
> spin_lock(&priv->lock);
> return priv;
> }
>
> static inline void tpm_tis_priv_put(struct tpm_tis_data *priv)
> {
> spin_unlock(&priv->lock);
> }
>
> And change the sites where priv is used to acquire the instance with this.
>

In this patch we need the mutex to protect the locality counter. We have to hold the mutex
while we do a register access that requires a locality (to make sure that the locality is not
released by another thread shortly before we do the access).

We cannot do the register access while holding a spinlock, since for SPI the (SPI) bus
lock mutex is used which needs a sleepable context. That is not given while holding a spinlock,
so I think we have no choice here unfortunately.

Regards,
Lino





2022-07-11 03:30:10

by Jarkko Sakkinen

[permalink] [raw]
Subject: Re: [PATCH v7 07/10] tmp, tmp_tis: Implement usage counter for locality

On Mon, Jul 04, 2022 at 07:45:12PM +0200, Lino Sanfilippo wrote:
>
>
> On 01.07.22 01:29, Jarkko Sakkinen wrote:
>
> >
> > I'm kind of thinking that should tpm_tis_data have a lock for its
> > contents?
>
> Most of the tpm_tis_data structure elements are set once during init and
> then never changed but only read. So no need for locking for these. The
> exceptions I see are
>
> - flags
> - locality_count
> - locality

I'd still go for single data struct lock, since this lock would
be taken in every transmit flow. It makes the whole thing easier
to maintain over time, and does not really affect scalability.

This brings me to another question: what does this lock protect
against given that tpm_try_get_ops() already takes tpm_mutex?
It's not clear and that should be somehow reasoned in the commit
message.

Anyway, *if* a lock is needed the granularity should be the whole
struct.

BR, Jarkko

2022-07-11 20:11:46

by Jason Andryuk

[permalink] [raw]
Subject: Re: [PATCH v7 07/10] tmp, tmp_tis: Implement usage counter for locality

Hi,

This patch subject has a typo "tmp, tmp_tis" -> "tpm, tpm_tis"

On Wed, Jun 29, 2022 at 7:28 PM Lino Sanfilippo <[email protected]> wrote:
>
> From: Lino Sanfilippo <[email protected]>
>
> Implement a usage counter for the (default) locality used by the TPM TIS
> driver:
> Request the locality from the TPM if it has not been claimed yet, otherwise
> only increment the counter. Also release the locality if the counter is 0
> otherwise only decrement the counter. Ensure thread-safety by protecting
> the counter with a mutex.
>
> This allows to request and release the locality from a thread and the
> interrupt handler at the same time without the danger to interfere with
> each other.
>
> By doing this refactor the names of the amended functions to use the proper
> prefix.
>
> Signed-off-by: Lino Sanfilippo <[email protected]>
> Tested-by: Michael Niewöhner <[email protected]>
> ---
> drivers/char/tpm/tpm_tis_core.c | 75 ++++++++++++++++++++++-----------
> drivers/char/tpm/tpm_tis_core.h | 2 +
> 2 files changed, 53 insertions(+), 24 deletions(-)
>
> diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
> index bd4eeb0b2192..e50a2c78de9f 100644
> --- a/drivers/char/tpm/tpm_tis_core.c
> +++ b/drivers/char/tpm/tpm_tis_core.c

> @@ -215,6 +226,20 @@ static int request_locality(struct tpm_chip *chip, int l)
> return -1;
> }
>
> +static int tpm_tis_request_locality(struct tpm_chip *chip, int l)
> +{
> + struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
> + int ret = 0;
> +
> + mutex_lock(&priv->locality_count_mutex);
> + if (priv->locality_count == 0)
> + ret = tpm_tis_request_locality_locked(chip, l);
> + if (!ret)
> + priv->locality_count++;
> + mutex_unlock(&priv->locality_count_mutex);
> + return ret;
> +}
> +

This function should check that the requested locality matches the
current locality otherwise this sequence would seemingly succeed
though locality 0 is the one acquired.

tpm_tis_request_locality(chip, 0);
tpm_tis_request_locality(chip, 1);

Regards,
Jason

2022-07-11 21:09:34

by Lino Sanfilippo

[permalink] [raw]
Subject: Re: [PATCH v7 07/10] tmp, tmp_tis: Implement usage counter for locality


On 11.07.22 04:50, Jarkko Sakkinen wrote:
> On Mon, Jul 04, 2022 at 07:45:12PM +0200, Lino Sanfilippo wrote:
>>
>>
>> On 01.07.22 01:29, Jarkko Sakkinen wrote:
>>
>>>
>>> I'm kind of thinking that should tpm_tis_data have a lock for its
>>> contents?
>>
>> Most of the tpm_tis_data structure elements are set once during init and
>> then never changed but only read. So no need for locking for these. The
>> exceptions I see are
>>
>> - flags
>> - locality_count
>> - locality
>
> I'd still go for single data struct lock, since this lock would
> be taken in every transmit flow.

Well in both cases, transmit and receive, we end up in wait_for_tpm_stat().
Whatever lock we hold at this time cannot be taken in the interrupt
handler, since this would deadlock (wait_for_tpm_stat() waits for the interrupt
handler to complete but holds the lock that the interrupt handler needs to proceed).

So in the interrupt handler we need something that is not held during the whole
transmit/receive flow.

This is the reason why the locality_count_mutex only protects the one thing we
have to take care of in the interrupt handler, namely the locality counter.


> It makes the whole thing easier
> to maintain over time, and does not really affect scalability.
> This brings me to another question: what does this lock protect
> against given that tpm_try_get_ops() already takes tpm_mutex?
> It's not clear and that should be somehow reasoned in the commit
> message.

See above, we cannot take the tpm mutex in the interrupt handler for the same
reason.

> Anyway, *if* a lock is needed the granularity should be the whole
> struct.
>
> BR, Jarkko

Regards,
Lino

2022-07-11 21:32:46

by Lino Sanfilippo

[permalink] [raw]
Subject: Re: [PATCH v7 07/10] tmp, tmp_tis: Implement usage counter for locality

Hi,

On 11.07.22 21:39, Jason Andryuk wrote:
> Hi,
>
> This patch subject has a typo "tmp, tmp_tis" -> "tpm, tpm_tis"
>

Right, thanks for the hint!

> On Wed, Jun 29, 2022 at 7:28 PM Lino Sanfilippo <[email protected]> wrote:
>>
>> From: Lino Sanfilippo <[email protected]>
>>
>> Implement a usage counter for the (default) locality used by the TPM TIS
>> driver:
>> Request the locality from the TPM if it has not been claimed yet, otherwise
>> only increment the counter. Also release the locality if the counter is 0
>> otherwise only decrement the counter. Ensure thread-safety by protecting
>> the counter with a mutex.
>>
>> This allows to request and release the locality from a thread and the
>> interrupt handler at the same time without the danger to interfere with
>> each other.
>>
>> By doing this refactor the names of the amended functions to use the proper
>> prefix.
>>
>> Signed-off-by: Lino Sanfilippo <[email protected]>
>> Tested-by: Michael Niewöhner <[email protected]>
>> ---
>> drivers/char/tpm/tpm_tis_core.c | 75 ++++++++++++++++++++++-----------
>> drivers/char/tpm/tpm_tis_core.h | 2 +
>> 2 files changed, 53 insertions(+), 24 deletions(-)
>>
>> diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
>> index bd4eeb0b2192..e50a2c78de9f 100644
>> --- a/drivers/char/tpm/tpm_tis_core.c
>> +++ b/drivers/char/tpm/tpm_tis_core.c
>
>> @@ -215,6 +226,20 @@ static int request_locality(struct tpm_chip *chip, int l)
>> return -1;
>> }
>>
>> +static int tpm_tis_request_locality(struct tpm_chip *chip, int l)
>> +{
>> + struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
>> + int ret = 0;
>> +
>> + mutex_lock(&priv->locality_count_mutex);
>> + if (priv->locality_count == 0)
>> + ret = tpm_tis_request_locality_locked(chip, l);
>> + if (!ret)
>> + priv->locality_count++;
>> + mutex_unlock(&priv->locality_count_mutex);
>> + return ret;
>> +}
>> +
>
> This function should check that the requested locality matches the
> current locality otherwise this sequence would seemingly succeed
> though locality 0 is the one acquired.
>
> tpm_tis_request_locality(chip, 0);
> tpm_tis_request_locality(chip, 1);

This should not really be an issue since the TPM TIS driver only uses
locality 0.

>
> Regards,
> Jason

Regards,
Lino

2022-07-15 13:43:29

by Jarkko Sakkinen

[permalink] [raw]
Subject: Re: [PATCH v7 07/10] tmp, tmp_tis: Implement usage counter for locality

On Mon, Jul 11, 2022 at 11:03:05PM +0200, Lino Sanfilippo wrote:
>
> On 11.07.22 04:50, Jarkko Sakkinen wrote:
> > On Mon, Jul 04, 2022 at 07:45:12PM +0200, Lino Sanfilippo wrote:
> >>
> >>
> >> On 01.07.22 01:29, Jarkko Sakkinen wrote:
> >>
> >>>
> >>> I'm kind of thinking that should tpm_tis_data have a lock for its
> >>> contents?
> >>
> >> Most of the tpm_tis_data structure elements are set once during init and
> >> then never changed but only read. So no need for locking for these. The
> >> exceptions I see are
> >>
> >> - flags
> >> - locality_count
> >> - locality
> >
> > I'd still go for single data struct lock, since this lock would
> > be taken in every transmit flow.
>
> Well in both cases, transmit and receive, we end up in wait_for_tpm_stat().
> Whatever lock we hold at this time cannot be taken in the interrupt
> handler, since this would deadlock (wait_for_tpm_stat() waits for the interrupt
> handler to complete but holds the lock that the interrupt handler needs to proceed).
>
> So in the interrupt handler we need something that is not held during the whole
> transmit/receive flow.
>
> This is the reason why the locality_count_mutex only protects the one thing we
> have to take care of in the interrupt handler, namely the locality counter.
>
>
> > It makes the whole thing easier
> > to maintain over time, and does not really affect scalability.
> > This brings me to another question: what does this lock protect
> > against given that tpm_try_get_ops() already takes tpm_mutex?
> > It's not clear and that should be somehow reasoned in the commit
> > message.
>
> See above, we cannot take the tpm mutex in the interrupt handler for the same
> reason.

You should squash this then with the following patch.

Also, I'm not sure why you don't use kref for this.

> > Anyway, *if* a lock is needed the granularity should be the whole
> > struct.
> >
> > BR, Jarkko
>
> Regards,
> Lino

BR, Jarkko

2022-07-27 12:31:11

by Lino Sanfilippo

[permalink] [raw]
Subject: Re: [PATCH v7 07/10] tmp, tmp_tis: Implement usage counter for locality



On 11.07.22 04:50, Jarkko Sakkinen wrote:
> On Mon, Jul 04, 2022 at 07:45:12PM +0200, Lino Sanfilippo wrote:
>>
>>
>> On 01.07.22 01:29, Jarkko Sakkinen wrote:
>>
>>>
>>> I'm kind of thinking that should tpm_tis_data have a lock for its
>>> contents?
>>
>> Most of the tpm_tis_data structure elements are set once during init and
>> then never changed but only read. So no need for locking for these. The
>> exceptions I see are
>>
>> - flags
>> - locality_count
>> - locality
>
> I'd still go for single data struct lock, since this lock would
> be taken in every transmit flow. It makes the whole thing easier
> to maintain over time, and does not really affect scalability.
>

This means switching to a complete new locking scheme which affects many
parts of the TIS core code. It is also not directly related to what this patch series
is about, namely activating the interrupts for TPM TIS.

I suggest to first finish polishing this series especially since there have
only been minor issues in the last versions. Once the interrupts work we
still can think of implementing another lock handling in a follow up series.


> This brings me to another question: what does this lock protect
> against given that tpm_try_get_ops() already takes tpm_mutex?
> It's not clear and that should be somehow reasoned in the commit
> message.
>
> Anyway, *if* a lock is needed the granularity should be the whole
> struct.
>
> BR, Jarkko

Regards,
Lino

2022-07-28 08:18:08

by Jarkko Sakkinen

[permalink] [raw]
Subject: Re: [PATCH v7 07/10] tmp, tmp_tis: Implement usage counter for locality

On Wed, Jul 27, 2022 at 02:16:56PM +0200, Lino Sanfilippo wrote:
>
>
> On 11.07.22 04:50, Jarkko Sakkinen wrote:
> > On Mon, Jul 04, 2022 at 07:45:12PM +0200, Lino Sanfilippo wrote:
> >>
> >>
> >> On 01.07.22 01:29, Jarkko Sakkinen wrote:
> >>
> >>>
> >>> I'm kind of thinking that should tpm_tis_data have a lock for its
> >>> contents?
> >>
> >> Most of the tpm_tis_data structure elements are set once during init and
> >> then never changed but only read. So no need for locking for these. The
> >> exceptions I see are
> >>
> >> - flags
> >> - locality_count
> >> - locality
> >
> > I'd still go for single data struct lock, since this lock would
> > be taken in every transmit flow. It makes the whole thing easier
> > to maintain over time, and does not really affect scalability.
> >
>
> This means switching to a complete new locking scheme which affects many
> parts of the TIS core code. It is also not directly related to what this patch series
> is about, namely activating the interrupts for TPM TIS.
>
> I suggest to first finish polishing this series especially since there have
> only been minor issues in the last versions. Once the interrupts work we
> still can think of implementing another lock handling in a follow up series.

So what if you would use kref instead here?

On the surface this looks like an ad-hoc kref, but I could be wrong too (as always).

BR, Jarkko

2022-07-28 15:47:22

by Lino Sanfilippo

[permalink] [raw]
Subject: Re: [PATCH v7 07/10] tmp, tmp_tis: Implement usage counter for locality



On 28.07.22 10:15, Jarkko Sakkinen wrote:
> On Wed, Jul 27, 2022 at 02:16:56PM +0200, Lino Sanfilippo wrote:
>>
>>
>> On 11.07.22 04:50, Jarkko Sakkinen wrote:
>>> On Mon, Jul 04, 2022 at 07:45:12PM +0200, Lino Sanfilippo wrote:
>>>>
>>>>
>>>> On 01.07.22 01:29, Jarkko Sakkinen wrote:
>>>>
>>>>>
>>>>> I'm kind of thinking that should tpm_tis_data have a lock for its
>>>>> contents?
>>>>
>>>> Most of the tpm_tis_data structure elements are set once during init and
>>>> then never changed but only read. So no need for locking for these. The
>>>> exceptions I see are
>>>>
>>>> - flags
>>>> - locality_count
>>>> - locality
>>>
>>> I'd still go for single data struct lock, since this lock would
>>> be taken in every transmit flow. It makes the whole thing easier
>>> to maintain over time, and does not really affect scalability.
>>>
>>
>> This means switching to a complete new locking scheme which affects many
>> parts of the TIS core code. It is also not directly related to what this patch series
>> is about, namely activating the interrupts for TPM TIS.
>>
>> I suggest to first finish polishing this series especially since there have
>> only been minor issues in the last versions. Once the interrupts work we
>> still can think of implementing another lock handling in a follow up series.
>
> So what if you would use kref instead here?
>

Sure, that should not be too difficult to do. I will implement this for the next version.

Regards,
Lino

2022-07-28 18:03:10

by Lino Sanfilippo

[permalink] [raw]
Subject: Re: [PATCH v7 07/10] tmp, tmp_tis: Implement usage counter for locality



On 04.07.22 19:45, Lino Sanfilippo wrote:
>
>
> On 01.07.22 01:29, Jarkko Sakkinen wrote:
>
>>
>> I'm kind of thinking that should tpm_tis_data have a lock for its
>> contents?
>
> Most of the tpm_tis_data structure elements are set once during init and
> then never changed but only read. So no need for locking for these. The
> exceptions I see are
>
> - flags
> - locality_count
> - locality
>
>
> whereby "flags" is accessed by atomic bit manipulating functions and thus
> does not need extra locking. "locality_count" is protected by the locality_count_mutex.
> "locality" is only set in check_locality() which is called from tpm_tis_request_locality_locked()
> which holds the locality_count_mutex. So check_locality() is also protected by the locality_count_mutex
> (which for this reason should probably rather be called locality_mutex since it protects both the "locality_count"
> and the "locality" variable).
>
> There is one other place check_locality() is called from, namely the interrupt handler. This is also the only
> place in which "locality" could be assigned another value than 0 (aka the default). In this case there
> is no lock, so this could indeed be racy.
>
> The solution I see for this is:
> 1. remove the entire loop that checks for the current locality, i.e. this code:
>
> if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT)
> for (i = 0; i < 5; i++)
> if (check_locality(chip, i))
> break;
>
> So we avoid "locality" from being changed to something that is not the default.
>
>

I wonder if we need tpm_tis_data->locality at all: the claimed locality is already tracked in
chip->locality and in TPM TIS we never use anything else than locality 0 so it never changes.

Is there any good reason not to remove it?



Regards,
Lino

2022-08-01 17:08:30

by Jarkko Sakkinen

[permalink] [raw]
Subject: Re: [PATCH v7 07/10] tmp, tmp_tis: Implement usage counter for locality

On Thu, Jul 28, 2022 at 07:36:19PM +0200, Lino Sanfilippo wrote:
>
>
> On 04.07.22 19:45, Lino Sanfilippo wrote:
> >
> >
> > On 01.07.22 01:29, Jarkko Sakkinen wrote:
> >
> >>
> >> I'm kind of thinking that should tpm_tis_data have a lock for its
> >> contents?
> >
> > Most of the tpm_tis_data structure elements are set once during init and
> > then never changed but only read. So no need for locking for these. The
> > exceptions I see are
> >
> > - flags
> > - locality_count
> > - locality
> >
> >
> > whereby "flags" is accessed by atomic bit manipulating functions and thus
> > does not need extra locking. "locality_count" is protected by the locality_count_mutex.
> > "locality" is only set in check_locality() which is called from tpm_tis_request_locality_locked()
> > which holds the locality_count_mutex. So check_locality() is also protected by the locality_count_mutex
> > (which for this reason should probably rather be called locality_mutex since it protects both the "locality_count"
> > and the "locality" variable).
> >
> > There is one other place check_locality() is called from, namely the interrupt handler. This is also the only
> > place in which "locality" could be assigned another value than 0 (aka the default). In this case there
> > is no lock, so this could indeed be racy.
> >
> > The solution I see for this is:
> > 1. remove the entire loop that checks for the current locality, i.e. this code:
> >
> > if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT)
> > for (i = 0; i < 5; i++)
> > if (check_locality(chip, i))
> > break;
> >
> > So we avoid "locality" from being changed to something that is not the default.
> >
> >
>
> I wonder if we need tpm_tis_data->locality at all: the claimed locality is already tracked in
> chip->locality and in TPM TIS we never use anything else than locality 0 so it never changes.
>
> Is there any good reason not to remove it?

I think it would be a great idea to unify them.

BR, Jarkko

2022-10-08 17:36:41

by Lino Sanfilippo

[permalink] [raw]
Subject: Re: [PATCH v7 07/10] tmp, tmp_tis: Implement usage counter for locality


Hi Jarkko,

On 28.07.22 at 17:45, Lino Sanfilippo wrote:

>>>
>>> This means switching to a complete new locking scheme which affects many
>>> parts of the TIS core code. It is also not directly related to what this patch series
>>> is about, namely activating the interrupts for TPM TIS.
>>>
>>> I suggest to first finish polishing this series especially since there have
>>> only been minor issues in the last versions. Once the interrupts work we
>>> still can think of implementing another lock handling in a follow up series.
>>
>> So what if you would use kref instead here?
>>
>
> Sure, that should not be too difficult to do. I will implement this for the next version.
>
> Regards,
> Lino
>

First of all, sorry for this very late reply. Unfortunately in the last weeks I was
not able to work further on this series due to my private situation.
Nevertheless I tried to implement your suggestion (using krefs for the locality counting)
meanwhile. However krefs turned out to be a rather bad fit for this task.

The reason is that for the locality handling we have to perform a certain action (i.e.
writing to the access register) on two occasions:

1. When the locality is requested while no locality is active
2. When the locality has been released the number of times it has been requested before

Since a kref is designed to track the lifetime of an object which is freed as soon as the
kref counter hits 0, it starts with a counter of 1 when it is created, not with a counter of 0
(as we would need it, since at the beginning nothing has claimed the locality yet).
Furthermore while kref provides a built-in mechanism to execute a function when the counter
hits 0 it does not provide anything similar for the case that the counter is increased the
first time (i.e when we want to claim the locality by writing to the access register).

So although certainly doable I do not see much gain from using krefs in this case. Again,
sorry for this late reply.

Regards,
Lino