2009-07-20 22:27:27

by Linus Walleij

Subject: [PATCH 1/2] MMC Agressive clocking framework v5

This patch modifies the MMC core code to optionally call the
set_ios() operation on the driver with the clock frequency set
to 0 (gate) after a grace period of at least 8 MCLK cycles, then
restore it (ungate) before any new request. This gives
the driver the option to shut down the hardware block clock and
thus (in known designs) the MCI clock to the MMC/SD card when
the clock frequency is 0, i.e. the core has stated that the MCI
clock does not need to be generated.

It is inspired by existing clock gating code found in the OMAP
and Atmel drivers and brings this up to the host abstraction.
Gating is performed before and after any MMC request.

This is exemplified by implementing it for the MMCI/PL180 MMC/SD
host controller, but it should be simple to switch OMAP and
Atmel over to using this instead.
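
As a rough illustration of what this lets a host driver do, here is a
minimal sketch of a set_ios() handler (purely hypothetical foo_* names,
not the actual mmci.c change in patch 2/2) that disables its block clock
whenever ios->clock is 0 and re-enables it otherwise:

#include <linux/clk.h>
#include <linux/mmc/host.h>

struct foo_host {			/* hypothetical driver state */
	struct clk	*clk;		/* block clock from the clk framework */
	bool		clk_enabled;
};

static void foo_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct foo_host *host = mmc_priv(mmc);

	if (ios->clock == 0) {
		/* the core has stated the MCI clock need not be generated */
		if (host->clk_enabled) {
			clk_disable(host->clk);
			host->clk_enabled = false;
		}
	} else {
		if (!host->clk_enabled) {
			clk_enable(host->clk);
			host->clk_enabled = true;
		}
		/* program the card clock divider for ios->clock here */
	}
}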

Signed-off-by: Linus Walleij <[email protected]>
---
drivers/mmc/core/Kconfig | 11 +++
drivers/mmc/core/core.c | 39 +++++++++-
drivers/mmc/core/core.h | 2 +
drivers/mmc/core/debugfs.c | 10 ++-
drivers/mmc/core/host.c | 191 +++++++++++++++++++++++++++++++++++++++++++-
drivers/mmc/core/host.h | 4 +
include/linux/mmc/host.h | 10 +++
7 files changed, 263 insertions(+), 4 deletions(-)

diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index ab37a6d..5372fc9 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -14,3 +14,14 @@ config MMC_UNSAFE_RESUME
This option is usually just for embedded systems which use
a MMC/SD card for rootfs. Most people should say N here.

+config MMC_CLKGATE
+ bool "MMC host clock gating (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ help
+ This will attempt to aggressively gate the clock to the MMC host,
+ which typically also will gate the MCI clock to the card. This
+ is done to save power due to gating off the logic and bus noise
+ when MMC is not in use. Your host driver has to support this in
+ order for it to be of any use.
+
+ If unsure, say N.
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index d84c880..30a940b 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -113,6 +113,8 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)

if (mrq->done)
mrq->done(mrq);
+
+ mmc_host_clk_gate(host);
}
}

@@ -173,6 +175,7 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
mrq->stop->mrq = mrq;
}
}
+ mmc_host_clk_ungate(host);
host->ops->request(host, mrq);
}

@@ -279,7 +282,7 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)

timeout_us = data->timeout_ns / 1000;
timeout_us += data->timeout_clks * 1000 /
- (card->host->ios.clock / 1000);
+ (mmc_host_clk_rate(card->host) / 1000);

if (data->flags & MMC_DATA_WRITE)
/*
@@ -447,6 +450,40 @@ void mmc_set_clock(struct mmc_host *host, unsigned int hz)
mmc_set_ios(host);
}

+#ifdef CONFIG_MMC_CLKGATE
+/*
+ * This gates the clock by setting it to 0 Hz.
+ */
+void mmc_gate_clock(struct mmc_host *host)
+{
+ host->clk_old = host->ios.clock;
+ host->ios.clock = 0;
+ host->clk_gated = true;
+ mmc_set_ios(host);
+}
+
+/*
+ * This restores the clock from gating by using the cached
+ * clock value.
+ */
+void mmc_ungate_clock(struct mmc_host *host)
+{
+ /*
+ * We should previously have gated the clock, so the clock
+ * shall be 0 here!
+ * The clock may however be 0 during initialization,
+ * when some request operations are performed before setting
+ * the frequency. When ungate is requested in that situation
+ * we just ignore the call.
+ */
+ if (host->clk_old) {
+ BUG_ON(host->ios.clock);
+ mmc_set_clock(host, host->clk_old);
+ }
+ host->clk_gated = false;
+}
+#endif
+
/*
* Change the bus mode (open drain/push-pull) of a host.
*/
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index c819eff..ee27f81 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -27,6 +27,8 @@ void mmc_detach_bus(struct mmc_host *host);

void mmc_set_chip_select(struct mmc_host *host, int mode);
void mmc_set_clock(struct mmc_host *host, unsigned int hz);
+void mmc_gate_clock(struct mmc_host *host);
+void mmc_ungate_clock(struct mmc_host *host);
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode);
void mmc_set_bus_width(struct mmc_host *host, unsigned int width);
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr);
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 610dbd1..1a969bd 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -149,11 +149,17 @@ void mmc_add_host_debugfs(struct mmc_host *host)
host->debugfs_root = root;

if (!debugfs_create_file("ios", S_IRUSR, root, host, &mmc_ios_fops))
- goto err_ios;
+ goto err_remove_files;
+
+#ifdef CONFIG_MMC_CLKGATE
+ if (!debugfs_create_u32("clk_delay", (S_IRUSR | S_IWUSR),
+ root, &host->clk_delay))
+ goto err_remove_files;
+#endif

return;

-err_ios:
+err_remove_files:
debugfs_remove_recursive(root);
host->debugfs_root = NULL;
err_root:
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 5e945e6..a046b2c 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -3,6 +3,7 @@
*
* Copyright (C) 2003 Russell King, All Rights Reserved.
* Copyright (C) 2007-2008 Pierre Ossman
+ * Copyright (C) 2009 Linus Walleij
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -48,6 +49,191 @@ void mmc_unregister_host_class(void)
static DEFINE_IDR(mmc_host_idr);
static DEFINE_SPINLOCK(mmc_host_lock);

+#ifdef CONFIG_MMC_CLKGATE
+
+/*
+ * Enabling clock gating will make the core call out to the host
+ * once up and once down when it performs a request or card operation
+ * intermingled in any fashion. The driver will see this through
+ * set_ios() operations with ios.clock field set to 0 to gate
+ * (disable) the block clock, and to the old frequency to enable
+ * it again.
+ */
+static void mmc_host_clk_gate_delayed(struct mmc_host *host)
+{
+ unsigned long tick_ns;
+ unsigned long freq = host->ios.clock;
+ unsigned long flags;
+ int users;
+
+ if (!freq) {
+ pr_err("%s: frequency set to 0 in disable function, "
+ "this means the clock is already disabled.\n",
+ mmc_hostname(host));
+ return;
+ }
+ /*
+ * New requests may have appeared while we were scheduling,
+ * then there is no reason to delay the check before
+ * clk_disable().
+ */
+ spin_lock_irqsave(&host->clk_lock, flags);
+ users = host->clk_requests;
+ /*
+ * Delay 8 bus cycles (from MMC spec) before attempting
+ * to disable the MMCI block clock. The reference count
+ * may have gone up again after this delay due to
+ * rescheduling!
+ */
+ if (!users) {
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+ tick_ns = (1000000000 + freq - 1) / freq;
+ ndelay(host->clk_delay * tick_ns);
+ } else {
+ /* New users appeared while waiting for this work */
+ host->clk_pending_gate = false;
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+ return;
+ }
+ spin_lock_irqsave(&host->clk_lock, flags);
+ if (!host->clk_requests) {
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+ /* this will set host->ios.clock to 0 */
+ mmc_gate_clock(host);
+ spin_lock_irqsave(&host->clk_lock, flags);
+#ifdef CONFIG_MMC_DEBUG
+ pr_debug("%s: disabled MCI clock\n",
+ mmc_hostname(host));
+#endif
+ }
+ host->clk_pending_gate = false;
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+}
+
+/*
+ * Internal work. Work to disable the clock at some later point.
+ */
+static void mmc_host_clk_gate_work(struct work_struct *work)
+{
+ struct mmc_host *host = container_of(work, struct mmc_host,
+ clk_disable_work);
+
+ mmc_host_clk_gate_delayed(host);
+}
+
+/*
+ * mmc_host_clk_ungate - make sure the host ios.clock is
+ * restored to some non-zero value past this call.
+ * @host: host to ungate.
+ *
+ * Increase clock reference count and ungate clock if first user.
+ */
+void mmc_host_clk_ungate(struct mmc_host *host)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->clk_lock, flags);
+ if (host->clk_gated) {
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+ mmc_ungate_clock(host);
+ spin_lock_irqsave(&host->clk_lock, flags);
+#ifdef CONFIG_MMC_DEBUG
+ pr_debug("%s: ungated MCI clock\n",
+ mmc_hostname(host));
+#endif
+ }
+ host->clk_requests++;
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+}
+
+/*
+ * mmc_host_clk_gate - call the host driver with ios.clock
+ * set to zero as often as possible so as to make it
+ * possible to gate off hardware MCI clocks.
+ * @host: host to gate.
+ *
+ * Decrease clock reference count and schedule disablement of clock.
+ */
+void mmc_host_clk_gate(struct mmc_host *host)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->clk_lock, flags);
+ host->clk_requests--;
+ if (!host->clk_requests) {
+ host->clk_pending_gate = true;
+ schedule_work(&host->clk_disable_work);
+ }
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+}
+
+/*
+ * mmc_host_clk_rate - get current clock frequency setting no matter
+ * whether it's gated or not.
+ * @host: host to get the clock frequency for.
+ */
+unsigned int mmc_host_clk_rate(struct mmc_host *host)
+{
+ unsigned long freq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->clk_lock, flags);
+ if (host->clk_gated)
+ freq = host->clk_old;
+ else
+ freq = host->ios.clock;
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+ return freq;
+}
+
+/*
+ * mmc_host_clk_init - set up clock gating code
+ * @host: host with potential hardware clock to control
+ */
+static inline void mmc_host_clk_init(struct mmc_host *host)
+{
+ host->clk_requests = 0;
+ host->clk_delay = 8; /* hold MCI clock in 8 cycles by default */
+ host->clk_gated = false;
+ host->clk_pending_gate = false;
+ INIT_WORK(&host->clk_disable_work, mmc_host_clk_gate_work);
+ spin_lock_init(&host->clk_lock);
+}
+
+/*
+ * mmc_host_clk_exit - shut down clock gating code
+ * @host: host with potential hardware clock to control
+ */
+static inline void mmc_host_clk_exit(struct mmc_host *host)
+{
+ if (cancel_work_sync(&host->clk_disable_work))
+ mmc_host_clk_gate_delayed(host);
+ BUG_ON(host->clk_requests > 0);
+}
+
+#else
+inline void mmc_host_clk_ungate(struct mmc_host *host)
+{
+}
+
+inline void mmc_host_clk_gate(struct mmc_host *host)
+{
+}
+
+inline unsigned int mmc_host_clk_rate(struct mmc_host *host)
+{
+ return host->ios.clock;
+}
+
+static inline void mmc_host_clk_init(struct mmc_host *host)
+{
+}
+
+static inline void mmc_host_clk_exit(struct mmc_host *host)
+{
+}
+#endif
+
/**
* mmc_alloc_host - initialise the per-host structure.
* @extra: sizeof private data structure
@@ -80,6 +266,8 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
host->class_dev.class = &mmc_host_class;
device_initialize(&host->class_dev);

+ mmc_host_clk_init(host);
+
spin_lock_init(&host->lock);
init_waitqueue_head(&host->wq);
INIT_DELAYED_WORK(&host->detect, mmc_rescan);
@@ -156,6 +344,8 @@ void mmc_remove_host(struct mmc_host *host)
device_del(&host->class_dev);

led_trigger_unregister_simple(host->led);
+
+ mmc_host_clk_exit(host);
}

EXPORT_SYMBOL(mmc_remove_host);
@@ -176,4 +366,3 @@ void mmc_free_host(struct mmc_host *host)
}

EXPORT_SYMBOL(mmc_free_host);
-
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
index c2dc3d2..81ff77b 100644
--- a/drivers/mmc/core/host.h
+++ b/drivers/mmc/core/host.h
@@ -10,9 +10,13 @@
*/
#ifndef _MMC_CORE_HOST_H
#define _MMC_CORE_HOST_H
+#include <linux/mmc/host.h>

int mmc_register_host_class(void);
void mmc_unregister_host_class(void);
+void mmc_host_clk_ungate(struct mmc_host *host);
+void mmc_host_clk_gate(struct mmc_host *host);
+unsigned int mmc_host_clk_rate(struct mmc_host *host);

#endif

diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 3e7615e..06cefe3 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -119,6 +119,16 @@ struct mmc_host {
#define MMC_CAP_NEEDS_POLL (1 << 5) /* Needs polling for card-detection */
#define MMC_CAP_8_BIT_DATA (1 << 6) /* Can the host do 8 bit transfers */

+#ifdef CONFIG_MMC_CLKGATE
+ int clk_requests; /* internal reference counter */
+ unsigned int clk_delay; /* number of MCI clk hold cycles */
+ bool clk_gated; /* clock gated */
+ bool clk_pending_gate; /* pending clock gating */
+ struct work_struct clk_disable_work; /* delayed clock disablement */
+ unsigned int clk_old; /* old clock value cache */
+ spinlock_t clk_lock; /* lock for clk fields */
+#endif
+
/* host specific block data */
unsigned int max_seg_size; /* see blk_queue_max_segment_size */
unsigned short max_hw_segs; /* see blk_queue_max_hw_segments */
--
1.6.2.5


2009-07-21 20:28:28

by Linus Walleij

Subject: Re: [PATCH 1/2] MMC Agressive clocking framework v5

Then the ChangeLog I missed yesterday due to sitting up too
late:

ChangeLog v4->v5:

* Make mmc_set_data_timeout() in core.c work even if ios.clock is
set to zero by the clock gating code (see the note below).

* Give the host-centric functions the mmc_host_clk* prefix instead
so it is clear that it's host functionality that is being
called.

* Mailing this to Andrew Morton and LKML since the previous MMC
maintainer stepped down.
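
A short note on the first item above: the timeout conversion divides by
the clock rate in kHz, so with a gated ios.clock of 0 the old expression
in mmc_set_data_timeout() would divide by zero. A minimal sketch of the
conversion, using a hypothetical helper that is not part of the patch:

/*
 * Illustration only: timeout_clks is converted to microseconds by
 * dividing by the clock in kHz.  With gating, ios.clock may be 0, so
 * the patch divides by mmc_host_clk_rate() (the cached, ungated rate)
 * rather than by ios.clock directly.
 */
static unsigned int clks_to_us(unsigned int timeout_clks, unsigned int clk_hz)
{
	/* e.g. timeout_clks = 100 at 400 kHz: 100 * 1000 / 400 = 250 us */
	return timeout_clks * 1000 / (clk_hz / 1000);
}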

Linus Walleij

2009-07-21 20:36:09

by Marek Vasut

Subject: Re: [PATCH 1/2] MMC Agressive clocking framework v5

Dne Út 21. července 2009 22:28:25 Linus Walleij napsal(a):
> Then the ChangeLog I missed yesterday due to sitting up too
> late:
>
> ChangeLog v4->v5:
>
> * Make mmc_set_data_timeout() in core.c work even if ios.clock is
> set to zero by the clock gating code.
>
> * Give the host-centric functions the mmc_host_clk* prefix instead
> so it is clear that it's host functionality that is being
> called.
>
> * Mailing this to Andrew Morton and LKML since the previous MMC
> maintainer stepped down.
Pierre gave up on mmc subsystem maintenance?
>
> Linus Walleij
>

2009-07-21 22:44:21

by Linus Walleij

Subject: Re: [PATCH 1/2] MMC Agressive clocking framework v5

2009/7/21 Marek Vasut <[email protected]>:
>> * Mailing this to Andrew Morton and LKML since the previous MMC
>> maintainer stepped down.

> Pierre gave up on mmc subsystem maintenance?

Sadly yes:
http://lkml.org/lkml/2009/7/14/134

:-(

Linus Walleij

2009-07-22 00:47:18

by Madhusudhan

Subject: RE: [PATCH 1/2] MMC Agressive clocking framework v5



> -----Original Message-----
> From: [email protected] [mailto:linux-arm-
> [email protected]] On Behalf Of Linus Walleij
> Sent: Monday, July 20, 2009 5:27 PM
> To: Andrew Morton; [email protected]
> Cc: Pierre Ossman; [email protected]; Linus Walleij
> Subject: [PATCH 1/2] MMC Agressive clocking framework v5
>
> This patch modifies the MMC core code to optionally call the
> set_ios() operation on the driver with the clock frequency set
> to 0 (gate) after a grace period of at least 8 MCLK cycles, then
> restore it (ungate) before any new request. This gives
> the driver the option to shut down the hardware block clock and
> thus (in known designs) the MCI clock to the MMC/SD card when
> the clock frequency is 0, i.e. the core has stated that the MCI
> clock does not need to be generated.
>
> It is inspired by existing clock gating code found in the OMAP
> and Atmel drivers and brings this up to the host abstraction.
> Gating is performed before and after any MMC request.
>
> This is exemplified by implementing it for the MMCI/PL180 MMC/SD
> host controller, but it should be simple to switch OMAP and
> Atmel over to using this instead.
>
> Signed-off-by: Linus Walleij <[email protected]>
> ---
> drivers/mmc/core/Kconfig | 11 +++
> drivers/mmc/core/core.c | 39 +++++++++-
> drivers/mmc/core/core.h | 2 +
> drivers/mmc/core/debugfs.c | 10 ++-
> drivers/mmc/core/host.c | 191 +++++++++++++++++++++++++++++++++++++++++++-
> drivers/mmc/core/host.h | 4 +
> include/linux/mmc/host.h | 10 +++
> 7 files changed, 263 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
> index ab37a6d..5372fc9 100644
> --- a/drivers/mmc/core/Kconfig
> +++ b/drivers/mmc/core/Kconfig
> @@ -14,3 +14,14 @@ config MMC_UNSAFE_RESUME
> This option is usually just for embedded systems which use
> a MMC/SD card for rootfs. Most people should say N here.
>
> +config MMC_CLKGATE
> + bool "MMC host clock gating (EXPERIMENTAL)"
> + depends on EXPERIMENTAL
> + help
> + This will attempt to aggressively gate the clock to the MMC host,
> + which typically also will gate the MCI clock to the card. This
> + is done to save power due to gating off the logic and bus noise
> + when MMC is not in use. Your host driver has to support this in
> + order for it to be of any use.
> +
> + If unsure, say N.
> diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
> index d84c880..30a940b 100644
> --- a/drivers/mmc/core/core.c
> +++ b/drivers/mmc/core/core.c
> @@ -113,6 +113,8 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
>
> if (mrq->done)
> mrq->done(mrq);
> +
> + mmc_host_clk_gate(host);
> }
> }
>
> @@ -173,6 +175,7 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
> mrq->stop->mrq = mrq;
> }
> }
> + mmc_host_clk_ungate(host);
> host->ops->request(host, mrq);
> }
>
> @@ -279,7 +282,7 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
>
> timeout_us = data->timeout_ns / 1000;
> timeout_us += data->timeout_clks * 1000 /
> - (card->host->ios.clock / 1000);
> + (mmc_host_clk_rate(card->host) / 1000);
>
> if (data->flags & MMC_DATA_WRITE)
> /*
> @@ -447,6 +450,40 @@ void mmc_set_clock(struct mmc_host *host, unsigned int hz)
> mmc_set_ios(host);
> }
>
> +#ifdef CONFIG_MMC_CLKGATE
> +/*
> + * This gates the clock by setting it to 0 Hz.
> + */
> +void mmc_gate_clock(struct mmc_host *host)
> +{
> + host->clk_old = host->ios.clock;
> + host->ios.clock = 0;
> + host->clk_gated = true;
> + mmc_set_ios(host);
> +}
> +
> +/*
> + * This restores the clock from gating by using the cached
> + * clock value.
> + */
> +void mmc_ungate_clock(struct mmc_host *host)
> +{
> + /*
> + * We should previously have gated the clock, so the clock
> + * shall be 0 here!
> + * The clock may however be 0 during initialization,
> + * when some request operations are performed before setting
> + * the frequency. When ungate is requested in that situation
> + * we just ignore the call.
> + */
> + if (host->clk_old) {
> + BUG_ON(host->ios.clock);

NAK for the BUG_ON here.

This could potentially be hit when the MMC core bumps up the frequency from
400 kHz to whatever the card supports in the init phase. Look at the init
sequence in the MMC core: the clock could be gated after reading the CSD or
EXT_CSD to determine max_dtr. Further, the MMC core sets ios.clock to max_dtr
(before issuing the next req), hence clk_old could be 400 kHz while ios.clock
is max_dtr. IMHO hitting the BUG_ON here would be incorrect.

> + mmc_set_clock(host, host->clk_old);
> + }
> + host->clk_gated = false;
> +}
> +#endif

2009-07-22 12:12:14

by David Vrabel

Subject: Re: [PATCH 1/2] MMC Agressive clocking framework v5

Linus Walleij wrote:
> This patch modifies the MMC core code to optionally call the
> set_ios() operation on the driver with the clock frequency set
> to 0 (gate) after a grace period of at least 8 MCLK cycles, then
> restore it (ungate) before any new request. This gives
> the driver the option to shut down the hardware block clock and
> thus (in known designs) the MCI clock to the MMC/SD card when
> the clock frequency is 0, i.e. the core has stated that the MCI
> clock does not need to be generated.

I'm not sure this is the right approach.

1. With some controllers (e.g., PXA270 I think) turning the clock on and
off is slow. This means if you're doing back-to-back commands you
should leave the clock on for best performance. I think there needs to
be a higher level active/idle knob for the user of the card (be it the
block driver or an SDIO function driver) to control whether to idle the
bus clock or controller.

2. Some controllers cannot detect SDIO interrupts if the clock is
stopped. There should be a distinction between clock off and
clock idle.

3. Regardless of point 1 above, using a workqueue item in this way
seems overkill. Consider using a timer and simply calling mod_timer()
at the start of every command. When the timer expires, idle the clock.
You will probably need a "command in progress" bit to ensure you don't
idle the clock if the timer expires in the middle of a command.
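
For reference, a rough sketch of the timer scheme described in point 3;
every foo_* name here is hypothetical and this is not code from either
patch series:

#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/timer.h>

struct foo_host {
	struct timer_list	idle_timer;
	spinlock_t		lock;
	bool			cmd_in_progress; /* "command in progress" bit */
};

static void foo_clk_idle(unsigned long data)
{
	struct foo_host *host = (struct foo_host *)data;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (!host->cmd_in_progress) {
		/* idle/gate the MCI clock here */
	}
	spin_unlock_irqrestore(&host->lock, flags);
}

static void foo_start_command(struct foo_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->cmd_in_progress = true;
	/* push the idle deadline forward; 10 ms is an arbitrary example */
	mod_timer(&host->idle_timer, jiffies + msecs_to_jiffies(10));
	spin_unlock_irqrestore(&host->lock, flags);

	/* ... ungate the clock if needed and issue the command ... */
}

static void foo_command_done(struct foo_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->cmd_in_progress = false;
	spin_unlock_irqrestore(&host->lock, flags);
}

static void foo_init(struct foo_host *host)
{
	spin_lock_init(&host->lock);
	setup_timer(&host->idle_timer, foo_clk_idle, (unsigned long)host);
}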

David
--
David Vrabel, Senior Software Engineer, Drivers
CSR, Churchill House, Cambridge Business Park, Tel: +44 (0)1223 692562
Cowley Road, Cambridge, CB4 0WZ http://www.csr.com/



2009-07-22 22:33:15

by Linus Walleij

Subject: Re: [PATCH 1/2] MMC Agressive clocking framework v5

2009/7/22 David Vrabel <[email protected]>:

> Linus Walleij wrote:

> I'm not sure this is the right approach.

It's been discussed a bit back and forth for a few months, but let's keep
at it...

> 1. With some controllers (e.g., PXA270 I think) turning the clock on and
> off is slow. This means if you're doing back-to-back commands you
> should leave the clock on for best performance.

OK, when I've been testing this, using the default workqueue and
schedule_work() covered these cases. Back-to-back commands
seemingly don't allow the timeout work to schedule, but I might be
overlooking the case of several CPUs there though :-/

> I think there needs to
> be a higher level active/idle knob for the user of the card (be it the
> block driver or an SDIO function driver) to control whether to idle the
> bus clock or controller.

The code doesn't tell the driver to idle the controller, it sets ios.clock
to zero to give the host driver the *opportunity* to turn controller clocks
off when it's OK from the MMC spec to do so (after 8 MCI cycles); the
driver doesn't *have* to do that. It can add additional logic for different
HW. So it's only about the bus clock (whereas my patch to mmci.c takes
advantage of the possibility to also gate the block clock).

> 2. Some controllers cannot detect SDIO interrupts if the clock is
> stopped. There should be a distinction between clock off and
> clock idle.

This is on the driver level, not in the core. The core doesn't tell whether
to clock off or not, it only tells the device driver that the MCI clk doesn't
need to run by setting it to 0. Clearly, some hardware cannot exploit
that, but some (like PL180, OMAP and Atmel) can, easily.

There can possibly also be HW that can turn off the MCI clk but not the
clock to the HW block itself, that is fine with the implementation.

> 3. Regardless of point 1 above, using a workqueue item in this way
> seems overkill. Consider using a timer and simply calling mod_timer()
> at the start of every command. When the timer expires, idle the clock.
> You will probably need a "command in progress" bit to ensure you don't
> idle the clock if the timer expires in the middle of a command.

I would agree if I created a new workqueue, but the timeout of this
particular workqueue is unimportant and that's why I'm using the
global workqueue and just schedule_work(). This means no extra
overhead, no extra thread and basically does the exact same thing.

But I could experiment with switching that for a timer if I get time
on my hands, so point taken.

Linus Walleij

2009-07-23 14:17:39

by David Vrabel

Subject: Re: [PATCH 1/2] MMC Agressive clocking framework v5

Linus Walleij wrote:
> 2009/7/22 David Vrabel <[email protected]>:
>
>> Linus Walleij wrote:
>
>> I'm not sure this is the right approach.
>
> It's been discussed a bit back and forth for a few months, but let's keep
> at it...
>
>> 1. With some controllers (e.g., PXA270 I think) turning the clock on and
>> off is slow. This means if you're doing back-to-back commands you
>> should leave the clock on for best performance.
>
> OK, when I've been testing this, using the default workqueue and
> schedule_work() covered these cases. Back-to-back commands
> seemingly don't allow the timeout work to schedule, but I might be
> overlooking the case of several CPUs there though :-/

Ok, with a configurable timeout your scheme is fine. The timeout can be
extended beyond 8 SDCLKs if this is beneficial.
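
(For scale, with the tick_ns formula from the v5 patch,
tick_ns = (1000000000 + freq - 1) / freq: at the 400 kHz initialization
frequency one MCI cycle is 2500 ns, so the default clk_delay of 8 means a
20 us wait before gating, while at e.g. 52 MHz it is only 8 * 20 ns = 160 ns.
Raising the clk_delay value exposed in debugfs stretches this wait
accordingly.)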

>> I think there needs to
>> be a higher level active/idle knob for the user of the card (be it the
>> block driver or an SDIO function driver) to control whether to idle the
>> bus clock or controller.
>
> The code doesn't tell the driver to idle the controller, it sets ios.clock
> to zero to give the host driver the *opportunity* to turn controller clocks
> off when it's OK from the MMC spec to do so (after 8 MCI cycles); the
> driver doesn't *have* to do that. It can add additional logic for different
> HW. So it's only about the bus clock (whereas my patch to mmci.c takes
> advantage of the possibility to also gate the block clock).

Many host controllers do stop the clock when ios->clock == 0 (pxamci and
sdhci, for example). Therefore, your patch will stop SDIO cards from
working on (at least) PXA27x platforms.

There needs to be three different bus states: active (max clock), idle
(minimal clock, SDIO interrupts work), and off (minimal clocks, SDIO
interrupts not required).

I'm not sure what the best way to add this would be. You could:

1. Have a special clock frequency to mean idle and fix up all existing
controller drivers to interpret this as 400 kHz unless you know the
controller handles SDIO interrupts with no SDCLK.

or:

2. Add an additional controller method (set_bus_state?) and only provide
this on controller drivers you're interested in.
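
A rough sketch of what the second option above could look like; the enum
and the callback are placeholders only and nothing like this exists in
the tree:

#include <linux/mmc/host.h>

enum mmc_bus_state {		/* hypothetical, for illustration only */
	MMC_BUS_ACTIVE,		/* full clock, requests in flight             */
	MMC_BUS_IDLE,		/* minimal clock, SDIO interrupts still work  */
	MMC_BUS_OFF,		/* clocks may be stopped, no SDIO IRQs needed */
};

/* a new, optional callback added to struct mmc_host_ops: */
	void	(*set_bus_state)(struct mmc_host *host,
				 enum mmc_bus_state state);

The core would only call it on hosts that provide it, leaving existing
controller drivers untouched.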

>> 2. Some controllers cannot detect SDIO interrupts if the clock is
>> stopped. There should be a distinction between clock off and
>> clock idle.
>
> This is on the driver level, not in the core. The core doesn't tell whether
> to clock off or not, it only tells the device driver that the MCI clk doesn't
> need to run by setting it to 0. Clearly, some hardware cannot exploit
> that, but some (like PL180, OMAP and Atmel) can, easily.

See previous comment.

> There can possibly also be HW that can turn off the MCI clk but not the
> clock to the HW block itself, that is fine with the implementation.

Ok.

>> 3. Regardless of point 1 above, using a workqueue item in this way
>> seems overkill. Consider using a timer and simply calling mod_timer()
>> at the start of every command. When the timer expires, idle the clock.
>> You will probably need a "command in progress" bit to ensure you don't
>> idle the clock if the timer expires in the middle of a command.
>
> I would agree if I created a new workqueue, but the timeout of this
> particular workqueue is unimportant and that's why I'm using the
> global workqueue and just schedule_work(). This means no extra
> overhead, no extra thread and basically does the exact same thing.

You currently queue a work item and wake the workqueue every command.
This is considerably more overhead (when doing back-to-back commands)
than simply calling mod_timer().

You also potentially delay for a considerable amount of time in the work
item.

David
--
David Vrabel, Senior Software Engineer, Drivers
CSR, Churchill House, Cambridge Business Park, Tel: +44 (0)1223 692562
Cowley Road, Cambridge, CB4 0WZ http://www.csr.com/



2009-07-23 18:16:41

by Adrian Hunter

Subject: Re: [PATCH 1/2] MMC Agressive clocking framework v5

Hi

Have you considered using our enable/disable approach instead?

It is here:

http://groups.google.com/group/linux.kernel/browse_frm/thread/a5caec1de1274be5/2ba7dee2a187a268

Perhaps it meets your needs, whereas your patches do not seem
to cover the functionality we need.

Regards
Adrian Hunter

2009-07-23 20:25:23

by Linus Walleij

Subject: Re: [PATCH 1/2] MMC Agressive clocking framework v5

2009/7/23 Adrian Hunter <[email protected]>:

> Have you considered using our enable/disable approach instead?

Didn't see that before, still I ran these patches by Tony... But
yes, I have *indeed* considered that approach and my first patch
was a lot like yours, but at the time Pierre said:

> I'm not sure I agree with the design here. What's wrong with using the
> set_ios() callback and the normal clock field?

So I switched to using ios.clock instead, and that's what's been iterated.

That said, *I* am perfectly happy with your patch, so if you send that to
Andrew I will happily drop my patch and adapt the MMCI driver for
your stuff instead if that goes in.

Yours,
Linus Walleij

2009-07-23 20:30:43

by Linus Walleij

Subject: Re: [PATCH 1/2] MMC Agressive clocking framework v5

2009/7/23 David Vrabel <[email protected]>:
> Linus Walleij wrote:
>> 2009/7/22 David Vrabel <[email protected]>:

>>> 1. With some controllers (e.g., PXA270 I think) turning the clock on and
>>> off is slow. This means if you're doing back-to-back commands you
>>> should leave the clock on for best performance.
>>
>> OK, when I've been testing this, using the default workqueue and
>> schedule_work() covered these cases. Back-to-back commands
>> seemingly don't allow the timeout work to schedule, but I might be
>> overlooking the case of several CPUs there though :-/
>
> Ok, with a configurable timeout your scheme is fine. The timeout can be
> extended beyond 8 SDCLKs if this is beneficial.

Yep, I have that in debugfs, but when I look at Adrian's code I see he instead
added a disable delay field to the host struct, so let's use his patch
instead then.

> I'm not sure what the best way to add this would be. You could:
>
> 1. Have a special clock frequency to mean idle and fix up all existing
> controller drivers to interpret this as 400 kHz unless you know the
> controller handles SDIO interrupts with no SDCLK.
>
> or:
>
> 2. Add an additional controller method (set_bus_state?) and only provide
> this on controller drivers you're interested in.

As discussed with Adrian this is what his patch does (adding a new host->ops
function for enable/disable) so let's use his patch.

>>> 3. Regardless of point 1 above, using a workqueue item in this way
>>> seems overkill. Consider using a timer and simply calling mod_timer()
>>> at the start of every command. When the timer expires, idle the clock.
>>> You will probably need a "command in progress" bit to ensure you don't
>>> idle the clock if the timer expires in the middle of a command.
>>
>> I would agree if I created a new workqueue, but the timeout of this
>> particular workqueue is unimportant and that's why I'm using the
>> global workqueue and just schedule_work(). This means no extra
>> overhead, no extra thread and basically does the exact same thing.
>
> You currently queue a work item and wake the workqueue every command.
> This is considerably more overhead (when doing back-to-back commands)
> than simply calling mod_timer().
>
> You also potentially delay for a considerable amount of time in the work
> item.

Yep, that's almost the idea... But let's raise the timer debate again with
Adrian's patch instead :-)

Linus Walleij

2009-07-24 22:03:40

by Madhusudhan

Subject: RE: [PATCH 1/2] MMC Agressive clocking framework v5

Hi Adrian,

The patch #28 "The omap hsmmc driver code refactoring" in the series does
not seem to apply.

Regards,
Madhu

> -----Original Message-----
> From: Adrian Hunter [mailto:[email protected]]
> Sent: Thursday, July 23, 2009 1:16 PM
> To: Linus Walleij
> Cc: Linus Walleij; Andrew Morton; [email protected]; Pierre
> Ossman; [email protected]; David Vrabel;
> [email protected]; Madhusudhan Chikkature
> Subject: Re: [PATCH 1/2] MMC Agressive clocking framework v5
>
> Hi
>
> Have you considered using our enable/disable approach instead?
>
> It is here:
>
> http://groups.google.com/group/linux.kernel/browse_frm/thread/a5caec1de1274be5/2ba7dee2a187a268
>
> Perhaps it meets your needs, whereas your patches do not seem
> to cover the functionality we need.
>
> Regards
> Adrian Hunter

2009-07-24 23:00:20

by Andrew Morton

Subject: Re: [PATCH 1/2] MMC Agressive clocking framework v5

On Fri, 24 Jul 2009 17:03:01 -0500
"Madhusudhan" <[email protected]> wrote:

>
> > -----Original Message-----
> > From: Adrian Hunter [mailto:[email protected]]
> > Sent: Thursday, July 23, 2009 1:16 PM
> > To: Linus Walleij
> > Cc: Linus Walleij; Andrew Morton; [email protected]; Pierre
> > Ossman; [email protected]; David Vrabel;
> > [email protected]; Madhusudhan Chikkature
> > Subject: Re: [PATCH 1/2] MMC Agressive clocking framework v5
> >
> > Hi
> >
> > Have you considered using our enable/disable approach instead?
> >
> > It is here:
> >
> > http://groups.google.com/group/linux.kernel/browse_frm/thread/a5caec1de1274be5/2ba7dee2a187a268
> >
> > Perhaps it meets your needs, whereas your patches do not seem
> > to cover the functionality we need.
> >
>
> The patch #28 "The omap hsmmc driver code refactoring" in the series does
> not seem to apply.

(top-posting repaired - please don't!)

I got one reject when applying that patch, due to
mmc-register-mmci-omap-hs-using-platform_driver_probe.patch. It was
simple to fix that up.

2009-07-25 11:35:26

by Adrian Hunter

Subject: Re: [PATCH 1/2] MMC Agressive clocking framework v5

Madhusudhan wrote:
> Hi Adrian,
>
> The patch #28 "The omap hsmmc driver code refactoring" in the series does
> not seem to apply.

Yes, in my mailbox too; it has tabs replaced by spaces.
Here is another copy.

>
> Regards,
> Madhu
>
>> -----Original Message-----
>> From: Adrian Hunter [mailto:[email protected]]
>> Sent: Thursday, July 23, 2009 1:16 PM
>> To: Linus Walleij
>> Cc: Linus Walleij; Andrew Morton; [email protected]; Pierre
>> Ossman; [email protected]; David Vrabel;
>> [email protected]; Madhusudhan Chikkature
>> Subject: Re: [PATCH 1/2] MMC Agressive clocking framework v5
>>
>> Hi
>>
>> Have you considered using our enable/disable approach instead?
>>
>> It is here:
>>
>> http://groups.google.com/group/linux.kernel/browse_frm/thread/a5caec1de1274be5/2ba7dee2a187a268
>>
>> Perhaps it meets your needs, whereas your patches do not seem
>> to cover the functionality we need.
>>
>> Regards
>> Adrian Hunter
>
>
>

From 10908c5264a19de415af6406ee19ef2bd68928c6 Mon Sep 17 00:00:00 2001
From: Denis Karpov <[email protected]>
Date: Mon, 18 May 2009 13:29:18 +0300
Subject: [PATCH] omap_hsmmc: code refactoring

Function, structure, and variable names are changed to start
with the omap_hsmmc_ prefix.

Signed-off-by: Denis Karpov <[email protected]>
Signed-off-by: Adrian Hunter <[email protected]>
---
drivers/mmc/host/omap_hsmmc.c | 322 +++++++++++++++++++++--------------------
1 files changed, 162 insertions(+), 160 deletions(-)

diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index fa5f401..5055d52 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -133,7 +133,7 @@
#define OMAP_HSMMC_WRITE(base, reg, val) \
__raw_writel((val), (base) + OMAP_HSMMC_##reg)

-struct mmc_omap_host {
+struct omap_hsmmc_host {
struct device *dev;
struct mmc_host *mmc;
struct mmc_request *mrq;
@@ -170,7 +170,7 @@ struct mmc_omap_host {
/*
* Stop clock to the card
*/
-static void omap_mmc_stop_clock(struct mmc_omap_host *host)
+static void omap_hsmmc_stop_clock(struct omap_hsmmc_host *host)
{
OMAP_HSMMC_WRITE(host->base, SYSCTL,
OMAP_HSMMC_READ(host->base, SYSCTL) & ~CEN);
@@ -184,7 +184,7 @@ static void omap_mmc_stop_clock(struct mmc_omap_host *host)
* Restore the MMC host context, if it was lost as result of a
* power state change.
*/
-static int omap_mmc_restore_ctx(struct mmc_omap_host *host)
+static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
{
struct mmc_ios *ios = &host->mmc->ios;
struct omap_mmc_platform_data *pdata = host->pdata;
@@ -312,7 +312,7 @@ out:
/*
* Save the MMC host context (store the number of power state changes so far).
*/
-static void omap_mmc_save_ctx(struct mmc_omap_host *host)
+static void omap_hsmmc_context_save(struct omap_hsmmc_host *host)
{
struct omap_mmc_platform_data *pdata = host->pdata;
int context_loss;
@@ -327,12 +327,12 @@ static void omap_mmc_save_ctx(struct mmc_omap_host *host)

#else

-static int omap_mmc_restore_ctx(struct mmc_omap_host *host)
+static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
{
return 0;
}

-static void omap_mmc_save_ctx(struct mmc_omap_host *host)
+static void omap_hsmmc_context_save(struct omap_hsmmc_host *host)
{
}

@@ -342,7 +342,7 @@ static void omap_mmc_save_ctx(struct mmc_omap_host *host)
* Send init stream sequence to card
* before sending IDLE command
*/
-static void send_init_stream(struct mmc_omap_host *host)
+static void send_init_stream(struct omap_hsmmc_host *host)
{
int reg = 0;
unsigned long timeout;
@@ -366,7 +366,7 @@ static void send_init_stream(struct mmc_omap_host *host)
}

static inline
-int mmc_omap_cover_is_closed(struct mmc_omap_host *host)
+int omap_hsmmc_cover_is_closed(struct omap_hsmmc_host *host)
{
int r = 1;

@@ -376,35 +376,35 @@ int mmc_omap_cover_is_closed(struct mmc_omap_host *host)
}

static ssize_t
-mmc_omap_show_cover_switch(struct device *dev, struct device_attribute *attr,
+omap_hsmmc_show_cover_switch(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
- struct mmc_omap_host *host = mmc_priv(mmc);
+ struct omap_hsmmc_host *host = mmc_priv(mmc);

- return sprintf(buf, "%s\n", mmc_omap_cover_is_closed(host) ? "closed" :
- "open");
+ return sprintf(buf, "%s\n",
+ omap_hsmmc_cover_is_closed(host) ? "closed" : "open");
}

-static DEVICE_ATTR(cover_switch, S_IRUGO, mmc_omap_show_cover_switch, NULL);
+static DEVICE_ATTR(cover_switch, S_IRUGO, omap_hsmmc_show_cover_switch, NULL);

static ssize_t
-mmc_omap_show_slot_name(struct device *dev, struct device_attribute *attr,
+omap_hsmmc_show_slot_name(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
- struct mmc_omap_host *host = mmc_priv(mmc);
+ struct omap_hsmmc_host *host = mmc_priv(mmc);

return sprintf(buf, "%s\n", mmc_slot(host).name);
}

-static DEVICE_ATTR(slot_name, S_IRUGO, mmc_omap_show_slot_name, NULL);
+static DEVICE_ATTR(slot_name, S_IRUGO, omap_hsmmc_show_slot_name, NULL);

/*
* Configure the response type and send the cmd.
*/
static void
-mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd,
+omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
struct mmc_data *data)
{
int cmdreg = 0, resptype = 0, cmdtype = 0;
@@ -464,7 +464,7 @@ mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd,
}

static int
-mmc_omap_get_dma_dir(struct mmc_omap_host *host, struct mmc_data *data)
+omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data)
{
if (data->flags & MMC_DATA_WRITE)
return DMA_TO_DEVICE;
@@ -476,7 +476,7 @@ mmc_omap_get_dma_dir(struct mmc_omap_host *host, struct mmc_data *data)
* Notify the transfer complete to MMC core
*/
static void
-mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)
+omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data)
{
if (!data) {
struct mmc_request *mrq = host->mrq;
@@ -497,7 +497,7 @@ mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)

if (host->use_dma && host->dma_ch != -1)
dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len,
- mmc_omap_get_dma_dir(host, data));
+ omap_hsmmc_get_dma_dir(host, data));

if (!data->error)
data->bytes_xfered += data->blocks * (data->blksz);
@@ -509,14 +509,14 @@ mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)
mmc_request_done(host->mmc, data->mrq);
return;
}
- mmc_omap_start_command(host, data->stop, NULL);
+ omap_hsmmc_start_command(host, data->stop, NULL);
}

/*
* Notify the core about command completion
*/
static void
-mmc_omap_cmd_done(struct mmc_omap_host *host, struct mmc_command *cmd)
+omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
{
host->cmd = NULL;

@@ -541,13 +541,13 @@ mmc_omap_cmd_done(struct mmc_omap_host *host, struct mmc_command *cmd)
/*
* DMA clean up for command errors
*/
-static void mmc_dma_cleanup(struct mmc_omap_host *host, int errno)
+static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
{
host->data->error = errno;

if (host->use_dma && host->dma_ch != -1) {
dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, host->dma_len,
- mmc_omap_get_dma_dir(host, host->data));
+ omap_hsmmc_get_dma_dir(host, host->data));
omap_free_dma(host->dma_ch);
host->dma_ch = -1;
up(&host->sem);
@@ -559,10 +559,10 @@ static void mmc_dma_cleanup(struct mmc_omap_host *host, int errno)
* Readable error output
*/
#ifdef CONFIG_MMC_DEBUG
-static void mmc_omap_report_irq(struct mmc_omap_host *host, u32 status)
+static void omap_hsmmc_report_irq(struct omap_hsmmc_host *host, u32 status)
{
/* --- means reserved bit without definition at documentation */
- static const char *mmc_omap_status_bits[] = {
+ static const char *omap_hsmmc_status_bits[] = {
"CC", "TC", "BGE", "---", "BWR", "BRR", "---", "---", "CIRQ",
"OBI", "---", "---", "---", "---", "---", "ERRI", "CTO", "CCRC",
"CEB", "CIE", "DTO", "DCRC", "DEB", "---", "ACE", "---",
@@ -575,9 +575,9 @@ static void mmc_omap_report_irq(struct mmc_omap_host *host, u32 status)
len = sprintf(buf, "MMC IRQ 0x%x :", status);
buf += len;

- for (i = 0; i < ARRAY_SIZE(mmc_omap_status_bits); i++)
+ for (i = 0; i < ARRAY_SIZE(omap_hsmmc_status_bits); i++)
if (status & (1 << i)) {
- len = sprintf(buf, " %s", mmc_omap_status_bits[i]);
+ len = sprintf(buf, " %s", omap_hsmmc_status_bits[i]);
buf += len;
}

@@ -592,8 +592,8 @@ static void mmc_omap_report_irq(struct mmc_omap_host *host, u32 status)
* SRC or SRD bit of SYSCTL register
* Can be called from interrupt context
*/
-static inline void mmc_omap_reset_controller_fsm(struct mmc_omap_host *host,
- unsigned long bit)
+static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host,
+ unsigned long bit)
{
unsigned long i = 0;
unsigned long limit = (loops_per_jiffy *
@@ -615,9 +615,9 @@ static inline void mmc_omap_reset_controller_fsm(struct mmc_omap_host *host,
/*
* MMC controller IRQ handler
*/
-static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
+static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
{
- struct mmc_omap_host *host = dev_id;
+ struct omap_hsmmc_host *host = dev_id;
struct mmc_data *data;
int end_cmd = 0, end_trans = 0, status;

@@ -635,14 +635,14 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id)

if (status & ERR) {
#ifdef CONFIG_MMC_DEBUG
- mmc_omap_report_irq(host, status);
+ omap_hsmmc_report_irq(host, status);
#endif
if ((status & CMD_TIMEOUT) ||
(status & CMD_CRC)) {
if (host->cmd) {
if (status & CMD_TIMEOUT) {
- mmc_omap_reset_controller_fsm(host,
- SRC);
+ omap_hsmmc_reset_controller_fsm(host,
+ SRC);
host->cmd->error = -ETIMEDOUT;
} else {
host->cmd->error = -EILSEQ;
@@ -651,9 +651,10 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
}
if (host->data || host->response_busy) {
if (host->data)
- mmc_dma_cleanup(host, -ETIMEDOUT);
+ omap_hsmmc_dma_cleanup(host,
+ -ETIMEDOUT);
host->response_busy = 0;
- mmc_omap_reset_controller_fsm(host, SRD);
+ omap_hsmmc_reset_controller_fsm(host, SRD);
}
}
if ((status & DATA_TIMEOUT) ||
@@ -663,11 +664,11 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
-ETIMEDOUT : -EILSEQ;

if (host->data)
- mmc_dma_cleanup(host, err);
+ omap_hsmmc_dma_cleanup(host, err);
else
host->mrq->cmd->error = err;
host->response_busy = 0;
- mmc_omap_reset_controller_fsm(host, SRD);
+ omap_hsmmc_reset_controller_fsm(host, SRD);
end_trans = 1;
}
}
@@ -686,14 +687,14 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
OMAP_HSMMC_READ(host->base, STAT);

if (end_cmd || ((status & CC) && host->cmd))
- mmc_omap_cmd_done(host, host->cmd);
+ omap_hsmmc_cmd_done(host, host->cmd);
if ((end_trans || (status & TC)) && host->mrq)
- mmc_omap_xfer_done(host, data);
+ omap_hsmmc_xfer_done(host, data);

return IRQ_HANDLED;
}

-static void set_sd_bus_power(struct mmc_omap_host *host)
+static void set_sd_bus_power(struct omap_hsmmc_host *host)
{
unsigned long i;

@@ -713,7 +714,7 @@ static void set_sd_bus_power(struct mmc_omap_host *host)
* The MMC2 transceiver controls are used instead of DAT4..DAT7.
* Some chips, like eMMC ones, use internal transceivers.
*/
-static int omap_mmc_switch_opcond(struct mmc_omap_host *host, int vdd)
+static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
{
u32 reg_val = 0;
int ret;
@@ -744,7 +745,7 @@ static int omap_mmc_switch_opcond(struct mmc_omap_host *host, int vdd)
/*
* If a MMC dual voltage card is detected, the set_ios fn calls
* this fn with VDD bit set for 1.8V. Upon card removal from the
- * slot, omap_mmc_set_ios sets the VDD back to 3V on MMC_POWER_OFF.
+ * slot, omap_hsmmc_set_ios sets the VDD back to 3V on MMC_POWER_OFF.
*
* Cope with a bit of slop in the range ... per data sheets:
* - "1.8V" for vdds_mmc1/vdds_mmc1a can be up to 2.45V max,
@@ -773,10 +774,10 @@ err:
/*
* Work Item to notify the core about card insertion/removal
*/
-static void mmc_omap_detect(struct work_struct *work)
+static void omap_hsmmc_detect(struct work_struct *work)
{
- struct mmc_omap_host *host = container_of(work, struct mmc_omap_host,
- mmc_carddetect_work);
+ struct omap_hsmmc_host *host =
+ container_of(work, struct omap_hsmmc_host, mmc_carddetect_work);
struct omap_mmc_slot_data *slot = &mmc_slot(host);
int carddetect;

@@ -794,8 +795,9 @@ static void mmc_omap_detect(struct work_struct *work)
mmc_detect_change(host->mmc, (HZ * 200) / 1000);
} else {
mmc_host_enable(host->mmc);
- mmc_omap_reset_controller_fsm(host, SRD);
+ omap_hsmmc_reset_controller_fsm(host, SRD);
mmc_host_lazy_disable(host->mmc);
+
mmc_detect_change(host->mmc, (HZ * 50) / 1000);
}
}
@@ -803,9 +805,9 @@ static void mmc_omap_detect(struct work_struct *work)
/*
* ISR for handling card insertion and removal
*/
-static irqreturn_t omap_mmc_cd_handler(int irq, void *dev_id)
+static irqreturn_t omap_hsmmc_cd_handler(int irq, void *dev_id)
{
- struct mmc_omap_host *host = (struct mmc_omap_host *)dev_id;
+ struct omap_hsmmc_host *host = (struct omap_hsmmc_host *)dev_id;

if (host->suspended)
return IRQ_HANDLED;
@@ -814,7 +816,7 @@ static irqreturn_t omap_mmc_cd_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}

-static int mmc_omap_get_dma_sync_dev(struct mmc_omap_host *host,
+static int omap_hsmmc_get_dma_sync_dev(struct omap_hsmmc_host *host,
struct mmc_data *data)
{
int sync_dev;
@@ -826,7 +828,7 @@ static int mmc_omap_get_dma_sync_dev(struct mmc_omap_host *host,
return sync_dev;
}

-static void mmc_omap_config_dma_params(struct mmc_omap_host *host,
+static void omap_hsmmc_config_dma_params(struct omap_hsmmc_host *host,
struct mmc_data *data,
struct scatterlist *sgl)
{
@@ -850,7 +852,7 @@ static void mmc_omap_config_dma_params(struct mmc_omap_host *host,

omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S32,
blksz / 4, nblk, OMAP_DMA_SYNC_FRAME,
- mmc_omap_get_dma_sync_dev(host, data),
+ omap_hsmmc_get_dma_sync_dev(host, data),
!(data->flags & MMC_DATA_WRITE));

omap_start_dma(dma_ch);
@@ -859,9 +861,9 @@ static void mmc_omap_config_dma_params(struct mmc_omap_host *host,
/*
* DMA call back function
*/
-static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data)
+static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *data)
{
- struct mmc_omap_host *host = data;
+ struct omap_hsmmc_host *host = data;

if (ch_status & OMAP2_DMA_MISALIGNED_ERR_IRQ)
dev_dbg(mmc_dev(host->mmc), "MISALIGNED_ADRS_ERR\n");
@@ -872,7 +874,7 @@ static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data)
host->dma_sg_idx++;
if (host->dma_sg_idx < host->dma_len) {
/* Fire up the next transfer. */
- mmc_omap_config_dma_params(host, host->data,
+ omap_hsmmc_config_dma_params(host, host->data,
host->data->sg + host->dma_sg_idx);
return;
}
@@ -889,8 +891,8 @@ static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data)
/*
* Routine to configure and start DMA for the MMC card
*/
-static int
-mmc_omap_start_dma_transfer(struct mmc_omap_host *host, struct mmc_request *req)
+static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
+ struct mmc_request *req)
{
int dma_ch = 0, ret = 0, err = 1, i;
struct mmc_data *data = req->data;
@@ -927,8 +929,8 @@ mmc_omap_start_dma_transfer(struct mmc_omap_host *host, struct mmc_request *req)
return err;
}

- ret = omap_request_dma(mmc_omap_get_dma_sync_dev(host, data), "MMC/SD",
- mmc_omap_dma_cb, host, &dma_ch);
+ ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data),
+ "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch);
if (ret != 0) {
dev_err(mmc_dev(host->mmc),
"%s: omap_request_dma() failed with %d\n",
@@ -937,16 +939,16 @@ mmc_omap_start_dma_transfer(struct mmc_omap_host *host, struct mmc_request *req)
}

host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
- data->sg_len, mmc_omap_get_dma_dir(host, data));
+ data->sg_len, omap_hsmmc_get_dma_dir(host, data));
host->dma_ch = dma_ch;
host->dma_sg_idx = 0;

- mmc_omap_config_dma_params(host, data, data->sg);
+ omap_hsmmc_config_dma_params(host, data, data->sg);

return 0;
}

-static void set_data_timeout(struct mmc_omap_host *host,
+static void set_data_timeout(struct omap_hsmmc_host *host,
struct mmc_request *req)
{
unsigned int timeout, cycle_ns;
@@ -986,7 +988,7 @@ static void set_data_timeout(struct mmc_omap_host *host,
* Configure block length for MMC/SD cards and initiate the transfer.
*/
static int
-mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
+omap_hsmmc_prepare_data(struct omap_hsmmc_host *host, struct mmc_request *req)
{
int ret;
host->data = req->data;
@@ -1001,7 +1003,7 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
set_data_timeout(host, req);

if (host->use_dma) {
- ret = mmc_omap_start_dma_transfer(host, req);
+ ret = omap_hsmmc_start_dma_transfer(host, req);
if (ret != 0) {
dev_dbg(mmc_dev(host->mmc), "MMC start dma failure\n");
return ret;
@@ -1013,9 +1015,9 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
/*
* Request function. for read/write operation
*/
-static void omap_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
+static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
{
- struct mmc_omap_host *host = mmc_priv(mmc);
+ struct omap_hsmmc_host *host = mmc_priv(mmc);
int err;

/*
@@ -1027,7 +1029,7 @@ static void omap_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
disable_irq(host->irq);
WARN_ON(host->mrq != NULL);
host->mrq = req;
- err = mmc_omap_prepare_data(host, req);
+ err = omap_hsmmc_prepare_data(host, req);
if (err) {
req->cmd->error = err;
if (req->data)
@@ -1039,14 +1041,13 @@ static void omap_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
return;
}

- mmc_omap_start_command(host, req->cmd, req->data);
+ omap_hsmmc_start_command(host, req->cmd, req->data);
}

-
/* Routine to configure clock values. Exposed API to core */
-static void omap_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
- struct mmc_omap_host *host = mmc_priv(mmc);
+ struct omap_hsmmc_host *host = mmc_priv(mmc);
u16 dsor = 0;
unsigned long regval;
unsigned long timeout;
@@ -1105,8 +1106,8 @@ static void omap_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
* MMC_POWER_UP upon recalculating the voltage.
* vdd 1.8v.
*/
- if (omap_mmc_switch_opcond(host, ios->vdd) != 0)
- dev_dbg(mmc_dev(host->mmc),
+ if (omap_hsmmc_switch_opcond(host, ios->vdd) != 0)
+ dev_dbg(mmc_dev(host->mmc),
"Switch operation failed\n");
}
}
@@ -1122,7 +1123,7 @@ static void omap_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (dsor > 250)
dsor = 250;
}
- omap_mmc_stop_clock(host);
+ omap_hsmmc_stop_clock(host);
regval = OMAP_HSMMC_READ(host->base, SYSCTL);
regval = regval & ~(CLKD_MASK);
regval = regval | (dsor << 6) | (DTO << 16);
@@ -1156,7 +1157,7 @@ static void omap_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)

static int omap_hsmmc_get_cd(struct mmc_host *mmc)
{
- struct mmc_omap_host *host = mmc_priv(mmc);
+ struct omap_hsmmc_host *host = mmc_priv(mmc);

if (!mmc_slot(host).card_detect)
return -ENOSYS;
@@ -1165,14 +1166,14 @@ static int omap_hsmmc_get_cd(struct mmc_host *mmc)

static int omap_hsmmc_get_ro(struct mmc_host *mmc)
{
- struct mmc_omap_host *host = mmc_priv(mmc);
+ struct omap_hsmmc_host *host = mmc_priv(mmc);

if (!mmc_slot(host).get_ro)
return -ENOSYS;
return mmc_slot(host).get_ro(host->dev, 0);
}

-static void omap_hsmmc_init(struct mmc_omap_host *host)
+static void omap_hsmmc_conf_bus_power(struct omap_hsmmc_host *host)
{
u32 hctl, capa, value;

@@ -1218,9 +1219,9 @@ static void omap_hsmmc_init(struct mmc_omap_host *host)
enum {ENABLED = 0, DISABLED, CARDSLEEP, REGSLEEP, OFF};

/* Handler for [ENABLED -> DISABLED] transition */
-static int omap_mmc_enabled_to_disabled(struct mmc_omap_host *host)
+static int omap_hsmmc_enabled_to_disabled(struct omap_hsmmc_host *host)
{
- omap_mmc_save_ctx(host);
+ omap_hsmmc_context_save(host);
clk_disable(host->fclk);
host->dpm_state = DISABLED;

@@ -1233,7 +1234,7 @@ static int omap_mmc_enabled_to_disabled(struct mmc_omap_host *host)
}

/* Handler for [DISABLED -> REGSLEEP / CARDSLEEP] transition */
-static int omap_mmc_disabled_to_sleep(struct mmc_omap_host *host)
+static int omap_hsmmc_disabled_to_sleep(struct omap_hsmmc_host *host)
{
int err, new_state;

@@ -1241,7 +1242,7 @@ static int omap_mmc_disabled_to_sleep(struct mmc_omap_host *host)
return 0;

clk_enable(host->fclk);
- omap_mmc_restore_ctx(host);
+ omap_hsmmc_context_restore(host);
if (mmc_card_can_sleep(host->mmc)) {
err = mmc_card_sleep(host->mmc);
if (err < 0) {
@@ -1250,8 +1251,9 @@ static int omap_mmc_disabled_to_sleep(struct mmc_omap_host *host)
return err;
}
new_state = CARDSLEEP;
- } else
+ } else {
new_state = REGSLEEP;
+ }
if (mmc_slot(host).set_sleep)
mmc_slot(host).set_sleep(host->dev, host->slot_id, 1, 0,
new_state == CARDSLEEP);
@@ -1274,7 +1276,7 @@ static int omap_mmc_disabled_to_sleep(struct mmc_omap_host *host)
}

/* Handler for [REGSLEEP / CARDSLEEP -> OFF] transition */
-static int omap_mmc_sleep_to_off(struct mmc_omap_host *host)
+static int omap_hsmmc_sleep_to_off(struct omap_hsmmc_host *host)
{
if (!mmc_try_claim_host(host->mmc))
return 0;
@@ -1302,7 +1304,7 @@ static int omap_mmc_sleep_to_off(struct mmc_omap_host *host)
}

/* Handler for [DISABLED -> ENABLED] transition */
-static int omap_mmc_disabled_to_enabled(struct mmc_omap_host *host)
+static int omap_hsmmc_disabled_to_enabled(struct omap_hsmmc_host *host)
{
int err;

@@ -1310,8 +1312,7 @@ static int omap_mmc_disabled_to_enabled(struct mmc_omap_host *host)
if (err < 0)
return err;

- omap_mmc_restore_ctx(host);
-
+ omap_hsmmc_context_restore(host);
host->dpm_state = ENABLED;

dev_dbg(mmc_dev(host->mmc), "DISABLED -> ENABLED\n");
@@ -1320,13 +1321,13 @@ static int omap_mmc_disabled_to_enabled(struct mmc_omap_host *host)
}

/* Handler for [SLEEP -> ENABLED] transition */
-static int omap_mmc_sleep_to_enabled(struct mmc_omap_host *host)
+static int omap_hsmmc_sleep_to_enabled(struct omap_hsmmc_host *host)
{
if (!mmc_try_claim_host(host->mmc))
return 0;

clk_enable(host->fclk);
- omap_mmc_restore_ctx(host);
+ omap_hsmmc_context_restore(host);
if (mmc_slot(host).set_sleep)
mmc_slot(host).set_sleep(host->dev, host->slot_id, 0,
host->vdd, host->dpm_state == CARDSLEEP);
@@ -1344,12 +1345,12 @@ static int omap_mmc_sleep_to_enabled(struct mmc_omap_host *host)
}

/* Handler for [OFF -> ENABLED] transition */
-static int omap_mmc_off_to_enabled(struct mmc_omap_host *host)
+static int omap_hsmmc_off_to_enabled(struct omap_hsmmc_host *host)
{
clk_enable(host->fclk);

- omap_mmc_restore_ctx(host);
- omap_hsmmc_init(host);
+ omap_hsmmc_context_restore(host);
+ omap_hsmmc_conf_bus_power(host);
mmc_power_restore_host(host->mmc);

host->dpm_state = ENABLED;
@@ -1362,18 +1363,18 @@ static int omap_mmc_off_to_enabled(struct mmc_omap_host *host)
/*
* Bring MMC host to ENABLED from any other PM state.
*/
-static int omap_mmc_enable(struct mmc_host *mmc)
+static int omap_hsmmc_enable(struct mmc_host *mmc)
{
- struct mmc_omap_host *host = mmc_priv(mmc);
+ struct omap_hsmmc_host *host = mmc_priv(mmc);

switch (host->dpm_state) {
case DISABLED:
- return omap_mmc_disabled_to_enabled(host);
+ return omap_hsmmc_disabled_to_enabled(host);
case CARDSLEEP:
case REGSLEEP:
- return omap_mmc_sleep_to_enabled(host);
+ return omap_hsmmc_sleep_to_enabled(host);
case OFF:
- return omap_mmc_off_to_enabled(host);
+ return omap_hsmmc_off_to_enabled(host);
default:
dev_dbg(mmc_dev(host->mmc), "UNKNOWN state\n");
return -EINVAL;
@@ -1383,68 +1384,68 @@ static int omap_mmc_enable(struct mmc_host *mmc)
/*
* Bring MMC host in PM state (one level deeper).
*/
-static int omap_mmc_disable(struct mmc_host *mmc, int lazy)
+static int omap_hsmmc_disable(struct mmc_host *mmc, int lazy)
{
- struct mmc_omap_host *host = mmc_priv(mmc);
+ struct omap_hsmmc_host *host = mmc_priv(mmc);

switch (host->dpm_state) {
case ENABLED: {
int delay;

- delay = omap_mmc_enabled_to_disabled(host);
+ delay = omap_hsmmc_enabled_to_disabled(host);
if (lazy || delay < 0)
return delay;
return 0;
}
case DISABLED:
- return omap_mmc_disabled_to_sleep(host);
+ return omap_hsmmc_disabled_to_sleep(host);
case CARDSLEEP:
case REGSLEEP:
- return omap_mmc_sleep_to_off(host);
+ return omap_hsmmc_sleep_to_off(host);
default:
dev_dbg(mmc_dev(host->mmc), "UNKNOWN state\n");
return -EINVAL;
}
}

-static int omap_mmc_enable_fclk(struct mmc_host *mmc)
+static int omap_hsmmc_enable_fclk(struct mmc_host *mmc)
{
- struct mmc_omap_host *host = mmc_priv(mmc);
+ struct omap_hsmmc_host *host = mmc_priv(mmc);
int err;

err = clk_enable(host->fclk);
if (err)
return err;
dev_dbg(mmc_dev(host->mmc), "mmc_fclk: enabled\n");
- omap_mmc_restore_ctx(host);
+ omap_hsmmc_context_restore(host);
return 0;
}

-static int omap_mmc_disable_fclk(struct mmc_host *mmc, int lazy)
+static int omap_hsmmc_disable_fclk(struct mmc_host *mmc, int lazy)
{
- struct mmc_omap_host *host = mmc_priv(mmc);
+ struct omap_hsmmc_host *host = mmc_priv(mmc);

- omap_mmc_save_ctx(host);
+ omap_hsmmc_context_save(host);
clk_disable(host->fclk);
dev_dbg(mmc_dev(host->mmc), "mmc_fclk: disabled\n");
return 0;
}

-static const struct mmc_host_ops mmc_omap_ops = {
- .enable = omap_mmc_enable_fclk,
- .disable = omap_mmc_disable_fclk,
- .request = omap_mmc_request,
- .set_ios = omap_mmc_set_ios,
+static const struct mmc_host_ops omap_hsmmc_ops = {
+ .enable = omap_hsmmc_enable_fclk,
+ .disable = omap_hsmmc_disable_fclk,
+ .request = omap_hsmmc_request,
+ .set_ios = omap_hsmmc_set_ios,
.get_cd = omap_hsmmc_get_cd,
.get_ro = omap_hsmmc_get_ro,
/* NYET -- enable_sdio_irq */
};

-static const struct mmc_host_ops mmc_omap_ps_ops = {
- .enable = omap_mmc_enable,
- .disable = omap_mmc_disable,
- .request = omap_mmc_request,
- .set_ios = omap_mmc_set_ios,
+static const struct mmc_host_ops omap_hsmmc_ps_ops = {
+ .enable = omap_hsmmc_enable,
+ .disable = omap_hsmmc_disable,
+ .request = omap_hsmmc_request,
+ .set_ios = omap_hsmmc_set_ios,
.get_cd = omap_hsmmc_get_cd,
.get_ro = omap_hsmmc_get_ro,
/* NYET -- enable_sdio_irq */
@@ -1452,15 +1453,14 @@ static const struct mmc_host_ops mmc_omap_ps_ops = {

#ifdef CONFIG_DEBUG_FS

-static int mmc_regs_show(struct seq_file *s, void *data)
+static int omap_hsmmc_regs_show(struct seq_file *s, void *data)
{
struct mmc_host *mmc = s->private;
- struct mmc_omap_host *host = mmc_priv(mmc);
- struct omap_mmc_platform_data *pdata = host->pdata;
+ struct omap_hsmmc_host *host = mmc_priv(mmc);
int context_loss = 0;

- if (pdata->get_context_loss_count)
- context_loss = pdata->get_context_loss_count(host->dev);
+ if (host->pdata->get_context_loss_count)
+ context_loss = host->pdata->get_context_loss_count(host->dev);

seq_printf(s, "mmc%d:\n"
" enabled:\t%d\n"
@@ -1502,19 +1502,19 @@ static int mmc_regs_show(struct seq_file *s, void *data)
return 0;
}

-static int mmc_regs_open(struct inode *inode, struct file *file)
+static int omap_hsmmc_regs_open(struct inode *inode, struct file *file)
{
- return single_open(file, mmc_regs_show, inode->i_private);
+ return single_open(file, omap_hsmmc_regs_show, inode->i_private);
}

static const struct file_operations mmc_regs_fops = {
- .open = mmc_regs_open,
+ .open = omap_hsmmc_regs_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};

-static void omap_mmc_debugfs(struct mmc_host *mmc)
+static void omap_hsmmc_debugfs(struct mmc_host *mmc)
{
if (mmc->debugfs_root)
debugfs_create_file("regs", S_IRUSR, mmc->debugfs_root,
@@ -1523,17 +1523,17 @@ static void omap_mmc_debugfs(struct mmc_host *mmc)

#else

-static void omap_mmc_debugfs(struct mmc_host *mmc)
+static void omap_hsmmc_debugfs(struct mmc_host *mmc)
{
}

#endif

-static int __init omap_mmc_probe(struct platform_device *pdev)
+static int __init omap_hsmmc_probe(struct platform_device *pdev)
{
struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
struct mmc_host *mmc;
- struct mmc_omap_host *host = NULL;
+ struct omap_hsmmc_host *host = NULL;
struct resource *res;
int ret = 0, irq;

@@ -1557,7 +1557,7 @@ static int __init omap_mmc_probe(struct platform_device *pdev)
if (res == NULL)
return -EBUSY;

- mmc = mmc_alloc_host(sizeof(struct mmc_omap_host), &pdev->dev);
+ mmc = mmc_alloc_host(sizeof(struct omap_hsmmc_host), &pdev->dev);
if (!mmc) {
ret = -ENOMEM;
goto err;
@@ -1578,12 +1578,12 @@ static int __init omap_mmc_probe(struct platform_device *pdev)
host->power_mode = -1;

platform_set_drvdata(pdev, host);
- INIT_WORK(&host->mmc_carddetect_work, mmc_omap_detect);
+ INIT_WORK(&host->mmc_carddetect_work, omap_hsmmc_detect);

if (mmc_slot(host).power_saving)
- mmc->ops = &mmc_omap_ps_ops;
+ mmc->ops = &omap_hsmmc_ps_ops;
else
- mmc->ops = &mmc_omap_ops;
+ mmc->ops = &omap_hsmmc_ops;

mmc->f_min = 400000;
mmc->f_max = 52000000;
@@ -1604,7 +1604,7 @@ static int __init omap_mmc_probe(struct platform_device *pdev)
goto err1;
}

- omap_mmc_save_ctx(host);
+ omap_hsmmc_context_save(host);

mmc->caps |= MMC_CAP_DISABLE;
mmc_set_disable_delay(mmc, msecs_to_jiffies(OMAP_MMC_DISABLED_TIMEOUT));
@@ -1657,7 +1657,7 @@ static int __init omap_mmc_probe(struct platform_device *pdev)

mmc->caps |= mmc_slot(host).caps;

- omap_hsmmc_init(host);
+ omap_hsmmc_conf_bus_power(host);

/* Select DMA lines */
switch (host->id) {
@@ -1679,7 +1679,7 @@ static int __init omap_mmc_probe(struct platform_device *pdev)
}

/* Request IRQ for MMC operations */
- ret = request_irq(host->irq, mmc_omap_irq, IRQF_DISABLED,
+ ret = request_irq(host->irq, omap_hsmmc_irq, IRQF_DISABLED,
mmc_hostname(mmc), host);
if (ret) {
dev_dbg(mmc_dev(host->mmc), "Unable to grab HSMMC IRQ\n");
@@ -1689,7 +1689,8 @@ static int __init omap_mmc_probe(struct platform_device *pdev)
/* initialize power supplies, gpios, etc */
if (pdata->init != NULL) {
if (pdata->init(&pdev->dev) != 0) {
- dev_dbg(mmc_dev(host->mmc), "late init error\n");
+ dev_dbg(mmc_dev(host->mmc),
+ "Unable to configure MMC IRQs\n");
goto err_irq_cd_init;
}
}
@@ -1698,7 +1699,7 @@ static int __init omap_mmc_probe(struct platform_device *pdev)
/* Request IRQ for card detect */
if ((mmc_slot(host).card_detect_irq)) {
ret = request_irq(mmc_slot(host).card_detect_irq,
- omap_mmc_cd_handler,
+ omap_hsmmc_cd_handler,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
| IRQF_DISABLED,
mmc_hostname(mmc), host);
@@ -1728,7 +1729,7 @@ static int __init omap_mmc_probe(struct platform_device *pdev)
goto err_cover_switch;
}

- omap_mmc_debugfs(mmc);
+ omap_hsmmc_debugfs(mmc);

return 0;

@@ -1760,9 +1761,9 @@ err:
return ret;
}

-static int omap_mmc_remove(struct platform_device *pdev)
+static int omap_hsmmc_remove(struct platform_device *pdev)
{
- struct mmc_omap_host *host = platform_get_drvdata(pdev);
+ struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
struct resource *res;

if (host) {
@@ -1797,10 +1798,10 @@ static int omap_mmc_remove(struct platform_device *pdev)
}

#ifdef CONFIG_PM
-static int omap_mmc_suspend(struct platform_device *pdev, pm_message_t state)
+static int omap_hsmmc_suspend(struct platform_device *pdev, pm_message_t state)
{
int ret = 0;
- struct mmc_omap_host *host = platform_get_drvdata(pdev);
+ struct omap_hsmmc_host *host = platform_get_drvdata(pdev);

if (host && host->suspended)
return 0;
@@ -1848,10 +1849,10 @@ static int omap_mmc_suspend(struct platform_device *pdev, pm_message_t state)
}

/* Routine to resume the MMC device */
-static int omap_mmc_resume(struct platform_device *pdev)
+static int omap_hsmmc_resume(struct platform_device *pdev)
{
int ret = 0;
- struct mmc_omap_host *host = platform_get_drvdata(pdev);
+ struct omap_hsmmc_host *host = platform_get_drvdata(pdev);

if (host && !host->suspended)
return 0;
@@ -1870,7 +1871,7 @@ static int omap_mmc_resume(struct platform_device *pdev)
goto clk_en_err;
}

- omap_hsmmc_init(host);
+ omap_hsmmc_conf_bus_power(host);

if (host->pdata->resume) {
ret = host->pdata->resume(&pdev->dev, host->slot_id);
@@ -1883,6 +1884,7 @@ static int omap_mmc_resume(struct platform_device *pdev)
ret = mmc_resume_host(host->mmc);
if (ret == 0)
host->suspended = 0;
+
mmc_host_lazy_disable(host->mmc);
}

@@ -1895,35 +1897,35 @@ clk_en_err:
}

#else
-#define omap_mmc_suspend NULL
-#define omap_mmc_resume NULL
+#define omap_hsmmc_suspend NULL
+#define omap_hsmmc_resume NULL
#endif

-static struct platform_driver omap_mmc_driver = {
- .probe = omap_mmc_probe,
- .remove = omap_mmc_remove,
- .suspend = omap_mmc_suspend,
- .resume = omap_mmc_resume,
+static struct platform_driver omap_hsmmc_driver = {
+ .probe = omap_hsmmc_probe,
+ .remove = omap_hsmmc_remove,
+ .suspend = omap_hsmmc_suspend,
+ .resume = omap_hsmmc_resume,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
},
};

-static int __init omap_mmc_init(void)
+static int __init omap_hsmmc_init(void)
{
/* Register the MMC driver */
- return platform_driver_register(&omap_mmc_driver);
+ return platform_driver_register(&omap_hsmmc_driver);
}

-static void __exit omap_mmc_cleanup(void)
+static void __exit omap_hsmmc_cleanup(void)
{
/* Unregister MMC driver */
- platform_driver_unregister(&omap_mmc_driver);
+ platform_driver_unregister(&omap_hsmmc_driver);
}

-module_init(omap_mmc_init);
-module_exit(omap_mmc_cleanup);
+module_init(omap_hsmmc_init);
+module_exit(omap_hsmmc_cleanup);

MODULE_DESCRIPTION("OMAP High Speed Multimedia Card driver");
MODULE_LICENSE("GPL");
--
1.5.6.3