From: Tao Ma <[email protected]>
Currently, if the IO is throttled by io-throttle, the SA has no idea of
the situation and can't report it to the real application user about
that he/she has to do something. So this patch adds a new interface
named blkio.throttle.io_throttled which indicates how many IOs are
currently throttled.
I am not sure whether it is OK to add this information to the generic
blkcg since it is only io-throttle related, but I don't find a way to
only store it into the blkcg io-throttle. And that's the reason this
is only a RFC. Any suggestions? Thanks.
Cc: Tejun Heo <[email protected]>
Cc: Vivek Goyal <[email protected]>
Signed-off-by: Tao Ma <[email protected]>
---
block/blk-cgroup.c | 39 +++++++++++++++++++++++++++++++++++++++
block/blk-cgroup.h | 13 +++++++++++++
block/blk-throttle.c | 7 ++++++-
3 files changed, 58 insertions(+), 1 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index ea84a23..bf4d11b 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -348,6 +348,31 @@ static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
#endif
+void blkiocg_update_io_throttled_stats(struct blkio_group *blkg,
+ struct blkio_group *curr_blkg, bool direction,
+ bool sync)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&blkg->stats_lock, flags);
+ blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_THROTTLED], 1, direction,
+ sync);
+ spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_io_throttled_stats);
+
+void blkiocg_update_io_throttled_remove_stats(struct blkio_group *blkg,
+ bool direction, bool sync)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&blkg->stats_lock, flags);
+ blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_THROTTLED],
+ direction, sync);
+ spin_unlock_irqrestore(&blkg->stats_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blkiocg_update_io_throttled_remove_stats);
+
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
struct blkio_group *curr_blkg, bool direction,
bool sync)
@@ -578,6 +603,7 @@ blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
struct blkio_group_stats *stats;
struct hlist_node *n;
uint64_t queued[BLKIO_STAT_TOTAL];
+ uint64_t throttled[BLKIO_STAT_TOTAL];
int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
bool idling, waiting, empty;
@@ -596,9 +622,13 @@ blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
#endif
for (i = 0; i < BLKIO_STAT_TOTAL; i++)
queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
+ for (i = 0; i < BLKIO_STAT_TOTAL; i++)
+ throttled[i] = stats->stat_arr[BLKIO_STAT_THROTTLED][i];
memset(stats, 0, sizeof(struct blkio_group_stats));
for (i = 0; i < BLKIO_STAT_TOTAL; i++)
stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
+ for (i = 0; i < BLKIO_STAT_TOTAL; i++)
+ stats->stat_arr[BLKIO_STAT_THROTTLED][i] = throttled[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
if (idling) {
blkio_mark_blkg_idling(stats);
@@ -1301,6 +1331,9 @@ static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
case BLKIO_THROTL_io_serviced:
return blkio_read_blkg_stats(blkcg, cft, cb,
BLKIO_STAT_CPU_SERVICED, 1, 1);
+ case BLKIO_THROTL_io_throttled:
+ return blkio_read_blkg_stats(blkcg, cft, cb,
+ BLKIO_STAT_THROTTLED, 1, 0);
default:
BUG();
}
@@ -1497,6 +1530,12 @@ struct cftype blkio_files[] = {
BLKIO_THROTL_io_serviced),
.read_map = blkiocg_file_read_map,
},
+ {
+ .name = "throttle.io_throttled",
+ .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
+ BLKIO_THROTL_io_throttled),
+ .read_map = blkiocg_file_read_map,
+ },
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_DEBUG_BLK_CGROUP
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 6f3ace7..5b97eb7 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -39,6 +39,8 @@ enum stat_type {
BLKIO_STAT_SERVICE_TIME = 0,
/* Total time spent waiting in scheduler queue in ns */
BLKIO_STAT_WAIT_TIME,
+ /* Number of IOs throttled */
+ BLKIO_STAT_THROTTLED,
/* Number of IOs queued up */
BLKIO_STAT_QUEUED,
/* All the single valued stats go below this */
@@ -109,6 +111,7 @@ enum blkcg_file_name_throtl {
BLKIO_THROTL_write_iops_device,
BLKIO_THROTL_io_service_bytes,
BLKIO_THROTL_io_serviced,
+ BLKIO_THROTL_io_throttled,
};
struct blkio_cgroup {
@@ -327,6 +330,11 @@ void blkiocg_update_io_add_stats(struct blkio_group *blkg,
struct blkio_group *curr_blkg, bool direction, bool sync);
void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
bool direction, bool sync);
+void blkiocg_update_io_throttled_stats(struct blkio_group *blkg,
+ struct blkio_group *curr_blkg, bool direction, bool sync);
+void blkiocg_update_io_throttled_remove_stats(struct blkio_group *blkg,
+ bool direction, bool sync);
+
#else
struct cgroup;
static inline struct blkio_cgroup *
@@ -360,5 +368,10 @@ static inline void blkiocg_update_io_add_stats(struct blkio_group *blkg,
struct blkio_group *curr_blkg, bool direction, bool sync) {}
static inline void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
bool direction, bool sync) {}
+static inline void blkiocg_update_io_throttled_stats(struct blkio_group *blkg,
+ struct blkio_group *curr_blkg, bool direction, bool sync) {}
+static inline void
+blkiocg_update_io_throttled_remove_stats(struct blkio_group *blkg,
+		bool direction, bool sync) {}
#endif
#endif /* _BLK_CGROUP_H */
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index f2ddb94..1a39305 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -749,13 +749,15 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
struct bio *bio)
{
- bool rw = bio_data_dir(bio);
+ int rw = bio_data_dir(bio);
+ int sync = !(bio->bi_rw & REQ_WRITE) || (bio->bi_rw & REQ_SYNC);
bio_list_add(&tg->bio_lists[rw], bio);
/* Take a bio reference on tg */
throtl_ref_get_tg(tg);
tg->nr_queued[rw]++;
td->nr_queued[rw]++;
+ blkiocg_update_io_throttled_stats(&tg->blkg, NULL, rw, sync);
throtl_enqueue_tg(td, tg);
}
@@ -783,9 +785,12 @@ static void tg_dispatch_one_bio(struct throtl_data *td, struct throtl_grp *tg,
bool rw, struct bio_list *bl)
{
struct bio *bio;
+ bool sync;
bio = bio_list_pop(&tg->bio_lists[rw]);
+ sync = !(bio->bi_rw & REQ_WRITE) || (bio->bi_rw & REQ_SYNC);
tg->nr_queued[rw]--;
+ blkiocg_update_io_throttled_remove_stats(&tg->blkg, rw, sync);
/* Drop bio reference on tg */
throtl_put_tg(tg);
--
1.7.1
On Tue, May 22, 2012 at 04:10:36PM +0800, Tao Ma wrote:
> From: Tao Ma <[email protected]>
>
> Currently, if the IO is throttled by io-throttle, the SA has no idea of
> the situation and can't report it to the real application user about
> that he/she has to do something. So this patch adds a new interface
> named blkio.throttle.io_throttled which indicates how many IOs are
> currently throttled.
If the only purpose is to know whether IOs are being throttled, why
not just scan for the rules and see if respective device has any
throttling rules or not.
Even if you introduce this interface, you will end up scanning for
throttled ios against that particular device. And if IO is not happening
at that moment or if IO rate is not exceeding the rate limit, there
might not be any throttled ios and one might get misled.
So for your purpose a better interface sounds like scanning for throttling
rules instead of this new interface.
>
> I am not sure whether it is OK to add this information to the generic
> blkcg since it is only io-throttle related, but I don't find a way to
> only store it into the blkcg io-throttle. And that's the reason this
> is only a RFC. Any suggestions? Thanks.
Tejun has changed the code in this area and new code will allow you to
introduce this file in blk-throttle.c. All that code is sitting in Jens's
block tree.
Thanks
Vivek
Hi Vivek,
Thanks for the quick response.
On 05/22/2012 07:11 PM, Vivek Goyal wrote:
> On Tue, May 22, 2012 at 04:10:36PM +0800, Tao Ma wrote:
>> From: Tao Ma <[email protected]>
>>
>> Currently, if the IO is throttled by io-throttle, the SA has no idea of
>> the situation and can't report it to the real application user about
>> that he/she has to do something. So this patch adds a new interface
>> named blkio.throttle.io_throttled which indicates how many IOs are
>> currently throttled.
>
> If the only purpose is to know whether IOs are being throttled, why
> not just scan for the rules and see if respective device has any
> throttling rules or not.
Sorry, but setting a throttling rule doesn't mean the IOs are
throttled, right? So scanning doesn't work here IMHO.
>
> Even if you introduce this interface, you will end up scanning for
> throttled ios against that particular device. And if IO is not happening
> at that moment or if IO rate is not exceeding the rate limit, there
> might not be any throttled ios and one might get misled.
Oh, no, actually in a *cloud computing* environment it is really
useful, not misleading. So let me describe it in more detail. Our product
system will limit every instance to an approximate number at first, and
then watch the IOs being throttled. If these numbers are high, it can:
1) Shout loudly to the application programmer about the abuse if he
sends out too much IO requests.
2) If it is not too much and some other instances are not active, adjust
the throttled ratio so that this instance can work much faster.
Both of these need to know the throttled status of the cgroup, and
negative feedback is really useful for the elastic control of IO cgroups.
>
> So for your purpose a better interface sounds like scanning for throttling
> rules instead of this new interface.
Sorry, as I have said above, I don't know how to get the current status
of the throttled IOs.
>
>>
>> I am not sure whether it is OK to add this information to the generic
>> blkcg since it is only io-throttle related, but I don't find a way to
>> only store it into the blkcg io-throttle. And that's the reason this
>> is only a RFC. Any suggestions? Thanks.
>
> Tejun has changed the code in this area and new code will allow you to
> introduce this file in blk-throttle.c. All that code is sitting in Jens's
> block tree.
Oh, cool, I will check that. Thanks.
Thanks
Tao
On Tue, May 22, 2012 at 10:44:11PM +0800, Tao Ma wrote:
> Hi Vivek,
> Thanks for the quick response.
> On 05/22/2012 07:11 PM, Vivek Goyal wrote:
> > On Tue, May 22, 2012 at 04:10:36PM +0800, Tao Ma wrote:
> >> From: Tao Ma <[email protected]>
> >>
> >> Currently, if the IO is throttled by io-throttle, the SA has no idea of
> >> the situation and can't report it to the real application user about
> >> that he/she has to do something. So this patch adds a new interface
> >> named blkio.throttle.io_throttled which indicates how many IOs are
> >> currently throttled.
> >
> > If the only purpose is to know whether IOs are being throttled, why
> > not just scan for the rules and see if respective device has any
> > throttling rules or not.
> Sorry, but setting a throttling rules doesn't mean the IOs are
> throttled, right? So scanning doesn't work here IMHO.
It means IOs will be throttled if you cross a certain rate. But yes, it
does not tell you whether at time T there are any bios
throttled in the queue or not.
> >
> > Even if you introduce this interface, you will end up scanning for
> > throttled ios against that particular device. And if IO is not happening
> > at that moment or if IO rate is not exceeding the rate limit, there
> > might not be any throttled ios and one might get misled.
> Oh, no actually in a *clound computing* environment, it is really
> useful, not misled. So let me describe it in more detail. Our product
> system will limit every instance to an approximate number at first, and
> then watch out the IOs being throttled. If these numbers is high, it can:
> 1) Shout loudly to the application programmer about the abuse if he
> sends out too much IO requests.
> 2) If it is not too much and some other instances are not active, adjust
> the throttled ratio so that this instance can work much faster.
Ok, so you want to use this more as "congestion" parameter which tells at
a given moment how busy the queue is, or in this instance how many IOs
are backlogged in a cgroup due to throttling limits.
I guess, it is not a bad idea to export this stat then. Will
"blkio.throttle.queued" be a better name, reflecting how many bios
are currently queued in the throttling layer of the request queue?
Thanks
Vivek
On 05/22/2012 11:06 PM, Vivek Goyal wrote:
> On Tue, May 22, 2012 at 10:44:11PM +0800, Tao Ma wrote:
>> Hi Vivek,
>> Thanks for the quick response.
>> On 05/22/2012 07:11 PM, Vivek Goyal wrote:
>>> On Tue, May 22, 2012 at 04:10:36PM +0800, Tao Ma wrote:
>>>> From: Tao Ma <[email protected]>
>>>>
>>>> Currently, if the IO is throttled by io-throttle, the SA has no idea of
>>>> the situation and can't report it to the real application user about
>>>> that he/she has to do something. So this patch adds a new interface
>>>> named blkio.throttle.io_throttled which indicates how many IOs are
>>>> currently throttled.
>>>
>>> If the only purpose is to know whether IOs are being throttled, why
>>> not just scan for the rules and see if respective device has any
>>> throttling rules or not.
>> Sorry, but setting a throttling rules doesn't mean the IOs are
>> throttled, right? So scanning doesn't work here IMHO.
>
> It means IOs will be throttled if you cross a certain rate. But yes, it
> does not give any information that if at time T if there are any bios
> throttled in the queue or not.
>
>>>
>>> Even if you introduce this interface, you will end up scanning for
>>> throttled ios against that particular device. And if IO is not happening
>>> at that moment or if IO rate is not exceeding the rate limit, there
>>> might not be any throttled ios and one might get misled.
>> Oh, no actually in a *clound computing* environment, it is really
>> useful, not misled. So let me describe it in more detail. Our product
>> system will limit every instance to an approximate number at first, and
>> then watch out the IOs being throttled. If these numbers is high, it can:
>> 1) Shout loudly to the application programmer about the abuse if he
>> sends out too much IO requests.
>> 2) If it is not too much and some other instances are not active, adjust
>> the throttled ratio so that this instance can work much faster.
>
> Ok, so you want to use this more as "congestion" parameter which tells at
> a given moment how busy the queue is, or in this instance how many IOs
> are backlogged in a cgroup due to throttling limits.
yeah, with this information the daemon can adjust these limits
automatically.
>
> I guess, it is not a bad idea to export this stat then. Will
> "blkio.throttle.queued" be a better name to reflect that how many bios
> are currently queued in throttling layer of request queue.
I thought of this name at the very first. But there is also
another one named "blkio.queued" which indicates the IOs being queued in
the scheduler. I don't want the user to be confused and that's the
reason I use "blkio.throttle.io_throttled".
Thanks
Tao
On Tue, May 22, 2012 at 11:14:55PM +0800, Tao Ma wrote:
> On 05/22/2012 11:06 PM, Vivek Goyal wrote:
> > On Tue, May 22, 2012 at 10:44:11PM +0800, Tao Ma wrote:
> >> Hi Vivek,
> >> Thanks for the quick response.
> >> On 05/22/2012 07:11 PM, Vivek Goyal wrote:
> >>> On Tue, May 22, 2012 at 04:10:36PM +0800, Tao Ma wrote:
> >>>> From: Tao Ma <[email protected]>
> >>>>
> >>>> Currently, if the IO is throttled by io-throttle, the SA has no idea of
> >>>> the situation and can't report it to the real application user about
> >>>> that he/she has to do something. So this patch adds a new interface
> >>>> named blkio.throttle.io_throttled which indicates how many IOs are
> >>>> currently throttled.
> >>>
> >>> If the only purpose is to know whether IOs are being throttled, why
> >>> not just scan for the rules and see if respective device has any
> >>> throttling rules or not.
> >> Sorry, but setting a throttling rules doesn't mean the IOs are
> >> throttled, right? So scanning doesn't work here IMHO.
> >
> > It means IOs will be throttled if you cross a certain rate. But yes, it
> > does not give any information that if at time T if there are any bios
> > throttled in the queue or not.
> >
> >>>
> >>> Even if you introduce this interface, you will end up scanning for
> >>> throttled ios against that particular device. And if IO is not happening
> >>> at that moment or if IO rate is not exceeding the rate limit, there
> >>> might not be any throttled ios and one might get misled.
> >> Oh, no actually in a *clound computing* environment, it is really
> >> useful, not misled. So let me describe it in more detail. Our product
> >> system will limit every instance to an approximate number at first, and
> >> then watch out the IOs being throttled. If these numbers is high, it can:
> >> 1) Shout loudly to the application programmer about the abuse if he
> >> sends out too much IO requests.
> >> 2) If it is not too much and some other instances are not active, adjust
> >> the throttled ratio so that this instance can work much faster.
> >
> > Ok, so you want to use this more as "congestion" parameter which tells at
> > a given moment how busy the queue is, or in this instance how many IOs
> > are backlogged in a cgroup due to throttling limits.
> yeah, with this information the daemon can adjust these limits
> automatically.
I am hoping that this daemon will monitor the file over long periods and
will not react to bursty traffic from the application.
> >
> > I guess, it is not a bad idea to export this stat then. Will
> > "blkio.throttle.queued" be a better name to reflect that how many bios
> > are currently queued in throttling layer of request queue.
> I have thought of this name at the very first time. But there is also
> another one named "blkio.queued" which indicated the IOs being queued in
> the scheduler. I don't want the user to be confused and that's the
> reason I use "blkio.throttle.io_throttled".
Actually it is blkio.io_queued which shows the number of requests queued
in CFQ in that cgroup.
CFQ and throttling are two different policies and they have separate
files in cgroup. Ideally blkio.io_queued should have been
blkio.cfq.io_queued but initially it did not occur to me that I should
qualify these files with policy name.
Later when throttling policy came along, then I qualified new files with
policy name. blkio.throttle.*.
In summary, blkio.io_queued gives stats of io queued at CFQ level. So it
makes sense to create blkio.throttle.io_queued which tells how many
bios are currently throttled and queued in throttling layer in this
request queue from this cgroup.
Thanks
Vivek
On 05/23/2012 04:08 AM, Vivek Goyal wrote:
> On Tue, May 22, 2012 at 11:14:55PM +0800, Tao Ma wrote:
>> On 05/22/2012 11:06 PM, Vivek Goyal wrote:
>>> On Tue, May 22, 2012 at 10:44:11PM +0800, Tao Ma wrote:
>>>> Hi Vivek,
>>>> Thanks for the quick response.
>>>> On 05/22/2012 07:11 PM, Vivek Goyal wrote:
>>>>> On Tue, May 22, 2012 at 04:10:36PM +0800, Tao Ma wrote:
>>>>>> From: Tao Ma <[email protected]>
>>>>>>
>>>>>> Currently, if the IO is throttled by io-throttle, the SA has no idea of
>>>>>> the situation and can't report it to the real application user about
>>>>>> that he/she has to do something. So this patch adds a new interface
>>>>>> named blkio.throttle.io_throttled which indicates how many IOs are
>>>>>> currently throttled.
>>>>>
>>>>> If the only purpose is to know whether IOs are being throttled, why
>>>>> not just scan for the rules and see if respective device has any
>>>>> throttling rules or not.
>>>> Sorry, but setting a throttling rules doesn't mean the IOs are
>>>> throttled, right? So scanning doesn't work here IMHO.
>>>
>>> It means IOs will be throttled if you cross a certain rate. But yes, it
>>> does not give any information that if at time T if there are any bios
>>> throttled in the queue or not.
>>>
>>>>>
>>>>> Even if you introduce this interface, you will end up scanning for
>>>>> throttled ios against that particular device. And if IO is not happening
>>>>> at that moment or if IO rate is not exceeding the rate limit, there
>>>>> might not be any throttled ios and one might get misled.
>>>> Oh, no actually in a *clound computing* environment, it is really
>>>> useful, not misled. So let me describe it in more detail. Our product
>>>> system will limit every instance to an approximate number at first, and
>>>> then watch out the IOs being throttled. If these numbers is high, it can:
>>>> 1) Shout loudly to the application programmer about the abuse if he
>>>> sends out too much IO requests.
>>>> 2) If it is not too much and some other instances are not active, adjust
>>>> the throttled ratio so that this instance can work much faster.
>>>
>>> Ok, so you want to use this more as "congestion" parameter which tells at
>>> a given moment how busy the queue is, or in this instance how many IOs
>>> are backlogged in a cgroup due to throttling limits.
>> yeah, with this information the daemon can adjust these limits
>> automatically.
>
> I am hoping that this daemon will monitor the file for long periods and
> will not reach to bursty traffic from application.
>
>>>
>>> I guess, it is not a bad idea to export this stat then. Will
>>> "blkio.throttle.queued" be a better name to reflect that how many bios
>>> are currently queued in throttling layer of request queue.
>> I have thought of this name at the very first time. But there is also
>> another one named "blkio.queued" which indicated the IOs being queued in
>> the scheduler. I don't want the user to be confused and that's the
>> reason I use "blkio.throttle.io_throttled".
>
> Actually it is blkio.io_queued which shows number of requests queued in
> CFQ in that cgroup.
>
> CFQ and throttling are two different policies and they have separate
> files in cgroup. Ideally blkio.io_queued should have been
> blkio.cfq.io_queued but initially it did not occur to me that I should
> qualify these files with policy name.
>
> Later when throttling policy came along, then I qualified new files with
> policy name. blkio.throttle.*.
>
> In summary, blkio.io_queued gives stats of io queued at CFQ level. So it
> makes sense to create blkio.throttle.io_queued which tells how many
> bios are currently throttled and queued in throttling layer in this
> request queue from this cgroup.
OK, I am fine with any name actually. ;) I will use it in the v2.
Thanks
Tao
>
> Thanks
> Vivek
Hi Vivek,
On 05/22/2012 07:11 PM, Vivek Goyal wrote:
> On Tue, May 22, 2012 at 04:10:36PM +0800, Tao Ma wrote:
>> From: Tao Ma <[email protected]>
>>
>> Currently, if the IO is throttled by io-throttle, the SA has no idea of
>> the situation and can't report it to the real application user about
>> that he/she has to do something. So this patch adds a new interface
>> named blkio.throttle.io_throttled which indicates how many IOs are
>> currently throttled.
>
> If the only purpose is to know whether IOs are being throttled, why
> not just scan for the rules and see if respective device has any
> throttling rules or not.
>
> Even if you introduce this interface, you will end up scanning for
> throttled ios against that particular device. And if IO is not happening
> at that moment or if IO rate is not exceeding the rate limit, there
> might not be any throttled ios and one might get misled.
>
> So for your purpose a better interface sounds like scanning for throttling
> rules instead of this new interface.
>
>>
>> I am not sure whether it is OK to add this information to the generic
>> blkcg since it is only io-throttle related, but I don't find a way to
>> only store it into the blkcg io-throttle. And that's the reason this
>> is only a RFC. Any suggestions? Thanks.
>
> Tejun has changed the code in this area and new code will allow you to
> introduce this file in blk-throttle.c. All that code is sitting in Jens's
> block tree.
I am just trying to rebase my patch according to your suggestion, but I
can't find the change you mentioned above. So could you please tell me
where I can find the related change so that I can base my work on it?
Thanks
Tao
>
> Thanks
> Vivek
> --
> To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
> the body of a message to [email protected]
> More majordomo info at http://vger.kernel.org/majordomo-info.html
> Please read the FAQ at http://www.tux.org/lkml/
On Tue, May 29, 2012 at 03:51:59PM +0800, Tao Ma wrote:
[..]
> > Tejun has changed the code in this area and new code will allow you to
> > introduce this file in blk-throttle.c. All that code is sitting in Jens's
> > block tree.
> I am just trying to rebase my patch according to your suggestion, but I
> can't find the change you mentioned above. So could you please tell me
> where I can find the related change so that I can base my work on?
>
Hi Tao,
Jens has now pushed the changes to Linus for 3.5 and Linus has merged all
the patches.
So look at block/blk-throttle.c and look for "throtl_files[]", where the
rest of the throttle-related cgroup files are declared.
Thanks
Vivek