sched: Accumulate per-cfs_rq cpu usage and charge against bandwidth
Account bandwidth usage on the cfs_rq level versus the task_groups to which they belong. Whether we are tracking bandwidth on a given cfs_rq is maintained under cfs_rq->runtime_enabled.

cfs_rq's which belong to a bandwidth-constrained task_group have their runtime accounted via the update_curr() path, which withdraws bandwidth from the global pool as desired. Updates involving the global pool are currently protected under cfs_bandwidth->lock; local runtime is protected by rq->lock.

This patch only assigns and tracks quota; no action is taken in the case that cfs_rq->runtime_used exceeds cfs_rq->runtime_assigned.

Signed-off-by: Paul Turner <pjt@google.com>
Signed-off-by: Nikhil Rao <ncrao@google.com>
Signed-off-by: Bharata B Rao <bharata@linux.vnet.ibm.com>
Reviewed-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20110721184757.179386821@google.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
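[Editor's note] The accounting scheme the changelog describes can be illustrated with a small standalone model. The sketch below is not the patch's kernel code: bw_pool, local_rq, BW_SLICE_NS, assign_local_runtime() and account_local_runtime() are hypothetical names, and pthread mutexes stand in for cfs_bandwidth->lock and rq->lock. It only shows the shape of the scheme: execution time is charged locally on the update_curr()-style path, and a slice is withdrawn from the shared pool whenever local usage catches up with what has been assigned; as in this patch, nothing is throttled when the pool runs dry.

/* Minimal userspace model of two-level CFS-style runtime accounting.
 * All names are hypothetical; this is not the kernel implementation.
 * Build with: cc -pthread model.c
 */
#include <inttypes.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define BW_SLICE_NS 5000000ULL  /* runtime withdrawn from the global pool per refill */

struct bw_pool {                /* plays the role of struct cfs_bandwidth */
        pthread_mutex_t lock;   /* stands in for cfs_bandwidth->lock */
        uint64_t quota;         /* allowance per period (refill not modeled here) */
        uint64_t runtime;       /* unclaimed runtime left in the current period */
};

struct local_rq {               /* plays the role of a bandwidth-enabled cfs_rq */
        pthread_mutex_t lock;   /* stands in for rq->lock */
        struct bw_pool *pool;
        int runtime_enabled;
        uint64_t runtime_assigned;  /* total pulled from the pool so far */
        uint64_t runtime_used;      /* total charged by the accounting path */
};

/* Withdraw up to one slice from the global pool; pool state touched only under pool->lock. */
static void assign_local_runtime(struct local_rq *rq)
{
        struct bw_pool *p = rq->pool;
        uint64_t amount;

        pthread_mutex_lock(&p->lock);
        amount = p->runtime < BW_SLICE_NS ? p->runtime : BW_SLICE_NS;
        p->runtime -= amount;
        pthread_mutex_unlock(&p->lock);

        rq->runtime_assigned += amount;
}

/* Charge delta_exec ns of cpu time; caller holds rq->lock, as update_curr() would. */
static void account_local_runtime(struct local_rq *rq, uint64_t delta_exec)
{
        if (!rq->runtime_enabled)
                return;

        rq->runtime_used += delta_exec;

        /* Only assign and track quota; overrun handling is a later step. */
        if (rq->runtime_used > rq->runtime_assigned)
                assign_local_runtime(rq);
}

int main(void)
{
        struct bw_pool pool = { .quota = 100000000ULL, .runtime = 100000000ULL };
        struct local_rq rq = { .pool = &pool, .runtime_enabled = 1 };

        pthread_mutex_init(&pool.lock, NULL);
        pthread_mutex_init(&rq.lock, NULL);

        pthread_mutex_lock(&rq.lock);
        for (int i = 0; i < 8; i++)
                account_local_runtime(&rq, 3000000ULL);  /* 3 ms of execution per tick */
        pthread_mutex_unlock(&rq.lock);

        printf("used=%" PRIu64 " assigned=%" PRIu64 " pool runtime left=%" PRIu64 "\n",
               rq.runtime_used, rq.runtime_assigned, pool.runtime);
        return 0;
}

The point of the two-level split, as the changelog notes, is locking cost: the per-cfs_rq counters are touched only under the already-held rq->lock on every accounting update, while the shared pool's lock is taken only for the comparatively infrequent slice refills.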
commit ec12cb7f31
parent a790de9959
4 changed files with 94 additions and 3 deletions
@@ -251,7 +251,7 @@ struct cfs_bandwidth {
 #ifdef CONFIG_CFS_BANDWIDTH
 	raw_spinlock_t lock;
 	ktime_t period;
-	u64 quota;
+	u64 quota, runtime;
 	s64 hierarchal_quota;
 #endif
 };

@@ -407,6 +407,7 @@ static inline u64 default_cfs_period(void);
 static void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 {
 	raw_spin_lock_init(&cfs_b->lock);
+	cfs_b->runtime = 0;
 	cfs_b->quota = RUNTIME_INF;
 	cfs_b->period = ns_to_ktime(default_cfs_period());
 }

@@ -9107,6 +9108,7 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
 	raw_spin_lock_irq(&cfs_b->lock);
 	cfs_b->period = ns_to_ktime(period);
 	cfs_b->quota = quota;
+	cfs_b->runtime = quota;
 	raw_spin_unlock_irq(&cfs_b->lock);

 	for_each_possible_cpu(i) {