itimers: remove the per-cpu-ish-ness
Either we bounce one cacheline per cpu per tick, yielding n^2 bounces, or we just bounce a single one.

Also, using per-cpu allocations for the thread-groups complicates the per-cpu allocator in that it is currently aimed to be a fixed-sized allocator, and the only possible extension to that would be vmap based, which is seriously constrained on 32 bit archs. So making the per-cpu memory requirement depend on the number of processes is an issue.

Lastly, it didn't deal with cpu-hotplug, although admittedly that might be fixable.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent
ede6f5aea0
commit
490dea45d0
5 changed files with 46 additions and 107 deletions
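For context on the n^2 claim: before this commit the group totals lived in per-cpu memory, so every reader had to walk all CPUs and sum their slots, touching one remote cacheline per CPU. Since each CPU's tick path may read the group totals to check timer expiry, that is n readers times n cachelines per tick. Below is a minimal sketch of the removed read path, reconstructed from the pre-commit kernel/posix-cpu-timers.c; treat names and details as approximate, not verbatim:

/* Sketch of the removed per-cpu read path (approximate reconstruction). */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
        struct task_cputime *totals = tsk->signal->cputime.totals;
        struct task_cputime *tot;
        int i;

        times->utime = cputime_zero;
        times->stime = cputime_zero;
        times->sum_exec_runtime = 0;

        /* One remote cacheline touched per possible CPU, on every read. */
        for_each_possible_cpu(i) {
                tot = per_cpu_ptr(totals, i);
                times->utime = cputime_add(times->utime, tot->utime);
                times->stime = cputime_add(times->stime, tot->stime);
                times->sum_exec_runtime += tot->sum_exec_runtime;
        }
}

The diff below replaces this with a single spinlock-protected structure, so a read is one lock round-trip over one shared cacheline.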
include/linux/sched.h
@@ -450,6 +450,7 @@ struct task_cputime {
        cputime_t utime;
        cputime_t stime;
        unsigned long long sum_exec_runtime;
+       spinlock_t lock;
 };
 /* Alternate field names when used to cache expirations. */
 #define prof_exp        stime
@@ -465,7 +466,7 @@ struct task_cputime {
  * used for thread group CPU clock calculations.
  */
 struct thread_group_cputime {
-       struct task_cputime *totals;
+       struct task_cputime totals;
 };

 /*
@@ -2180,24 +2181,30 @@ static inline int spin_needbreak(spinlock_t *lock)
  * Thread group CPU time accounting.
  */

-extern int thread_group_cputime_alloc(struct task_struct *);
-extern void thread_group_cputime(struct task_struct *, struct task_cputime *);
+static inline
+void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
+{
+       struct task_cputime *totals = &tsk->signal->cputime.totals;
+       unsigned long flags;
+
+       spin_lock_irqsave(&totals->lock, flags);
+       *times = *totals;
+       spin_unlock_irqrestore(&totals->lock, flags);
+}

 static inline void thread_group_cputime_init(struct signal_struct *sig)
 {
-       sig->cputime.totals = NULL;
-}
+       sig->cputime.totals = (struct task_cputime){
+               .utime = cputime_zero,
+               .stime = cputime_zero,
+               .sum_exec_runtime = 0,
+       };

-static inline int thread_group_cputime_clone_thread(struct task_struct *curr)
-{
-       if (curr->signal->cputime.totals)
-               return 0;
-       return thread_group_cputime_alloc(curr);
+       spin_lock_init(&sig->cputime.totals.lock);
 }

 static inline void thread_group_cputime_free(struct signal_struct *sig)
 {
-       free_percpu(sig->cputime.totals);
 }

 /*
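The write side pairs with this: the per-tick accounting helpers now update the single shared structure under its lock, so each tick bounces exactly one cacheline instead of readers bouncing n. A sketch of the corresponding updater along the lines of kernel/sched_stats.h in this commit (an approximate reconstruction of a file not shown in this section, not verbatim):

/* Sketch: per-tick update of the shared group totals (approximate). */
static inline void account_group_user_time(struct task_struct *tsk,
                                           cputime_t cputime)
{
        struct task_cputime *times;

        /* tsk == current; skip exiting tasks whose ->signal is going away. */
        if (unlikely(tsk->exit_state))
                return;

        times = &tsk->signal->cputime.totals;

        /* One shared cacheline bounced per tick, instead of n per reader. */
        spin_lock(&times->lock);
        times->utime = cputime_add(times->utime, cputime);
        spin_unlock(&times->lock);
}

Trading the per-cpu slots for a single locked structure also removes the allocation, clone, and free paths entirely, which is why thread_group_cputime_alloc and the per-cpu free disappear in the diff above.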