x86/fpu: Move thread_info::fpu_counter into thread_info::fpu.counter
This field is kept separate from the main FPU state structure for
no good reason.

Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 3f6a0bce90
commit c0c2803dee

4 changed files with 16 additions and 16 deletions
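In effect, the per-task counter moves from being a loose member of struct thread_struct into struct fpu, which already groups the rest of the per-task FPU state. A minimal before/after sketch of the layout change, with unrelated members elided and the *_old/*_new/*_sketch type names invented here purely for illustration (the real structures are in arch/x86/include/asm/processor.h, as the diff below shows):

	union thread_xstate;	/* opaque here; defined elsewhere in the kernel */

	/* Before: the counter sat apart from the other FPU fields. */
	struct thread_struct_old {
		/* ... unrelated members elided ... */
		unsigned char fpu_counter;	/* consecutive FPU-using switches */
	};

	/* After: it lives alongside the rest of the FPU state. */
	struct fpu_sketch {
		unsigned int last_cpu;
		unsigned int has_fpu;
		union thread_xstate *state;
		unsigned char counter;		/* same semantics, new home */
	};

	struct thread_struct_new {
		/* ... unrelated members elided ... */
		struct fpu_sketch fpu;	/* accessed as tsk->thread.fpu.counter */
	};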
arch/x86/include/asm/fpu-internal.h

@@ -384,7 +384,7 @@ static inline void drop_fpu(struct task_struct *tsk)
 	 * Forget coprocessor state..
 	 */
 	preempt_disable();
-	tsk->thread.fpu_counter = 0;
+	tsk->thread.fpu.counter = 0;
 
 	if (__thread_has_fpu(tsk)) {
 		/* Ignore delayed exceptions from user space */
@@ -441,7 +441,7 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
 	 * or if the past 5 consecutive context-switches used math.
 	 */
 	fpu.preload = tsk_used_math(new) &&
-		      (use_eager_fpu() || new->thread.fpu_counter > 5);
+		      (use_eager_fpu() || new->thread.fpu.counter > 5);
 
 	if (__thread_has_fpu(old)) {
 		if (!__save_init_fpu(old))
@@ -454,16 +454,16 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
 
 		/* Don't change CR0.TS if we just switch! */
 		if (fpu.preload) {
-			new->thread.fpu_counter++;
+			new->thread.fpu.counter++;
 			__thread_set_has_fpu(new);
 			prefetch(new->thread.fpu.state);
 		} else if (!use_eager_fpu())
 			stts();
 	} else {
-		old->thread.fpu_counter = 0;
+		old->thread.fpu.counter = 0;
 		task_disable_lazy_fpu_restore(old);
 		if (fpu.preload) {
-			new->thread.fpu_counter++;
+			new->thread.fpu.counter++;
 			if (fpu_lazy_restore(new, cpu))
 				fpu.preload = 0;
 			else
arch/x86/include/asm/processor.h

@@ -433,6 +433,15 @@ struct fpu {
 	unsigned int last_cpu;
 	unsigned int has_fpu;
 	union thread_xstate *state;
+	/*
+	 * This counter contains the number of consecutive context switches
+	 * that the FPU is used. If this is over a threshold, the lazy fpu
+	 * saving becomes unlazy to save the trap. This is an unsigned char
+	 * so that after 256 times the counter wraps and the behavior turns
+	 * lazy again; this to deal with bursty apps that only use FPU for
+	 * a short time
+	 */
+	unsigned char counter;
 };
 
 #ifdef CONFIG_X86_64
@@ -535,15 +544,6 @@ struct thread_struct {
 	unsigned long iopl;
 	/* Max allowed port in the bitmap, in bytes: */
 	unsigned io_bitmap_max;
-	/*
-	 * fpu_counter contains the number of consecutive context switches
-	 * that the FPU is used. If this is over a threshold, the lazy fpu
-	 * saving becomes unlazy to save the trap. This is an unsigned char
-	 * so that after 256 times the counter wraps and the behavior turns
-	 * lazy again; this to deal with bursty apps that only use FPU for
-	 * a short time
-	 */
-	unsigned char fpu_counter;
 };
 
 /*
arch/x86/kernel/process.c

@@ -87,7 +87,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 {
 	*dst = *src;
 
-	dst->thread.fpu_counter = 0;
+	dst->thread.fpu.counter = 0;
 	dst->thread.fpu.has_fpu = 0;
 	dst->thread.fpu.state = NULL;
 	task_disable_lazy_fpu_restore(dst);
arch/x86/kernel/traps.c

@@ -863,7 +863,7 @@ void math_state_restore(void)
 		fpu_reset_state(tsk);
 		force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
 	} else {
-		tsk->thread.fpu_counter++;
+		tsk->thread.fpu.counter++;
 	}
 	kernel_fpu_enable();
 }
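For readers unfamiliar with the lazy-FPU heuristic the counter drives: switch_fpu_prepare() preloads the FPU state eagerly once a task has used the FPU on more than 5 consecutive context switches, and the deliberate unsigned char wrap-around drops it back to lazy mode after 256. A standalone userspace sketch of just that counter behavior, with use_eager_fpu() stubbed out and all of the kernel's task bookkeeping omitted:

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-in for the kernel's use_eager_fpu(); assume lazy mode here. */
	static bool use_eager_fpu(void) { return false; }

	int main(void)
	{
		unsigned char counter = 0;	/* wraps after 256 increments */

		for (int cs = 1; cs <= 300; cs++) {
			/*
			 * Mirrors the fpu.preload test in switch_fpu_prepare():
			 * preload eagerly once the task used the FPU on more
			 * than 5 consecutive context switches.
			 */
			bool preload = use_eager_fpu() || counter > 5;

			counter++;	/* this switch used the FPU again */

			if (cs <= 7 || cs == 256 || cs == 257)
				printf("switch %3d: counter=%3u preload=%d\n",
				       cs, (unsigned)counter, preload);
		}
		return 0;
	}

Compiled and run, preload flips to 1 on the 7th consecutive FPU-using switch and back to 0 right after the counter wraps, which is the "bursty apps" behavior the struct fpu comment describes. The commit changes none of this logic; it only renames where the counter lives.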