Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Ingo Molnar:
 "Two FPU rewrite related fixes. This addresses all known x86
  regressions at this stage. Also some other misc fixes"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/fpu: Fix boot crash in the early FPU code
  x86/asm/entry/64: Update path names
  x86/fpu: Fix FPU related boot regression when CPUID masking BIOS feature is enabled
  x86/boot/setup: Clean up the e820_reserve_setup_data() code
  x86/kaslr: Fix typo in the KASLR_FLAG documentation
commit b1be9ead13
5 changed files with 13 additions and 13 deletions
Documentation/x86/boot.txt
@@ -406,7 +406,7 @@ Protocol: 2.00+
 	- If 0, the protected-mode code is loaded at 0x10000.
 	- If 1, the protected-mode code is loaded at 0x100000.
 
-  Bit 1 (kernel internal): ALSR_FLAG
+  Bit 1 (kernel internal): KASLR_FLAG
 	- Used internally by the compressed kernel to communicate
 	  KASLR status to kernel proper.
 	  If 1, KASLR enabled.
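For readers unfamiliar with loadflags, a minimal user-space sketch of what testing bit 1 amounts to is shown below. The bit positions follow the documentation above; the macro names, kaslr_enabled_sketch() and the example loadflags value are invented for illustration and are not the kernel's implementation.

#include <stdint.h>
#include <stdio.h>

#define LOADED_HIGH_BIT	(1 << 0)	/* bit 0 set: protected-mode code is loaded at 0x100000 */
#define KASLR_FLAG_BIT	(1 << 1)	/* bit 1 (kernel internal): KASLR status */

/* Returns 1 if the decompressor reported KASLR as enabled via loadflags. */
static int kaslr_enabled_sketch(uint8_t loadflags)
{
	return !!(loadflags & KASLR_FLAG_BIT);
}

int main(void)
{
	uint8_t loadflags = LOADED_HIGH_BIT | KASLR_FLAG_BIT;	/* example value only */

	printf("KASLR %s\n", kaslr_enabled_sketch(loadflags) ? "enabled" : "disabled");
	return 0;
}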
Documentation/x86/entry_64.txt
@@ -1,14 +1,14 @@
 This file documents some of the kernel entries in
-arch/x86/kernel/entry_64.S. A lot of this explanation is adapted from
+arch/x86/entry/entry_64.S. A lot of this explanation is adapted from
 an email from Ingo Molnar:
 
 http://lkml.kernel.org/r/<20110529191055.GC9835%40elte.hu>
 
 The x86 architecture has quite a few different ways to jump into
 kernel code. Most of these entry points are registered in
-arch/x86/kernel/traps.c and implemented in arch/x86/kernel/entry_64.S
-for 64-bit, arch/x86/kernel/entry_32.S for 32-bit and finally
-arch/x86/ia32/ia32entry.S which implements the 32-bit compatibility
+arch/x86/kernel/traps.c and implemented in arch/x86/entry/entry_64.S
+for 64-bit, arch/x86/entry/entry_32.S for 32-bit and finally
+arch/x86/entry/entry_64_compat.S which implements the 32-bit compatibility
 syscall entry points and thus provides for 32-bit processes the
 ability to execute syscalls when running on 64-bit kernels.
 
arch/x86/kernel/cpu/common.c
@@ -742,7 +742,6 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 	cpu_detect(c);
 	get_cpu_vendor(c);
 	get_cpu_cap(c);
-	fpu__init_system(c);
 
 	if (this_cpu->c_early_init)
 		this_cpu->c_early_init(c);
@@ -754,6 +753,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 		this_cpu->c_bsp_init(c);
 
 	setup_force_cpu_cap(X86_FEATURE_ALWAYS);
+	fpu__init_system(c);
 }
 
 void __init early_cpu_init(void)
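The common.c change moves fpu__init_system() to the end of early_identify_cpu(), after the vendor early-init hooks have run and, as the commit list above suggests, after any CPUID-masking handling can no longer change what the FPU code observes. A toy, entirely hypothetical C sketch of that ordering dependency (all names invented, no kernel APIs used):

#include <stdio.h>

static unsigned int cpu_caps;	/* stand-in for the detected capability bits */

static void get_cpu_cap_sketch(void)       { cpu_caps = 0xff; }	/* raw detection */
static void vendor_early_init_sketch(void) { cpu_caps &= ~0x40; }	/* hook adjusts the bits */

static void fpu_init_system_sketch(void)
{
	/* Consumes the capability bits; it must run after any hook that can
	 * still change them, otherwise it latches a stale value. */
	printf("FPU init sees caps = %#x\n", cpu_caps);
}

int main(void)
{
	get_cpu_cap_sketch();
	vendor_early_init_sketch();	/* runs first ... */
	fpu_init_system_sketch();	/* ... so this sees the final bits */
	return 0;
}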
arch/x86/kernel/fpu/init.c
@@ -95,11 +95,12 @@ static void __init fpu__init_system_mxcsr(void)
 	unsigned int mask = 0;
 
 	if (cpu_has_fxsr) {
-		struct fxregs_state fx_tmp __aligned(32) = { };
+		/* Static because GCC does not get 16-byte stack alignment right: */
+		static struct fxregs_state fxregs __initdata;
 
-		asm volatile("fxsave %0" : "+m" (fx_tmp));
+		asm volatile("fxsave %0" : "+m" (fxregs));
 
-		mask = fx_tmp.mxcsr_mask;
+		mask = fxregs.mxcsr_mask;
 
 		/*
 		 * If zero then use the default features mask,
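The fpu/init.c hunk probes MXCSR_MASK with FXSAVE and switches from an on-stack buffer to a static one, because FXSAVE needs a 16-byte-aligned 512-byte area and the stack alignment cannot be relied on here. Below is a rough, user-space-only sketch of the same probe (x86-64 with GCC/Clang assumed; the struct is a simplified stand-in for the kernel's fxregs_state, not the kernel code itself):

#include <stdint.h>
#include <stdio.h>

/* 512-byte FXSAVE area; only mxcsr_mask matters for this sketch. */
struct fxsave_area_sketch {
	uint16_t cwd, swd, twd, fop;
	uint64_t rip, rdp;
	uint32_t mxcsr;
	uint32_t mxcsr_mask;
	uint32_t st_space[32];
	uint32_t xmm_space[64];
	uint32_t reserved[24];
} __attribute__((aligned(16)));

int main(void)
{
	/* static: placed in .bss with the declared 16-byte alignment,
	 * so FXSAVE's alignment requirement is met regardless of the stack. */
	static struct fxsave_area_sketch fx;

	asm volatile("fxsave %0" : "+m" (fx));

	/* Per the SDM, a stored mask of 0 means "use the default 0x0000ffbf". */
	printf("mxcsr_mask = %#x\n", fx.mxcsr_mask ? fx.mxcsr_mask : 0x0000ffbf);
	return 0;
}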
arch/x86/kernel/setup.c
@@ -461,19 +461,18 @@ static void __init e820_reserve_setup_data(void)
 {
 	struct setup_data *data;
 	u64 pa_data;
-	int found = 0;
 
 	pa_data = boot_params.hdr.setup_data;
+	if (!pa_data)
+		return;
+
 	while (pa_data) {
 		data = early_memremap(pa_data, sizeof(*data));
 		e820_update_range(pa_data, sizeof(*data)+data->len,
 			E820_RAM, E820_RESERVED_KERN);
-		found = 1;
 		pa_data = data->next;
 		early_memunmap(data, sizeof(*data));
 	}
-	if (!found)
-		return;
 
 	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
 	memcpy(&e820_saved, &e820, sizeof(struct e820map));
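The setup.c hunk drops the found flag in favor of a single early return when boot_params.hdr.setup_data is empty, then walks the singly linked setup_data chain as before. The sketch below mimics that walk in plain user space; the struct and function names are invented, and ordinary pointers stand in for the physical addresses that early_memremap() would map in the kernel:

#include <stdint.h>
#include <stdio.h>

struct setup_data_sketch {
	uint64_t next;	/* "physical address" of the next node; 0 terminates the chain */
	uint32_t type;
	uint32_t len;	/* payload bytes following this header */
};

static void reserve_setup_data_sketch(uint64_t pa_data)
{
	if (!pa_data)
		return;		/* early return replaces the old "found" flag */

	while (pa_data) {
		struct setup_data_sketch *data =
			(struct setup_data_sketch *)(uintptr_t)pa_data;

		printf("reserve [%#llx, +%u bytes]\n",
		       (unsigned long long)pa_data,
		       (unsigned int)(sizeof(*data) + data->len));
		pa_data = data->next;
	}
	/* ...the map would be sanitized and saved exactly once, here... */
}

int main(void)
{
	static struct setup_data_sketch second = { .next = 0, .type = 2, .len = 16 };
	static struct setup_data_sketch first  = { .type = 1, .len = 32 };

	first.next = (uint64_t)(uintptr_t)&second;

	reserve_setup_data_sketch((uint64_t)(uintptr_t)&first);
	reserve_setup_data_sketch(0);	/* hits the new early-return path */
	return 0;
}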