x86: convert pda ops to wrappers around x86 percpu accessors
pda is now a percpu variable and there's no reason it can't use plain x86 percpu accessors. Add x86_test_and_clear_bit_percpu() and replace pda op implementations with wrappers around x86 percpu accessors.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent  b12d8db8fb
commit  49357d19e4
4 changed files with 16 additions and 85 deletions
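For context on what "wrappers around x86 percpu accessors" means in practice, here is a minimal user-space sketch of the same pattern: a struct standing in for the per-CPU PDA, plain accessor macros standing in for x86_read_percpu()/x86_write_percpu(), and the pda ops reduced to one-line forwards. Everything named demo_* here is illustrative only, not kernel code; in the kernel the __pda variable is a percpu variable addressed through %gs.

#include <stdio.h>

/* Stand-in for struct x8664_pda; in the kernel this lives behind %gs. */
struct demo_pda {
	int cpunumber;
	unsigned long kernelstack;
};

static struct demo_pda __pda;	/* one instance instead of one per CPU */

/* Generic accessors, analogous to x86_read_percpu()/x86_write_percpu(). */
#define demo_read(field)	(__pda.field)
#define demo_write(field, val)	(__pda.field = (val))
#define demo_add(field, val)	(__pda.field += (val))

/* The pda ops become thin wrappers, mirroring the new pda.h below. */
#define read_pda(field)		demo_read(field)
#define write_pda(field, val)	demo_write(field, val)
#define add_pda(field, val)	demo_add(field, val)

int main(void)
{
	write_pda(cpunumber, 3);
	add_pda(cpunumber, 1);
	printf("cpunumber = %d\n", read_pda(cpunumber));	/* prints 4 */
	return 0;
}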
@@ -45,91 +45,15 @@ extern void pda_init(int);
 
 #define cpu_pda(cpu)		(&per_cpu(__pda, cpu))
 
-/*
- * There is no fast way to get the base address of the PDA, all the accesses
- * have to mention %fs/%gs. So it needs to be done this Torvaldian way.
- */
-extern void __bad_pda_field(void) __attribute__((noreturn));
-
-/*
- * proxy_pda doesn't actually exist, but tell gcc it is accessed for
- * all PDA accesses so it gets read/write dependencies right.
- */
-extern struct x8664_pda _proxy_pda;
-
-#define pda_offset(field) offsetof(struct x8664_pda, field)
-
-#define pda_to_op(op, field, val)					\
-do {									\
-	typedef typeof(_proxy_pda.field) T__;				\
-	if (0) { T__ tmp__; tmp__ = (val); }	/* type checking */	\
-	switch (sizeof(_proxy_pda.field)) {				\
-	case 2:								\
-		asm(op "w %1,%%gs:%c2" :				\
-		    "+m" (_proxy_pda.field) :				\
-		    "ri" ((T__)val),					\
-		    "i"(pda_offset(field)));				\
-		break;							\
-	case 4:								\
-		asm(op "l %1,%%gs:%c2" :				\
-		    "+m" (_proxy_pda.field) :				\
-		    "ri" ((T__)val),					\
-		    "i" (pda_offset(field)));				\
-		break;							\
-	case 8:								\
-		asm(op "q %1,%%gs:%c2":					\
-		    "+m" (_proxy_pda.field) :				\
-		    "r" ((T__)val),					\
-		    "i"(pda_offset(field)));				\
-		break;							\
-	default:							\
-		__bad_pda_field();					\
-	}								\
-} while (0)
-
-#define pda_from_op(op, field)						\
-({									\
-	typeof(_proxy_pda.field) ret__;					\
-	switch (sizeof(_proxy_pda.field)) {				\
-	case 2:								\
-		asm(op "w %%gs:%c1,%0" :				\
-		    "=r" (ret__) :					\
-		    "i" (pda_offset(field)),				\
-		    "m" (_proxy_pda.field));				\
-		break;							\
-	case 4:								\
-		asm(op "l %%gs:%c1,%0":					\
-		    "=r" (ret__):					\
-		    "i" (pda_offset(field)),				\
-		    "m" (_proxy_pda.field));				\
-		break;							\
-	case 8:								\
-		asm(op "q %%gs:%c1,%0":					\
-		    "=r" (ret__) :					\
-		    "i" (pda_offset(field)),				\
-		    "m" (_proxy_pda.field));				\
-		break;							\
-	default:							\
-		__bad_pda_field();					\
-	}								\
-	ret__;								\
-})
-
-#define read_pda(field)		pda_from_op("mov", field)
-#define write_pda(field, val)	pda_to_op("mov", field, val)
-#define add_pda(field, val)	pda_to_op("add", field, val)
-#define sub_pda(field, val)	pda_to_op("sub", field, val)
-#define or_pda(field, val)	pda_to_op("or", field, val)
+#define read_pda(field)		x86_read_percpu(__pda.field)
+#define write_pda(field, val)	x86_write_percpu(__pda.field, val)
+#define add_pda(field, val)	x86_add_percpu(__pda.field, val)
+#define sub_pda(field, val)	x86_sub_percpu(__pda.field, val)
+#define or_pda(field, val)	x86_or_percpu(__pda.field, val)
 
 /* This is not atomic against other CPUs -- CPU preemption needs to be off */
 #define test_and_clear_bit_pda(bit, field)				\
-({									\
-	int old__;							\
-	asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0"			\
-		     : "=r" (old__), "+m" (_proxy_pda.field)		\
-		     : "dIr" (bit), "i" (pda_offset(field)) : "memory");\
-	old__;								\
-})
-
+	x86_test_and_clear_bit_percpu(bit, __pda.field)
 
 #endif
 
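The hunk above deletes the hand-rolled accessors. For readers unfamiliar with the pattern being removed, the sketch below reproduces pda_to_op()'s two tricks in portable user-space C: a dead "if (0)" assignment that only type-checks the value against the field, and a sizeof() switch that stands in for choosing the w/l/q instruction suffix. Names such as demo_pda and demo_write are made up for illustration, and the sizes assume short=2, int=4, long long=8; only the branch whose size matches the field is ever executed.

#include <stdio.h>

struct demo_pda {
	short			s_field;
	int			i_field;
	unsigned long long	l_field;
};

static struct demo_pda pda;

/*
 * Size-dispatched store in the spirit of the removed pda_to_op(): the
 * "if (0)" body is never run, it only forces a type check of val against
 * the field; the switch picks a store width per field size, the way the
 * original asm picked the w/l/q suffix. Non-matching cases compile but
 * never execute.
 */
#define demo_write(field, val)						\
do {									\
	typedef __typeof__(pda.field) T__;				\
	if (0) { T__ tmp__; tmp__ = (val); }	/* type checking */	\
	switch (sizeof(pda.field)) {					\
	case 2: *(short *)&pda.field = (short)(val); break;		\
	case 4: *(int *)&pda.field = (int)(val); break;			\
	case 8: *(long long *)&pda.field = (long long)(val); break;	\
	}								\
} while (0)

int main(void)
{
	demo_write(s_field, 7);
	demo_write(i_field, 42);
	demo_write(l_field, 1ULL << 40);
	printf("%d %d %llu\n", pda.s_field, pda.i_field, pda.l_field);
	return 0;
}

With the pda now a plain percpu variable, the generic percpu accessors already perform this size dispatch, which is why the per-op asm above could be deleted.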
@@ -121,6 +121,16 @@ do { \
 #define x86_sub_percpu(var, val) percpu_to_op("sub", per_cpu__##var, val)
 #define x86_or_percpu(var, val) percpu_to_op("or", per_cpu__##var, val)
 
+/* This is not atomic against other CPUs -- CPU preemption needs to be off */
+#define x86_test_and_clear_bit_percpu(bit, var)				\
+({									\
+	int old__;							\
+	asm volatile("btr %1,"__percpu_seg_str"%c2\n\tsbbl %0,%0"	\
+		     : "=r" (old__)					\
+		     : "dIr" (bit), "i" (&per_cpu__##var) : "memory");	\
+	old__;								\
+})
+
 #ifdef CONFIG_X86_64
 extern void load_pda_offset(int cpu);
 #else
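The new x86_test_and_clear_bit_percpu() uses btr to move the old bit value into the carry flag and sbbl to fan that flag out into 0 or -1. A plain-C, user-space analog of the same semantics is sketched below; the helper name is hypothetical, it is not atomic (the kernel relies on preemption being off), and it returns 0/1 rather than 0/-1, but both are consumed the same way as a truth value.

#include <stdio.h>

/* Clear one bit in a word and report whether it was previously set. */
static int demo_test_and_clear_bit(int bit, unsigned long *word)
{
	int old = (int)((*word >> bit) & 1);

	*word &= ~(1UL << bit);
	return old;
}

int main(void)
{
	unsigned long pending = 1UL << 3;

	printf("first:  %d\n", demo_test_and_clear_bit(3, &pending)); /* 1 */
	printf("second: %d\n", demo_test_and_clear_bit(3, &pending)); /* 0 */
	return 0;
}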
@@ -14,7 +14,6 @@ OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64")
 OUTPUT_ARCH(i386:x86-64)
 ENTRY(phys_startup_64)
 jiffies_64 = jiffies;
-_proxy_pda = 1;
 PHDRS {
 	text PT_LOAD FLAGS(5);	/* R_E */
 	data PT_LOAD FLAGS(7);	/* RWE */
@@ -58,5 +58,3 @@ EXPORT_SYMBOL(__memcpy);
 EXPORT_SYMBOL(empty_zero_page);
 EXPORT_SYMBOL(init_level4_pgt);
 EXPORT_SYMBOL(load_gs_index);
-
-EXPORT_SYMBOL(_proxy_pda);