Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes/cleanups from Catalin Marinas:

 - Avoid taking a mutex in the secondary CPU bring-up path when
   interrupts are disabled

 - Ignore perf exclude_hv when the kernel is running in Hyp mode

 - Remove redundant instruction in cmpxchg

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64/cpufeature: don't use mutex in bringup path
  arm64: perf: Ignore exclude_hv when kernel is running in HYP
  arm64: Remove redundant mov from LL/SC cmpxchg
commit 2fe296a61a

5 changed files with 53 additions and 14 deletions
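Background for the cpufeature change: enabling a static key goes through the jump-label machinery, which takes a sleeping mutex, so it must not happen on a path that runs with interrupts disabled. Before this series, roughly the following happened (call chain condensed from the hunks below; a sketch, not the exact call stack):

	secondary CPU bring-up (interrupts disabled)
	  -> update_cpu_capabilities()
	       -> cpus_set_cap()
	            -> static_branch_enable()   /* takes the jump-label mutex */

The fix moves every static_branch_enable() call to boot time, in enable_cpu_capabilities() and the new mark_const_caps_ready(), and gives cpus_have_const_cap() a bitmap fallback for callers that run before the keys are finalized.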
arch/arm64/include/asm/atomic_ll_sc.h
@@ -264,7 +264,6 @@ __LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr,		\
 	"	st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n"	\
 	"	cbnz	%w[tmp], 1b\n"					\
 	"	" #mb "\n"						\
-	"	mov	%" #w "[oldval], %" #w "[old]\n"		\
 	"2:"								\
 	: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval),			\
 	  [v] "+Q" (*(unsigned long *)ptr)				\
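Why the deleted mov was redundant: earlier in this asm sequence (context elided by the hunk), an eor/cbnz pair branches straight to label 2: whenever the loaded value differs from [old], so the mov only ever executed on the success path, where [oldval] already equals [old]. A minimal C sketch of the same control flow, assuming hypothetical load_exclusive()/store_exclusive() stand-ins for the ldxr/stxr instructions (these are not kernel APIs):

/* Hypothetical stand-ins for the exclusive-access instructions. */
extern unsigned long load_exclusive(volatile unsigned long *ptr);
extern int store_exclusive(volatile unsigned long *ptr, unsigned long val);

static unsigned long cmpxchg_sketch(volatile unsigned long *ptr,
				    unsigned long old, unsigned long new)
{
	unsigned long oldval;

	do {
		oldval = load_exclusive(ptr);	/* 1: ldxr */
		if (oldval != old)		/* eor; cbnz %[tmp], 2f */
			return oldval;		/* mismatch: bail out */
	} while (!store_exclusive(ptr, new));	/* stxr; cbnz %w[tmp], 1b */

	/*
	 * Success path: oldval == old is already guaranteed here, so the
	 * deleted "mov %[oldval], %[old]" could never change the result.
	 */
	return oldval;
}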
arch/arm64/include/asm/cpufeature.h
@@ -115,6 +115,7 @@ struct arm64_cpu_capabilities {
 
 extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
 extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
+extern struct static_key_false arm64_const_caps_ready;
 
 bool this_cpu_has_cap(unsigned int cap);
 
@@ -124,7 +125,7 @@ static inline bool cpu_have_feature(unsigned int num)
 }
 
 /* System capability check for constant caps */
-static inline bool cpus_have_const_cap(int num)
+static inline bool __cpus_have_const_cap(int num)
 {
 	if (num >= ARM64_NCAPS)
 		return false;
@@ -138,6 +139,14 @@ static inline bool cpus_have_cap(unsigned int num)
 	return test_bit(num, cpu_hwcaps);
 }
 
+static inline bool cpus_have_const_cap(int num)
+{
+	if (static_branch_likely(&arm64_const_caps_ready))
+		return __cpus_have_const_cap(num);
+	else
+		return cpus_have_cap(num);
+}
+
 static inline void cpus_set_cap(unsigned int num)
 {
 	if (num >= ARM64_NCAPS) {
@@ -145,7 +154,6 @@ static inline void cpus_set_cap(unsigned int num)
 			num, ARM64_NCAPS);
 	} else {
 		__set_bit(num, cpu_hwcaps);
-		static_branch_enable(&cpu_hwcap_keys[num]);
 	}
 }
 
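The shape of the fix: the old cpus_have_const_cap() becomes __cpus_have_const_cap(), and the new wrapper consults the arm64_const_caps_ready key first, falling back to the cpu_hwcaps bitmap until boot has finalized all the per-capability keys. For context, the body elided by the second hunk reads roughly as follows in this era of the header (quoted from memory, so treat it as a sketch):

static inline bool __cpus_have_const_cap(int num)
{
	if (num >= ARM64_NCAPS)
		return false;
	return static_branch_unlikely(&cpu_hwcap_keys[num]);
}

With cpus_set_cap() no longer calling static_branch_enable(), setting a capability bit from the secondary bring-up path touches only the bitmap, and no mutex is taken with interrupts disabled.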
arch/arm64/include/asm/kvm_host.h
@@ -24,6 +24,7 @@
 
 #include <linux/types.h>
 #include <linux/kvm_types.h>
+#include <asm/cpufeature.h>
 #include <asm/kvm.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
@@ -355,9 +356,12 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
 				       unsigned long vector_ptr)
 {
 	/*
-	 * Call initialization code, and switch to the full blown
-	 * HYP code.
+	 * Call initialization code, and switch to the full blown HYP code.
+	 * If the cpucaps haven't been finalized yet, something has gone very
+	 * wrong, and hyp will crash and burn when it uses any
+	 * cpus_have_const_cap() wrapper.
 	 */
+	BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
 	__kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr);
 }
 
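Two small notes on this file: the new #include <asm/cpufeature.h> is what declares arm64_const_caps_ready (and pulls in the static-key helpers) for the BUG_ON below it, and the BUG_ON enforces the ordering contract spelled out in the comment: no CPU may be initialized for HYP before the capabilities are finalized, because code running at HYP relies on the patched-branch form of the cpus_have_const_cap() checks. Tripping the BUG_ON at init time is far easier to diagnose than the later crash at EL2 that the comment warns about.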
arch/arm64/kernel/cpufeature.c
@@ -985,8 +985,16 @@ void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
  */
 void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
 {
-	for (; caps->matches; caps++)
-		if (caps->enable && cpus_have_cap(caps->capability))
+	for (; caps->matches; caps++) {
+		unsigned int num = caps->capability;
+
+		if (!cpus_have_cap(num))
+			continue;
+
+		/* Ensure cpus_have_const_cap(num) works */
+		static_branch_enable(&cpu_hwcap_keys[num]);
+
+		if (caps->enable) {
 			/*
 			 * Use stop_machine() as it schedules the work allowing
 			 * us to modify PSTATE, instead of on_each_cpu() which
@@ -994,6 +1002,8 @@ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
 			 * we return.
 			 */
 			stop_machine(caps->enable, NULL, cpu_online_mask);
+		}
+	}
 }
 
 /*
@@ -1096,6 +1106,14 @@ static void __init setup_feature_capabilities(void)
 	enable_cpu_capabilities(arm64_features);
 }
 
+DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
+EXPORT_SYMBOL(arm64_const_caps_ready);
+
+static void __init mark_const_caps_ready(void)
+{
+	static_branch_enable(&arm64_const_caps_ready);
+}
+
 /*
  * Check if the current CPU has a given feature capability.
  * Should be called from non-preemptible context.
@@ -1131,6 +1149,7 @@ void __init setup_cpu_features(void)
 	/* Set the CPU feature capabilies */
 	setup_feature_capabilities();
 	enable_errata_workarounds();
+	mark_const_caps_ready();
 	setup_elf_hwcaps(arm64_elf_hwcaps);
 
 	if (system_supports_32bit_el0())
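The resulting boot-time ordering: enable_cpu_capabilities() now flips every detected capability's static key on the boot CPU, where taking the jump-label mutex is harmless; enable_errata_workarounds() does the same for errata; and only then does mark_const_caps_ready() arm the global key. A hypothetical caller, just to illustrate the two regimes (ARM64_HAS_VIRT_HOST_EXTN is a real capability constant, but the snippet itself is not from this patch):

	if (cpus_have_const_cap(ARM64_HAS_VIRT_HOST_EXTN)) {
		/*
		 * Before mark_const_caps_ready(): falls back to
		 * cpus_have_cap(), i.e. test_bit() on the cpu_hwcaps bitmap.
		 * After it: a single patched branch, no memory access.
		 */
	}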
arch/arm64/kernel/perf_event.c
@@ -877,15 +877,24 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
 
 	if (attr->exclude_idle)
 		return -EPERM;
-	if (is_kernel_in_hyp_mode() &&
-	    attr->exclude_kernel != attr->exclude_hv)
-		return -EINVAL;
+
+	/*
+	 * If we're running in hyp mode, then we *are* the hypervisor.
+	 * Therefore we ignore exclude_hv in this configuration, since
+	 * there's no hypervisor to sample anyway. This is consistent
+	 * with other architectures (x86 and Power).
+	 */
+	if (is_kernel_in_hyp_mode()) {
+		if (!attr->exclude_kernel)
+			config_base |= ARMV8_PMU_INCLUDE_EL2;
+	} else {
+		if (attr->exclude_kernel)
+			config_base |= ARMV8_PMU_EXCLUDE_EL1;
+		if (!attr->exclude_hv)
+			config_base |= ARMV8_PMU_INCLUDE_EL2;
+	}
 	if (attr->exclude_user)
 		config_base |= ARMV8_PMU_EXCLUDE_EL0;
-	if (!is_kernel_in_hyp_mode() && attr->exclude_kernel)
-		config_base |= ARMV8_PMU_EXCLUDE_EL1;
-	if (!attr->exclude_hv)
-		config_base |= ARMV8_PMU_INCLUDE_EL2;
 
 	/*
 	 * Install the filter into config_base as this is used to
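To make the behavioural change concrete, here is a filter combination the old code rejected with -EINVAL under VHE (exclude_kernel != exclude_hv), even though it is an ordinary request from user space; this snippet is an illustration, not part of the patch:

#include <string.h>
#include <linux/perf_event.h>

static void fill_attr(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);
	attr->type = PERF_TYPE_HARDWARE;
	attr->config = PERF_COUNT_HW_CPU_CYCLES;
	attr->exclude_kernel = 1;	/* don't count the kernel */
	attr->exclude_hv = 0;		/* previously forced to match */
}

With this patch, a kernel running in Hyp mode *is* the hypervisor, so exclude_hv is ignored and exclude_kernel alone decides whether EL2 is counted, matching the behaviour of x86 and Power.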