Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm updates from Paolo Bonzini:
 "ARM:
   - Move the arch-specific code into arch/arm64/kvm
   - Start the post-32bit cleanup
   - Cherry-pick a few non-invasive pre-NV patches

  x86:
   - Rework of TLB flushing
   - Rework of event injection, especially with respect to nested
     virtualization
   - Nested AMD event injection facelift, building on the rework of
     generic code and fixing a lot of corner cases
   - Nested AMD live migration support
   - Optimization for TSC deadline MSR writes and IPIs
   - Various cleanups
   - Asynchronous page fault cleanups (from tglx, common topic branch
     with tip tree)
   - Interrupt-based delivery of asynchronous "page ready" events (host
     side)
   - Hyper-V MSRs and hypercalls for guest debugging
   - VMX preemption timer fixes

  s390:
   - Cleanups

  Generic:
   - switch vCPU thread wakeup from swait to rcuwait

  The other architectures, and the guest side of the asynchronous page
  fault work, will come next week"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (256 commits)
  KVM: selftests: fix rdtsc() for vmx_tsc_adjust_test
  KVM: check userspace_addr for all memslots
  KVM: selftests: update hyperv_cpuid with SynDBG tests
  x86/kvm/hyper-v: Add support for synthetic debugger via hypercalls
  x86/kvm/hyper-v: enable hypercalls regardless of hypercall page
  x86/kvm/hyper-v: Add support for synthetic debugger interface
  x86/hyper-v: Add synthetic debugger definitions
  KVM: selftests: VMX preemption timer migration test
  KVM: nVMX: Fix VMX preemption timer migration
  x86/kvm/hyper-v: Explicitly align hcall param for kvm_hyperv_exit
  KVM: x86/pmu: Support full width counting
  KVM: x86/pmu: Tweak kvm_pmu_get_msr to pass 'struct msr_data' in
  KVM: x86: announce KVM_FEATURE_ASYNC_PF_INT
  KVM: x86: acknowledgment mechanism for async pf page ready notifications
  KVM: x86: interrupt based APF 'page ready' event delivery
  KVM: introduce kvm_read_guest_offset_cached()
  KVM: rename kvm_arch_can_inject_async_page_present() to kvm_arch_can_dequeue_async_page_present()
  KVM: x86: extend struct kvm_vcpu_pv_apf_data with token info
  Revert "KVM: async_pf: Fix #DF due to inject "Page not Present" and "Page Ready" exceptions simultaneously"
  KVM: VMX: Replace zero-length array with flexible-array
  ...
commit 039aeb9deb
155 changed files with 5318 additions and 3057 deletions
kernel/locking/lockdep.c
@@ -3616,13 +3616,10 @@ mark_held_locks(struct task_struct *curr, enum lock_usage_bit base_bit)
 /*
  * Hardirqs will be enabled:
  */
-static void __trace_hardirqs_on_caller(unsigned long ip)
+static void __trace_hardirqs_on_caller(void)
 {
        struct task_struct *curr = current;
 
-       /* we'll do an OFF -> ON transition: */
-       curr->hardirqs_enabled = 1;
-
        /*
         * We are going to turn hardirqs on, so set the
         * usage bit for all held locks:
@@ -3635,15 +3632,19 @@ static void __trace_hardirqs_on_caller(unsigned long ip)
         * this bit from being set before)
         */
        if (curr->softirqs_enabled)
-               if (!mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ))
-                       return;
-
-       curr->hardirq_enable_ip = ip;
-       curr->hardirq_enable_event = ++curr->irq_events;
-       debug_atomic_inc(hardirqs_on_events);
+               mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ);
 }
 
-void lockdep_hardirqs_on(unsigned long ip)
+/**
+ * lockdep_hardirqs_on_prepare - Prepare for enabling interrupts
+ * @ip: Caller address
+ *
+ * Invoked before a possible transition to RCU idle from exit to user or
+ * guest mode. This ensures that all RCU operations are done before RCU
+ * stops watching. After the RCU transition lockdep_hardirqs_on() has to be
+ * invoked to set the final state.
+ */
+void lockdep_hardirqs_on_prepare(unsigned long ip)
 {
        if (unlikely(!debug_locks || current->lockdep_recursion))
                return;
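The kernel-doc comment introduced above describes a two-step protocol: lockdep_hardirqs_on_prepare() is called while RCU is still watching, and lockdep_hardirqs_on() sets the final state after the RCU transition. Below is a minimal sketch of how a caller is expected to pair the two, loosely modeled on the x86 idtentry exit path of this cycle; demo_exit_to_user() and the RCU-transition placeholder are illustrative, not the actual entry code.

#include <linux/irqflags.h>
#include <linux/ftrace.h>      /* CALLER_ADDR0 */

/* Illustrative exit path: interrupts are still hard-disabled here and
 * will only really be enabled by the final IRET/ERET. */
static void demo_exit_to_user(void)
{
        /*
         * Step 1: record that hardirqs are about to be turned on while
         * tracing and RCU are still usable. This also snapshots the
         * held-lock chain key (hardirq_chain_key).
         */
        lockdep_hardirqs_on_prepare(CALLER_ADDR0);

        /* ... RCU idle transition happens here (e.g. rcu_irq_exit()) ... */

        /*
         * Step 2: set the final state. lockdep_hardirqs_on() is noinstr,
         * so it is safe after RCU has stopped watching, and it warns if
         * any locks were taken between the two steps.
         */
        lockdep_hardirqs_on(CALLER_ADDR0);
}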
@@ -3679,20 +3680,62 @@ void lockdep_hardirqs_on(unsigned long ip)
        if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
                return;
 
+       current->hardirq_chain_key = current->curr_chain_key;
+
        current->lockdep_recursion++;
-       __trace_hardirqs_on_caller(ip);
+       __trace_hardirqs_on_caller();
        lockdep_recursion_finish();
 }
-NOKPROBE_SYMBOL(lockdep_hardirqs_on);
+EXPORT_SYMBOL_GPL(lockdep_hardirqs_on_prepare);
+
+void noinstr lockdep_hardirqs_on(unsigned long ip)
+{
+       struct task_struct *curr = current;
+
+       if (unlikely(!debug_locks || curr->lockdep_recursion))
+               return;
+
+       if (curr->hardirqs_enabled) {
+               /*
+                * Neither irq nor preemption are disabled here
+                * so this is racy by nature but losing one hit
+                * in a stat is not a big deal.
+                */
+               __debug_atomic_inc(redundant_hardirqs_on);
+               return;
+       }
+
+       /*
+        * We're enabling irqs and according to our state above irqs weren't
+        * already enabled, yet we find the hardware thinks they are in fact
+        * enabled.. someone messed up their IRQ state tracing.
+        */
+       if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+               return;
+
+       /*
+        * Ensure the lock stack remained unchanged between
+        * lockdep_hardirqs_on_prepare() and lockdep_hardirqs_on().
+        */
+       DEBUG_LOCKS_WARN_ON(current->hardirq_chain_key !=
+                           current->curr_chain_key);
+
+       /* we'll do an OFF -> ON transition: */
+       curr->hardirqs_enabled = 1;
+       curr->hardirq_enable_ip = ip;
+       curr->hardirq_enable_event = ++curr->irq_events;
+       debug_atomic_inc(hardirqs_on_events);
+}
+EXPORT_SYMBOL_GPL(lockdep_hardirqs_on);
 
 /*
  * Hardirqs were disabled:
  */
-void lockdep_hardirqs_off(unsigned long ip)
+void noinstr lockdep_hardirqs_off(unsigned long ip)
 {
        struct task_struct *curr = current;
 
-       if (unlikely(!debug_locks || current->lockdep_recursion))
+       if (unlikely(!debug_locks || curr->lockdep_recursion))
                return;
 
        /*
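The noinstr markings added above (and in the hunks that follow) come from the entry-code rework this merge builds on: a noinstr function may run while RCU is not watching, so it must stay out of tracing and kprobes, and any instrumentable work inside it has to be bracketed explicitly. A minimal sketch of that pattern, with a hypothetical helper name:

#include <linux/compiler.h>            /* noinstr */
#include <linux/instrumentation.h>     /* instrumentation_begin/end */

/* Hypothetical low-level helper that may run before RCU starts watching
 * (or after it stops), so the function itself must not be instrumented. */
noinstr void demo_low_level_entry_work(void)
{
        /* Only noinstr-safe calls here, e.g. lockdep_hardirqs_off(). */

        instrumentation_begin();
        /*
         * Tracepoints, printk(), most of lockdep and anything kprobes may
         * touch must live inside an instrumentation section like this one.
         */
        instrumentation_end();
}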
@@ -3710,10 +3753,11 @@ void lockdep_hardirqs_off(unsigned long ip)
                curr->hardirq_disable_ip = ip;
                curr->hardirq_disable_event = ++curr->irq_events;
                debug_atomic_inc(hardirqs_off_events);
-       } else
+       } else {
                debug_atomic_inc(redundant_hardirqs_off);
+       }
 }
-NOKPROBE_SYMBOL(lockdep_hardirqs_off);
+EXPORT_SYMBOL_GPL(lockdep_hardirqs_off);
 
 /*
  * Softirqs will be enabled:
@@ -4389,8 +4433,8 @@ static void print_unlock_imbalance_bug(struct task_struct *curr,
        dump_stack();
 }
 
-static int match_held_lock(const struct held_lock *hlock,
-                          const struct lockdep_map *lock)
+static noinstr int match_held_lock(const struct held_lock *hlock,
+                                  const struct lockdep_map *lock)
 {
        if (hlock->instance == lock)
                return 1;
@@ -4677,7 +4721,7 @@ __lock_release(struct lockdep_map *lock, unsigned long ip)
        return 0;
 }
 
-static nokprobe_inline
+static __always_inline
 int __lock_is_held(const struct lockdep_map *lock, int read)
 {
        struct task_struct *curr = current;
@@ -4937,7 +4981,7 @@ void lock_release(struct lockdep_map *lock, unsigned long ip)
 }
 EXPORT_SYMBOL_GPL(lock_release);
 
-int lock_is_held_type(const struct lockdep_map *lock, int read)
+noinstr int lock_is_held_type(const struct lockdep_map *lock, int read)
 {
        unsigned long flags;
        int ret = 0;