mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-06-28 09:31:14 +00:00
locking/lockdep: Avoid potential access of invalid memory in lock_class
commit 61cc4534b6
upstream. It was found that reading /proc/lockdep after a lockdep splat may potentially cause an access to freed memory if lockdep_unregister_key() is called after the splat but before access to /proc/lockdep [1]. This is due to the fact that the graph_lock() call in lockdep_unregister_key() fails after the clearing of debug_locks by the splat process. After lockdep_unregister_key() is called, the lock_name may be freed but the corresponding lock_class structure still has a reference to it. That invalid memory pointer will then be accessed when /proc/lockdep is read by a user and a use-after-free (UAF) error will be reported if KASAN is enabled. To fix this problem, lockdep_unregister_key() is now modified to always search for a matching key irrespective of the debug_locks state and zap the corresponding lock class if a matching one is found. [1] https://lore.kernel.org/lkml/77f05c15-81b6-bddd-9650-80d5f23fe330@i-love.sakura.ne.jp/ Fixes: 8b39adbee8
("locking/lockdep: Make lockdep_unregister_key() honor 'debug_locks' again") Reported-by: Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp> Signed-off-by: Waiman Long <longman@redhat.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Reviewed-by: Bart Van Assche <bvanassche@acm.org> Cc: Cheng-Jui Wang <cheng-jui.wang@mediatek.com> Link: https://lkml.kernel.org/r/20220103023558.1377055-1-longman@redhat.com Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
parent
e5189fb91f
commit
6f54aede22
1 changed file with 15 additions and 9 deletions
|
@ -6276,7 +6276,13 @@ void lockdep_reset_lock(struct lockdep_map *lock)
|
||||||
lockdep_reset_lock_reg(lock);
|
lockdep_reset_lock_reg(lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Unregister a dynamically allocated key. */
|
/*
|
||||||
|
* Unregister a dynamically allocated key.
|
||||||
|
*
|
||||||
|
* Unlike lockdep_register_key(), a search is always done to find a matching
|
||||||
|
* key irrespective of debug_locks to avoid potential invalid access to freed
|
||||||
|
* memory in lock_class entry.
|
||||||
|
*/
|
||||||
void lockdep_unregister_key(struct lock_class_key *key)
|
void lockdep_unregister_key(struct lock_class_key *key)
|
||||||
{
|
{
|
||||||
struct hlist_head *hash_head = keyhashentry(key);
|
struct hlist_head *hash_head = keyhashentry(key);
|
||||||
|
@ -6291,10 +6297,8 @@ void lockdep_unregister_key(struct lock_class_key *key)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
raw_local_irq_save(flags);
|
raw_local_irq_save(flags);
|
||||||
if (!graph_lock())
|
lockdep_lock();
|
||||||
goto out_irq;
|
|
||||||
|
|
||||||
pf = get_pending_free();
|
|
||||||
hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
|
hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
|
||||||
if (k == key) {
|
if (k == key) {
|
||||||
hlist_del_rcu(&k->hash_entry);
|
hlist_del_rcu(&k->hash_entry);
|
||||||
|
@ -6302,11 +6306,13 @@ void lockdep_unregister_key(struct lock_class_key *key)
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
WARN_ON_ONCE(!found);
|
WARN_ON_ONCE(!found && debug_locks);
|
||||||
__lockdep_free_key_range(pf, key, 1);
|
if (found) {
|
||||||
call_rcu_zapped(pf);
|
pf = get_pending_free();
|
||||||
graph_unlock();
|
__lockdep_free_key_range(pf, key, 1);
|
||||||
out_irq:
|
call_rcu_zapped(pf);
|
||||||
|
}
|
||||||
|
lockdep_unlock();
|
||||||
raw_local_irq_restore(flags);
|
raw_local_irq_restore(flags);
|
||||||
|
|
||||||
/* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
|
/* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue