From c848ca572d4b1f4274e5f35076b92877520d7d0c Mon Sep 17 00:00:00 2001
From: Ksawlii
Date: Sun, 24 Nov 2024 00:23:13 +0100
Subject: [PATCH] Revert "lockdep: fix deadlock issue between lockdep and rcu"

This reverts commit 6624949ecaf4370eca748067a48296e240e23553.
---
 kernel/locking/lockdep.c | 48 ++++++++++++++++--------------------------------
 1 file changed, 16 insertions(+), 32 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index b576dd005..7471d85f5 100755
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -5967,27 +5967,25 @@ static struct pending_free *get_pending_free(void)
 static void free_zapped_rcu(struct rcu_head *cb);
 
 /*
- * See if we need to queue an RCU callback, must called with
- * the lockdep lock held, returns false if either we don't have
- * any pending free or the callback is already scheduled.
- * Otherwise, a call_rcu() must follow this function call.
- */
-static bool prepare_call_rcu_zapped(struct pending_free *pf)
+ * Schedule an RCU callback if no RCU callback is pending. Must be called with
+ * the graph lock held.
+ */
+static void call_rcu_zapped(struct pending_free *pf)
 {
 	WARN_ON_ONCE(inside_selftest());
 
 	if (list_empty(&pf->zapped))
-		return false;
+		return;
 
 	if (delayed_free.scheduled)
-		return false;
+		return;
 
 	delayed_free.scheduled = true;
 
 	WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf);
 	delayed_free.index ^= 1;
 
-	return true;
+	call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
 }
 
 /* The caller must hold the graph lock. May be called from RCU context. */
@@ -6013,7 +6011,6 @@ static void free_zapped_rcu(struct rcu_head *ch)
 {
 	struct pending_free *pf;
 	unsigned long flags;
-	bool need_callback;
 
 	if (WARN_ON_ONCE(ch != &delayed_free.rcu_head))
 		return;
@@ -6025,18 +6022,14 @@ static void free_zapped_rcu(struct rcu_head *ch)
 
 	pf = delayed_free.pf + (delayed_free.index ^ 1);
 	__free_zapped_classes(pf);
 	delayed_free.scheduled = false;
-	need_callback =
-		prepare_call_rcu_zapped(delayed_free.pf + delayed_free.index);
-	lockdep_unlock();
-	raw_local_irq_restore(flags);
 
 	/*
-	 * If there's pending free and its callback has not been scheduled,
-	 * queue an RCU callback.
-	 */
-	if (need_callback)
-		call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
+	 * If there's anything on the open list, close and start a new callback.
+	 */
+	call_rcu_zapped(delayed_free.pf + delayed_free.index);
+	lockdep_unlock();
+	raw_local_irq_restore(flags);
 }
 
 /*
@@ -6076,7 +6069,6 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size)
 {
 	struct pending_free *pf;
 	unsigned long flags;
-	bool need_callback;
 
 	init_data_structures_once();
 
@@ -6084,11 +6076,10 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size)
 	lockdep_lock();
 	pf = get_pending_free();
 	__lockdep_free_key_range(pf, start, size);
-	need_callback = prepare_call_rcu_zapped(pf);
+	call_rcu_zapped(pf);
 	lockdep_unlock();
 	raw_local_irq_restore(flags);
-	if (need_callback)
-		call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
+
 	/*
 	 * Wait for any possible iterators from look_up_lock_class() to pass
 	 * before continuing to free the memory they refer to.
@@ -6182,7 +6173,6 @@ static void lockdep_reset_lock_reg(struct lockdep_map *lock)
 	struct pending_free *pf;
 	unsigned long flags;
 	int locked;
-	bool need_callback = false;
 
 	raw_local_irq_save(flags);
 	locked = graph_lock();
@@ -6191,13 +6181,11 @@ static void lockdep_reset_lock_reg(struct lockdep_map *lock)
 
 	pf = get_pending_free();
 	__lockdep_reset_lock(pf, lock);
-	need_callback = prepare_call_rcu_zapped(pf);
+	call_rcu_zapped(pf);
 
 	graph_unlock();
 out_irq:
 	raw_local_irq_restore(flags);
-	if (need_callback)
-		call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
 }
 
 /*
@@ -6241,7 +6229,6 @@ void lockdep_unregister_key(struct lock_class_key *key)
 	struct pending_free *pf;
 	unsigned long flags;
 	bool found = false;
-	bool need_callback = false;
 
 	might_sleep();
 
@@ -6262,14 +6249,11 @@ void lockdep_unregister_key(struct lock_class_key *key)
 
 	if (found) {
 		pf = get_pending_free();
 		__lockdep_free_key_range(pf, key, 1);
-		need_callback = prepare_call_rcu_zapped(pf);
+		call_rcu_zapped(pf);
 	}
 	lockdep_unlock();
 	raw_local_irq_restore(flags);
-	if (need_callback)
-		call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
-
 	/* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
 	synchronize_rcu();
 }
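
Note (not part of the patch): per the subject of the reverted commit, invoking
call_rcu() while the graph lock is held could deadlock lockdep against RCU, so
that fix split the operation into prepare_call_rcu_zapped() under the lock plus
a call_rcu() issued only after the lock is dropped; this revert restores the
older call_rcu_zapped(), which schedules the callback with the lock still held.
Below is a minimal userspace sketch of the split pattern, using pthread
stand-ins for the kernel primitives; all names here (graph_lock,
prepare_deferred_callback, fire_deferred_callback) are illustrative and are not
kernel API.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t graph_lock = PTHREAD_MUTEX_INITIALIZER;
static bool callback_scheduled;

/*
 * Analogue of prepare_call_rcu_zapped(): called with graph_lock held, it
 * only records that a callback must be scheduled. Returns true if the
 * caller has to fire the callback once the lock is dropped.
 */
static bool prepare_deferred_callback(void)
{
	if (callback_scheduled)
		return false;
	callback_scheduled = true;
	return true;
}

/*
 * Analogue of call_rcu(): may itself take other locks, which is why the
 * fixed code only ran it after graph_lock was released.
 */
static void fire_deferred_callback(void)
{
	printf("callback scheduled outside the lock\n");
}

int main(void)
{
	bool need_callback;

	pthread_mutex_lock(&graph_lock);
	/* ... move zapped entries onto the pending-free list here ... */
	need_callback = prepare_deferred_callback();
	pthread_mutex_unlock(&graph_lock);

	/* Scheduling happens with no locks held, so it cannot deadlock. */
	if (need_callback)
		fire_deferred_callback();
	return 0;
}

The code this patch restores performs both steps inside call_rcu_zapped()
while the graph lock is still held, which is the shape visible in the `+`
lines of the hunks above.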