Revert "lockdep: fix deadlock issue between lockdep and rcu"

This reverts commit 6624949eca.
Ksawlii 2024-11-24 00:23:13 +01:00
parent b96c98434a
commit c848ca572d

kernel/locking/lockdep.c

@@ -5967,27 +5967,25 @@ static struct pending_free *get_pending_free(void)
 static void free_zapped_rcu(struct rcu_head *cb);
 
 /*
- * See if we need to queue an RCU callback, must called with
- * the lockdep lock held, returns false if either we don't have
- * any pending free or the callback is already scheduled.
- * Otherwise, a call_rcu() must follow this function call.
- */
-static bool prepare_call_rcu_zapped(struct pending_free *pf)
+ * Schedule an RCU callback if no RCU callback is pending. Must be called with
+ * the graph lock held.
+ */
+static void call_rcu_zapped(struct pending_free *pf)
 {
 	WARN_ON_ONCE(inside_selftest());
 
 	if (list_empty(&pf->zapped))
-		return false;
+		return;
 
 	if (delayed_free.scheduled)
-		return false;
+		return;
 
 	delayed_free.scheduled = true;
 
 	WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf);
 	delayed_free.index ^= 1;
 
-	return true;
+	call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
 }
 
 /* The caller must hold the graph lock. May be called from RCU context. */
@@ -6013,7 +6011,6 @@ static void free_zapped_rcu(struct rcu_head *ch)
 {
 	struct pending_free *pf;
 	unsigned long flags;
-	bool need_callback;
 
 	if (WARN_ON_ONCE(ch != &delayed_free.rcu_head))
 		return;
@@ -6025,18 +6022,14 @@ static void free_zapped_rcu(struct rcu_head *ch)
 	pf = delayed_free.pf + (delayed_free.index ^ 1);
 	__free_zapped_classes(pf);
 	delayed_free.scheduled = false;
-	need_callback =
-		prepare_call_rcu_zapped(delayed_free.pf + delayed_free.index);
-	lockdep_unlock();
-	raw_local_irq_restore(flags);
 
 	/*
-	 * If there's pending free and its callback has not been scheduled,
-	 * queue an RCU callback.
-	 */
-	if (need_callback)
-		call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
-
+	 * If there's anything on the open list, close and start a new callback.
+	 */
+	call_rcu_zapped(delayed_free.pf + delayed_free.index);
+
+	lockdep_unlock();
+	raw_local_irq_restore(flags);
 }
 
 /*
@@ -6076,7 +6069,6 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size)
 {
 	struct pending_free *pf;
 	unsigned long flags;
-	bool need_callback;
 
 	init_data_structures_once();
 
@@ -6084,11 +6076,10 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size)
 	lockdep_lock();
 	pf = get_pending_free();
 	__lockdep_free_key_range(pf, start, size);
-	need_callback = prepare_call_rcu_zapped(pf);
+	call_rcu_zapped(pf);
 	lockdep_unlock();
 	raw_local_irq_restore(flags);
-	if (need_callback)
-		call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
+
 	/*
 	 * Wait for any possible iterators from look_up_lock_class() to pass
 	 * before continuing to free the memory they refer to.
@@ -6182,7 +6173,6 @@ static void lockdep_reset_lock_reg(struct lockdep_map *lock)
 	struct pending_free *pf;
 	unsigned long flags;
 	int locked;
-	bool need_callback = false;
 
 	raw_local_irq_save(flags);
 	locked = graph_lock();
@@ -6191,13 +6181,11 @@ static void lockdep_reset_lock_reg(struct lockdep_map *lock)
 
 	pf = get_pending_free();
 	__lockdep_reset_lock(pf, lock);
-	need_callback = prepare_call_rcu_zapped(pf);
+	call_rcu_zapped(pf);
 
 	graph_unlock();
 out_irq:
 	raw_local_irq_restore(flags);
-	if (need_callback)
-		call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
 }
 
 /*
@@ -6241,7 +6229,6 @@ void lockdep_unregister_key(struct lock_class_key *key)
 	struct pending_free *pf;
 	unsigned long flags;
 	bool found = false;
-	bool need_callback = false;
 
 	might_sleep();
 
@@ -6262,14 +6249,11 @@ void lockdep_unregister_key(struct lock_class_key *key)
 	if (found) {
 		pf = get_pending_free();
 		__lockdep_free_key_range(pf, key, 1);
-		need_callback = prepare_call_rcu_zapped(pf);
+		call_rcu_zapped(pf);
	}
 	lockdep_unlock();
 	raw_local_irq_restore(flags);
 
-	if (need_callback)
-		call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
-
 	/* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
 	synchronize_rcu();
 }
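
The functional difference in this revert is where call_rcu() runs relative to the lockdep graph lock: the restored call_rcu_zapped() queues the RCU callback while the graph lock is still held, whereas the reverted commit only recorded the decision under the lock (prepare_call_rcu_zapped() returning a flag) and had each caller invoke call_rcu() after unlocking. The sketch below is purely illustrative userspace C, not kernel code; graph_lock(), graph_unlock(), call_rcu_stub() and pending are local stand-ins, and it only contrasts those two orderings.

/*
 * Illustrative sketch of the two call patterns the diff above switches
 * between. Everything here is a stub so the file compiles on its own.
 */
#include <stdbool.h>
#include <stdio.h>

static bool pending;            /* stand-in for "there is zapped data to free" */

static void graph_lock(void)    { puts("graph lock taken"); }
static void graph_unlock(void)  { puts("graph lock released"); }
static void call_rcu_stub(void) { puts("call_rcu(): RCU callback queued"); }

/* Reverted-to behaviour: queue the RCU callback while still holding the lock. */
static void call_rcu_zapped_pattern(void)
{
	graph_lock();
	if (pending)
		call_rcu_stub();        /* runs under the graph lock */
	graph_unlock();
}

/* Behaviour of the reverted fix: decide under the lock, queue after dropping it. */
static void prepare_then_call_pattern(void)
{
	bool need_callback;

	graph_lock();
	need_callback = pending;        /* prepare_call_rcu_zapped() equivalent */
	graph_unlock();

	if (need_callback)
		call_rcu_stub();        /* runs only after the lock is released */
}

int main(void)
{
	pending = true;
	call_rcu_zapped_pattern();
	prepare_then_call_pattern();
	return 0;
}

Deferring the call_rcu() until after the unlock was how the reverted commit avoided invoking the RCU machinery with the graph lock held; applying this revert restores the original, simpler behaviour of scheduling the callback directly inside call_rcu_zapped().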