Commit d8f14b8

Merge tag 'core-debugobjects-2023-05-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull debugobjects fixes from Thomas Gleixner:
 "Two fixes for debugobjects:

   - Prevent the allocation path from waking up kswapd.

     That's a long standing issue due to the GFP_ATOMIC allocation flag.
     As debug objects can be invoked from pretty much any context, waking
     kswapd can end up in arbitrary lock chains versus the waitqueue lock.

   - Correct the explicit lockdep wait-type violation in
     debug_object_fill_pool()"

* tag 'core-debugobjects-2023-05-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  debugobjects: Don't wake up kswapd from fill_pool()
  debugobjects,locking: Annotate debug_object_fill_pool() wait type violation
2 parents 9bd5386 + eb79927 commit d8f14b8
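For context on the first fix: GFP_ATOMIC implies __GFP_KSWAPD_RECLAIM, and that bit is what lets an allocation wake kswapd. A rough sketch of the relevant flag composition, paraphrased from include/linux/gfp_types.h rather than taken from this diff:

/* Paraphrased from include/linux/gfp_types.h (not part of this commit): */
#define GFP_ATOMIC	(__GFP_HIGH | __GFP_KSWAPD_RECLAIM)	/* access reserves, may wake kswapd */

/*
 * The fill_pool() hunk below therefore switches to __GFP_HIGH | __GFP_NOWARN:
 * the allocation keeps access to the reserves, but without
 * __GFP_KSWAPD_RECLAIM it can no longer wake kswapd and drag the kswapd
 * waitqueue lock into arbitrary debugobjects call chains.
 */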

4 files changed, +50 -10 lines changed
include/linux/lockdep.h

Lines changed: 14 additions & 0 deletions
@@ -344,6 +344,16 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
 #define lockdep_repin_lock(l,c)	lock_repin_lock(&(l)->dep_map, (c))
 #define lockdep_unpin_lock(l,c)	lock_unpin_lock(&(l)->dep_map, (c))
 
+/*
+ * Must use lock_map_aquire_try() with override maps to avoid
+ * lockdep thinking they participate in the block chain.
+ */
+#define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type)	\
+	struct lockdep_map _name = {			\
+		.name = #_name "-wait-type-override",	\
+		.wait_type_inner = _wait_type,		\
+		.lock_type = LD_LOCK_WAIT_OVERRIDE, }
+
 #else /* !CONFIG_LOCKDEP */
 
 static inline void lockdep_init_task(struct task_struct *task)
@@ -432,6 +442,9 @@ extern int lockdep_is_held(const void *);
 #define lockdep_repin_lock(l, c)	do { (void)(l); (void)(c); } while (0)
 #define lockdep_unpin_lock(l, c)	do { (void)(l); (void)(c); } while (0)
 
+#define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type)	\
+	struct lockdep_map __maybe_unused _name = {}
+
 #endif /* !LOCKDEP */
 
 enum xhlock_context_t {
@@ -556,6 +569,7 @@ do { \
 #define rwsem_release(l, i)		lock_release(l, i)
 
 #define lock_map_acquire(l)		lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
+#define lock_map_acquire_try(l)		lock_acquire_exclusive(l, 0, 1, NULL, _THIS_IP_)
 #define lock_map_acquire_read(l)	lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
 #define lock_map_acquire_tryread(l)	lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
 #define lock_map_release(l)		lock_release(l, _THIS_IP_)
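The new pieces are meant to be used together: define a static override map and wrap the affected region in lock_map_acquire_try()/lock_map_release(). A minimal sketch with illustrative names (the real in-tree user added by this merge is the lib/debugobjects.c hunk below):

static DEFINE_WAIT_OVERRIDE_MAP(demo_map, LD_WAIT_SLEEP);

static void demo_section(void)
{
	/*
	 * The trylock variant keeps the override map out of lockdep's
	 * dependency chains, per the comment above; while it is held, locks
	 * acquired inside are checked against LD_WAIT_SLEEP instead of the
	 * caller's (possibly stricter) wait type.
	 */
	lock_map_acquire_try(&demo_map);
	work_that_may_take_a_spinlock_t();	/* hypothetical helper */
	lock_map_release(&demo_map);
}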

include/linux/lockdep_types.h

Lines changed: 1 addition & 0 deletions
@@ -33,6 +33,7 @@ enum lockdep_wait_type {
 enum lockdep_lock_type {
 	LD_LOCK_NORMAL = 0,	/* normal, catch all */
 	LD_LOCK_PERCPU,		/* percpu */
+	LD_LOCK_WAIT_OVERRIDE,	/* annotation */
 	LD_LOCK_MAX,
 };
 

kernel/locking/lockdep.c

Lines changed: 21 additions & 7 deletions
@@ -2263,6 +2263,9 @@ static inline bool usage_match(struct lock_list *entry, void *mask)
 
 static inline bool usage_skip(struct lock_list *entry, void *mask)
 {
+	if (entry->class->lock_type == LD_LOCK_NORMAL)
+		return false;
+
 	/*
 	 * Skip local_lock() for irq inversion detection.
 	 *
@@ -2289,14 +2292,16 @@ static inline bool usage_skip(struct lock_list *entry, void *mask)
 	 * As a result, we will skip local_lock(), when we search for irq
 	 * inversion bugs.
 	 */
-	if (entry->class->lock_type == LD_LOCK_PERCPU) {
-		if (DEBUG_LOCKS_WARN_ON(entry->class->wait_type_inner < LD_WAIT_CONFIG))
-			return false;
+	if (entry->class->lock_type == LD_LOCK_PERCPU &&
+	    DEBUG_LOCKS_WARN_ON(entry->class->wait_type_inner < LD_WAIT_CONFIG))
+		return false;
 
-		return true;
-	}
+	/*
+	 * Skip WAIT_OVERRIDE for irq inversion detection -- it's not actually
+	 * a lock and only used to override the wait_type.
+	 */
 
-	return false;
+	return true;
 }
 
 /*
@@ -4768,7 +4773,8 @@ static int check_wait_context(struct task_struct *curr, struct held_lock *next)
 
 	for (; depth < curr->lockdep_depth; depth++) {
 		struct held_lock *prev = curr->held_locks + depth;
-		u8 prev_inner = hlock_class(prev)->wait_type_inner;
+		struct lock_class *class = hlock_class(prev);
+		u8 prev_inner = class->wait_type_inner;
 
 		if (prev_inner) {
 			/*
@@ -4778,6 +4784,14 @@ static int check_wait_context(struct task_struct *curr, struct held_lock *next)
 			 * Also due to trylocks.
 			 */
 			curr_inner = min(curr_inner, prev_inner);
+
+			/*
+			 * Allow override for annotations -- this is typically
+			 * only valid/needed for code that only exists when
+			 * CONFIG_PREEMPT_RT=n.
+			 */
+			if (unlikely(class->lock_type == LD_LOCK_WAIT_OVERRIDE))
+				curr_inner = prev_inner;
 		}
 	}
 
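The net effect in check_wait_context(): when the walk over the held locks hits an LD_LOCK_WAIT_OVERRIDE entry, curr_inner is reset to that map's wait type rather than staying at the minimum seen so far. An illustrative sketch (not from this diff, names hypothetical) of the nesting this annotates away when lockdep's wait-type checking is enabled on a !PREEMPT_RT kernel:

static DEFINE_RAW_SPINLOCK(demo_raw_lock);
static DEFINE_SPINLOCK(demo_spin_lock);
static DEFINE_WAIT_OVERRIDE_MAP(demo_override, LD_WAIT_SLEEP);

static void demo(void)
{
	raw_spin_lock(&demo_raw_lock);		/* caller context: LD_WAIT_SPIN */

	/*
	 * Without the override, the spin_lock() below is reported as an
	 * invalid wait context, since spinlock_t becomes a sleeping lock on
	 * PREEMPT_RT.  With the override held, check_wait_context() resets
	 * curr_inner to LD_WAIT_SLEEP and the acquisition is accepted.
	 */
	lock_map_acquire_try(&demo_override);
	spin_lock(&demo_spin_lock);
	spin_unlock(&demo_spin_lock);
	lock_map_release(&demo_override);

	raw_spin_unlock(&demo_raw_lock);
}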

lib/debugobjects.c

Lines changed: 14 additions & 3 deletions
@@ -126,7 +126,7 @@ static const char *obj_states[ODEBUG_STATE_MAX] = {
 
 static void fill_pool(void)
 {
-	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
+	gfp_t gfp = __GFP_HIGH | __GFP_NOWARN;
 	struct debug_obj *obj;
 	unsigned long flags;
 
@@ -591,10 +591,21 @@ static void debug_objects_fill_pool(void)
 {
 	/*
 	 * On RT enabled kernels the pool refill must happen in preemptible
-	 * context:
+	 * context -- for !RT kernels we rely on the fact that spinlock_t and
+	 * raw_spinlock_t are basically the same type and this lock-type
+	 * inversion works just fine.
 	 */
-	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible())
+	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
+		/*
+		 * Annotate away the spinlock_t inside raw_spinlock_t warning
+		 * by temporarily raising the wait-type to WAIT_SLEEP, matching
+		 * the preemptible() condition above.
+		 */
+		static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_SLEEP);
+		lock_map_acquire_try(&fill_pool_map);
 		fill_pool();
+		lock_map_release(&fill_pool_map);
+	}
 }
 
 static void
