locking/spinlock/rt: Prepare for RT local_lock
Add the static and runtime initializer mechanics to support the RT variant
of local_lock, which requires the lock type in the lockdep map to be set
to LD_LOCK_PERCPU.
Signed-off-by: Thomas Gleixner <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
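
The hunk below only extends the header side: __rt_spin_lock_init() grows a
'percpu' argument and spin_lock_init() gains a local_spin_lock_init()
sibling that passes percpu = true. A minimal sketch of how the runtime
initializer on the C side could consume that flag, assuming the usual
lockdep_init_map_type() interface and the LD_WAIT_*/LD_LOCK_* constants
from <linux/lockdep.h>; illustrative only, not part of this hunk:

  #ifdef CONFIG_DEBUG_LOCK_ALLOC
  /* Sketch: pick the lockdep lock type from the new 'percpu' argument. */
  void __rt_spin_lock_init(spinlock_t *lock, const char *name,
                           struct lock_class_key *key, bool percpu)
  {
          u8 type = percpu ? LD_LOCK_PERCPU : LD_LOCK_NORMAL;

          debug_check_no_locks_freed((void *)lock, sizeof(*lock));
          lockdep_init_map_type(&lock->dep_map, name, key, 0, LD_WAIT_SLEEP,
                                LD_WAIT_INV, type);
  }
  EXPORT_SYMBOL(__rt_spin_lock_init);
  #endif

Passing the type at init time keeps a single __rt_spin_lock_init() for both
regular and per-CPU locks instead of duplicating the lockdep setup.
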
diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
index 4fc7219..835aeda 100644
--- a/include/linux/spinlock_rt.h
+++ b/include/linux/spinlock_rt.h
@@ -8,20 +8,28 @@

#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern void __rt_spin_lock_init(spinlock_t *lock, const char *name,
- struct lock_class_key *key);
+ struct lock_class_key *key, bool percpu);
#else
static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name,
- struct lock_class_key *key)
+ struct lock_class_key *key, bool percpu)
{
}
#endif

-#define spin_lock_init(slock) \
-do { \
- static struct lock_class_key __key; \
- \
- rt_mutex_base_init(&(slock)->lock); \
- __rt_spin_lock_init(slock, #slock, &__key); \
+#define spin_lock_init(slock) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ rt_mutex_base_init(&(slock)->lock); \
+ __rt_spin_lock_init(slock, #slock, &__key, false); \
+} while (0)
+
+#define local_spin_lock_init(slock) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ rt_mutex_base_init(&(slock)->lock); \
+ __rt_spin_lock_init(slock, #slock, &__key, true); \
} while (0)

extern void rt_spin_lock(spinlock_t *lock);
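
For reference, a sketch of how the RT variant of local_lock is expected to
build on the new macro: the local_lock is backed by a spinlock_t and
initialized via local_spin_lock_init(), so lockdep records it as a per-CPU
lock (LD_LOCK_PERCPU) rather than a normal one. The local_lock_t and
__local_lock_init() names below follow the local_lock API but are only
illustrative here; the actual wiring is left to the RT local_lock patches:

  /* Sketch: RT local_lock backed by a sleeping spinlock_t. */
  typedef spinlock_t local_lock_t;

  #define __local_lock_init(l)                  \
  do {                                          \
          local_spin_lock_init((l));            \
  } while (0)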