From f3d786deeba09a9f92415ee94c1274f117980156 Mon Sep 17 00:00:00 2001 From: hujun5 Date: Sun, 19 Jan 2025 15:53:51 +0800 Subject: [PATCH] spinlock: add sched_lock to spin_lock_irqsave reason: We aim to replace big locks with smaller ones. So we will use spin_lock_irqsave extensively to replace enter_critical_section in the subsequent process. We imitate the implementation of Linux by adding sched_lock to spin_lock_irqsave in order to address scenarios where sem_post occurs within spin_lock_irqsave, which can lead to spinlock failures and deadlocks. Signed-off-by: hujun5 --- include/nuttx/spinlock.h | 68 ++++++++++++++++++++++++++++++++++------ 1 file changed, 59 insertions(+), 9 deletions(-) diff --git a/include/nuttx/spinlock.h b/include/nuttx/spinlock.h index 037f39e21fdb7..8609b740d5223 100644 --- a/include/nuttx/spinlock.h +++ b/include/nuttx/spinlock.h @@ -233,6 +233,8 @@ static inline_function void raw_spin_lock(FAR volatile spinlock_t *lock) #ifdef CONFIG_SPINLOCK static inline_function void spin_lock(FAR volatile spinlock_t *lock) { + sched_lock(); + /* Notify that we are waiting for a spinlock */ sched_note_spinlock_lock(lock); @@ -313,6 +315,8 @@ static inline_function bool spin_trylock(FAR volatile spinlock_t *lock) { bool locked; + sched_lock(); + /* Notify that we are waiting for a spinlock */ sched_note_spinlock_lock(lock); @@ -331,6 +335,7 @@ static inline_function bool spin_trylock(FAR volatile spinlock_t *lock) /* Notify that we abort for a spinlock */ sched_note_spinlock_abort(lock); + sched_unlock(); } return locked; @@ -400,9 +405,11 @@ static inline_function void spin_unlock(FAR volatile spinlock_t *lock) /* Notify that we are unlocking the spinlock */ sched_note_spinlock_unlock(lock); + + sched_unlock(); } # else -# define spin_unlock(l) do { *(l) = SP_UNLOCKED; } while (0) +# define spin_unlock(l) do { *(l) = SP_UNLOCKED; sched_unlock();} while (0) # endif #endif /* CONFIG_SPINLOCK */ @@ -443,12 +450,19 @@ irqstate_t 
spin_lock_irqsave_wo_note(FAR volatile spinlock_t *lock) irqstate_t flags; flags = up_irq_save(); + sched_lock_wo_note(); raw_spin_lock(lock); return flags; } #else -# define spin_lock_irqsave_wo_note(l) ((void)(l), up_irq_save()) +static inline_function +irqstate_t spin_lock_irqsave_wo_note(FAR volatile spinlock_t *lock) +{ + irqstate_t flags = up_irq_save(); + sched_lock_wo_note(); + return flags; +} #endif /**************************************************************************** @@ -496,7 +510,13 @@ irqstate_t spin_lock_irqsave(FAR volatile spinlock_t *lock) return flags; } #else -# define spin_lock_irqsave(l) ((void)(l), up_irq_save()) +static inline_function +irqstate_t spin_lock_irqsave(FAR volatile spinlock_t *lock) +{ + irqstate_t flags = up_irq_save(); + sched_lock(); + return flags; +} #endif /**************************************************************************** @@ -531,6 +551,7 @@ irqstate_t spin_lock_irqsave(FAR volatile spinlock_t *lock) ({ \ (void)(l); \ f = up_irq_save(); \ + sched_lock(); \ true; \ }) #endif /* CONFIG_SPINLOCK */ @@ -551,9 +572,10 @@ void spin_unlock_irqrestore_wo_note(FAR volatile spinlock_t *lock, raw_spin_unlock(lock); up_irq_restore(flags); + sched_unlock_wo_note(); } #else -# define spin_unlock_irqrestore_wo_note(l, f) ((void)(l), up_irq_restore(f)) +# define spin_unlock_irqrestore_wo_note(l, f) ((void)(l), up_irq_restore(f), sched_unlock_wo_note()) #endif /**************************************************************************** @@ -592,7 +614,7 @@ void spin_unlock_irqrestore(FAR volatile spinlock_t *lock, sched_note_spinlock_unlock(lock); } #else -# define spin_unlock_irqrestore(l, f) ((void)(l), up_irq_restore(f)) +# define spin_unlock_irqrestore(l, f) ((void)(l), up_irq_restore(f), sched_unlock()) #endif #if defined(CONFIG_RW_SPINLOCK) @@ -641,6 +663,8 @@ void spin_unlock_irqrestore(FAR volatile spinlock_t *lock, static inline_function void read_lock(FAR volatile rwlock_t *lock) { + sched_lock(); + while 
(true) { int old = atomic_read(lock); @@ -685,12 +709,15 @@ static inline_function void read_lock(FAR volatile rwlock_t *lock) static inline_function bool read_trylock(FAR volatile rwlock_t *lock) { + sched_lock(); while (true) { int old = atomic_read(lock); if (old <= RW_SP_WRITE_LOCKED) { DEBUGASSERT(old == RW_SP_WRITE_LOCKED); + sched_unlock(); + return false; } else if (atomic_cmpxchg(lock, &old, old + 1)) @@ -728,6 +755,8 @@ static inline_function void read_unlock(FAR volatile rwlock_t *lock) atomic_fetch_sub(lock, 1); SP_DSB(); SP_SEV(); + + sched_unlock(); } /**************************************************************************** @@ -759,6 +788,7 @@ static inline_function void write_lock(FAR volatile rwlock_t *lock) { int zero = RW_SP_UNLOCKED; + sched_lock(); while (!atomic_cmpxchg(lock, &zero, RW_SP_WRITE_LOCKED)) { SP_DSB(); @@ -797,9 +827,12 @@ static inline_function bool write_trylock(FAR volatile rwlock_t *lock) { int zero = RW_SP_UNLOCKED; + sched_lock(); if (atomic_cmpxchg(lock, &zero, RW_SP_WRITE_LOCKED)) { SP_DMB(); + sched_unlock(); + return true; } @@ -834,6 +867,7 @@ static inline_function void write_unlock(FAR volatile rwlock_t *lock) atomic_set(lock, RW_SP_UNLOCKED); SP_DSB(); SP_SEV(); + sched_unlock(); } /**************************************************************************** @@ -864,7 +898,15 @@ static inline_function void write_unlock(FAR volatile rwlock_t *lock) #ifdef CONFIG_SPINLOCK irqstate_t read_lock_irqsave(FAR rwlock_t *lock); #else -# define read_lock_irqsave(l) ((void)(l), up_irq_save()) +static inline_function irqstate_t read_lock_irqsave(FAR rwlock_t *lock) +{ + irqstate_t ret; + + ret = up_irq_save(); + sched_lock(); + + return ret; +} #endif /**************************************************************************** @@ -893,7 +935,7 @@ irqstate_t read_lock_irqsave(FAR rwlock_t *lock); #ifdef CONFIG_SPINLOCK void read_unlock_irqrestore(FAR rwlock_t *lock, irqstate_t flags); #else -# define read_unlock_irqrestore(l, f)
((void)(l), up_irq_restore(f)) +# define read_unlock_irqrestore(l, f) ((void)(l), up_irq_restore(f), sched_unlock()) #endif /**************************************************************************** @@ -924,7 +966,15 @@ void read_unlock_irqrestore(FAR rwlock_t *lock, irqstate_t flags); #ifdef CONFIG_SPINLOCK irqstate_t write_lock_irqsave(FAR rwlock_t *lock); #else -# define write_lock_irqsave(l) ((void)(l), up_irq_save()) +static inline_function irqstate_t write_lock_irqsave(FAR rwlock_t *lock) +{ + irqstate_t ret; + + ret = up_irq_save(); + sched_lock(); + + return ret; +} #endif /**************************************************************************** @@ -953,7 +1003,7 @@ irqstate_t write_lock_irqsave(FAR rwlock_t *lock); #ifdef CONFIG_SPINLOCK void write_unlock_irqrestore(FAR rwlock_t *lock, irqstate_t flags); #else -# define write_unlock_irqrestore(l, f) ((void)(l), up_irq_restore(f)) +# define write_unlock_irqrestore(l, f) ((void)(l), up_irq_restore(f), sched_unlock()) #endif #endif /* CONFIG_RW_SPINLOCK */