Skip to content

Commit

Permalink
spinlock: add sched_lock to spin_lock_irqsave
Browse files Browse the repository at this point in the history
reason:
We aim to replace big locks with smaller ones, so spin_lock_irqsave will be used extensively to
replace enter_critical_section in subsequent changes. Following the Linux implementation, we
add sched_lock to spin_lock_irqsave to address scenarios where sem_post occurs while the
spinlock is held, which can otherwise lead to spinlock failures and deadlocks.

Signed-off-by: hujun5 <hujun5@xiaomi.com>
  • Loading branch information
hujun260 committed Jan 20, 2025
1 parent 6ea4790 commit f3d786d
Showing 1 changed file with 59 additions and 9 deletions.
68 changes: 59 additions & 9 deletions include/nuttx/spinlock.h
Original file line number Diff line number Diff line change
Expand Up @@ -233,6 +233,8 @@ static inline_function void raw_spin_lock(FAR volatile spinlock_t *lock)
#ifdef CONFIG_SPINLOCK
static inline_function void spin_lock(FAR volatile spinlock_t *lock)
{
sched_lock();

/* Notify that we are waiting for a spinlock */

sched_note_spinlock_lock(lock);
Expand Down Expand Up @@ -313,6 +315,8 @@ static inline_function bool spin_trylock(FAR volatile spinlock_t *lock)
{
bool locked;

sched_lock();

/* Notify that we are waiting for a spinlock */

sched_note_spinlock_lock(lock);
Expand All @@ -331,6 +335,7 @@ static inline_function bool spin_trylock(FAR volatile spinlock_t *lock)
/* Notify that we abort for a spinlock */

sched_note_spinlock_abort(lock);
sched_unlock();
}

return locked;
Expand Down Expand Up @@ -400,9 +405,11 @@ static inline_function void spin_unlock(FAR volatile spinlock_t *lock)
/* Notify that we are unlocking the spinlock */

sched_note_spinlock_unlock(lock);

sched_unlock();
}
# else
# define spin_unlock(l) do { *(l) = SP_UNLOCKED; } while (0)
# define spin_unlock(l) do { *(l) = SP_UNLOCKED; sched_unlock();} while (0)
# endif
#endif /* CONFIG_SPINLOCK */

Expand Down Expand Up @@ -443,12 +450,19 @@ irqstate_t spin_lock_irqsave_wo_note(FAR volatile spinlock_t *lock)
irqstate_t flags;
flags = up_irq_save();

sched_lock_wo_note();
raw_spin_lock(lock);

return flags;
}
#else
# define spin_lock_irqsave_wo_note(l) ((void)(l), up_irq_save())
/* Non-SPINLOCK fallback: there is no lock word to spin on, so taking the
 * "lock" reduces to masking interrupts and blocking preemption.  The
 * wo_note variant skips scheduler instrumentation.
 *
 * lock - Ignored in this configuration (no spinlock support).
 * Returns the interrupt state to pass to spin_unlock_irqrestore_wo_note().
 */

static inline_function
irqstate_t spin_lock_irqsave_wo_note(FAR volatile spinlock_t *lock)
{
  irqstate_t ret;

  /* Order matters: interrupts are disabled first, then preemption. */

  ret = up_irq_save();
  sched_lock_wo_note();

  return ret;
}
#endif

/****************************************************************************
Expand Down Expand Up @@ -496,7 +510,13 @@ irqstate_t spin_lock_irqsave(FAR volatile spinlock_t *lock)
return flags;
}
#else
# define spin_lock_irqsave(l) ((void)(l), up_irq_save())
/* Non-SPINLOCK fallback for spin_lock_irqsave(): with no actual spinlock,
 * acquiring means disabling local interrupts and locking the scheduler so
 * the critical section cannot be preempted.
 *
 * lock - Ignored in this configuration (no spinlock support).
 * Returns the saved interrupt state for spin_unlock_irqrestore().
 */

static inline_function
irqstate_t spin_lock_irqsave(FAR volatile spinlock_t *lock)
{
  irqstate_t ret;

  /* Disable interrupts before taking the scheduler lock. */

  ret = up_irq_save();
  sched_lock();

  return ret;
}
#endif

/****************************************************************************
Expand Down Expand Up @@ -531,6 +551,7 @@ irqstate_t spin_lock_irqsave(FAR volatile spinlock_t *lock)
({ \
(void)(l); \
f = up_irq_save(); \
sched_lock(); \
true; \
})
#endif /* CONFIG_SPINLOCK */
Expand All @@ -551,9 +572,10 @@ void spin_unlock_irqrestore_wo_note(FAR volatile spinlock_t *lock,
raw_spin_unlock(lock);

up_irq_restore(flags);
sched_unlock_wo_note();
}
#else
# define spin_unlock_irqrestore_wo_note(l, f) ((void)(l), up_irq_restore(f))
# define spin_unlock_irqrestore_wo_note(l, f) ((void)(l), up_irq_restore(f), sched_unlock_wo_note())
#endif

/****************************************************************************
Expand Down Expand Up @@ -592,7 +614,7 @@ void spin_unlock_irqrestore(FAR volatile spinlock_t *lock,
sched_note_spinlock_unlock(lock);
}
#else
# define spin_unlock_irqrestore(l, f) ((void)(l), up_irq_restore(f))
# define spin_unlock_irqrestore(l, f) ((void)(l), up_irq_restore(f), sched_unlock())
#endif

#if defined(CONFIG_RW_SPINLOCK)
Expand Down Expand Up @@ -641,6 +663,8 @@ void spin_unlock_irqrestore(FAR volatile spinlock_t *lock,

static inline_function void read_lock(FAR volatile rwlock_t *lock)
{
sched_lock();

while (true)
{
int old = atomic_read(lock);
Expand Down Expand Up @@ -685,12 +709,15 @@ static inline_function void read_lock(FAR volatile rwlock_t *lock)

static inline_function bool read_trylock(FAR volatile rwlock_t *lock)
{
sched_lock();
while (true)
{
int old = atomic_read(lock);
if (old <= RW_SP_WRITE_LOCKED)
{
DEBUGASSERT(old == RW_SP_WRITE_LOCKED);
sched_unlock();

return false;
}
else if (atomic_cmpxchg(lock, &old, old + 1))
Expand Down Expand Up @@ -728,6 +755,8 @@ static inline_function void read_unlock(FAR volatile rwlock_t *lock)
atomic_fetch_sub(lock, 1);
SP_DSB();
SP_SEV();

sched_unlock();
}

/****************************************************************************
Expand Down Expand Up @@ -759,6 +788,7 @@ static inline_function void write_lock(FAR volatile rwlock_t *lock)
{
int zero = RW_SP_UNLOCKED;

sched_lock();
while (!atomic_cmpxchg(lock, &zero, RW_SP_WRITE_LOCKED))
{
SP_DSB();
Expand Down Expand Up @@ -797,9 +827,12 @@ static inline_function bool write_trylock(FAR volatile rwlock_t *lock)
{
int zero = RW_SP_UNLOCKED;

sched_lock();
if (atomic_cmpxchg(lock, &zero, RW_SP_WRITE_LOCKED))
{
SP_DMB();
sched_unlock();

return true;
}

Expand Down Expand Up @@ -834,6 +867,7 @@ static inline_function void write_unlock(FAR volatile rwlock_t *lock)
atomic_set(lock, RW_SP_UNLOCKED);
SP_DSB();
SP_SEV();
sched_unlock();
}

/****************************************************************************
Expand Down Expand Up @@ -864,7 +898,15 @@ static inline_function void write_unlock(FAR volatile rwlock_t *lock)
#ifdef CONFIG_SPINLOCK
irqstate_t read_lock_irqsave(FAR rwlock_t *lock);
#else
# define read_lock_irqsave(l) ((void)(l), up_irq_save())
irqstate_t inline_function read_lock_irqsave(FAR rwlock_t *lock)
{
irqstate_t ret;

ret = up_irq_save();
sched_lock();

return ret;
}
#endif

/****************************************************************************
Expand Down Expand Up @@ -893,7 +935,7 @@ irqstate_t read_lock_irqsave(FAR rwlock_t *lock);
#ifdef CONFIG_SPINLOCK
void read_unlock_irqrestore(FAR rwlock_t *lock, irqstate_t flags);
#else
# define read_unlock_irqrestore(l, f) ((void)(l), up_irq_restore(f))
# define read_unlock_irqrestore(l, f) ((void)(l), up_irq_restore(f), sched_unlock())
#endif

/****************************************************************************
Expand Down Expand Up @@ -924,7 +966,15 @@ void read_unlock_irqrestore(FAR rwlock_t *lock, irqstate_t flags);
#ifdef CONFIG_SPINLOCK
irqstate_t write_lock_irqsave(FAR rwlock_t *lock);
#else
# define write_lock_irqsave(l) ((void)(l), up_irq_save())
static inline_function write_lock_irqsave(FAR rwlock_t *lock)
{
irqstate_t ret;

ret = up_irq_save();
sched_lock();

return ret;
}
#endif

/****************************************************************************
Expand Down Expand Up @@ -953,7 +1003,7 @@ irqstate_t write_lock_irqsave(FAR rwlock_t *lock);
#ifdef CONFIG_SPINLOCK
void write_unlock_irqrestore(FAR rwlock_t *lock, irqstate_t flags);
#else
# define write_unlock_irqrestore(l, f) ((void)(l), up_irq_restore(f))
# define write_unlock_irqrestore(l, f) ((void)(l), up_irq_restore(f), sched_unlock())
#endif

#endif /* CONFIG_RW_SPINLOCK */
Expand Down

0 comments on commit f3d786d

Please sign in to comment.