spinlock: add sched_lock to spin_lock_irqsave
reason:
We aim to replace big locks with smaller ones, so spin_lock_irqsave will be
used extensively in place of enter_critical_section in subsequent changes.
Following the Linux implementation, we add sched_lock to spin_lock_irqsave
to handle scenarios where sem_post is called while the spinlock is held:
without the scheduler locked, the wakeup can trigger a context switch away
from the lock holder, which can lead to spinlock failures and deadlocks.
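
For illustration, here is a minimal sketch of the hazard being addressed
(g_dev_lock, g_wait_sem and wake_waiter() are illustrative names, not part
of this patch):

#include <nuttx/spinlock.h>
#include <nuttx/semaphore.h>

static spinlock_t g_dev_lock = SP_UNLOCKED;
static sem_t g_wait_sem;

static void wake_waiter(void)
{
  irqstate_t flags = spin_lock_irqsave(&g_dev_lock);

  /* nxsem_post() may make a higher-priority task runnable.  Without the
   * scheduler locked, the resulting context switch could suspend this
   * task while it still holds g_dev_lock, so any task that then spins
   * on g_dev_lock deadlocks.  With sched_lock() taken inside
   * spin_lock_irqsave(), the switch is deferred until
   * spin_unlock_irqrestore() calls sched_unlock().
   */

  nxsem_post(&g_wait_sem);

  spin_unlock_irqrestore(&g_dev_lock, flags);
}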

Signed-off-by: hujun5 <hujun5@xiaomi.com>
hujun260 committed Jan 21, 2025
1 parent 72d8eaf commit 6c7b056
Showing 1 changed file with 71 additions and 18 deletions.
89 changes: 71 additions & 18 deletions include/nuttx/spinlock.h
@@ -233,6 +233,8 @@ static inline_function void raw_spin_lock(FAR volatile spinlock_t *lock)
#ifdef CONFIG_SPINLOCK
static inline_function void spin_lock(FAR volatile spinlock_t *lock)
{
sched_lock();

/* Notify that we are waiting for a spinlock */

sched_note_spinlock_lock(lock);
@@ -313,6 +315,8 @@ static inline_function bool spin_trylock(FAR volatile spinlock_t *lock)
{
bool locked;

sched_lock();

/* Notify that we are waiting for a spinlock */

sched_note_spinlock_lock(lock);
@@ -331,6 +335,7 @@ static inline_function bool spin_trylock(FAR volatile spinlock_t *lock)
/* Notify that we abort for a spinlock */

sched_note_spinlock_abort(lock);
sched_unlock();
}

return locked;
@@ -400,9 +405,11 @@ static inline_function void spin_unlock(FAR volatile spinlock_t *lock)
/* Notify that we are unlocking the spinlock */

sched_note_spinlock_unlock(lock);

sched_unlock();
}
# else
-# define spin_unlock(l) do { *(l) = SP_UNLOCKED; } while (0)
+# define spin_unlock(l) do { *(l) = SP_UNLOCKED; sched_unlock(); } while (0)
# endif
#endif /* CONFIG_SPINLOCK */
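
With the scheduler lock folded in, note the asymmetry in spin_trylock():
on success the scheduler stays locked until the matching spin_unlock(); on
failure spin_trylock() has already called sched_unlock() itself, so the
caller must not unlock. A minimal usage sketch (g_state_lock, g_counter
and try_increment() are illustrative names, not part of this patch):

#include <stdbool.h>
#include <nuttx/spinlock.h>

static spinlock_t g_state_lock = SP_UNLOCKED;
static int g_counter;

static bool try_increment(void)
{
  if (!spin_trylock(&g_state_lock))
    {
      /* Failure: sched_unlock() already ran inside spin_trylock() */

      return false;
    }

  /* Success: we hold both the spinlock and the scheduler lock */

  g_counter++;
  spin_unlock(&g_state_lock); /* Releases the lock, then sched_unlock() */
  return true;
}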

@@ -443,28 +450,35 @@ irqstate_t spin_lock_irqsave_wo_note(FAR volatile spinlock_t *lock)
irqstate_t flags;
flags = up_irq_save();

sched_lock_wo_note();
raw_spin_lock(lock);

return flags;
}
#else
-# define spin_lock_irqsave_wo_note(l) ((void)(l), up_irq_save())
+static inline_function
+irqstate_t spin_lock_irqsave_wo_note(FAR volatile spinlock_t *lock)
+{
+  irqstate_t flags = up_irq_save();
+  sched_lock_wo_note();
+  return flags;
+}
#endif

/****************************************************************************
* Name: spin_lock_irqsave
*
* Description:
* If SMP is enabled:
-Disable local interrupts and take the lock spinlock and return
-the interrupt state.
+Disable local interrupts, call sched_lock(), take the spinlock, and
+return the interrupt state.
*
NOTE: This API is a simple way to protect data (e.g. an H/W register
or an internal data structure) in SMP mode. But do not use this API
with kernel APIs which may suspend the calling thread (e.g. nxsem_wait).
*
* If SMP is not enabled:
-This function is equivalent to up_irq_save().
+This function is equivalent to up_irq_save() + sched_lock().
*
* Input Parameters:
* lock - Caller specific spinlock. not NULL.
@@ -485,9 +499,10 @@ irqstate_t spin_lock_irqsave(FAR volatile spinlock_t *lock)

sched_note_spinlock_lock(lock);

-  /* Lock without trace note */
-
-  flags = spin_lock_irqsave_wo_note(lock);
+  flags = up_irq_save();
+
+  sched_lock();
+  raw_spin_lock(lock);

/* Notify that we have the spinlock */

@@ -496,7 +511,13 @@ irqstate_t spin_lock_irqsave(FAR volatile spinlock_t *lock)
return flags;
}
#else
-# define spin_lock_irqsave(l) ((void)(l), up_irq_save())
+static inline_function
+irqstate_t spin_lock_irqsave(FAR volatile spinlock_t *lock)
+{
+  irqstate_t flags = up_irq_save();
+  sched_lock();
+  return flags;
+}
#endif

/****************************************************************************
@@ -575,6 +596,7 @@ irqstate_t raw_spin_lock_irqsave(FAR volatile spinlock_t *lock)
({ \
(void)(l); \
f = up_irq_save(); \
sched_lock(); \
true; \
})
#endif /* CONFIG_SPINLOCK */
@@ -595,21 +617,22 @@ void spin_unlock_irqrestore_wo_note(FAR volatile spinlock_t *lock,
raw_spin_unlock(lock);

up_irq_restore(flags);
sched_unlock_wo_note();
}
#else
-# define spin_unlock_irqrestore_wo_note(l, f) ((void)(l), up_irq_restore(f))
+# define spin_unlock_irqrestore_wo_note(l, f) ((void)(l), up_irq_restore(f), sched_unlock_wo_note())
#endif

/****************************************************************************
* Name: spin_unlock_irqrestore
*
* Description:
* If SMP is enabled:
-Release the lock and restore the interrupt state as it was prior
-to the previous call to spin_lock_irqsave(lock).
+Release the lock, restore the interrupt state to what it was prior
+to the previous call to spin_lock_irqsave(lock), and call
+sched_unlock().
*
* If SMP is not enabled:
-This function is equivalent to up_irq_restore().
+This function is equivalent to up_irq_restore() + sched_unlock().
*
* Input Parameters:
* lock - Caller specific spinlock. not NULL
@@ -629,14 +652,17 @@ void spin_unlock_irqrestore(FAR volatile spinlock_t *lock,
{
-  /* Unlock without trace note */
-
-  spin_unlock_irqrestore_wo_note(lock, flags);
+  raw_spin_unlock(lock);
+
+  up_irq_restore(flags);
+  sched_unlock();

/* Notify that we are unlocking the spinlock */

sched_note_spinlock_unlock(lock);
}
#else
-# define spin_unlock_irqrestore(l, f) ((void)(l), up_irq_restore(f))
+# define spin_unlock_irqrestore(l, f) ((void)(l), up_irq_restore(f), sched_unlock())
#endif
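
Taken together, spin_lock_irqsave()/spin_unlock_irqrestore() now keep the
scheduler locked for exactly the span of the critical section. A minimal
usage sketch (struct my_dev_s and my_dev_increment() are illustrative, not
part of this patch):

#include <stdint.h>
#include <nuttx/spinlock.h>

struct my_dev_s
{
  spinlock_t lock;  /* Protects count */
  uint32_t count;
};

static void my_dev_increment(FAR struct my_dev_s *dev)
{
  /* Disables local interrupts, locks the scheduler, takes the spinlock */

  irqstate_t flags = spin_lock_irqsave(&dev->lock);

  dev->count++;  /* No interrupts, no preemption, no other CPU inside */

  /* Releases the spinlock, restores interrupts, then unlocks the
   * scheduler; any context switch deferred by sched_lock() happens here.
   */

  spin_unlock_irqrestore(&dev->lock, flags);
}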

/****************************************************************************
@@ -720,6 +746,8 @@ void raw_spin_unlock_irqrestore(FAR volatile spinlock_t *lock,

static inline_function void read_lock(FAR volatile rwlock_t *lock)
{
sched_lock();

while (true)
{
int old = atomic_read(lock);
@@ -764,12 +792,15 @@ static inline_function void read_lock(FAR volatile rwlock_t *lock)

static inline_function bool read_trylock(FAR volatile rwlock_t *lock)
{
sched_lock();
while (true)
{
int old = atomic_read(lock);
if (old <= RW_SP_WRITE_LOCKED)
{
DEBUGASSERT(old == RW_SP_WRITE_LOCKED);
sched_unlock();

return false;
}
else if (atomic_cmpxchg(lock, &old, old + 1))
@@ -807,6 +838,8 @@ static inline_function void read_unlock(FAR volatile rwlock_t *lock)
atomic_fetch_sub(lock, 1);
UP_DSB();
UP_SEV();

sched_unlock();
}

/****************************************************************************
@@ -838,6 +871,7 @@ static inline_function void write_lock(FAR volatile rwlock_t *lock)
{
int zero = RW_SP_UNLOCKED;

sched_lock();
while (!atomic_cmpxchg(lock, &zero, RW_SP_WRITE_LOCKED))
{
UP_DSB();
@@ -876,9 +910,11 @@ static inline_function bool write_trylock(FAR volatile rwlock_t *lock)
{
int zero = RW_SP_UNLOCKED;

sched_lock();
if (atomic_cmpxchg(lock, &zero, RW_SP_WRITE_LOCKED))
{
UP_DMB();
return true;
}

sched_unlock();
@@ -913,6 +949,7 @@ static inline_function void write_unlock(FAR volatile rwlock_t *lock)
atomic_set(lock, RW_SP_UNLOCKED);
UP_DSB();
UP_SEV();
sched_unlock();
}
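
A minimal usage sketch of the read/write paths with the scheduler lock
folded in (g_table_lock and g_table are illustrative names, and
RW_SP_UNLOCKED is assumed to be the static initializer for rwlock_t):

#include <nuttx/spinlock.h>

static rwlock_t g_table_lock = RW_SP_UNLOCKED;
static int g_table[16];

static int table_read(int index)
{
  int value;

  read_lock(&g_table_lock);   /* sched_lock() + shared acquisition */
  value = g_table[index];
  read_unlock(&g_table_lock); /* Release + sched_unlock() */

  return value;
}

static void table_write(int index, int value)
{
  write_lock(&g_table_lock);   /* sched_lock() + exclusive acquisition */
  g_table[index] = value;
  write_unlock(&g_table_lock); /* Release + sched_unlock() */
}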

/****************************************************************************
@@ -943,7 +980,15 @@ static inline_function void write_unlock(FAR volatile rwlock_t *lock)
#ifdef CONFIG_SPINLOCK
irqstate_t read_lock_irqsave(FAR rwlock_t *lock);
#else
-# define read_lock_irqsave(l) ((void)(l), up_irq_save())
+static inline_function irqstate_t read_lock_irqsave(FAR rwlock_t *lock)
+{
+  irqstate_t ret;
+
+  ret = up_irq_save();
+  sched_lock();
+
+  return ret;
+}
#endif

/****************************************************************************
@@ -972,7 +1017,7 @@ irqstate_t read_lock_irqsave(FAR rwlock_t *lock);
#ifdef CONFIG_SPINLOCK
void read_unlock_irqrestore(FAR rwlock_t *lock, irqstate_t flags);
#else
-# define read_unlock_irqrestore(l, f) ((void)(l), up_irq_restore(f))
+# define read_unlock_irqrestore(l, f) ((void)(l), up_irq_restore(f), sched_unlock())
#endif

/****************************************************************************
@@ -1003,7 +1048,15 @@ void read_unlock_irqrestore(FAR rwlock_t *lock, irqstate_t flags);
#ifdef CONFIG_SPINLOCK
irqstate_t write_lock_irqsave(FAR rwlock_t *lock);
#else
-# define write_lock_irqsave(l) ((void)(l), up_irq_save())
+static inline_function irqstate_t write_lock_irqsave(FAR rwlock_t *lock)
+{
+  irqstate_t ret;
+
+  ret = up_irq_save();
+  sched_lock();
+
+  return ret;
+}
#endif

/****************************************************************************
@@ -1032,7 +1085,7 @@ irqstate_t write_lock_irqsave(FAR rwlock_t *lock);
#ifdef CONFIG_SPINLOCK
void write_unlock_irqrestore(FAR rwlock_t *lock, irqstate_t flags);
#else
-# define write_unlock_irqrestore(l, f) ((void)(l), up_irq_restore(f))
+# define write_unlock_irqrestore(l, f) ((void)(l), up_irq_restore(f), sched_unlock())
#endif

#endif /* CONFIG_RW_SPINLOCK */
