diff --git a/include/nuttx/spinlock.h b/include/nuttx/spinlock.h
index ca55d19bf6dcb..00190482c55c6 100644
--- a/include/nuttx/spinlock.h
+++ b/include/nuttx/spinlock.h
@@ -233,6 +233,8 @@ static inline_function void raw_spin_lock(FAR volatile spinlock_t *lock)
 #ifdef CONFIG_SPINLOCK
 static inline_function void spin_lock(FAR volatile spinlock_t *lock)
 {
+  sched_lock();
+
   /* Notify that we are waiting for a spinlock */
 
   sched_note_spinlock_lock(lock);
@@ -245,6 +247,8 @@ static inline_function void spin_lock(FAR volatile spinlock_t *lock)
 
   sched_note_spinlock_locked(lock);
 }
+#else
+# define spin_lock(l) ((void)(l), sched_lock())
 #endif /* CONFIG_SPINLOCK */
 
 /****************************************************************************
@@ -313,6 +317,8 @@ static inline_function bool spin_trylock(FAR volatile spinlock_t *lock)
 {
   bool locked;
 
+  sched_lock();
+
   /* Notify that we are waiting for a spinlock */
 
   sched_note_spinlock_lock(lock);
@@ -331,10 +337,13 @@ static inline_function bool spin_trylock(FAR volatile spinlock_t *lock)
       /* Notify that we abort for a spinlock */
 
       sched_note_spinlock_abort(lock);
+      sched_unlock();
     }
 
   return locked;
 }
+#else
+# define spin_trylock(l) ((void)(l), sched_lock(), true)
 #endif /* CONFIG_SPINLOCK */
 
 /****************************************************************************
@@ -400,10 +409,14 @@ static inline_function void spin_unlock(FAR volatile spinlock_t *lock)
   /* Notify that we are unlocking the spinlock */
 
   sched_note_spinlock_unlock(lock);
+
+  sched_unlock();
 }
 # else
-# define spin_unlock(l) do { *(l) = SP_UNLOCKED; } while (0)
+# define spin_unlock(l) do { *(l) = SP_UNLOCKED; sched_unlock(); } while (0)
 # endif
+#else
+# define spin_unlock(l) ((void)(l), sched_unlock())
 #endif /* CONFIG_SPINLOCK */
 
 /****************************************************************************
@@ -456,15 +469,15 @@ irqstate_t raw_spin_lock_irqsave(FAR volatile spinlock_t *lock)
  *
  * Description:
  *   If SMP is enabled:
- *     Disable local interrupts and take the lock spinlock and return
- *     the interrupt state.
+ *     Disable local interrupts, take the spinlock, call sched_lock(), and
+ *     return the interrupt state.
  *
  *   NOTE: This API is very simple to protect data (e.g. H/W register
  *   or internal data structure) in SMP mode. But do not use this API
  *   with kernel APIs which suspend a caller thread. (e.g. nxsem_wait)
 *
  *   If SMP is not enabled:
- *     This function is equivalent to up_irq_save().
+ *     This function is equivalent to up_irq_save() + sched_lock().
  *
  * Input Parameters:
  *   lock - Caller specific spinlock. not NULL.
@@ -485,9 +498,8 @@ irqstate_t spin_lock_irqsave(FAR volatile spinlock_t *lock)
 
   sched_note_spinlock_lock(lock);
 
-  /* Lock without trace note */
-
   flags = raw_spin_lock_irqsave(lock);
+  sched_lock();
 
   /* Notify that we have the spinlock */
 
@@ -496,7 +508,13 @@ irqstate_t spin_lock_irqsave(FAR volatile spinlock_t *lock)
   return flags;
 }
 #else
-# define spin_lock_irqsave(l) ((void)(l), up_irq_save())
+static inline_function
+irqstate_t spin_lock_irqsave(FAR volatile spinlock_t *lock)
+{
+  irqstate_t flags = up_irq_save();
+  sched_lock();
+  return flags;
+}
 #endif
 
 /****************************************************************************
@@ -570,6 +588,7 @@ irqstate_t spin_lock_irqsave(FAR volatile spinlock_t *lock)
    ({ \
       (void)(l); \
       f = up_irq_save(); \
+     sched_lock(); \
      true; \
    })
 #endif /* CONFIG_SPINLOCK */
@@ -600,11 +619,11 @@ void raw_spin_unlock_irqrestore(FAR volatile spinlock_t *lock,
  *
  * Description:
  *   If SMP is enabled:
- *     Release the lock and restore the interrupt state as it was prior
- *     to the previous call to spin_lock_irqsave(lock).
+ *     Release the lock, restore the interrupt state as it was prior to
+ *     the previous call to spin_lock_irqsave(lock), then sched_unlock().
  *
  *   If SMP is not enabled:
- *     This function is equivalent to up_irq_restore().
+ *     This function is equivalent to up_irq_restore() + sched_unlock().
  *
  * Input Parameters:
  *   lock - Caller specific spinlock. not NULL
@@ -625,12 +644,14 @@ void spin_unlock_irqrestore(FAR volatile spinlock_t *lock, irqstate_t flags)
 
   raw_spin_unlock_irqrestore(lock, flags);
 
+  sched_unlock();
+
   /* Notify that we are unlocking the spinlock */
 
   sched_note_spinlock_unlock(lock);
 }
 #else
-# define spin_unlock_irqrestore(l, f) ((void)(l), up_irq_restore(f))
+# define spin_unlock_irqrestore(l, f) ((void)(l), up_irq_restore(f), sched_unlock())
 #endif
 
 #if defined(CONFIG_RW_SPINLOCK)
@@ -679,6 +700,8 @@ void spin_unlock_irqrestore(FAR volatile spinlock_t *lock, irqstate_t flags)
 
 static inline_function void read_lock(FAR volatile rwlock_t *lock)
 {
+  sched_lock();
+
   while (true)
     {
       int old = atomic_read(lock);
@@ -723,12 +746,15 @@ static inline_function void read_lock(FAR volatile rwlock_t *lock)
 static inline_function bool read_trylock(FAR volatile rwlock_t *lock)
 {
+  sched_lock();
   while (true)
     {
       int old = atomic_read(lock);
 
       if (old <= RW_SP_WRITE_LOCKED)
         {
           DEBUGASSERT(old == RW_SP_WRITE_LOCKED);
+          sched_unlock();
+
           return false;
         }
       else if (atomic_cmpxchg(lock, &old, old + 1))
@@ -766,6 +792,8 @@ static inline_function void read_unlock(FAR volatile rwlock_t *lock)
   atomic_fetch_sub(lock, 1);
   UP_DSB();
   UP_SEV();
+
+  sched_unlock();
 }
 
 /****************************************************************************
@@ -797,6 +825,7 @@ static inline_function void write_lock(FAR volatile rwlock_t *lock)
 {
   int zero = RW_SP_UNLOCKED;
 
+  sched_lock();
   while (!atomic_cmpxchg(lock, &zero, RW_SP_WRITE_LOCKED))
     {
       UP_DSB();
@@ -835,12 +864,14 @@ static inline_function bool write_trylock(FAR volatile rwlock_t *lock)
 {
   int zero = RW_SP_UNLOCKED;
 
+  sched_lock();
   if (atomic_cmpxchg(lock, &zero, RW_SP_WRITE_LOCKED))
     {
       UP_DMB();
       return true;
     }
 
   UP_DSB();
+  sched_unlock();
   return false;
 }
@@ -872,6 +903,7 @@ static inline_function void write_unlock(FAR volatile rwlock_t *lock)
   atomic_set(lock, RW_SP_UNLOCKED);
   UP_DSB();
   UP_SEV();
+  sched_unlock();
 }
 
 /****************************************************************************
@@ -902,7 +934,15 @@ static inline_function void write_unlock(FAR volatile rwlock_t *lock)
 #ifdef CONFIG_SPINLOCK
 irqstate_t read_lock_irqsave(FAR rwlock_t *lock);
 #else
-# define read_lock_irqsave(l) ((void)(l), up_irq_save())
+static inline_function irqstate_t read_lock_irqsave(FAR rwlock_t *lock)
+{
+  irqstate_t ret;
+
+  ret = up_irq_save();
+  sched_lock();
+
+  return ret;
+}
 #endif
 
 /****************************************************************************
@@ -931,7 +971,7 @@ irqstate_t read_lock_irqsave(FAR rwlock_t *lock);
 #ifdef CONFIG_SPINLOCK
 void read_unlock_irqrestore(FAR rwlock_t *lock, irqstate_t flags);
 #else
-# define read_unlock_irqrestore(l, f) ((void)(l), up_irq_restore(f))
+# define read_unlock_irqrestore(l, f) ((void)(l), up_irq_restore(f), sched_unlock())
 #endif
 
 /****************************************************************************
@@ -962,7 +1002,15 @@ void read_unlock_irqrestore(FAR rwlock_t *lock, irqstate_t flags);
 #ifdef CONFIG_SPINLOCK
 irqstate_t write_lock_irqsave(FAR rwlock_t *lock);
 #else
-# define write_lock_irqsave(l) ((void)(l), up_irq_save())
+static inline_function irqstate_t write_lock_irqsave(FAR rwlock_t *lock)
+{
+  irqstate_t ret;
+
+  ret = up_irq_save();
+  sched_lock();
+
+  return ret;
+}
 #endif
 
 /****************************************************************************
@@ -991,7 +1039,7 @@ irqstate_t write_lock_irqsave(FAR rwlock_t *lock);
 #ifdef CONFIG_SPINLOCK
 void write_unlock_irqrestore(FAR rwlock_t *lock, irqstate_t flags);
 #else
-# define write_unlock_irqrestore(l, f) ((void)(l), up_irq_restore(f))
+# define write_unlock_irqrestore(l, f) ((void)(l), up_irq_restore(f), sched_unlock())
 #endif
 
 #endif /* CONFIG_RW_SPINLOCK */
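
For reference, a minimal usage sketch of the new semantics (not part of the patch; the device names and the helper function below are hypothetical): after this change every spin_lock*()/spin_unlock*() pair also takes and releases the scheduler lock, so the owning thread cannot be preempted on the local CPU while it holds the spinlock.

/* Hypothetical example only: g_mydev_lock, g_mydev_count and
 * mydev_increment() are not part of NuttX; they just illustrate the
 * intended acquire/release pairing after this patch.
 */

#include <nuttx/spinlock.h>

static spinlock_t g_mydev_lock = SP_UNLOCKED;
static unsigned int g_mydev_count;

void mydev_increment(void)
{
  /* Disables local interrupts, acquires the spinlock, then takes the
   * scheduler lock (raw_spin_lock_irqsave() followed by sched_lock()).
   */

  irqstate_t flags = spin_lock_irqsave(&g_mydev_lock);

  g_mydev_count++;

  /* Releases the spinlock, restores the interrupt state, and drops the
   * scheduler lock, matching the sched_lock() taken above.
   */

  spin_unlock_irqrestore(&g_mydev_lock, flags);
}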