Commit: Add comments and add logic for a race condition in _lf_clock_now

erlingrj committed Oct 24, 2023
1 parent dcaed97 commit 2933a10
Showing 2 changed files with 53 additions and 22 deletions.
49 changes: 39 additions & 10 deletions core/platform/lf_zephyr_clock_counter.c
@@ -54,21 +54,29 @@ static struct counter_alarm_cfg alarm_cfg;
const struct device *const counter_dev = DEVICE_DT_GET(LF_TIMER);
static volatile bool alarm_fired;

-// This callback is invoked when the Counter device tracking the progress of time
-// overflows
-static void overflow_callback(const struct device *dev, void *user_data) {
-    last_epoch_nsec += epoch_duration_nsec;
+/**
+ * This callback is invoked when the underlying Timer peripheral overflows.
+ * Handled by incrementing the epoch variable.
+ */
+static void overflow_callback(const struct device *dev, void *user_data) {
+    last_epoch_nsec += epoch_duration_nsec;
}

-// This callback is invoked when the alarm expires.
+/**
+ * This callback is invoked when the alarm configured for sleeping expires.
+ * The sleeping thread is released by giving it the semaphore.
+ */
static void alarm_callback(const struct device *counter_dev,
                           uint8_t chan_id, uint32_t ticks,
-                          void *user_data)
-{
+                          void *user_data) {
    alarm_fired=true;
    k_sem_give(&semaphore);
}

+/**
+ * Initialize the Counter device. Check its frequency and compute epoch
+ * durations.
+ */
void _lf_initialize_clock() {
    struct counter_top_cfg counter_top_cfg;
    uint32_t counter_max_ticks=0;
@@ -114,17 +122,35 @@ void _lf_initialize_clock() {
    counter_start(counter_dev);
}

+/**
+ * The Counter device tracks current physical time. Overflows are handled in an
+ * ISR.
+ */
int _lf_clock_now(instant_t* t) {
+    static uint64_t last_nsec = 0;
    uint32_t now_cycles;
    int res;
    uint64_t now_nsec;

    res = counter_get_value(counter_dev, &now_cycles);
-    now_nsec = counter_ticks_to_us(counter_dev, now_cycles)*1000ULL;
-    *t = now_nsec + last_epoch_nsec;
+    now_nsec = counter_ticks_to_us(counter_dev, now_cycles)*1000ULL + last_epoch_nsec;
+
+    // Make sure that the clock is monotonic. We might have had a wrap but the
+    // epoch has not been updated because interrupts are disabled.
+    if (now_nsec < last_nsec) {
+        now_nsec = last_nsec + 1;
+    }
+
+    *t = now_nsec;
+    last_nsec = now_nsec;
    return 0;
}

+/**
+ * Handle interruptable sleep by configuring a future alarm callback and waiting
+ * on a semaphore. Make sure we can handle sleeps that exceed an entire epoch
+ * of the Counter.
+ */
int _lf_interruptable_sleep_until_locked(environment_t* env, instant_t wakeup) {
    // Reset flags
    alarm_fired = false;
@@ -181,11 +207,14 @@ int _lf_interruptable_sleep_until_locked(environment_t* env, instant_t wakeup) {
    }
}

+/**
+ * We notify of async events by setting the flag and giving the semaphore.
+ */
int _lf_unthreaded_notify_of_event() {
    async_event = true;
    k_sem_give(&semaphore);
    return 0;
}

#endif
-#endif
+#endif
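
Note on the race this commit guards against: overflow_callback advances
last_epoch_nsec from an ISR, but _lf_clock_now may run while interrupts are
disabled, so the Counter can wrap before the epoch variable is updated and a
raw readout would jump backwards. The clamp against last_nsec restores
monotonicity. Below is a minimal, self-contained sketch of that clamp; the
names clock_now and EPOCH_DURATION_NSEC and the toy 1000 ns epoch are
illustrative stand-ins, not code from this repository.

#include <stdint.h>
#include <stdio.h>

#define EPOCH_DURATION_NSEC 1000ULL /* toy epoch so the wrap is easy to see */

static uint64_t last_epoch_nsec = 0; /* normally bumped by the overflow ISR */
static uint64_t last_nsec = 0;       /* a static local in the real _lf_clock_now */

/* Clamp as in the committed _lf_clock_now: never report a time earlier than
 * the previous readout, even if the epoch variable lags behind a wrap. */
static uint64_t clock_now(uint64_t raw_nsec_in_epoch) {
    uint64_t now_nsec = raw_nsec_in_epoch + last_epoch_nsec;
    if (now_nsec < last_nsec) {
        now_nsec = last_nsec + 1;
    }
    last_nsec = now_nsec;
    return now_nsec;
}

int main(void) {
    printf("%llu\n", (unsigned long long)clock_now(990)); /* 990 */
    /* The counter wraps: the raw value restarts at 5, but the ISR that adds
     * EPOCH_DURATION_NSEC to last_epoch_nsec has not run yet. */
    printf("%llu\n", (unsigned long long)clock_now(5));   /* clamped to 991 */
    last_epoch_nsec += EPOCH_DURATION_NSEC;               /* ISR catches up */
    printf("%llu\n", (unsigned long long)clock_now(7));   /* 1007, monotonic */
    return 0;
}

Without the clamp the second readout would regress from 990 to 5; with it,
time is pinned one nanosecond past the previous reading until the epoch
variable catches up.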
26 changes: 14 additions & 12 deletions core/platform/lf_zephyr_clock_kernel.c
@@ -42,7 +42,6 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "util.h"

static int64_t epoch_duration_nsec;
-static int64_t epoch_duration_usec;
static volatile int64_t last_epoch_nsec = 0;
static uint32_t timer_freq;
static volatile bool async_event = false;
@@ -51,37 +50,36 @@ void _lf_initialize_clock() {
    timer_freq = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;
    LF_PRINT_LOG("--- Using LF Zephyr Kernel Clock with a frequency of %u Hz\n", timer_freq);
    last_epoch_nsec = 0;
-    // Compute the duration of an epoch. Compute both
-    // nsec and usec now at boot to avoid these computations later
    epoch_duration_nsec = ((1LL << 32) * SECONDS(1))/CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;
-    epoch_duration_usec = epoch_duration_nsec/1000;
}

-// Clock and sleep implementation for LO_RES clock. Handle wraps
-// by checking if two consecutive reads are monotonic
+/**
+ * Detect wraps by storing the previous clock readout. When a clock readout is
+ * less than the previous we have had a wrap. This only works if `_lf_clock_now`
+ * is invoked at least once per epoch.
+ */
int _lf_clock_now(instant_t* t) {
    static uint32_t last_read_cycles=0;
    uint32_t now_cycles = k_cycle_get_32();

    if (now_cycles < last_read_cycles) {
        last_epoch_nsec += epoch_duration_nsec;
    }

    *t = (SECOND(1)/CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC)*now_cycles + last_epoch_nsec;

    last_read_cycles = now_cycles;
    return 0;
}

+/**
+ * Interruptable sleep is implemented using busy-waiting.
+ */
int _lf_interruptable_sleep_until_locked(environment_t* env, instant_t wakeup) {
    async_event=false;
-    lf_critical_section_exit(env);
-
+    lf_critical_section_exit(env);
    instant_t now;
    do {
        _lf_clock_now(&now);
    } while ( (now<wakeup) && !async_event);
-
    lf_critical_section_enter(env);
-
    if (async_event) {
@@ -92,10 +90,14 @@ int _lf_interruptable_sleep_until_locked(environment_t* env, instant_t wakeup) {
    }
}

+/**
+ * Asynchronous events are notified by setting a flag which breaks the sleeping
+ * thread out of the busy-wait.
+ */
int _lf_unthreaded_notify_of_event() {
    async_event = true;
    return 0;
}

#endif
-#endif
+#endif
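
A worked example of the epoch arithmetic above: epoch_duration_nsec =
((1LL << 32) * SECONDS(1))/CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC is the time
spanned by one full pass of the 32-bit cycle counter. Assuming a hypothetical
10 MHz CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC, one cycle is 100 ns and an epoch
lasts 2^32 * 100 ns, roughly 429.5 s, so the wrap detection in _lf_clock_now
is sound only if the clock is read at least once every ~7 minutes. The sketch
below reproduces the wrap-detection idea outside Zephyr; cycles_to_time and
the 10 MHz frequency are assumptions for illustration only.

#include <stdint.h>
#include <stdio.h>

#define CYCLES_PER_SEC 10000000LL /* stand-in for CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC */
#define NSEC_PER_SEC   1000000000LL

static int64_t last_epoch_nsec = 0;

/* Same idea as _lf_clock_now in lf_zephyr_clock_kernel.c: a readout smaller
 * than the previous one means the 32-bit counter wrapped, so advance the
 * epoch by the duration of one full counter pass. */
static int64_t cycles_to_time(uint32_t now_cycles) {
    static uint32_t last_read_cycles = 0;
    int64_t epoch_duration_nsec = ((1LL << 32) * NSEC_PER_SEC) / CYCLES_PER_SEC;
    if (now_cycles < last_read_cycles) {
        last_epoch_nsec += epoch_duration_nsec;
    }
    last_read_cycles = now_cycles;
    return (NSEC_PER_SEC / CYCLES_PER_SEC) * (int64_t)now_cycles + last_epoch_nsec;
}

int main(void) {
    printf("%lld\n", (long long)cycles_to_time(4294967000u)); /* near the top */
    printf("%lld\n", (long long)cycles_to_time(100u));        /* after a wrap */
    return 0;
}

The second, smaller readout triggers the epoch bump, so the reported time
(429,496,739,600 ns) still lies after the first (429,496,700,000 ns).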
