Skip to content

Commit

Permalink
pm: policy: event: use uptime ticks
Browse files Browse the repository at this point in the history
Update events to use uptime ticks, which is a monotonic clock with
the same resolution as kernel ticks. This makes comparisons simple and
removes the complexity of dealing with wrapping counter values.

The wrapping is particularly problematic for events since this makes
it quite complex to track if an event has occurred in the past, or
will occur in the future. This info is needed to know if an event
has actually been handled or not.

Signed-off-by: Bjarki Arge Andreasen <[email protected]>
  • Loading branch information
bjarki-andreasen authored and kartben committed Dec 9, 2024
1 parent 6aa760a commit 59779eb
Show file tree
Hide file tree
Showing 3 changed files with 71 additions and 82 deletions.
38 changes: 21 additions & 17 deletions include/zephyr/pm/policy.h
Original file line number Diff line number Diff line change
Expand Up @@ -67,7 +67,7 @@ struct pm_policy_latency_request {
struct pm_policy_event {
/** @cond INTERNAL_HIDDEN */
sys_snode_t node;
uint32_t value_cyc;
int64_t uptime_ticks;
/** @endcond */
};

Expand Down Expand Up @@ -137,38 +137,38 @@ void pm_policy_state_lock_put(enum pm_state state, uint8_t substate_id);
*/
bool pm_policy_state_lock_is_active(enum pm_state state, uint8_t substate_id);


/**
* @brief Register an event.
*
* Events in the power-management policy context are defined as any source that
* will wake up the system at a known time in the future. By registering such
* event, the policy manager will be able to decide whether certain power states
* are worth entering or not.
* CPU is woken up before the time passed in cycle to prevent the event handling
* latency
*
* @note It is mandatory to unregister events once they have happened by using
* pm_policy_event_unregister(). Not doing so is an API contract violation,
* because the system would continue to consider them as valid events in the
* *far* future, that is, after the cycle counter rollover.
CPU is woken up before the time passed in uptime_ticks to minimize event handling
* latency. Once woken up, the CPU will be kept awake until the event has been
* handled, which is signaled by pm_policy_event_unregister() or moving event
* into the future using pm_policy_event_update().
*
* @param evt Event.
* @param cycle When the event will occur, in absolute time (cycles).
* @param uptime_ticks When the event will occur, in uptime ticks.
*
* @see pm_policy_event_unregister
* @see pm_policy_event_unregister()
*/
void pm_policy_event_register(struct pm_policy_event *evt, uint32_t cycle);
void pm_policy_event_register(struct pm_policy_event *evt, int64_t uptime_ticks);

/**
* @brief Update an event.
*
* This shortcut allows for moving the time an event will occur without the
* need for an unregister + register cycle.
*
* @param evt Event.
* @param cycle When the event will occur, in absolute time (cycles).
* @param uptime_ticks When the event will occur, in uptime ticks.
*
* @see pm_policy_event_register
*/
void pm_policy_event_update(struct pm_policy_event *evt, uint32_t cycle);
void pm_policy_event_update(struct pm_policy_event *evt, int64_t uptime_ticks);

/**
* @brief Unregister an event.
Expand Down Expand Up @@ -208,10 +208,14 @@ void pm_policy_device_power_lock_put(const struct device *dev);
/**
* @brief Returns the ticks until the next event
*
* If an event is registred, it will return the number of ticks until the next event as
* a positive or zero value. Otherwise it returns -1
If an event is registered, it will return the number of ticks until the next event; if the
* "next"/"oldest" registered event is in the past, it will return 0. Otherwise it returns -1.
*
* @retval >0 If next registered event is in the future
* @retval 0 If next registered event is now or in the past
* @retval -1 Otherwise
*/
int32_t pm_policy_next_event_ticks(void);
int64_t pm_policy_next_event_ticks(void);

#else
static inline void pm_policy_state_lock_get(enum pm_state state, uint8_t substate_id)
Expand Down Expand Up @@ -261,7 +265,7 @@ static inline void pm_policy_device_power_lock_put(const struct device *dev)
ARG_UNUSED(dev);
}

static inline int32_t pm_policy_next_event_ticks(void)
/* Stub used when PM policy events are compiled out (#else branch):
 * always reports "no registered event" by returning -1.
 */
static inline int64_t pm_policy_next_event_ticks(void)
{
return -1;
}
Expand Down
89 changes: 37 additions & 52 deletions subsys/pm/policy/policy_events.c
Original file line number Diff line number Diff line change
Expand Up @@ -20,81 +20,66 @@ static sys_slist_t events_list;
/** Pointer to Next Event. */
struct pm_policy_event *next_event;

/** @brief Update next event. */
static void update_next_event(uint32_t cyc)
static void update_next_event(void)
{
int64_t new_next_event_cyc = -1;
struct pm_policy_event *evt;

/* unset the next event pointer */
next_event = NULL;

SYS_SLIST_FOR_EACH_CONTAINER(&events_list, evt, node) {
uint64_t cyc_evt = evt->value_cyc;

/*
* cyc value is a 32-bit rolling counter:
*
* |---------------->-----------------------|
* 0 cyc UINT32_MAX
*
* Values from [0, cyc) are events happening later than
* [cyc, UINT32_MAX], so pad [0, cyc) with UINT32_MAX + 1 to do
* the comparison.
*/
if (cyc_evt < cyc) {
cyc_evt += (uint64_t)UINT32_MAX + 1U;
if (next_event == NULL) {
next_event = evt;
continue;
}

if ((new_next_event_cyc < 0) || (cyc_evt < new_next_event_cyc)) {
new_next_event_cyc = cyc_evt;
next_event = evt;
if (next_event->uptime_ticks <= evt->uptime_ticks) {
continue;
}

next_event = evt;
}
}

int32_t pm_policy_next_event_ticks(void)
int64_t pm_policy_next_event_ticks(void)
{
int32_t cyc_evt = -1;

if ((next_event) && (next_event->value_cyc > 0)) {
cyc_evt = next_event->value_cyc - k_cycle_get_32();
cyc_evt = MAX(0, cyc_evt);
BUILD_ASSERT(CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC >= CONFIG_SYS_CLOCK_TICKS_PER_SEC,
"HW Cycles per sec should be greater that ticks per sec");
return k_cyc_to_ticks_floor32(cyc_evt);
}
int64_t ticks = -1;

return -1;
}
K_SPINLOCK(&events_lock) {
if (next_event == NULL) {
K_SPINLOCK_BREAK;
}

void pm_policy_event_register(struct pm_policy_event *evt, uint32_t cycle)
{
k_spinlock_key_t key = k_spin_lock(&events_lock);
ticks = next_event->uptime_ticks - k_uptime_ticks();

evt->value_cyc = cycle;
sys_slist_append(&events_list, &evt->node);
update_next_event(k_cycle_get_32());
if (ticks < 0) {
ticks = 0;
}
}

k_spin_unlock(&events_lock, key);
return ticks;
}

void pm_policy_event_update(struct pm_policy_event *evt, uint32_t cycle)
void pm_policy_event_register(struct pm_policy_event *evt, int64_t uptime_ticks)
{
k_spinlock_key_t key = k_spin_lock(&events_lock);

evt->value_cyc = cycle;
update_next_event(k_cycle_get_32());
K_SPINLOCK(&events_lock) {
evt->uptime_ticks = uptime_ticks;
sys_slist_append(&events_list, &evt->node);
update_next_event();
}
}

k_spin_unlock(&events_lock, key);
/* Move an already-registered event to a new absolute time, given in
 * uptime ticks, without an unregister + register cycle. The event's
 * time is updated and the cached next_event pointer is recomputed,
 * all under the events spinlock so readers see a consistent state.
 */
void pm_policy_event_update(struct pm_policy_event *evt, int64_t uptime_ticks)
{
K_SPINLOCK(&events_lock) {
evt->uptime_ticks = uptime_ticks;
/* This event's time changed, so the earliest event may differ now. */
update_next_event();
}
}

void pm_policy_event_unregister(struct pm_policy_event *evt)
{
k_spinlock_key_t key = k_spin_lock(&events_lock);

(void)sys_slist_find_and_remove(&events_list, &evt->node);
update_next_event(k_cycle_get_32());

k_spin_unlock(&events_lock, key);
K_SPINLOCK(&events_lock) {
(void)sys_slist_find_and_remove(&events_list, &evt->node);
update_next_event();
}
}
26 changes: 13 additions & 13 deletions tests/subsys/pm/policy_api/src/main.c
Original file line number Diff line number Diff line change
Expand Up @@ -308,29 +308,29 @@ ZTEST(policy_api, test_pm_policy_events)
{
struct pm_policy_event evt1;
struct pm_policy_event evt2;
uint32_t now_cycle;
uint32_t evt1_1_cycle;
uint32_t evt1_2_cycle;
uint32_t evt2_cycle;
int64_t now_uptime_ticks;
int64_t evt1_1_uptime_ticks;
int64_t evt1_2_uptime_ticks;
int64_t evt2_uptime_ticks;

now_cycle = k_cycle_get_32();
evt1_1_cycle = now_cycle + k_ticks_to_cyc_floor32(100);
evt1_2_cycle = now_cycle + k_ticks_to_cyc_floor32(200);
evt2_cycle = now_cycle + k_ticks_to_cyc_floor32(2000);
now_uptime_ticks = k_uptime_ticks();
evt1_1_uptime_ticks = now_uptime_ticks + 100;
evt1_2_uptime_ticks = now_uptime_ticks + 200;
evt2_uptime_ticks = now_uptime_ticks + 2000;

zassert_equal(pm_policy_next_event_ticks(), -1);
pm_policy_event_register(&evt1, evt1_1_cycle);
pm_policy_event_register(&evt2, evt2_cycle);
pm_policy_event_register(&evt1, evt1_1_uptime_ticks);
pm_policy_event_register(&evt2, evt2_uptime_ticks);
zassert_within(pm_policy_next_event_ticks(), 100, 50);
pm_policy_event_unregister(&evt1);
zassert_within(pm_policy_next_event_ticks(), 2000, 50);
pm_policy_event_unregister(&evt2);
zassert_equal(pm_policy_next_event_ticks(), -1);
pm_policy_event_register(&evt2, evt2_cycle);
pm_policy_event_register(&evt2, evt2_uptime_ticks);
zassert_within(pm_policy_next_event_ticks(), 2000, 50);
pm_policy_event_register(&evt1, evt1_1_cycle);
pm_policy_event_register(&evt1, evt1_1_uptime_ticks);
zassert_within(pm_policy_next_event_ticks(), 100, 50);
pm_policy_event_update(&evt1, evt1_2_cycle);
pm_policy_event_update(&evt1, evt1_2_uptime_ticks);
zassert_within(pm_policy_next_event_ticks(), 200, 50);
pm_policy_event_unregister(&evt1);
pm_policy_event_unregister(&evt2);
Expand Down

0 comments on commit 59779eb

Please sign in to comment.