diff --git a/message_passing/safe_message_handlers/README.md b/message_passing/safe_message_handlers/README.md
index 7d727af3..274d9cdc 100644
--- a/message_passing/safe_message_handlers/README.md
+++ b/message_passing/safe_message_handlers/README.md
@@ -3,7 +3,6 @@
 This sample shows off important techniques for handling signals and updates, aka messages. In particular, it illustrates how message handlers can interleave or not be completed before the workflow completes, and how you can manage that.
 
 * Here, using workflow.wait_condition, signal and update handlers will only operate when the workflow is within a certain state--between cluster_started and cluster_shutdown.
-* You can run start_workflow with an initializer signal that you want to run before anything else other than the workflow's constructor. This pattern is known as "signal-with-start."
 * Message handlers can block and their actions can be interleaved with one another and with the main workflow. This can easily cause bugs, so you can use a lock to protect shared state from interleaved access.
 * An "Entity" workflow, i.e. a long-lived workflow, periodically "continues as new". It must do this to prevent its history from growing too large, and it passes its state to the next workflow. You can check `workflow.info().is_continue_as_new_suggested()` to see when it's time.
 * Most people want their message handlers to finish before the workflow run completes or continues as new. Use `await workflow.wait_condition(lambda: workflow.all_handlers_finished())` to achieve this.
diff --git a/message_passing/safe_message_handlers/activities.py b/message_passing/safe_message_handlers/activities.py
index 3a1c9cd2..da8a8be0 100644
--- a/message_passing/safe_message_handlers/activities.py
+++ b/message_passing/safe_message_handlers/activities.py
@@ -11,6 +11,16 @@ class AssignNodesToJobInput:
     job_name: str
 
 
+@dataclass
+class ClusterState:
+    node_ids: List[str]
+
+
+@activity.defn
+async def start_cluster() -> ClusterState:
+    return ClusterState(node_ids=[f"node-{i}" for i in range(25)])
+
+
 @activity.defn
 async def assign_nodes_to_job(input: AssignNodesToJobInput) -> None:
     print(f"Assigning nodes {input.nodes} to job {input.job_name}")
@@ -37,7 +47,7 @@ class FindBadNodesInput:
 @activity.defn
 async def find_bad_nodes(input: FindBadNodesInput) -> Set[str]:
     await asyncio.sleep(0.1)
-    bad_nodes = set([n for n in input.nodes_to_check if int(n) % 5 == 0])
+    bad_nodes = set([id for id in input.nodes_to_check if hash(id) % 5 == 0])
     if bad_nodes:
         print(f"Found bad nodes: {bad_nodes}")
     else:
diff --git a/message_passing/safe_message_handlers/starter.py b/message_passing/safe_message_handlers/starter.py
index cf712163..7ffe13d9 100644
--- a/message_passing/safe_message_handlers/starter.py
+++ b/message_passing/safe_message_handlers/starter.py
@@ -16,8 +16,10 @@
 
 
 async def do_cluster_lifecycle(wf: WorkflowHandle, delay_seconds: Optional[int] = None):
-
-    await wf.signal(ClusterManagerWorkflow.start_cluster)
+    cluster_status = await wf.execute_update(
+        ClusterManagerWorkflow.wait_until_cluster_started
+    )
+    print(f"Cluster started with {len(cluster_status.nodes)} nodes")
 
     print("Assigning jobs to nodes...")
     allocation_updates = []
diff --git a/message_passing/safe_message_handlers/worker.py b/message_passing/safe_message_handlers/worker.py
index a900c7bb..34e71290 100644
--- a/message_passing/safe_message_handlers/worker.py
+++ b/message_passing/safe_message_handlers/worker.py
@@ -8,6 +8,7 @@
     ClusterManagerWorkflow,
     assign_nodes_to_job,
     find_bad_nodes,
+    start_cluster,
     unassign_nodes_for_job,
 )
 
@@ -21,7 +22,12 @@ async def main():
         client,
         task_queue="safe-message-handlers-task-queue",
         workflows=[ClusterManagerWorkflow],
-        activities=[assign_nodes_to_job, unassign_nodes_for_job, find_bad_nodes],
+        activities=[
+            assign_nodes_to_job,
+            unassign_nodes_for_job,
+            find_bad_nodes,
+            start_cluster,
+        ],
     ):
         logging.info("ClusterManagerWorkflow worker started, ctrl+c to exit")
         await interrupt_event.wait()
diff --git a/message_passing/safe_message_handlers/workflow.py b/message_passing/safe_message_handlers/workflow.py
index 338a3065..ca549a61 100644
--- a/message_passing/safe_message_handlers/workflow.py
+++ b/message_passing/safe_message_handlers/workflow.py
@@ -14,6 +14,7 @@
     UnassignNodesForJobInput,
     assign_nodes_to_job,
     find_bad_nodes,
+    start_cluster,
     unassign_nodes_for_job,
 )
 
@@ -65,18 +66,27 @@ class ClusterManagerAssignNodesToJobResult:
 # These updates must run atomically.
 @workflow.defn
 class ClusterManagerWorkflow:
-    def __init__(self) -> None:
-        self.state = ClusterManagerState()
+    @workflow.init
+    def __init__(self, input: ClusterManagerInput) -> None:
+        if input.state:
+            self.state = input.state
+        else:
+            self.state = ClusterManagerState()
+
+        if input.test_continue_as_new:
+            self.max_history_length: Optional[int] = 120
+            self.sleep_interval_seconds = 1
+        else:
+            self.max_history_length = None
+            self.sleep_interval_seconds = 600
+
         # Protects workflow state from interleaved access
         self.nodes_lock = asyncio.Lock()
-        self.max_history_length: Optional[int] = None
-        self.sleep_interval_seconds: int = 600
 
-    @workflow.signal
-    async def start_cluster(self) -> None:
-        self.state.cluster_started = True
-        self.state.nodes = {str(k): None for k in range(25)}
-        workflow.logger.info("Cluster started")
+    @workflow.update
+    async def wait_until_cluster_started(self) -> ClusterManagerState:
+        await workflow.wait_condition(lambda: self.state.cluster_started)
+        return self.state
 
     @workflow.signal
     async def shutdown_cluster(self) -> None:
@@ -135,7 +145,7 @@ async def _assign_nodes_to_job(
             self.state.jobs_assigned.add(job_name)
 
     # Even though it returns nothing, this is an update because the client may want to track it, for example
-    # to wait for nodes to be unassignd before reassigning them.
+    # to wait for nodes to be unassigned before reassigning them.
     @workflow.update
     async def delete_job(self, input: ClusterManagerDeleteJobInput) -> None:
         await workflow.wait_condition(lambda: self.state.cluster_started)
@@ -202,30 +212,15 @@ async def perform_health_checks(self) -> None:
                     f"Health check failed with error {type(e).__name__}:{e}"
                 )
 
-    # The cluster manager is a long-running "entity" workflow so we need to periodically checkpoint its state and
-    # continue-as-new.
-    def init(self, input: ClusterManagerInput) -> None:
-        if input.state:
-            self.state = input.state
-        if input.test_continue_as_new:
-            self.max_history_length = 120
-            self.sleep_interval_seconds = 1
-
-    def should_continue_as_new(self) -> bool:
-        if workflow.info().is_continue_as_new_suggested():
-            return True
-        # This is just for ease-of-testing. In production, we trust temporal to tell us when to continue as new.
-        if (
-            self.max_history_length
-            and workflow.info().get_current_history_length() > self.max_history_length
-        ):
-            return True
-        return False
-
     @workflow.run
     async def run(self, input: ClusterManagerInput) -> ClusterManagerResult:
-        self.init(input)
-        await workflow.wait_condition(lambda: self.state.cluster_started)
+        cluster_state = await workflow.execute_activity(
+            start_cluster, schedule_to_close_timeout=timedelta(seconds=10)
+        )
+        self.state.nodes = {k: None for k in cluster_state.node_ids}
+        self.state.cluster_started = True
+        workflow.logger.info("Cluster started")
+
         # Perform health checks at intervals.
         while True:
             await self.perform_health_checks()
@@ -239,6 +234,8 @@ async def run(self, input: ClusterManagerInput) -> ClusterManagerResult:
                 pass
             if self.state.cluster_shutdown:
                 break
+            # The cluster manager is a long-running "entity" workflow so we need to periodically checkpoint its state and
+            # continue-as-new.
             if self.should_continue_as_new():
                 # We don't want to leave any job assignment or deletion handlers half-finished when we continue as new.
                 await workflow.wait_condition(lambda: workflow.all_handlers_finished())
@@ -255,3 +252,14 @@ async def run(self, input: ClusterManagerInput) -> ClusterManagerResult:
             len(self.get_assigned_nodes()),
             len(self.get_bad_nodes()),
         )
+
+    def should_continue_as_new(self) -> bool:
+        if workflow.info().is_continue_as_new_suggested():
+            return True
+        # This is just for ease-of-testing. In production, we trust temporal to tell us when to continue as new.
+        if (
+            self.max_history_length
+            and workflow.info().get_current_history_length() > self.max_history_length
+        ):
+            return True
+        return False
diff --git a/tests/message_passing/safe_message_handlers/workflow_test.py b/tests/message_passing/safe_message_handlers/workflow_test.py
index 92345be7..8cd303d5 100644
--- a/tests/message_passing/safe_message_handlers/workflow_test.py
+++ b/tests/message_passing/safe_message_handlers/workflow_test.py
@@ -1,5 +1,6 @@
 import asyncio
 import uuid
+from typing import Callable, Sequence
 
 import pytest
 from temporalio.client import Client, WorkflowUpdateFailedError
@@ -10,6 +11,7 @@
 from message_passing.safe_message_handlers.activities import (
     assign_nodes_to_job,
     find_bad_nodes,
+    start_cluster,
     unassign_nodes_for_job,
 )
 from message_passing.safe_message_handlers.workflow import (
@@ -19,6 +21,13 @@
     ClusterManagerWorkflow,
 )
 
+ACTIVITIES: Sequence[Callable] = [
+    assign_nodes_to_job,
+    unassign_nodes_for_job,
+    find_bad_nodes,
+    start_cluster,
+]
+
 
 async def test_safe_message_handlers(client: Client, env: WorkflowEnvironment):
     if env.supports_time_skipping:
@@ -30,7 +39,7 @@ async def test_safe_message_handlers(client: Client, env: WorkflowEnvironment):
         client,
         task_queue=task_queue,
         workflows=[ClusterManagerWorkflow],
-        activities=[assign_nodes_to_job, unassign_nodes_for_job, find_bad_nodes],
+        activities=ACTIVITIES,
     ):
         cluster_manager_handle = await client.start_workflow(
             ClusterManagerWorkflow.run,
@@ -38,7 +47,9 @@
             id=f"ClusterManagerWorkflow-{uuid.uuid4()}",
             task_queue=task_queue,
         )
-        await cluster_manager_handle.signal(ClusterManagerWorkflow.start_cluster)
+        await cluster_manager_handle.execute_update(
+            ClusterManagerWorkflow.wait_until_cluster_started
+        )
 
         allocation_updates = []
         for i in range(6):
@@ -82,7 +93,7 @@ async def test_update_idempotency(client: Client, env: WorkflowEnvironment):
         client,
         task_queue=task_queue,
         workflows=[ClusterManagerWorkflow],
-        activities=[assign_nodes_to_job, unassign_nodes_for_job, find_bad_nodes],
+        activities=ACTIVITIES,
     ):
         cluster_manager_handle = await client.start_workflow(
             ClusterManagerWorkflow.run,
@@ -91,7 +102,9 @@
             task_queue=task_queue,
         )
 
-        await cluster_manager_handle.signal(ClusterManagerWorkflow.start_cluster)
+        await cluster_manager_handle.execute_update(
+            ClusterManagerWorkflow.wait_until_cluster_started
+        )
 
         result_1 = await cluster_manager_handle.execute_update(
             ClusterManagerWorkflow.assign_nodes_to_job,
@@ -121,7 +134,7 @@ async def test_update_failure(client: Client, env: WorkflowEnvironment):
         client,
         task_queue=task_queue,
         workflows=[ClusterManagerWorkflow],
-        activities=[assign_nodes_to_job, unassign_nodes_for_job, find_bad_nodes],
+        activities=ACTIVITIES,
     ):
         cluster_manager_handle = await client.start_workflow(
             ClusterManagerWorkflow.run,
@@ -130,7 +143,9 @@
             task_queue=task_queue,
         )
 
-        await cluster_manager_handle.signal(ClusterManagerWorkflow.start_cluster)
+        await cluster_manager_handle.execute_update(
+            ClusterManagerWorkflow.wait_until_cluster_started
+        )
 
         await cluster_manager_handle.execute_update(
             ClusterManagerWorkflow.assign_nodes_to_job,
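
Below is a minimal usage sketch (not part of the patch) showing how a caller drives the startup flow this diff introduces: start ClusterManagerWorkflow, then block on the wait_until_cluster_started update, which returns the ClusterManagerState once the workflow's start_cluster activity has completed. The task queue matches worker.py above; the server address, the workflow ID format, and the assumption that ClusterManagerInput's fields all have defaults are illustrative, not taken from the diff.

# Usage sketch only -- not part of the diff. Assumes a local Temporal server at
# localhost:7233 and that ClusterManagerInput's fields all have default values.
import asyncio
import uuid

from temporalio.client import Client

from message_passing.safe_message_handlers.workflow import (
    ClusterManagerInput,
    ClusterManagerWorkflow,
)


async def main() -> None:
    client = await Client.connect("localhost:7233")
    # Start the long-running "entity" workflow; the task queue matches worker.py.
    handle = await client.start_workflow(
        ClusterManagerWorkflow.run,
        ClusterManagerInput(),
        id=f"ClusterManagerWorkflow-{uuid.uuid4()}",
        task_queue="safe-message-handlers-task-queue",
    )
    # The update blocks until the workflow has run the start_cluster activity
    # and set cluster_started, then returns the current ClusterManagerState.
    state = await handle.execute_update(
        ClusterManagerWorkflow.wait_until_cluster_started
    )
    print(f"Cluster started with {len(state.nodes)} nodes")


if __name__ == "__main__":
    asyncio.run(main())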