diff --git a/Cargo.lock b/Cargo.lock index 1afa1154..25b8e0c2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -23,7 +23,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" dependencies = [ - "crypto-common 0.1.6", + "crypto-common", "generic-array", ] @@ -328,6 +328,19 @@ dependencies = [ "syn 2.0.66", ] +[[package]] +name = "asynchronous-codec" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4057f2c32adbb2fc158e22fb38433c8e9bbf76b75a4732c7c0cbaf695fb65568" +dependencies = [ + "bytes", + "futures-sink", + "futures-util", + "memchr", + "pin-project-lite", +] + [[package]] name = "asynchronous-codec" version = "0.7.0" @@ -418,6 +431,12 @@ dependencies = [ "serde", ] +[[package]] +name = "bimap" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "230c5f1ca6a325a32553f8640d31ac9b49f2411e901e427570154868b46da4f7" + [[package]] name = "bincode" version = "1.3.3" @@ -466,15 +485,6 @@ dependencies = [ "generic-array", ] -[[package]] -name = "block-buffer" -version = "0.11.0-pre.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ded684142010808eb980d9974ef794da2bcf97d13396143b1515e9f0fb4a10e" -dependencies = [ - "crypto-common 0.2.0-pre.5", -] - [[package]] name = "bs58" version = "0.5.1" @@ -542,16 +552,19 @@ checksum = "41c270e7540d725e65ac7f1b212ac8ce349719624d7bcff99f8e2e488e8cf03f" [[package]] name = "ceylon" -version = "0.13.5" +version = "0.14.0" dependencies = [ "async-trait", "env_logger", + "futures", "log", "sangedama", "serde", "serde_json", "thiserror", "tokio", + "tracing", + "tracing-subscriber", "uniffi", "uuid", ] @@ -606,7 +619,7 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" dependencies = [ - "crypto-common 0.1.6", + "crypto-common", "inout", "zeroize", ] @@ -632,12 +645,6 @@ version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" -[[package]] -name = "const-oid" -version = "0.10.0-pre.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7e3352a27098ba6b09546e5f13b15165e6a88b5c2723afecb3ea9576b27e3ea" - [[package]] name = "core-foundation" version = "0.9.4" @@ -689,17 +696,6 @@ dependencies = [ "typenum", ] -[[package]] -name = "crypto-common" -version = "0.2.0-pre.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7aa2ec04f5120b830272a481e8d9d8ba4dda140d2cda59b0f1110d5eb93c38e" -dependencies = [ - "getrandom", - "hybrid-array", - "rand_core", -] - [[package]] name = "ctr" version = "0.9.2" @@ -769,7 +765,7 @@ version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0" dependencies = [ - "const-oid 0.9.6", + "const-oid", "zeroize", ] @@ -826,21 +822,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer 0.10.4", - "crypto-common 0.1.6", + "crypto-common", "subtle", ] -[[package]] -name = "digest" -version = "0.11.0-pre.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"065d93ead7c220b85d5b4be4795d8398eac4ff68b5ee63895de0a3c1fb6edf25" -dependencies = [ - "block-buffer 0.11.0-pre.5", - "const-oid 0.10.0-pre.2", - "crypto-common 0.2.0-pre.5", -] - [[package]] name = "displaydoc" version = "0.2.4" @@ -884,7 +869,7 @@ dependencies = [ "ed25519", "rand_core", "serde", - "sha2 0.10.8", + "sha2", "subtle", "zeroize", ] @@ -1295,12 +1280,6 @@ version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" -[[package]] -name = "hex" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" - [[package]] name = "hex_fmt" version = "0.3.0" @@ -1456,15 +1435,6 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" -[[package]] -name = "hybrid-array" -version = "0.2.0-rc.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d306b679262030ad8813a82d4915fc04efff97776e4db7f8eb5137039d56400" -dependencies = [ - "typenum", -] - [[package]] name = "hyper" version = "0.14.28" @@ -1728,16 +1698,21 @@ dependencies = [ "getrandom", "instant", "libp2p-allow-block-list", + "libp2p-autonat", "libp2p-connection-limits", "libp2p-core", "libp2p-dns", "libp2p-gossipsub", + "libp2p-identify", "libp2p-identity", "libp2p-mdns", "libp2p-metrics", "libp2p-noise", + "libp2p-ping", "libp2p-quic", "libp2p-relay", + "libp2p-rendezvous", + "libp2p-request-response", "libp2p-swarm", "libp2p-tcp", "libp2p-tls 0.3.0", @@ -1762,6 +1737,27 @@ dependencies = [ "void", ] +[[package]] +name = "libp2p-autonat" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d95151726170e41b591735bf95c42b888fe4aa14f65216a9fbf0edcc04510586" +dependencies = [ + "async-trait", + "asynchronous-codec 0.6.2", + "futures", + "futures-timer", + "instant", + "libp2p-core", + "libp2p-identity", + "libp2p-request-response", + "libp2p-swarm", + "quick-protobuf", + "quick-protobuf-codec 0.2.0", + "rand", + "tracing", +] + [[package]] name = "libp2p-connection-limits" version = "0.3.1" @@ -1824,7 +1820,7 @@ version = "0.46.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d665144a616dadebdc5fff186b1233488cdcd8bfb1223218ff084b6d052c94f7" dependencies = [ - "asynchronous-codec", + "asynchronous-codec 0.7.0", "base64 0.21.7", "byteorder", "bytes", @@ -1840,11 +1836,34 @@ dependencies = [ "libp2p-swarm", "prometheus-client", "quick-protobuf", - "quick-protobuf-codec", + "quick-protobuf-codec 0.3.1", "rand", "regex", - "sha2 0.10.8", + "sha2", + "smallvec", + "tracing", + "void", +] + +[[package]] +name = "libp2p-identify" +version = "0.44.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5d635ebea5ca0c3c3e77d414ae9b67eccf2a822be06091b9c1a0d13029a1e2f" +dependencies = [ + "asynchronous-codec 0.7.0", + "either", + "futures", + "futures-bounded", + "futures-timer", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", + "lru", + "quick-protobuf", + "quick-protobuf-codec 0.3.1", "smallvec", + "thiserror", "tracing", "void", ] @@ -1861,7 +1880,7 @@ dependencies = [ "multihash", "quick-protobuf", "rand", - "sha2 0.10.8", + "sha2", "thiserror", "tracing", "zeroize", @@ -1898,7 +1917,9 @@ dependencies = [ "instant", "libp2p-core", "libp2p-gossipsub", + "libp2p-identify", "libp2p-identity", + 
"libp2p-ping", "libp2p-relay", "libp2p-swarm", "pin-project", @@ -1911,7 +1932,7 @@ version = "0.44.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ecd0545ce077f6ea5434bcb76e8d0fe942693b4380aaad0d34a358c2bd05793" dependencies = [ - "asynchronous-codec", + "asynchronous-codec 0.7.0", "bytes", "curve25519-dalek", "futures", @@ -1922,7 +1943,7 @@ dependencies = [ "once_cell", "quick-protobuf", "rand", - "sha2 0.10.8", + "sha2", "snow", "static_assertions", "thiserror", @@ -1931,6 +1952,24 @@ dependencies = [ "zeroize", ] +[[package]] +name = "libp2p-ping" +version = "0.44.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1de5a6cf64fba7f7e8f2102711c9c6c043a8e56b86db8cd306492c517da3fb3" +dependencies = [ + "either", + "futures", + "futures-timer", + "instant", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", + "rand", + "tracing", + "void", +] + [[package]] name = "libp2p-quic" version = "0.10.3" @@ -1961,7 +2000,7 @@ version = "0.17.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4d1c667cfabf3dd675c8e3cea63b7b98434ecf51721b7894cbb01d29983a6a9b" dependencies = [ - "asynchronous-codec", + "asynchronous-codec 0.7.0", "bytes", "either", "futures", @@ -1971,7 +2010,7 @@ dependencies = [ "libp2p-identity", "libp2p-swarm", "quick-protobuf", - "quick-protobuf-codec", + "quick-protobuf-codec 0.3.1", "rand", "static_assertions", "thiserror", @@ -1980,6 +2019,50 @@ dependencies = [ "web-time", ] +[[package]] +name = "libp2p-rendezvous" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "168a444a16f569771bcb48aa081a32724079156e64a730dd900276391ccb6385" +dependencies = [ + "async-trait", + "asynchronous-codec 0.6.2", + "bimap", + "futures", + "futures-timer", + "instant", + "libp2p-core", + "libp2p-identity", + "libp2p-request-response", + "libp2p-swarm", + "quick-protobuf", + "quick-protobuf-codec 0.2.0", + "rand", + "thiserror", + "tracing", + "void", +] + +[[package]] +name = "libp2p-request-response" +version = "0.26.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c314fe28368da5e3a262553fb0ad575c1c8934c461e10de10265551478163836" +dependencies = [ + "async-trait", + "futures", + "futures-bounded", + "futures-timer", + "instant", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", + "rand", + "smallvec", + "tracing", + "void", +] + [[package]] name = "libp2p-swarm" version = "0.44.2" @@ -2746,13 +2829,26 @@ dependencies = [ "byteorder", ] +[[package]] +name = "quick-protobuf-codec" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8ededb1cd78531627244d51dd0c7139fbe736c7d57af0092a76f0ffb2f56e98" +dependencies = [ + "asynchronous-codec 0.6.2", + "bytes", + "quick-protobuf", + "thiserror", + "unsigned-varint 0.7.2", +] + [[package]] name = "quick-protobuf-codec" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "15a0580ab32b169745d7a39db2ba969226ca16738931be152a3209b409de2474" dependencies = [ - "asynchronous-codec", + "asynchronous-codec 0.7.0", "bytes", "quick-protobuf", "thiserror", @@ -3131,19 +3227,6 @@ version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" -[[package]] -name = "sangathika" -version = "0.1.0" -dependencies = [ - "hex", - "libp2p", - "serde", - "serde_json", - "sha2 0.11.0-pre.3", - "time", - "tokio", -] 
- [[package]] name = "sangedama" version = "0.1.1" @@ -3161,10 +3244,11 @@ dependencies = [ "reqwest", "serde", "serde_json", - "sha2 0.10.8", + "sha2", "tokio", "tracing", "tracing-subscriber", + "uuid", ] [[package]] @@ -3311,17 +3395,6 @@ dependencies = [ "digest 0.10.7", ] -[[package]] -name = "sha2" -version = "0.11.0-pre.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f33549bf3064b62478926aa89cbfc7c109aab66ae8f0d5d2ef839e482cc30d6" -dependencies = [ - "cfg-if", - "cpufeatures", - "digest 0.11.0-pre.8", -] - [[package]] name = "sharded-slab" version = "0.1.7" @@ -3389,7 +3462,7 @@ dependencies = [ "rand_core", "ring 0.17.8", "rustc_version", - "sha2 0.10.8", + "sha2", "subtle", ] @@ -3955,7 +4028,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" dependencies = [ - "crypto-common 0.1.6", + "crypto-common", "subtle", ] @@ -3964,6 +4037,10 @@ name = "unsigned-varint" version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6889a77d49f1f013504cec6bf97a2c730394adedaeb1deb5ea08949a50541105" +dependencies = [ + "asynchronous-codec 0.6.2", + "bytes", +] [[package]] name = "unsigned-varint" @@ -4000,18 +4077,11 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" -[[package]] -name = "utils-random-names" -version = "0.1.0" -dependencies = [ - "rand", -] - [[package]] name = "uuid" -version = "1.8.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0" +checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" dependencies = [ "getrandom", ] diff --git a/Cargo.toml b/Cargo.toml index 72554961..4689d6d9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace] members = [ "libs/sangedama", - "libs/utils/utils-random-names", - "bindings/ceylon", "libs/sangathika", ] + "bindings/ceylon" ] diff --git a/README.md b/README.md index 500c17db..0da76c56 100644 --- a/README.md +++ b/README.md @@ -2,3 +2,13 @@ [![PyPI - Version](https://img.shields.io/pypi/v/ceylon.svg)](https://pypi.org/project/ceylon) [![PyPI - Python Version](https://img.shields.io/pypi/pyversions/ceylon.svg)](https://pypi.org/project/ceylon) + + +# Agents + +## Admin +- Manage other agents + +## Workers +- Execute tasks received from the admin +- Send an update to the admin after finishing each task diff --git a/bindings/ceylon/Cargo.toml b/bindings/ceylon/Cargo.toml index 16789d73..8437f006 100644 --- a/bindings/ceylon/Cargo.toml +++ b/bindings/ceylon/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ceylon" -version = "0.13.5" +version = "0.14.0" edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html @@ -19,6 +19,9 @@ async-trait = "0.1.80" serde = { version = "1.0.203", features = ["derive"] } log = "0.4.21" env_logger = "0.11.3" +tracing-subscriber = "0.3.18" +tracing = "0.1.40" +futures = { version = "0.3.30", default-features = true, features = ["default"] } [build-dependencies] uniffi = { version = "0.28.0", features = ["build"] } diff --git a/bindings/ceylon/ceylon/__init__.py b/bindings/ceylon/ceylon/__init__.py index 536ceb0e..01a40635 100644 --- a/bindings/ceylon/ceylon/__init__.py +++ b/bindings/ceylon/ceylon/__init__.py @@ -1 +1 @@ 
-from .runner import AgentRunner +# from .runner import AgentRunner diff --git a/bindings/ceylon/ceylon/llm/llm_agent.py b/bindings/ceylon/ceylon/llm/llm_agent.py index 82a86b80..657db1f5 100644 --- a/bindings/ceylon/ceylon/llm/llm_agent.py +++ b/bindings/ceylon/ceylon/llm/llm_agent.py @@ -4,12 +4,12 @@ from collections import deque from typing import List -import networkx as nx from langchain_core.tools import StructuredTool, BaseTool from pydantic.dataclasses import dataclass -from ceylon.ceylon import AgentCore, Processor, MessageHandler, AgentDefinition +from ceylon.ceylon import AgentCore, Processor, MessageHandler, AgentDefinition, uniffi_set_event_loop from ceylon.llm.llm_caller import process_agent_request +from ceylon.llm.task_manager import TaskManager from ceylon.runner import RunnerInput @@ -22,13 +22,13 @@ class LLMAgentResponse: class LLMAgent(AgentCore, MessageHandler, Processor): tools: list[StructuredTool] - network_graph: nx.DiGraph - network_graph_original: nx.DiGraph queue: deque original_goal = None agent_replies: List[LLMAgentResponse] = [] + task_manager = TaskManager() + def __init__(self, name, position, instructions, responsibilities, llm, tools: list[BaseTool] = None): super().__init__(definition=AgentDefinition( name=name, @@ -38,8 +38,6 @@ def __init__(self, name, position, instructions, responsibilities, llm, tools: l ), on_message=self, processor=self) self.llm = llm self.tools = tools - # Create a directed graph to represent the workflow - self.network_graph = nx.DiGraph() # Initialize the queue and executed agents self.queue = deque() @@ -49,59 +47,35 @@ async def on_message(self, agent_id, data, time): dt: LLMAgentResponse = pickle.loads(data) print(f"{definition.name} Received message from = '{dt.agent_name}") - next_agent = self.get_next_agent() - if next_agent == dt.agent_name: - self.agent_replies.append(dt) - await self.update_status(dt.agent_name) - - next_agent = self.get_next_agent() - if next_agent == definition.name: - dependencies = list(self.network_graph_original.predecessors(next_agent)) - print("Dependencies are:", dependencies, "for", next_agent) - - only_dependencies = {dt.agent_name: dt for dt in self.agent_replies if dt.agent_name in dependencies} - - if len(only_dependencies) == len(dependencies): - print("Executing", definition.name) - await self.execute(self.original_goal) - - await self.execute({ - "original_request": self.original_goal, - **only_dependencies, - dt.agent_name: dt.response - }) + # next_agent = self.get_next_agent() + # if next_agent == dt.agent_name: + # self.agent_replies.append(dt) + # await self.update_status(dt.agent_name) + # + # next_agent = self.get_next_agent() + # if next_agent == definition.name: + # dependencies = list(self.network_graph_original.predecessors(next_agent)) + # print("Dependencies are:", dependencies, "for", next_agent) + # + + # only_dependencies = {dt.agent_name: dt for dt in self.agent_replies if dt.agent_name in dependencies} + # + # if len(only_dependencies) == len(dependencies): + # print("Executing", definition.name) + # await self.execute(self.original_goal) + # + # await self.execute({ + # "original_request": self.original_goal, + # **only_dependencies, + # dt.agent_name: dt.response + # }) async def run(self, inputs): + print(" Running LLMAgent") inputs: RunnerInput = pickle.loads(inputs) - self._initialize_graph(inputs.network) - self.original_goal = inputs.request - await self.execute(inputs.request) - - - await self.stop() - - def _initialize_graph(self, network): - # Add nodes 
and edges based on the agents and their dependencies - for agent, dependencies in network.items(): - print(agent) - self.network_graph.add_node(agent) - for dependency in dependencies: - self.network_graph.add_edge(dependency, agent) - - self.network_graph_original = self.network_graph.copy() - - # Initialize the queue with nodes that have no dependencies (indegree 0) - self.queue.extend([node for node in self.network_graph if self.network_graph.in_degree(node) == 0]) - - - - def get_next_agent(self): - if not self.queue: - print("No more agents to execute.") - return None - return self.queue[0] + print(inputs.request) async def execute(self, input): definition = await self.definition() @@ -123,24 +97,5 @@ async def execute(self, input): else: print("Not executing", definition.name, "as it is not the next agent in the queue.") - async def update_status(self, agent): - if agent not in self.queue: - print(f"Agent {agent} is not ready to execute or has already been executed.") - return - - self.queue.remove(agent) - print(f"Executing {agent}") - - # Remove the current agent and update the graph - for successor in list(self.network_graph.successors(agent)): - self.network_graph.remove_edge(agent, successor) - if self.network_graph.in_degree(successor) == 0: - self.queue.append(successor) - self.network_graph.remove_node(agent) - - if not self.network_graph.nodes: - print("Workflow executed successfully.") - await self.stop() - elif not self.queue: - print("Cycle detected in the workflow!") - + async def start(self, topic: "str", url: "str", inputs: "bytes") -> None: + return await super().start(topic, url, inputs) diff --git a/bindings/ceylon/ceylon/llm/task_manager.py b/bindings/ceylon/ceylon/llm/task_manager.py new file mode 100644 index 00000000..6b003610 --- /dev/null +++ b/bindings/ceylon/ceylon/llm/task_manager.py @@ -0,0 +1,107 @@ +import enum +from typing import List, Tuple + +from pydantic import dataclasses + + +class TaskStatus(enum.Enum): + PENDING = "PENDING" + RUNNING = "RUNNING" + COMPLETED = "COMPLETED" + FAILED = "FAILED" + + +@dataclasses.dataclass +class Task: + name: str + dependencies: List[str] + status: TaskStatus = TaskStatus.PENDING + + +class TaskManager: + def __init__(self, tasks: List[Task] = None): + self.tasks = tasks or [] + + def add_tasks(self, tasks: List[Task]): + self.tasks.extend(tasks) + + def add_dependencies(self, task_name: str, dependencies: List[str]): + task = next((task for task in self.tasks if task.name == task_name), None) + if task: + task.dependencies.extend(dependencies) + + def update_status(self, task_name: str, status: TaskStatus) -> bool: + task = next((task for task in self.tasks if task.name == task_name), None) + if task: + if status == TaskStatus.RUNNING or status == TaskStatus.COMPLETED: + can_start, required_tasks = self.can_start_with_required_tasks(task_name) + if not can_start: + print( + f"Cannot update task '{task_name}' to '{status.name}'" + f" because the following tasks need to be completed first: {required_tasks}") + return False + task.status = status + return True + return False + + def find_next_task(self) -> List[Task]: + tasks: List[Task] = self.tasks + # Create a dictionary to quickly access task status by name + task_status = {task.name: task.status for task in tasks} + + # List to hold the names of tasks that can be started next + next_tasks = [] + + # Iterate through each task to find eligible ones + for task in tasks: + if task.status == TaskStatus.PENDING: + # Check if all dependencies are completed + if 
all(task_status[dep] == TaskStatus.COMPLETED for dep in task.dependencies): + next_tasks.append(task) + + return next_tasks + + def can_start_with_required_tasks(self, task_name: str) -> Tuple[bool, List[str]]: + task = next((task for task in self.tasks if task.name == task_name), None) + if not task: + return False, [] + + required_tasks = self._get_pending_dependencies(task) + can_start = len(required_tasks) == 0 + + return can_start, required_tasks + + def _get_pending_dependencies(self, task: Task) -> List[str]: + pending_dependencies = [] + + for dependency_name in task.dependencies: + dependency = next((t for t in self.tasks if t.name == dependency_name), None) + if dependency and dependency.status != TaskStatus.COMPLETED: + pending_dependencies.append(dependency_name) + pending_dependencies.extend(self._get_pending_dependencies(dependency)) + + return list(set(pending_dependencies)) + + +if __name__ == '__main__': + tasks = [ + Task(name="task1", dependencies=["task2", "task3"]), + Task(name="task2", dependencies=["task4"]), + Task(name="task3", dependencies=["task4"]), + Task(name="task4", dependencies=[]), + Task(name="task9", dependencies=[]), + Task(name="task5", dependencies=["task3"]), + Task(name="task6", dependencies=["task3"]) + ] + + task_manager = TaskManager(tasks) + print(task_manager.find_next_task()) + + print(task_manager.can_start_with_required_tasks("task1")) + + task_manager.update_status("task4", TaskStatus.COMPLETED) + print(task_manager.can_start_with_required_tasks("task1")) + task_manager.update_status("task2", TaskStatus.COMPLETED) + print(task_manager.can_start_with_required_tasks("task1")) + task_manager.update_status("task3", TaskStatus.COMPLETED) + print(task_manager.can_start_with_required_tasks("task1")) \ No newline at end of file diff --git a/bindings/ceylon/ceylon/runner.py b/bindings/ceylon/ceylon/runner.py index 3153fcda..9f58fc2c 100644 --- a/bindings/ceylon/ceylon/runner.py +++ b/bindings/ceylon/ceylon/runner.py @@ -1,6 +1,6 @@ import asyncio import pickle -from typing import List, Dict +from typing import List, Dict, Any from pydantic import BaseModel @@ -8,13 +8,7 @@ from ceylon.ceylon.ceylon import uniffi_set_event_loop -class RunnerInput(BaseModel): - request: dict - agents: List[AgentDefinition] - network: Dict[str, List[str]] - class Config: - arbitrary_types_allowed = True class AgentRunner: diff --git a/bindings/ceylon/ceylon/task/__init__.py b/bindings/ceylon/ceylon/task/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/bindings/ceylon/ceylon/task/task.py b/bindings/ceylon/ceylon/task/task.py new file mode 100644 index 00000000..e69de29b diff --git a/bindings/ceylon/ceylon/workspace/__init__.py b/bindings/ceylon/ceylon/workspace/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/bindings/ceylon/ceylon/workspace/admin.py b/bindings/ceylon/ceylon/workspace/admin.py new file mode 100644 index 00000000..653915e2 --- /dev/null +++ b/bindings/ceylon/ceylon/workspace/admin.py @@ -0,0 +1,31 @@ +import asyncio +import pickle + +from ceylon.ceylon import AdminAgent, AdminAgentConfig, Processor, MessageHandler, uniffi_set_event_loop, EventHandler +from ceylon.workspace.runner import RunnerInput + + +class Admin(AdminAgent, Processor, MessageHandler, EventHandler): + + def __init__(self, name="admin", port=8888): + print("Admin initialized") + super().__init__(config=AdminAgentConfig(name=name, port=port), processor=self, on_message=self, on_event=self) + + async def run(self, inputs: "bytes"): + pass + + # 
+ async def on_message(self, agent_id: "str", data: "bytes", time: "int"): + print(f"Admin on_message {self.details().name}", agent_id, data, time) + + async def run_admin(self, inputs, workers): + uniffi_set_event_loop(asyncio.get_event_loop()) + runner_input = RunnerInput(request=inputs, agents=[], network={}) + await self.start(pickle.dumps(runner_input), workers) + + # + async def execute_task(self, input): + pass + + async def on_agent_connected(self, topic: "str", agent_id: "str"): + print("Agent connected", agent_id, topic) diff --git a/bindings/ceylon/ceylon/workspace/message.py b/bindings/ceylon/ceylon/workspace/message.py new file mode 100644 index 00000000..46a5b8fe --- /dev/null +++ b/bindings/ceylon/ceylon/workspace/message.py @@ -0,0 +1,13 @@ +from pydantic.dataclasses import dataclass + + +@dataclass +class AdminRequest: + name: str + message: str + + +@dataclass +class WorkerResponse: + name: str + message: str diff --git a/bindings/ceylon/ceylon/workspace/runner.py b/bindings/ceylon/ceylon/workspace/runner.py new file mode 100644 index 00000000..6db6748e --- /dev/null +++ b/bindings/ceylon/ceylon/workspace/runner.py @@ -0,0 +1,14 @@ +from typing import Any, List, Dict + +from pydantic import BaseModel + +from ceylon.ceylon import AgentDetail + + +class RunnerInput(BaseModel): + request: Any + agents: List[AgentDetail] + network: Dict[str, List[str]] + + class Config: + arbitrary_types_allowed = True diff --git a/bindings/ceylon/ceylon/workspace/worker.py b/bindings/ceylon/ceylon/workspace/worker.py new file mode 100644 index 00000000..5e5cc90e --- /dev/null +++ b/bindings/ceylon/ceylon/workspace/worker.py @@ -0,0 +1,34 @@ +import asyncio +import pickle + +from ceylon.ceylon import WorkerAgent, WorkerAgentConfig, Processor, \ + MessageHandler + + +class Worker(WorkerAgent, Processor, MessageHandler): + + def __init__(self, name="admin", workspace_id="admin", admin_peer=None, admin_port=8888,role="worker"): + super().__init__(config=WorkerAgentConfig(name=name, + role=role, + admin_peer=admin_peer, + admin_port=admin_port, + work_space_id=workspace_id), processor=self, on_message=self) + + async def run(self, inputs: "bytes"): + print(f"Worker received: {inputs}") + try: + while True: + await self.broadcast(pickle.dumps({ + "hello": f"world from worker {self.details().name}" + })) + await asyncio.sleep(1) + print(f"Worker broadcasted: {pickle.dumps({'hello': 'world from worker'})}") + except Exception as e: + print(f"Worker error: {e}") + print(f"Worker {self.details().name} finished") + + def on_message(self, agent_id: "str", data: "bytes", time: "int"): + print(f"Worker on_message {self.details().name}", agent_id, data, time) + + async def on_message(self, agent_id: "str", data: "bytes", time: "int"): + print(f"Worker on_message {self.details().name}", agent_id, data, time) diff --git a/bindings/ceylon/examples/blog_writer/editor_panel.py b/bindings/ceylon/examples/blog_writer/editor_panel.py index a9d67953..c6937ae9 100644 --- a/bindings/ceylon/examples/blog_writer/editor_panel.py +++ b/bindings/ceylon/examples/blog_writer/editor_panel.py @@ -13,7 +13,8 @@ async def main(): runner = AgentRunner(workspace_name="ceylon-ai") # llm_lib = ChatOllama(model="phi3:instruct") - llm_lib = ChatOllama(model="gemma2:latest") + # llm_lib = ChatOllama(model="gemma2:latest") + llm_lib = ChatOllama(model="gemma2:27b") # llm_lib = ChatOpenAI(model="gpt-4o") runner.register_agent(LLMAgent( name="name_chooser", @@ -54,6 +55,22 @@ async def main(): instructions=[ "Ensure clarity, accuracy, 
and proper formatting while respecting ethical guidelines and privacy."] )) + + runner.register_agent(LLMAgent( + name="tweeter", + position="Tweet", + llm=llm_lib, + responsibilities=["Create tweet based on writer's content."], + instructions=[ + "Simple", + "Be creative", + "Be original", + "With Hashtags" + ], + tools=[ + FilePublisherTool() + ] + )) # runner.register_agent(LLMAgent( name="publisher", @@ -82,7 +99,8 @@ async def main(): "name_chooser": [], "researcher": [], "writer": ["researcher"], - "publisher": ["writer", "name_chooser"] + "publisher": ["writer", "name_chooser"], + "tweeter": ["writer", "name_chooser"] } ) diff --git a/bindings/ceylon/pyproject.toml b/bindings/ceylon/pyproject.toml index 0bd006e8..f5a95838 100644 --- a/bindings/ceylon/pyproject.toml +++ b/bindings/ceylon/pyproject.toml @@ -10,7 +10,7 @@ readme = { file = "README.md", content-type = "text/markdown" } license = { file = "LICENSE" } keywords = ["Aritifical Intelligence", "Multi Agent Systems", "CEYLON", "Ceylon Aritficial Intelligence."] dynamic = [] -version = "0.13.5" +version = "0.14.0" classifiers = [ "Programming Language :: Rust", "Programming Language :: Python :: Implementation :: CPython", @@ -35,7 +35,10 @@ dependencies = [ "langchain==0.2.6", "duckduckgo-search==6.1.7", "langchain-community==0.2.6", - "openai==1.35.7" + "openai==1.35.7", + "expert-ceylon==2.0.0a2", + "python-statemachine[diagrams]==2.3.4", + "pydot===3.0.1" ] [project.urls] diff --git a/bindings/ceylon/src/agent/agent.rs b/bindings/ceylon/src/agent/agent.rs deleted file mode 100644 index 471ed90f..00000000 --- a/bindings/ceylon/src/agent/agent.rs +++ /dev/null @@ -1,382 +0,0 @@ -use std::sync::Arc; -use std::time::{SystemTime, UNIX_EPOCH}; -use log::{debug, error, info}; -use tokio::{select, signal}; -use tokio::sync::{mpsc, Mutex, oneshot, RwLock}; - -use sangedama::node::{ - node::{create_node}, - message::{NodeMessage, MessageType}, -}; - -use crate::agent::agent_base::{AgentDefinition, MessageHandler, Processor}; -use crate::agent::state::{AgentState, Message, SystemMessage}; - -pub struct AgentCore { - _definition: RwLock, - _workspace_id: Option, - _processor: Arc>>, - _on_message: Arc>>, - receiver_from_outside_rx: Arc>>, - sender_from_outside_tx: tokio::sync::mpsc::Sender, - - shutdown_tx: Arc>, - shutdown_rx: Arc>>, -} - -impl AgentCore { - pub fn new(definition: AgentDefinition, on_message: Arc, processor: Arc) -> Self { - let (tx_0, rx_0) = tokio::sync::mpsc::channel::(100); - let (shutdown_tx, shutdown_rx) = mpsc::channel::<()>(1); - - Self { - _definition: RwLock::new(definition), - _workspace_id: None, - _on_message: Arc::new(Mutex::new(on_message)), - _processor: Arc::new(Mutex::new(processor)), - receiver_from_outside_rx: Arc::new(Mutex::new(rx_0)), - sender_from_outside_tx: tx_0, - - shutdown_tx: Arc::new(shutdown_tx), - shutdown_rx: Arc::new(Mutex::new(shutdown_rx)), - } - } - - pub async fn definition(&self) -> AgentDefinition { - self._definition.read().await.clone() - } - - pub async fn id(&self) -> String { - self._definition.read().await.id.clone().unwrap_or("".to_string()) - } - - pub fn workspace_id(&self) -> String { - self._workspace_id.clone().unwrap_or("".to_string()) - } - - pub fn set_workspace_id(&mut self, workspace_id: String) { - self._workspace_id = Option::from(workspace_id); - } - - pub async fn broadcast(&self, message: Vec) { - let name = self.definition().await.name.clone(); - let id = self.id().await; - let msg = SystemMessage::Content(Message::new(message, Some(id), name)); - 
self.sender_from_outside_tx.send(msg).await.unwrap(); - } - - pub fn get_tx_0(&self) -> tokio::sync::mpsc::Sender { - self.sender_from_outside_tx.clone() - } - - pub async fn stop(&self) { - info!( "Agent {} stop", self.definition().await.name); - self.shutdown_tx.clone().send(()).await.unwrap(); - } -} - -impl AgentCore { - pub async fn start(&self, topic: String, url: String, inputs: Vec) { - let definition = self.definition().await; - let agent_name = definition.name.clone(); - let (tx_0, rx_0) = tokio::sync::mpsc::channel::(100); - let (mut node_0, mut message_from_node) = create_node(agent_name.clone(), rx_0).await; - let on_message = self._on_message.clone(); - - self._definition.write().await.id = Some(node_0.id.clone()); - let definition = self.definition().await; - - let (agent_state_message_sender_tx, mut agent_state_message_receiver) = tokio::sync::mpsc::channel::(100); - - let agent_state = Arc::new(Mutex::new(AgentState::new())); - - let mut is_requst_to_shutdown = false; - - // State Message Handle here - let agent_state_clone = Arc::clone(&agent_state); - let agent_state_message_processor = tokio::spawn(async move { - loop { - if is_requst_to_shutdown { - break; - } - if let Some(message) = agent_state_message_receiver.recv().await { - debug!( "Message: {:?}", message); - { - agent_state_clone.lock().await.add_message(message).await; - debug!("AgentState updated"); - } - { - let snapshot = agent_state_clone.lock().await.request_snapshot().await; - debug!("Snapshot: {:?}", snapshot); - } - } - } - }); - - - // Message distributed to other nodes - let receiver_from_outside_rx = Arc::clone(&self.receiver_from_outside_rx); - let definition_handler_process = definition.clone(); - let agent_state_message_sender_tx_c1 = agent_state_message_sender_tx.clone(); - let agent_id = definition.id.clone().unwrap_or("".to_string()); - let message_from_agent_impl_handler_process = tokio::spawn(async move { - loop { - if is_requst_to_shutdown { - break; - } - if tx_0.is_closed() { - break; - } - if let Some(raw_message) = receiver_from_outside_rx.lock().await.recv().await { - let name = definition_handler_process.name.clone(); - match definition.id.clone() { - Some(id) => { - let msg = NodeMessage::data(name, id, raw_message.to_bytes()); - tx_0.send(msg).await.unwrap(); - - if let SystemMessage::Content(message) = raw_message { - let mut msg = message.clone(); - if msg.sender_id.is_none() { - msg.sender_id = Some(agent_id.clone()); - } - agent_state_message_sender_tx_c1.send(msg).await.unwrap(); - } - } - None => { - error!("Agent {} has no id", name); - } - }; - } - } - }); - - // Agent receive message from other nodes - let agent_name = definition.name.clone(); - let agent_state_message_sender_tx_c1 = agent_state_message_sender_tx.clone(); - let agent_state_clone_handle_process = Arc::clone(&agent_state); - let node_message_sender = self.get_tx_0(); - let message_handler_process = tokio::spawn(async move { - loop { - if is_requst_to_shutdown { - break; - } - if let Some(node_message) = message_from_node.recv().await { - if node_message.r#type == MessageType::Message { - debug!( "Agent {:?} received message from node {:?}", agent_name, node_message); - let sender_id = node_message.originator_id; - let sender_name = node_message.originator; - let data = SystemMessage::from_bytes(node_message.data.clone()); - let snapshot = agent_state_clone_handle_process.lock().await.request_snapshot().await; - match data { - SystemMessage::Content(message) => { - 
agent_state_message_sender_tx_c1.send(message.clone()).await.unwrap(); - on_message.lock().await.on_message(sender_id, message.content, node_message.time).await; - } - SystemMessage::SyncRequest { versions } => { - debug!( "Agent {:?} received sync request from node {:?}", sender_name, versions); - let mut missing_versions = vec![]; - - // If requested list miss something in snapshot - let snapshot_versions = snapshot.versions(); - for snapshot_version in snapshot_versions { - if !versions.contains(&snapshot_version) { - missing_versions.push(snapshot_version); - } - } - - if !missing_versions.is_empty() { - let missing_messages = snapshot.get_messages(missing_versions.clone()); - let sync_request = SystemMessage::SyncResponse { - messages: missing_messages, - }; - node_message_sender.send(sync_request).await.expect("TODO: panic message"); - } - } - SystemMessage::SyncResponse { messages } => { - debug!( "Agent {:?} received sync response from node {:?}", agent_name, messages); - - for message in messages { - agent_state_message_sender_tx_c1.send(message.clone()).await.unwrap(); - } - } - SystemMessage::Beacon { time, sender, name, sync_hash } => { - debug!( "Agent {:?} received beacon {:?} from {:?} at {:?}", agent_name, sender, name, time); - - if sync_hash != snapshot.sync_hash() { - debug!("Sync Hash: {:?} from {:?} not equal to snapshot hash: {:?}", sync_hash, sender, snapshot.sync_hash()); - - let sync_request = SystemMessage::SyncRequest { - versions: snapshot.versions(), - }; - node_message_sender.send(sync_request).await.expect("TODO: panic message"); - } - } - } - } - } - } - }); - - // Handle run process - let processor = self._processor.clone(); - let run_process = tokio::spawn(async move { - processor.lock().await.run(inputs).await; - }); - - // Handle sync process - let node_message_sender = self.get_tx_0(); - let definition = self.definition().await; - let agent_state_clone_sync_process = Arc::clone(&agent_state); - let run_sync_request = tokio::spawn(async move { - loop { - match definition.clone().id { - Some(id) => { - let snapshot = agent_state_clone_sync_process.lock().await.request_snapshot().await; - let beacon_message = SystemMessage::Beacon { - time: SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs_f64(), - sender: id.clone(), - name: definition.name.clone(), - sync_hash: snapshot.sync_hash(), - }; - node_message_sender.send(beacon_message).await.unwrap(); - tokio::time::sleep(tokio::time::Duration::from_secs(10)).await; - } - None => { - tokio::time::sleep(tokio::time::Duration::from_secs(2)).await; - continue; - } - } - } - }); - - node_0.connect(url.as_str(), topic.as_str()); - node_0.run().await; - - let definition = self.definition().await; - let agent_name = definition.name.clone(); - debug!( "Agent {:?} started", agent_name); - - let shutdown_rx_ = Arc::clone(&self.shutdown_rx); - - let agent_name_clone = definition.name.clone(); - let task_shutdown = tokio::spawn(async move { - loop { - if let Some(_) = shutdown_rx_.lock().await.recv().await { - debug!("Agent {:?} received shutdown signal", agent_name_clone); - is_requst_to_shutdown = true; - break; - } - } - }); - - select! 
{ - _ = message_from_agent_impl_handler_process => { - debug!("Agent {:?} message_from_agent_impl_handler_process stopped", agent_name); - }, - _ = message_handler_process => { - debug!("Agent {:?} message_handler_process stopped", agent_name); - }, - _ = run_process => { - debug!("Agent {:?} run_process stopped", agent_name); - }, - _ = run_sync_request => { - debug!("Agent {:?} run_sync_request stopped", agent_name); - }, - _ = agent_state_message_processor => { - debug!("Agent {:?} agent_state_message_processor stopped", agent_name); - }, - _ = task_shutdown => { - debug!("Agent {:?} agent_state_message_processor stopped", agent_name); - }, - _ = signal::ctrl_c() => { - println!("Agent {:?} received exit signal", agent_name); - // Perform any necessary cleanup here - is_requst_to_shutdown = true; - }, - - } - } -} - -#[cfg(test)] -mod tests { - use std::fmt::Debug; - use std::sync::Arc; - - use log::info; - use serde_json::json; - - use crate::{MessageHandler, Processor, Workspace, WorkspaceConfig}; - use crate::agent::agent_base::AgentDefinition; - - use super::AgentCore; - - #[derive(Debug)] - struct AgentHandler { - tx_0: tokio::sync::mpsc::Sender>, - } - - - #[async_trait::async_trait] - impl MessageHandler for AgentHandler { - async fn on_message(&self, agent_id: String, data: Vec, time: u64) { - println!("Agent {} received message {:?}", agent_id, data); - } - } - - #[async_trait::async_trait] - impl Processor for AgentHandler { - async fn run(&self, input: Vec) -> () { - println!("Agent received input {:?}", input); - loop { - println!("AgentHandler "); - self.tx_0.send( - json!({ - "data": "Hello World!", - }).as_str().unwrap().as_bytes().to_vec() - ).await.unwrap(); - tokio::time::sleep(std::time::Duration::from_millis(1000)).await; - } - } - } - - fn create_agent(definition: AgentDefinition) -> AgentCore { - let (tx_0, mut rx_0) = tokio::sync::mpsc::channel::>(100); - - let ag = AgentCore::new(definition, - Arc::new(AgentHandler { - tx_0: tx_0.clone(), - }), - Arc::new(AgentHandler { - tx_0: tx_0.clone(), - }), ); - return ag; - } - - - #[tokio::test] - async fn test_agent() { - let agent = create_agent(AgentDefinition { - id: None, - name: "test".to_string(), - position: "test".to_string(), - responsibilities: vec!["test".to_string()], - instructions: vec!["test".to_string()], - }); - let agent2 = create_agent(AgentDefinition { - id: None, - name: "test2".to_string(), - position: "test2".to_string(), - responsibilities: vec!["test2".to_string()], - instructions: vec!["test2".to_string()], - }); - - let workspace = Workspace::new(vec![Arc::new(agent), Arc::new(agent2)], WorkspaceConfig { - name: "test".to_string(), - host: "0.0.0.0".to_string(), - port: 0, - }); - workspace.run(json!({"test": "test"}).to_string().as_bytes().to_vec()).await; - } -} - diff --git a/bindings/ceylon/src/agent/agent_base.rs b/bindings/ceylon/src/agent/agent_base.rs deleted file mode 100644 index fdf0bb6d..00000000 --- a/bindings/ceylon/src/agent/agent_base.rs +++ /dev/null @@ -1,22 +0,0 @@ -use std::fmt::Debug; - -use serde::{Deserialize, Serialize}; - -#[async_trait::async_trait] -pub trait MessageHandler: Send + Sync + Debug { - async fn on_message(&self, agent_id: String, data: Vec, time: u64); -} - -#[async_trait::async_trait] -pub trait Processor: Send + Sync + Debug { - async fn run(&self, input: Vec) -> (); -} - -#[derive(Deserialize, Serialize, Debug, Clone, Default)] -pub struct AgentDefinition { - pub id: Option, - pub name: String, - pub position: String, - pub instructions: Vec, - pub 
responsibilities: Vec, -} \ No newline at end of file diff --git a/bindings/ceylon/src/agent/mod.rs b/bindings/ceylon/src/agent/mod.rs deleted file mode 100644 index 221b1114..00000000 --- a/bindings/ceylon/src/agent/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -pub mod agent; -pub mod workspace; -pub mod agent_base; -pub mod state; \ No newline at end of file diff --git a/bindings/ceylon/src/agent/state.rs b/bindings/ceylon/src/agent/state.rs deleted file mode 100644 index aa415d23..00000000 --- a/bindings/ceylon/src/agent/state.rs +++ /dev/null @@ -1,170 +0,0 @@ -use std::hash::{DefaultHasher, Hash, Hasher}; -use std::time::{SystemTime, UNIX_EPOCH}; - -use log::info; -use serde::{Deserialize, Serialize}; -use tokio::sync::RwLock; - -pub static SYSTEM_MESSAGE_CONTENT_TYPE: &str = "ceylon.system.message.content"; -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct Message { - pub id: String, - pub sender_id: Option, - pub sender: String, - pub content: Vec, - pub version: f64, -} - -impl Message { - pub fn new(content: Vec, sender_id: Option, sender: String) -> Self { - Self { - id: uuid::Uuid::new_v4().to_string(), - content, - version: SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs_f64(), - sender_id, - sender, - } - } -} - -#[derive(Clone, Debug, Serialize, Deserialize)] -pub enum SystemMessage { - Content(Message), - SyncRequest { versions: Vec }, - SyncResponse { messages: Vec }, - Beacon { - name: String, - sender: String, - time: f64, - sync_hash: u64, - }, -} - -impl SystemMessage { - pub fn get_id(&self) -> String { - match self { - SystemMessage::Content(message) => message.id.clone(), - SystemMessage::SyncRequest { .. } => "sync_request".to_string(), - SystemMessage::SyncResponse { .. } => "sync_response".to_string(), - SystemMessage::Beacon { .. } => "beacon".to_string(), - } - } - pub fn get_type(&self) -> String { - match self { - SystemMessage::Content(..) => SYSTEM_MESSAGE_CONTENT_TYPE.to_string(), - SystemMessage::SyncRequest { .. } => "sync_request".to_string(), - SystemMessage::SyncResponse { .. } => "sync_response".to_string(), - SystemMessage::Beacon { .. 
} => "beacon".to_string(), - } - } - - pub fn to_bytes(&self) -> Vec { - serde_json::to_vec(self).unwrap() - } - - pub fn from_bytes(bytes: Vec) -> Self { - serde_json::from_slice(&bytes).unwrap() - } -} - -pub type AgentStateMessageList = Vec; - -#[derive(Debug)] -pub struct AgentStateSnap { - pub messages: AgentStateMessageList, - pub last_version: f64, -} - -impl AgentStateSnap { - pub fn versions(&self) -> Vec { - self.messages - .iter() - .map(|message| message.version) - .collect() - } - - pub fn sync_hash(&self) -> u64 { - let versions = self.versions(); - let mut hash = DefaultHasher::new(); - - for version in versions { - format!("{}", version).hash(&mut hash); - } - - hash.finish() - } - - pub fn missing_versions(&self, versions: Vec) -> Vec { - let mut missing_versions = Vec::new(); - for version in versions { - if !self.versions().contains(&version) { - missing_versions.push(version); - } - } - missing_versions - } - - pub fn get_messages(&self, versions: Vec) -> Vec { - let mut messages = Vec::new(); - for version in versions { - if let Some(message) = self.messages.iter().find(|message| message.version == version) { - messages.push(message.clone()); - } - } - messages - } -} - -#[derive(Default)] -pub struct AgentState { - messages: RwLock, - last_version: RwLock, -} - -impl AgentState { - pub fn new() -> Self { - Self { - messages: RwLock::new(Vec::new()), - last_version: RwLock::new(0.0), - } - } - - pub async fn add_message(&self, message: Message) { - info!("Started add_message ordered by version"); - self.messages.write().await.push(message); - self.order_by_version().await; - let last_message = self.get_last_message().await; - let last_version = self.last_version.read().await.clone(); - info!( "Last version: {}, Last message version: {}", last_version, last_message.version); - if last_message.version > last_version { - let mut last_version = self.last_version.write().await; - *last_version = last_message.version; - } - info!("Finished add_message ordered by version"); - } - - pub async fn get_last_message(&self) -> Message { - let messages = self.messages.read().await.clone(); - messages.last().unwrap().clone() - } - - pub async fn order_by_version(&self) { - info!("Started updating ordered by version"); - let mut messages = self.messages.read().await.clone(); - let mut ordered_messages = Vec::new(); - messages.sort_by(|a, b| a.version.partial_cmp(&b.version).unwrap()); - for message in messages { - ordered_messages.push(message); - } - let mut update_messages = self.messages.write().await; - update_messages.clear(); - update_messages.extend(ordered_messages); - info!("Finished updating ordered by version"); - } - - pub async fn request_snapshot(&self) -> AgentStateSnap { - let messages = self.messages.read().await.clone(); - let last_version = self.last_version.read().await.clone(); - AgentStateSnap { messages, last_version } - } -} \ No newline at end of file diff --git a/bindings/ceylon/src/agent/workspace.rs b/bindings/ceylon/src/agent/workspace.rs deleted file mode 100644 index 2744589f..00000000 --- a/bindings/ceylon/src/agent/workspace.rs +++ /dev/null @@ -1,66 +0,0 @@ -use std::sync::Arc; - -use serde::{Deserialize, Serialize}; -use uniffi::deps::log::debug; - -use crate::AgentCore; - -#[derive(Deserialize, Serialize, Clone, Debug)] -pub struct WorkspaceConfig { - pub name: String, - pub host: String, - pub port: u16, -} - -pub struct Workspace { - id: String, - port: u16, - host: String, - _name: String, - _agents: Vec>, -} - - -impl Workspace { - pub fn 
new(agents: Vec>, config: WorkspaceConfig) -> Self { - let _name = config.name; - let id = format!("workspace-{}", uuid::Uuid::new_v4()); - - Self { - id, - port: config.port, - host: config.host, - _name, - _agents: agents, - } - } - - pub async fn run(&self, inputs: Vec) { - env_logger::init(); - debug!("Workspace {} running", self.id); - let rt = tokio::runtime::Builder::new_multi_thread() - .enable_all() - .build() - .unwrap(); - - let mut tasks = vec![]; - let _inputs = inputs.clone(); - for agent in self._agents.iter() { - let _inputs = _inputs.clone(); - let url = format!("/ip4/{}/udp/{}/quic-v1", self.host, self.port); - let topic = format!("workspace-{}", agent.workspace_id()); - - let agent = agent.clone(); - let task = rt.spawn(async move { - agent.start(topic, url, _inputs).await; - }); - tasks.push(task); - } - - for task in tasks { - task.await.unwrap(); - } - rt.shutdown_background(); - } -} - diff --git a/bindings/ceylon/src/ceylon.udl b/bindings/ceylon/src/ceylon.udl index eab5ec65..238914fc 100644 --- a/bindings/ceylon/src/ceylon.udl +++ b/bindings/ceylon/src/ceylon.udl @@ -1,38 +1,72 @@ namespace ceylon { string version(); + void enable_log(string level); + + void cprint(string message); }; -dictionary Message{ - string id; - string? sender_id=""; - string sender; - bytes content; - f64 version; +dictionary WorkSpaceConfig{ + string name; + u16 port = 8888; +}; + +interface WorkSpace{ + constructor(WorkSpaceConfig config); + + [Async] + void run(bytes inputs); }; -dictionary AgentDefinition{ - string? id=""; +dictionary AdminAgentConfig { string name; - string position; - sequence responsibilities; - sequence instructions; + u16 port; }; -interface AgentCore{ - constructor(AgentDefinition definition, MessageHandler on_message, Processor processor); +dictionary AgentDetail{ + string name; + string id; + string role; +}; + +interface AdminAgent{ + constructor(AdminAgentConfig config,MessageHandler on_message, Processor processor, EventHandler on_event); [Async] - AgentDefinition definition(); - string workspace_id(); + void start(bytes inputs, sequence workers); [Async] - void broadcast(bytes message); + void stop(); + [Async] - void stop(); + void broadcast(bytes message); + + AgentDetail details(); }; +dictionary WorkerAgentConfig { + string name; + string work_space_id; + string admin_peer; + string role; + u16 admin_port; +}; +interface WorkerAgent{ + constructor(WorkerAgentConfig config,MessageHandler on_message, Processor processor); + [Async] + void start(bytes inputs); + [Async] + void stop(); + + [Async] + void broadcast(bytes message); + + AgentDetail details(); + +}; + +// Handle Agents behaviours [Trait,WithForeign] interface MessageHandler { [Async] @@ -45,15 +79,8 @@ interface Processor { void run(bytes inputs); }; -dictionary WorkspaceConfig{ - string name; - string host = "/ip4/0.0.0.0/tcp/"; - u16 port = 8888; -}; - -interface Workspace{ - constructor(sequence agents,WorkspaceConfig config); - +[Trait,WithForeign] +interface EventHandler { [Async] - void run(bytes inputs); + void on_agent_connected(string topic,AgentDetail agent); }; \ No newline at end of file diff --git a/bindings/ceylon/src/lib.rs b/bindings/ceylon/src/lib.rs index 4b9bcb0b..60fe610b 100644 --- a/bindings/ceylon/src/lib.rs +++ b/bindings/ceylon/src/lib.rs @@ -1,25 +1,24 @@ -mod agent; +mod workspace; fn version() -> String { env!("CARGO_PKG_VERSION").to_string() } -pub use agent::{ - agent_base::{ - MessageHandler, - Processor, - AgentDefinition, - }, - agent::{ - AgentCore - }, - 
state::{ - Message - }, - workspace::{ - Workspace, - WorkspaceConfig, - }, -}; +fn cprint(val: String) { + info!( "{}", val); +} + +fn enable_log(level: String) { + let subscriber = tracing_subscriber::FmtSubscriber::builder() + .with_level(true) + .with_max_level(Level::from_str(&level).unwrap()) + .finish(); + + // use that subscriber to process traces emitted after this point + tracing::subscriber::set_global_default(subscriber).unwrap(); +} +use std::str::FromStr; +use tracing::{info, Level}; +pub use workspace::*; uniffi::include_scaffolding!("ceylon"); \ No newline at end of file diff --git a/bindings/ceylon/src/main.rs b/bindings/ceylon/src/main.rs index bf76b737..d079c7f1 100644 --- a/bindings/ceylon/src/main.rs +++ b/bindings/ceylon/src/main.rs @@ -1,89 +1,2 @@ -use std::sync::Arc; -use serde_json::json; -use tokio::io; -use tokio::io::AsyncBufReadExt; - -mod agent; -pub use agent::{ - agent::AgentCore, - agent_base::{AgentDefinition, MessageHandler, Processor}, - workspace::{Workspace, WorkspaceConfig}, - state::{AgentState, Message, SystemMessage}, -}; - -#[derive(Debug)] -struct AgentHandler { - tx_0: tokio::sync::mpsc::Sender>, -} - -#[async_trait::async_trait] -impl MessageHandler for AgentHandler { - async fn on_message(&self, agent_id: String, data: Vec, time: u64) { - println!("Agent {} received message {:?}", agent_id, String::from_utf8_lossy(&data)); - } -} - -#[async_trait::async_trait] -impl Processor for AgentHandler { - async fn run(&self, input: Vec) -> () { - println!("Agent received input {:?}", input); - let mut stdin = io::BufReader::new(io::stdin()).lines(); - loop { - tokio::select! { - line = stdin.next_line() => { - let line = line.unwrap(); - if let Some(line) = line { - println!("Entered : {}" ,line.clone()); - self.tx_0.send( - json!({ - "data": format!("Human : {}", line), - }).to_string().into_bytes() - ).await.unwrap(); - } - }, - } - } - } -} - #[tokio::main] -async fn main() { - tokio::spawn(async move { - env_logger::init(); - let (tx_0, mut rx_0) = tokio::sync::mpsc::channel::>(100); - let definition = AgentDefinition { - id: None, - name: "test".to_string(), - position: "test".to_string(), - responsibilities: vec!["test".to_string()], - instructions: vec!["test".to_string()], - }; - let ag = AgentCore::new( - definition, - Arc::new(AgentHandler { tx_0: tx_0.clone() }), - Arc::new(AgentHandler { tx_0: tx_0.clone() }), - ); - let ag_tx = ag.get_tx_0(); - tokio::spawn(async move { - ag.start( - "test_topic".to_string(), - "/ip4/0.0.0.0/udp/0/quic-v1".to_string(), - vec![], - ) - .await; - }); - - loop { - tokio::select! 
{ - message = rx_0.recv() => { - if let Some(raw_message) = message { - let msg = SystemMessage::Content(Message::new(raw_message, None, "test".to_string())); - ag_tx.send(msg).await.unwrap(); - } - } - } - } - }) - .await - .unwrap(); -} +async fn main() {} \ No newline at end of file diff --git a/bindings/ceylon/src/workspace.rs b/bindings/ceylon/src/workspace.rs new file mode 100644 index 00000000..2e887950 --- /dev/null +++ b/bindings/ceylon/src/workspace.rs @@ -0,0 +1,27 @@ +mod workspace; +mod admin_agent; +mod agent; +mod worker_agent; +mod message; + +pub use agent::{ + MessageHandler, + Processor, + AgentDetail, + EventHandler, +}; + +pub use workspace::{ + WorkSpace, + WorkSpaceConfig, +}; + +pub use admin_agent::{ + AdminAgent, + AdminAgentConfig, +}; + +pub use worker_agent::{ + WorkerAgentConfig, + WorkerAgent, +}; diff --git a/bindings/ceylon/src/workspace/admin_agent.rs b/bindings/ceylon/src/workspace/admin_agent.rs new file mode 100644 index 00000000..bc9d0b0b --- /dev/null +++ b/bindings/ceylon/src/workspace/admin_agent.rs @@ -0,0 +1,269 @@ +use futures::future::join_all; +use std::collections::HashMap; +use std::sync::{Arc}; +use tokio::runtime::Runtime; +use tokio::sync::{Mutex, RwLock}; +use tokio::{select, signal}; +use tracing::{error, info}; + +use crate::workspace::agent::{AgentDetail, EventHandler}; +use crate::workspace::message::AgentMessage; +use crate::{MessageHandler, Processor, WorkerAgent}; +use sangedama::peer::message::data::{EventType, NodeMessage}; +use sangedama::peer::node::{ + create_key, create_key_from_bytes, get_peer_id, AdminPeer, AdminPeerConfig, +}; + +#[derive(Clone)] +pub struct AdminAgentConfig { + pub name: String, + pub port: u16, +} + +pub struct AdminAgent { + pub config: AdminAgentConfig, + + _processor: Arc>>, + _on_message: Arc>>, + _on_event: Arc>>, + + pub broadcast_emitter: tokio::sync::mpsc::Sender>, + pub broadcast_receiver: Arc>>>, + + runtime: Runtime, + _peer_id: String, + + _key: Vec, +} + +impl AdminAgent { + pub fn new( + config: AdminAgentConfig, + on_message: Arc, + processor: Arc, + on_event: Arc, + ) -> Self { + let (broadcast_emitter, broadcast_receiver) = tokio::sync::mpsc::channel::>(100); + + let rt = tokio::runtime::Builder::new_multi_thread() + .enable_all() + .build() + .unwrap(); + let admin_peer_key = create_key(); + let id = get_peer_id(&admin_peer_key).to_string(); + + Self { + config, + _on_message: Arc::new(Mutex::new(on_message)), + _processor: Arc::new(Mutex::new(processor)), + _on_event: Arc::new(Mutex::new(on_event)), + + broadcast_emitter, + broadcast_receiver: Arc::new(Mutex::new(broadcast_receiver)), + + runtime: rt, + + _peer_id: id, + + _key: admin_peer_key.to_protobuf_encoding().unwrap(), + } + } + + pub async fn broadcast(&self, message: Vec) { + let node_message = AgentMessage::NodeMessage { message }; + match self.broadcast_emitter.send(node_message.to_bytes()).await { + Ok(_) => {} + Err(_) => { + error!("Failed to send broadcast message"); + } + } + } + + pub async fn start(&self, inputs: Vec, agents: Vec>) { + self.run_(inputs, agents).await; + } + + pub async fn stop(&self) { + info!("Agent {} stop called", self.config.name); + } + + pub fn details(&self) -> AgentDetail { + AgentDetail { + name: self.config.name.clone(), + id: self._peer_id.clone(), + role: "admin".to_string(), + } + } + async fn run_(&self, inputs: Vec, agents: Vec>) { + info!("Agent {} running", self.config.name); + + let worker_details: RwLock> = RwLock::new(HashMap::new()); + + let config = self.config.clone(); + let 
admin_config = AdminPeerConfig::new(config.port, config.name.clone()); + + let peer_key = create_key_from_bytes(self._key.clone()); + + let (mut peer_, mut peer_listener_) = + AdminPeer::create(admin_config.clone(), peer_key).await; + + if peer_.id == self._peer_id { + info!("Admin peer created {}", peer_.id.clone()); + } else { + panic!("Id mismatch"); + } + let admin_id = peer_.id.clone(); + let admin_emitter = peer_.emitter(); + + let mut is_request_to_shutdown = false; + + let task_admin = self.runtime.spawn(async move { + peer_.run(None).await; + }); + + + let mut worker_tasks = vec![]; + + let _inputs = inputs.clone(); + let admin_id_ = admin_id.clone(); + for agent in agents { + let _inputs_ = _inputs.clone(); + let agent_ = agent.clone(); + let _admin_id_ = admin_id_.clone(); + let mut config = agent_.config.clone(); + config.admin_peer = _admin_id_.clone(); + let tasks = agent_ + .run_with_config(_inputs_.clone(), config, self.runtime.handle()) + .await; + let agent_detail = agent_.details(); + + worker_details + .write() + .await + .insert(agent_detail.id.clone(), agent_detail); + + for task in tasks { + worker_tasks.push(task); + } + } + + error!("Worker tasks created"); + + let worker_tasks = join_all(worker_tasks); + + + let name = self.config.name.clone(); + let on_message = self._on_message.clone(); + let on_event = self._on_event.clone(); + let task_admin_listener = self.runtime.spawn(async move { + loop { + if is_request_to_shutdown { + break; + } + select! { + event = peer_listener_.recv() => { + if let Some(event) = event { + match event { + NodeMessage::Message{ data, created_by, time} => { + let agent_message = AgentMessage::from_bytes(data); + + match agent_message { + AgentMessage::NodeMessage { message } => { + on_message.lock().await.on_message( + created_by, + message, + time + ).await; + } + _ => { + info!("Agent listener {:?}", agent_message); + } + } + } + NodeMessage::Event { + event, + .. + }=>{ + match event{ + EventType::Subscribe{ + peer_id, + topic, + }=>{ + if let Some(agent) = worker_details.read().await.get(&peer_id) { + let agent = agent.clone(); + on_event.lock().await.on_agent_connected(topic,agent) + .await; + } + } + _ => { + info!("Admin Received Event {:?}", event); + } + } + } + } + } + } + } + } + }); + + let processor = self._processor.clone(); + let processor_input_clone = inputs.clone(); + let run_process = self.runtime.spawn(async move { + processor.lock().await.run(processor_input_clone).await; + loop { + if is_request_to_shutdown { + break; + } + } + }); + + let broadcast_receiver = self.broadcast_receiver.clone(); + let run_broadcast = self.runtime.spawn(async move { + loop { + if is_request_to_shutdown { + break; + } + if let Some(raw_data) = broadcast_receiver.lock().await.recv().await { + info!("Agent broadcast {:?}", raw_data); + admin_emitter.send(raw_data).await.unwrap(); + } + } + }); + + + self.runtime + .spawn(async move { + select! 
{ + _ = worker_tasks => { + info!("Agent {} worker_tasks done", name); + } + _ = task_admin => { + info!("Agent {} task_admin done", name); + } + _ = task_admin_listener => { + info!("Agent {} task_admin_listener done", name); + } + _ = run_process => { + info!("Agent {} run_process done", name); + } + _ = run_broadcast => { + info!("Agent {} run_broadcast done", name); + } + _ = signal::ctrl_c() => { + println!("Agent {:?} received exit signal", name); + // Perform any necessary cleanup here + is_request_to_shutdown = true; + } + } + }) + .await + .unwrap(); + + loop { + if is_request_to_shutdown { + break; + } + } + } +}
diff --git a/bindings/ceylon/src/workspace/agent.rs new file mode 100644 index 00000000..12d9b630 --- /dev/null +++ b/bindings/ceylon/src/workspace/agent.rs @@ -0,0 +1,30 @@ +use std::fmt::Debug; +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct AgentDetail { + pub name: String, + pub id: String, + pub role: String, +} + + +#[async_trait::async_trait] +pub trait AgentBase { + async fn run_(&self, inputs: Vec<u8>); +} + +#[async_trait::async_trait] +pub trait MessageHandler: Send + Sync + Debug { + async fn on_message(&self, agent_id: String, data: Vec<u8>, time: u64); +} + +#[async_trait::async_trait] +pub trait Processor: Send + Sync + Debug { + async fn run(&self, input: Vec<u8>) -> (); +} + +#[async_trait::async_trait] +pub trait EventHandler: Send + Sync + Debug { + async fn on_agent_connected(&self, topic: String, agent: AgentDetail) -> (); +} \ No newline at end of file
diff --git a/bindings/ceylon/src/workspace/message.rs new file mode 100644 index 00000000..d1a1a1cb --- /dev/null +++ b/bindings/ceylon/src/workspace/message.rs @@ -0,0 +1,21 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Serialize, Deserialize)] +pub enum AgentMessage { + SystemMessage { + message: Vec<u8> + }, + NodeMessage { + message: Vec<u8> + }, +} + +impl AgentMessage { + pub fn to_bytes(&self) -> Vec<u8> { + serde_json::to_vec(self).unwrap() + } + + pub fn from_bytes(bytes: Vec<u8>) -> Self { + serde_json::from_slice(&bytes).unwrap() + } +} \ No newline at end of file
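For orientation, AgentMessage above is the wire envelope shared by both agent types: broadcast() wraps the caller's raw bytes in the NodeMessage variant before they reach the peer layer, and the listener tasks decode incoming NodeMessage::Message payloads with from_bytes(). A minimal round-trip sketch, assuming only the AgentMessage definition above (the function name is illustrative):

    use crate::workspace::message::AgentMessage;

    fn agent_message_round_trip() {
        let payload = b"hello agents".to_vec();
        // Wrap raw bytes the same way AdminAgent::broadcast / WorkerAgent::broadcast do.
        let framed = AgentMessage::NodeMessage { message: payload.clone() };
        let bytes = framed.to_bytes(); // serde_json encoding, per to_bytes() above
        // Mirror what the listener side does with the bytes carried by NodeMessage::Message.
        match AgentMessage::from_bytes(bytes) {
            AgentMessage::NodeMessage { message } => assert_eq!(message, payload),
            other => println!("unexpected variant: {:?}", other),
        }
    }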
diff --git a/bindings/ceylon/src/workspace/worker_agent.rs new file mode 100644 index 00000000..3f796d91 --- /dev/null +++ b/bindings/ceylon/src/workspace/worker_agent.rs @@ -0,0 +1,171 @@ +use serde::{Deserialize, Serialize}; +use std::sync::{Arc}; +use tokio::sync::Mutex; +use tokio::{select}; +use tokio::runtime::{Handle}; +use tokio::task::JoinHandle; +use tracing::{error, info}; + +use crate::workspace::agent::AgentDetail; +use crate::workspace::message::AgentMessage; +use crate::{MessageHandler, Processor}; +use sangedama::peer::message::data::NodeMessage; +use sangedama::peer::node::{ + create_key, create_key_from_bytes, get_peer_id, MemberPeer, MemberPeerConfig, +}; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct WorkerAgentConfig { + pub name: String, + pub role: String, + pub work_space_id: String, + pub admin_peer: String, + pub admin_port: u16, +} + +pub struct WorkerAgent { + pub config: WorkerAgentConfig, + + _processor: Arc<Mutex<Arc<dyn Processor>>>, + _on_message: Arc<Mutex<Arc<dyn MessageHandler>>>, + + pub broadcast_emitter: tokio::sync::mpsc::Sender<Vec<u8>>, + pub broadcast_receiver: Arc<Mutex<tokio::sync::mpsc::Receiver<Vec<u8>>>>, + + _peer_id: String, + _key: Vec<u8>, +} + +impl WorkerAgent { + pub fn new( + config: WorkerAgentConfig, + on_message: Arc<dyn MessageHandler>, + processor: Arc<dyn Processor>, + ) -> Self { + let (broadcast_emitter, broadcast_receiver) = tokio::sync::mpsc::channel::<Vec<u8>>(100); + let admin_peer_key = create_key(); + let id = get_peer_id(&admin_peer_key).to_string(); + Self { + config, + _processor: Arc::new(Mutex::new(processor)), + _on_message: Arc::new(Mutex::new(on_message)), + + broadcast_emitter, + broadcast_receiver: Arc::new(Mutex::new(broadcast_receiver)), + + _peer_id: id, + _key: admin_peer_key.to_protobuf_encoding().unwrap(), + } + } + pub async fn broadcast(&self, message: Vec<u8>) { + let node_message = AgentMessage::NodeMessage { message }; + let message = node_message.to_bytes(); + + match self.broadcast_emitter.send(message).await { + Ok(_) => {} + Err(_) => { + error!("Failed to send broadcast message"); + } + } + } + pub async fn start(&self, _: Vec<u8>) { + info!("Not yet implemented"); + // self.run_with_config(inputs, self.config.clone()).await; + } + + pub async fn stop(&self) { + info!("Agent {} stop called", self.config.name); + } + + pub fn details(&self) -> AgentDetail { + AgentDetail { + name: self.config.name.clone(), + id: self._peer_id.clone(), + role: self.config.role.clone(), + } + } +} + +impl WorkerAgent { + pub async fn run_with_config(&self, inputs: Vec<u8>, worker_agent_config: WorkerAgentConfig, runtime: &Handle) -> Vec<JoinHandle<()>> { + info!("Agent {} running", self.config.name); + + let config = worker_agent_config.clone(); + let member_config = MemberPeerConfig::new( + config.name.clone(), + config.work_space_id.clone(), + config.admin_peer.clone(), + config.admin_port, + ); + let peer_key = create_key_from_bytes(self._key.clone()); + let (mut peer_, mut peer_listener_) = + MemberPeer::create(member_config.clone(), peer_key).await; + if peer_.id == self._peer_id { + info!("Worker peer created {}", peer_.id.clone()); + } else { + panic!("Id mismatch"); + } + let peer_emitter = peer_.emitter(); + + let is_request_to_shutdown = false; + + let task_admin = runtime.spawn(async move { + peer_.run().await; + }); + + let on_message = self._on_message.clone(); + let task_admin_listener = runtime.spawn(async move { + loop { + if is_request_to_shutdown { + break; + } + select! { + event = peer_listener_.recv() => { + if let Some(event) = event { + match event { + NodeMessage::Message{ data, created_by, time} => { + let agent_message = AgentMessage::from_bytes(data); + + match agent_message { + AgentMessage::NodeMessage { message } => { + on_message.lock().await.on_message( + created_by, + message, + time + ).await; + } + _ => { + info!("Agent listener {:?}", agent_message); + } + } + } + _ => { + info!("Agent listener {:?}", event); + } + } + } + } + } + } + }); + + let processor = self._processor.clone(); + let run_process = runtime.spawn(async move { + processor.lock().await.run(inputs).await; + }); + + let broadcast_receiver = self.broadcast_receiver.clone(); + let run_broadcast = runtime.spawn(async move { + loop { + if is_request_to_shutdown { + break; + } + if let Some(raw_data) = broadcast_receiver.lock().await.recv().await { + peer_emitter.send(raw_data).await.unwrap(); + } + } + }); + + vec![task_admin, task_admin_listener, run_process, run_broadcast] + } +}
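Taken together with admin_agent.rs above, the intended wiring is: the admin clones each worker's WorkerAgentConfig, overwrites admin_peer with its own peer id, runs the worker via run_with_config() on the admin runtime's handle, and waits on the returned JoinHandles with join_all(). A condensed sketch of that flow, assuming the crate-internal types defined in the files above (the handler arguments stand in for whatever MessageHandler/Processor/EventHandler implementations a caller supplies; the names and port here are illustrative only):

    use std::sync::Arc;
    use crate::{
        AdminAgent, AdminAgentConfig, EventHandler, MessageHandler, Processor, WorkerAgent,
        WorkerAgentConfig,
    };

    async fn wire_up(
        on_message: Arc<dyn MessageHandler>,
        processor: Arc<dyn Processor>,
        on_event: Arc<dyn EventHandler>,
    ) {
        let admin = AdminAgent::new(
            AdminAgentConfig { name: "ceylon-ai".to_string(), port: 7845 },
            on_message.clone(),
            processor.clone(),
            on_event,
        );
        let worker = Arc::new(WorkerAgent::new(
            WorkerAgentConfig {
                name: "worker-1".to_string(),
                role: "writer".to_string(),
                work_space_id: "ceylon-ai".to_string(),
                admin_peer: String::new(), // replaced with the admin's peer id inside AdminAgent::run_
                admin_port: 7845,
            },
            on_message,
            processor,
        ));
        // start() drives run_: it spawns the admin peer, starts every worker through
        // run_with_config(), and join_all()s the collected handles.
        admin.start(Vec::new(), vec![worker]).await;
    }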
diff --git a/bindings/ceylon/src/workspace/workspace.rs new file mode 100644 index 00000000..5f16df46 --- /dev/null +++ b/bindings/ceylon/src/workspace/workspace.rs @@ -0,0 +1,19 @@ +use tracing::info; + +pub struct WorkSpaceConfig { + pub name: String, + pub port: u16, +} +pub struct WorkSpace { + pub config: WorkSpaceConfig, +} + +impl WorkSpace { + pub fn new(config: WorkSpaceConfig) -> WorkSpace { + WorkSpace { config } + } + + pub async fn run(&self, _: Vec<u8>) { + info!("Workspace {} running", self.config.name); + } +}
diff --git a/bindings/ceylon/tests/single_llm_agent.py new file mode 100644 index 00000000..e8c5f680 --- /dev/null +++ b/bindings/ceylon/tests/single_llm_agent.py @@ -0,0 +1,49 @@ +import asyncio + +from langchain_community.chat_models import ChatOllama + +from ceylon import AgentRunner +from ceylon.llm.llm_agent import LLMAgent + + +async def main(): + runner = AgentRunner(workspace_name="ceylon-ai") + llm_lib = ChatOllama(model="llama3:instruct") + # llm_lib = ChatOpenAI(model="gpt-4o") + runner.register_agent(LLMAgent( + name="writer", + position="Assistant Writer", + llm=llm_lib, + responsibilities=["Create high-quality, original content that matches the audience's tone and style."], + instructions=[ + "Ensure clarity, accuracy, and proper formatting while respecting ethical guidelines and privacy."] + )) + runner.register_agent(LLMAgent( + name="researcher", + position="Assistant Writer", + llm=llm_lib, + responsibilities=["Create high-quality, original content that matches the audience's tone and style."], + instructions=[ + "Ensure clarity, accuracy, and proper formatting while respecting ethical guidelines and privacy."] + )) + + await runner.run( + { + "request": "I want to create a blog post", + "title": "How to use AI for Machine Learning", + "tone": "informal", + "length": "large", + "style": "creative" + }, + network={ + "name_chooser": [], + "researcher": [], + "writer": ["researcher"], + "editor": ["writer"], + "publisher": ["editor", "name_chooser"] + } + ) + + +if __name__ == '__main__': + asyncio.run(main())
diff --git a/bindings/ceylon/tests/state_machine/__init__.py new file mode 100644 index 00000000..e69de29b
diff --git a/bindings/ceylon/tests/state_machine/guess_number.py new file mode 100644 index 00000000..aee052f2 --- /dev/null +++ 
b/bindings/ceylon/tests/state_machine/guess_number.py @@ -0,0 +1,135 @@ +import asyncio +import random +import sys + +from statemachine import State +from statemachine import StateMachine + + +class GuessTheNumberMachine(StateMachine): + """ + Guess the number machine. + + This docstring exercises the SAME `GuessTheNumberMachine` in syncronous code. + + >>> sm = GuessTheNumberMachine(print, seed=103) + I'm thinking of a number between 1 and 5. Can you guess what it is? >>> + + >>> while not sm.current_state.final: + ... sm.send("guess", random.randint(1, 5)) + Your guess is 2... + Too low. Try again. >>> + Your guess is 1... + Too low. Try again. >>> + Your guess is 5... + Too high. Try again. >>> + Your guess is 1... + Too low. Try again. >>> + Your guess is 4... + Congratulations, you guessed the number in 5 guesses! + + """ + + start = State(initial=True) + low = State() + high = State() + won = State(final=True) + lose = State(final=True) + + guess = ( + lose.from_(low, high, cond="max_guesses_reached") + | won.from_(low, high, start, cond="guess_is_equal") + | low.from_(low, high, start, cond="guess_is_lower") + | high.from_(low, high, start, cond="guess_is_higher") + ) + + def __init__(self, writer, max_attempts=5, lower=1, higher=5, seed=42): + self.writer = writer + self.max_attempts = max_attempts + self.lower = lower + self.higher = higher + self.guesses = 0 + + # lets play a not so random game, or our tests will be crazy + random.seed(seed) + self.number = random.randint(self.lower, self.higher) + super().__init__() + + async def max_guesses_reached(self): + return self.guesses >= self.max_attempts + + async def before_guess(self, number): + self.guesses += 1 + self.writer(f"Your guess is {number}...") + + async def guess_is_lower(self, number): + return number < self.number + + async def guess_is_higher(self, number): + return number > self.number + + async def guess_is_equal(self, number): + return self.number == number + + async def on_enter_start(self): + self.writer( + f"I'm thinking of a number between {self.lower} and {self.higher}. " + f"Can you guess what it is? >>> " + ) + + async def on_enter_low(self): + self.writer("Too low. Try again. >>> ") + + async def on_enter_high(self): + self.writer("Too high. Try again. >>> ") + + async def on_enter_won(self): + self.writer(f"Congratulations, you guessed the number in {self.guesses} guesses!") + + async def on_enter_lose(self): + self.writer(f"Oh, no! You've spent all your {self.guesses} attempts!") + + +# This function will be used to connect the stdin and stdout to the asyncio event loop. 
+ + +async def connect_stdin_stdout(): + loop = asyncio.get_event_loop() + reader = asyncio.StreamReader() + protocol = asyncio.StreamReaderProtocol(reader) + await loop.connect_read_pipe(lambda: protocol, sys.stdin) + w_transport, w_protocol = await loop.connect_write_pipe( + asyncio.streams.FlowControlMixin, sys.stdout + ) + writer = asyncio.StreamWriter(w_transport, w_protocol, reader, loop) + return reader, writer + + +async def main_async(): + reader, writer = await connect_stdin_stdout() + sm = GuessTheNumberMachine( + lambda s: writer.write(b"\n" + s.encode("utf-8")), seed=random.randint(1, 1000) + ) + await sm.activate_initial_state() + while not sm.current_state.final: + res = await reader.read(100) + if not res: + break + await sm.send("guess", int(res)) + await writer.drain() + writer.close() + + +def main_sync(): + sm = GuessTheNumberMachine(print, seed=random.randint(1, 1000), higher=100) + sm.activate_initial_state() + while not sm.current_state.final: + res = sys.stdin.readline() + if not res: + break + sm.send("guess", int(res)) + + +if __name__ == '__main__': + main_sync() + # asyncio.run(main_async()) diff --git a/bindings/ceylon/tests/state_machine/order_state.py b/bindings/ceylon/tests/state_machine/order_state.py new file mode 100644 index 00000000..89e7943e --- /dev/null +++ b/bindings/ceylon/tests/state_machine/order_state.py @@ -0,0 +1,44 @@ +from statemachine import StateMachine, State + + +class OrderControl(StateMachine): + waiting_for_payment = State(initial=True) + processing = State() + shipping = State() + completed = State(final=True) + + add_to_order = waiting_for_payment.to(waiting_for_payment) + receive_payment = ( + waiting_for_payment.to(processing, cond="payments_enough") + | waiting_for_payment.to(waiting_for_payment, unless="payments_enough") + ) + process_order = processing.to(shipping, cond="payment_received") + ship_order = shipping.to(completed) + + def __init__(self): + self.order_total = 0 + self.payments = [] + self.payment_received = False + super(OrderControl, self).__init__() + + def payments_enough(self, amount): + return sum(self.payments) + amount >= self.order_total + + def before_add_to_order(self, amount): + self.order_total += amount + return self.order_total + + def before_receive_payment(self, amount): + self.payments.append(amount) + return self.payments + + def after_receive_payment(self): + self.payment_received = True + + def on_enter_waiting_for_payment(self): + self.payment_received = False + + +control = OrderControl() + +control.add_to_order(3) diff --git a/bindings/ceylon/tests/state_machine/state_sync.py b/bindings/ceylon/tests/state_machine/state_sync.py new file mode 100644 index 00000000..ab0ee44d --- /dev/null +++ b/bindings/ceylon/tests/state_machine/state_sync.py @@ -0,0 +1,25 @@ +import asyncio + +from statemachine import StateMachine, State + + +class AsyncStateMachine(StateMachine): + initial = State('Initial', initial=True) + final = State('Final', final=True) + + keep = initial.to.itself(internal=True) + advance = initial.to(final) + + async def on_advance(self): + return 42 + + +async def run_sm(): + sm = AsyncStateMachine() + result = await sm.advance() + print(f"Result is {result}") + print(sm.current_state) + + +if __name__ == '__main__': + asyncio.run(run_sm()) diff --git a/bindings/ceylon/tests/state_machine/trafic_light.py b/bindings/ceylon/tests/state_machine/trafic_light.py new file mode 100644 index 00000000..b89167a1 --- /dev/null +++ b/bindings/ceylon/tests/state_machine/trafic_light.py @@ -0,0 
+1,64 @@ +""" + +--------------------- +Traffic light machine +--------------------- + +This example demonstrates how to create a traffic light machine using the `statemachine` library. + +The state machine will run in a dedicated thread and will cycle through the states. + +""" + +import time +from threading import Event as ThreadingEvent +from threading import Thread + +from statemachine import State +from statemachine import StateMachine + + +class TrafficLightMachine(StateMachine): + "A traffic light machine" + + green = State(initial=True) + yellow = State() + red = State() + + cycle = green.to(yellow) | yellow.to(red) | red.to(green) + + def before_cycle(self, event: str, source: State, target: State): + print(f"Running {event} from {source.id} to {target.id}") + + +# %% +# Run in a dedicated thread + + +class Supervisor: + def __init__(self, sm: StateMachine, sm_event: str): + self.sm = sm + self.sm_event = sm_event + self.stop_event = ThreadingEvent() + + def run(self): + while not self.stop_event.is_set(): + self.sm.send(self.sm_event) + self.stop_event.wait(0.1) + + def stop(self): + self.stop_event.set() + + +def main(): + supervisor = Supervisor(TrafficLightMachine(), "cycle") + t = Thread(target=supervisor.run) + t.start() + + time.sleep(0.5) + supervisor.stop() + t.join() + + +if __name__ == "__main__": + main() diff --git a/bindings/ceylon/tests/state_machine/user_signup.py b/bindings/ceylon/tests/state_machine/user_signup.py new file mode 100644 index 00000000..88f1abfd --- /dev/null +++ b/bindings/ceylon/tests/state_machine/user_signup.py @@ -0,0 +1,113 @@ +from dataclasses import dataclass +from enum import Enum + +from statemachine import State +from statemachine import StateMachine +from statemachine.states import States + + +class UserStatus(str, Enum): + signup_incomplete = "SIGNUP_INCOMPLETE" + signup_complete = "SIGNUP_COMPLETE" + signup_rejected = "SIGNUP_REJECTED" + operational_enabled = "OPERATIONAL_ENABLED" + operational_disabled = "OPERATIONAL_DISABLED" + operational_rescinded = "OPERATIONAL_RESCINDED" + + +class UserExperience(str, Enum): + basic = "BASIC" + premium = "PREMIUM" + + +@dataclass +class User: + name: str + email: str + status: UserStatus = UserStatus.signup_incomplete + experience: UserExperience = UserExperience.basic + + verified: bool = False + + def __post_init__(self): + self._status_sm = UserStatusMachine( + self, state_field="status", listeners=[MachineChangeListenter()] + ) + self._status_sm.bind_events_to(self) + + self._experience_sm = UserExperienceMachine( + self, state_field="experience", listeners=[MachineChangeListenter()] + ) + self._experience_sm.bind_events_to(self) + + +class MachineChangeListenter: + def before_transition(self, event: str, state: State): + print(f"Before {event} in {state}") + + def on_enter_state(self, state: State, event: str): + print(f"Entering {state} from {event}") + + +class UserStatusMachine(StateMachine): + _states = States.from_enum( + UserStatus, + initial=UserStatus.signup_incomplete, + final=[ + UserStatus.operational_rescinded, + UserStatus.signup_rejected, + ], + ) + + signup = _states.signup_incomplete.to(_states.signup_complete) + reject = _states.signup_rejected.from_( + _states.signup_incomplete, + _states.signup_complete, + ) + enable = _states.signup_complete.to(_states.operational_enabled) + disable = _states.operational_enabled.to(_states.operational_disabled) + rescind = _states.operational_rescinded.from_( + _states.operational_enabled, + _states.operational_disabled, + ) + + def 
on_signup(self, token: str): + if token == "": + raise ValueError("Token is required") + self.model.verified = True + + +class UserExperienceMachine(StateMachine): + _states = States.from_enum( + UserExperience, + initial=UserExperience.basic, + ) + + upgrade = _states.basic.to(_states.premium) + downgrade = _states.premium.to(_states.basic) + + +def main(): # type: ignore[attr-defined] + # By binding the events to the User model, the events can be fired directly from the model + user = User(name="Frodo", email="frodo@lor.com") + + try: + # Trying to signup with an empty token should raise an exception + user.signup("") + except Exception as e: + print(e) + + assert user.verified is False + + user.signup("1234") + + assert user.status == UserStatus.signup_complete + assert user.verified is True + + print(user.experience) + user.upgrade() + print(user.experience) + + +if __name__ == "__main__": + main() diff --git a/bindings/ceylon/tests/test_agents.py b/bindings/ceylon/tests/test_agents.py new file mode 100644 index 00000000..60eb3ffb --- /dev/null +++ b/bindings/ceylon/tests/test_agents.py @@ -0,0 +1,110 @@ +import asyncio +import pickle + +from pydantic.dataclasses import dataclass + +from ceylon import AgentRunner +from ceylon.ceylon import AgentCore, MessageHandler, Processor, AgentDefinition +from ceylon.runner import RunnerInput + + +@dataclass +class GameBoard: + grid: list + + def __str__(self): + return str(self.grid) + + +def find_position(grid, item): + for row_idx, row in enumerate(grid): + for col_idx, cell in enumerate(row): + if cell == item: + return row_idx, col_idx + return None + + +class SimpleAgent(AgentCore, MessageHandler, Processor): + + def __init__(self, name): + super().__init__(definition=AgentDefinition( + name=name, + position="player", + instructions=[], + responsibilities=[] + ), on_message=self, processor=self) + self.goal = None + self.agent_position = None + + async def run(self, inputs: "bytes"): + agent_def = await self.definition() + input_request: RunnerInput = pickle.loads(inputs) + game_board: GameBoard = input_request.request + + if not self.goal or not self.agent_position: + self.goal = find_position(game_board.grid, 'G') + self.agent_position = find_position(game_board.grid, 'A') + + await self.move(game_board) + print(agent_def.name, game_board) + + await self.broadcast(pickle.dumps(game_board)) + + async def on_message(self, agent_id: "str", data: "bytes", time: "int"): + game_board: GameBoard = pickle.loads(data) + print("Game board", game_board) + + await self.move(game_board) + await self.broadcast(pickle.dumps(game_board)) + + async def move(self, game_board): + row, col = self.agent_position + goal_row, goal_col = self.goal + + # Determine the direction to move + if row < goal_row and game_board.grid[row + 1][col] != 1: + new_position = (row + 1, col) + elif row > goal_row and game_board.grid[row - 1][col] != 1: + new_position = (row - 1, col) + elif col < goal_col and game_board.grid[row][col + 1] != 1: + new_position = (row, col + 1) + elif col > goal_col and game_board.grid[row][col - 1] != 1: + new_position = (row, col - 1) + else: + new_position = self.agent_position # No move possible + + # Update the agent's position + await self.update_position(game_board, new_position) + + async def update_position(self, game_board, new_position): + old_row, old_col = self.agent_position + new_row, new_col = new_position + + game_board.grid[old_row][old_col] = 0 + game_board.grid[new_row][new_col] = 'A' + self.agent_position = new_position + + 
print(f"Agent {(await self.definition()).name} moved from ({old_row}, {old_col}) to ({new_row}, {new_col})") + + +async def main(): + worker = AgentRunner(workspace_name="ceylon-ai") + + worker.register_agent(SimpleAgent("agent_1")) + worker.register_agent(SimpleAgent("agent_2")) + + game_board = GameBoard( + grid=[ + [0, 0, 0, 0, 0], + [0, 1, 1, 1, 0], + [0, 0, 0, 1, 0], + [0, 1, 0, 0, 0], + ['A', 0, 0, 1, 'G'] + ] + ) + print(game_board) + await worker.run(game_board) + + +if __name__ == '__main__': + asyncio.run(main()) diff --git a/bindings/ceylon/tests/test_worker/__init__.py b/bindings/ceylon/tests/test_worker/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/bindings/ceylon/tests/test_worker/test.py b/bindings/ceylon/tests/test_worker/test.py new file mode 100644 index 00000000..a75d64f0 --- /dev/null +++ b/bindings/ceylon/tests/test_worker/test.py @@ -0,0 +1,37 @@ +import asyncio +import pickle + +from ceylon.ceylon import enable_log, uniffi_set_event_loop +from ceylon.workspace.admin import Admin +from ceylon.workspace.worker import Worker + + +async def main(): + admin = Admin( + name="admin", + port=8000 + ) + worker1 = Worker( + name="worker1", + admin_port=8000, + admin_peer="admin", + workspace_id="admin" + ) + worker2 = Worker( + name="worker2", + admin_port=8000, + admin_peer="admin", + workspace_id="admin" + ) + + await admin.run_admin(pickle.dumps({ + "title": "How to use AI for Machine Learning", + }), [ + worker1, + worker2 + ]) + + +if __name__ == '__main__': + enable_log("INFO") + asyncio.run(main()) diff --git a/bindings/ceylon/tests/test_worker/test_game_board.py b/bindings/ceylon/tests/test_worker/test_game_board.py new file mode 100644 index 00000000..122ac1cf --- /dev/null +++ b/bindings/ceylon/tests/test_worker/test_game_board.py @@ -0,0 +1,80 @@ +import random + + +# Define the roles and their abilities +class Role: + def __init__(self, name, ability): + self.name = name + self.ability = ability + + +class Player: + def __init__(self, name, role): + self.name = name + self.role = role + self.clues = [] + self.position = 'Foyer' + + def move(self, new_position): + self.position = new_position + print(f"{self.name} moves to the {new_position}") + + def find_clue(self, clue): + + self.clues.append(clue) + print(f"{self.name} found a clue: {clue}") + + +# Define the mansion layout and clues +rooms = ['Library', 'Lab', 'Attic', 'Kitchen', 'Garden'] +clues = { + 'Library': 'Ancient Book', + 'Lab': 'Strange Chemical', + 'Attic': 'Old Journal', + 'Kitchen': 'Hidden Key', + 'Garden': 'Mysterious Map' +} + +# Define the players +roles = [ + Role('Detective', 'Can ask detailed questions about clues'), + Role('Scientist', 'Can analyze clues for additional insights'), + Role('Historian', 'Knows the mansion\'s background'), + Role('Locksmith', 'Can open hidden rooms and safes'), + Role('Psychic', 'Can sense hidden items and passages'), + Role('Thief', 'Can retrieve items from locked areas') +] + +players = [ + Player('Alice', roles[0]), + Player('Bob', roles[1]), + Player('Charlie', roles[2]), + Player('Diana', roles[3]), + Player('Eve', roles[4]), + Player('Frank', roles[5]) +] + + +# Simulate a few turns +def simulate_game_turns(turns): + for turn in range(turns): + print(f"\nTurn {turn + 1}") + for player in players: + # Randomly move the player to a new room + new_room = random.choice(rooms) + player.move(new_room) + + # Check if there's a clue in the room + if new_room in clues: + player.find_clue(clues[new_room]) + # Remove the clue from the room 
(assuming it's found only once) + del clues[new_room] + + +# Run the simulation for 5 turns +simulate_game_turns(5) + +# Print the final state +print("\nFinal State:") +for player in players: + print(f"{player.name} ({player.role.name}) has clues: {player.clues}") diff --git a/libs/sangathika/Cargo.toml b/libs/sangathika/Cargo.toml deleted file mode 100644 index 0bf87f55..00000000 --- a/libs/sangathika/Cargo.toml +++ /dev/null @@ -1,16 +0,0 @@ -[package] -name = "sangathika" -version = "0.1.0" -edition = "2021" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -serde = { version = "1.0.203", features = ["derive"] } -sha2 = "0.11.0-pre.3" -hex = "0.4.3" -tokio = { version = "1.17.0", features = ["full"] } -time = "0.3.36" - -libp2p = { version = "0.53.2", features = ["tokio", "gossipsub", "mdns", "noise", "macros", "tcp", "yamux", "quic", "tls", "relay", "websocket", "dns"] } -serde_json = "1.0.117" diff --git a/libs/sangathika/src/blockchain.rs b/libs/sangathika/src/blockchain.rs deleted file mode 100644 index df188a6b..00000000 --- a/libs/sangathika/src/blockchain.rs +++ /dev/null @@ -1,97 +0,0 @@ -use std::time::{SystemTime, UNIX_EPOCH}; - -use hex; -use serde::{Deserialize, Serialize}; -use sha2::{Digest, Sha256}; - -type BlockData = Vec; -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct Block { - index: u64, - timestamp: u128, - previous_hash: String, - hash: String, - data: BlockData, - nonce: u64, -} - -impl Block { - fn new(index: u64, previous_hash: String, data: BlockData) -> Self { - let timestamp = SystemTime::now() - .duration_since(UNIX_EPOCH) - .expect("Time went backwards") - .as_millis(); - let mut block = Block { - index, - timestamp, - previous_hash, - hash: String::new(), - data, - nonce: 0, - }; - block.hash = block.calculate_hash(); - block - } - - fn calculate_hash(&self) -> String { - let mut hasher = Sha256::new(); - hasher.update(self.index.to_string()); - hasher.update(self.timestamp.to_string()); - hasher.update(&self.previous_hash); - hasher.update(&self.data); - hasher.update(self.nonce.to_string()); - let result = hasher.finalize(); - hex::encode(result) - } - - fn mine_block(&mut self, difficulty: usize) { - let target = "0".repeat(difficulty); - while &self.hash[..difficulty] != target { - self.nonce += 1; - self.hash = self.calculate_hash(); - } - } -} - -pub struct Blockchain { - blocks: Vec, - difficulty: usize, -} - -impl Blockchain { - pub(crate) fn new(difficulty: usize) -> Self { - let mut blockchain = Blockchain { - blocks: vec![Block::new(0, String::from("0"), String::from("Genesis Block").into_bytes())], - difficulty, - }; - blockchain.blocks[0].hash = blockchain.blocks[0].calculate_hash(); - blockchain - } - - pub(crate) fn add_block(&mut self, data: BlockData) { - let previous_block = &self.blocks[self.blocks.len() - 1]; - let mut new_block = Block::new(self.blocks.len() as u64, previous_block.hash.clone(), data); - new_block.mine_block(self.difficulty); - self.blocks.push(new_block); - } - - pub(crate) fn is_valid(&self) -> bool { - for i in 1..self.blocks.len() { - let current_block = &self.blocks[i]; - let previous_block = &self.blocks[i - 1]; - - if current_block.hash != current_block.calculate_hash() { - return false; - } - if current_block.previous_hash != previous_block.hash { - return false; - } - } - true - } - - pub fn get_last(&self) -> &Block { - self.blocks.last().unwrap() - } -} - diff --git a/libs/sangathika/src/main.rs b/libs/sangathika/src/main.rs 
deleted file mode 100644 index 5847c39b..00000000 --- a/libs/sangathika/src/main.rs +++ /dev/null @@ -1,29 +0,0 @@ -use tokio::sync::RwLock; -use crate::peer::P2P; - -mod blockchain; -mod peer; - -#[tokio::main] -async fn main() { - tokio::spawn(async move { - let mut p2p = P2P::new().await; - p2p.connect("/ip4/0.0.0.0/udp/0/quic-v1", "test_topic"); - p2p.run().await; - }).await.unwrap(); - - - // let blockchain1 = Mutex::new(Blockchain::new(4)); - // let interval = Duration::from_secs(10); - // - // tokio::spawn(async move { - // loop { - // let mut blockchain = blockchain1.lock().await; - // blockchain.add_block("Some new data".to_string().into_bytes()); - // println!("Mined a new block: {:?}", blockchain.get_last()); - // println!("Blockchain valid: {}", blockchain.is_valid()); - // - // sleep(interval).await; - // } - // }).await.unwrap(); -} diff --git a/libs/sangathika/src/peer.rs b/libs/sangathika/src/peer.rs deleted file mode 100644 index ab256092..00000000 --- a/libs/sangathika/src/peer.rs +++ /dev/null @@ -1,216 +0,0 @@ -use std::hash::{DefaultHasher, Hash, Hasher}; -use std::time::Duration; - -use libp2p::core::muxing::StreamMuxerBox; -use libp2p::core::transport::dummy::DummyTransport; -use libp2p::{ - futures::StreamExt, - gossipsub, mdns, noise, - swarm::{NetworkBehaviour, Swarm, SwarmEvent}, - tcp, tls, yamux, PeerId, SwarmBuilder, -}; -use serde::{Deserialize, Serialize}; -use tokio::io::{self, AsyncBufReadExt}; -use tokio::sync::Mutex; - -use crate::blockchain::{Block, Blockchain}; - -#[derive(Debug, Serialize, Deserialize, Clone)] -struct BlockchainMessage { - sender: String, - blocks: Vec, -} - -#[derive(NetworkBehaviour)] -struct BlockBehaviour { - gossipsub: gossipsub::Behaviour, - mdns: mdns::tokio::Behaviour, -} - -enum Event { - Gossipsub(gossipsub::Event), - Mdns(mdns::Event), -} - -impl BlockBehaviour { - fn new(key: &libp2p::identity::Keypair) -> Self { - let message_id_fn = |message: &gossipsub::Message| { - let mut s = DefaultHasher::new(); - message.data.hash(&mut s); - gossipsub::MessageId::from(s.finish().to_string()) - }; - - // Set a custom gossipsub configuration - let gossipsub_config = gossipsub::ConfigBuilder::default() - .history_length(10) - .history_gossip(10) - .heartbeat_interval(Duration::from_secs(1)) // This is set to aid debugging by not cluttering the log space - .validation_mode(gossipsub::ValidationMode::Strict) // This sets the kind of message validation. The default is Strict (enforce message signing) - .message_id_fn(message_id_fn) // content-address messages. No two messages of the same content will be propagated. - .build() - .map_err(|msg| io::Error::new(io::ErrorKind::Other, msg)) - .unwrap(); // Temporary hack because `build` does not return a proper `std::error::Error`. 
- - // build a gossipsub network behaviour - let gossipsub = gossipsub::Behaviour::new( - gossipsub::MessageAuthenticity::Signed(key.clone()), - gossipsub_config, - ) - .unwrap(); - - Self { - gossipsub, - mdns: mdns::tokio::Behaviour::new(mdns::Config::default(), key.public().to_peer_id()) - .unwrap(), - } - } -} - -pub struct P2P { - swarm: Swarm, - topics: Vec, - // topics: RwLock>, -} - -impl P2P { - pub async fn new() -> Self { - let swarm = SwarmBuilder::with_new_identity() - .with_tokio() - .with_tcp( - tcp::Config::default(), - noise::Config::new, - yamux::Config::default, - ) - .unwrap() - .with_quic() - .with_other_transport(|_key| DummyTransport::<(PeerId, StreamMuxerBox)>::new()) - .unwrap() - .with_dns() - .unwrap() - .with_websocket( - (tls::Config::new, noise::Config::new), - yamux::Config::default, - ) - .await - .unwrap() - .with_relay_client( - (tls::Config::new, noise::Config::new), - yamux::Config::default, - ) - .unwrap() - .with_behaviour(|_key, relay| Ok(BlockBehaviour::new(_key))) - .unwrap() - .with_swarm_config(|cfg| { - // Edit cfg here. - cfg.with_idle_connection_timeout(Duration::from_secs(240)) - }) - .build(); - - P2P { - swarm, - topics: vec!["test_topic".to_string()], - } - } - - pub fn connect(&mut self, url: &str, topic_str: &str) { - let topic = gossipsub::IdentTopic::new(topic_str); - match self.swarm.behaviour_mut().gossipsub.subscribe(&topic) { - Ok(_) => {} - Err(e) => {} - } - - self.swarm.listen_on(url.parse().unwrap()).unwrap(); - } - - pub async fn run(&mut self) { - let mut stdin = io::BufReader::new(io::stdin()).lines(); - let blockchain = Mutex::new(Blockchain::new(4)); - - loop { - tokio::select! { - line = stdin.next_line() => { - let line = line.unwrap(); - if let Some(line) = line { - let id = self.swarm.local_peer_id(); - let mut blockchain = blockchain.lock().await; - blockchain.add_block(line.to_string().into_bytes()); - let block = blockchain.get_last().clone(); - if blockchain.is_valid() { - let message: BlockchainMessage = BlockchainMessage { - blocks: vec![block], - sender: id.to_string().clone(), - }; - self.broadcast(message).await; - } - } - }, - event = self.swarm.select_next_some() => match event { - SwarmEvent::Behaviour(ev) => { - match ev{ - BlockBehaviourEvent::Gossipsub(ev) => { - match ev{ - gossipsub::Event::Message { propagation_source, message_id, message } => { - let block_message:BlockchainMessage = serde_json::from_slice(message.data.as_slice()).unwrap(); - let mut blockchain = blockchain.lock().await; - blockchain.add_block(message.data); - }, - gossipsub::Event::Subscribed { peer_id, topic } => { - println!("Subscribed to topic: {:?}", topic); - // self.subscribed_to_topic( topic.clone().into_string().as_str()).await; - }, - _ => { - println!("Unhandled gossipsub event: {:?}", ev); - }, - } - - } - BlockBehaviourEvent::Mdns(ev) => { - println!("Mdns event: {:?}", ev); - match ev{ - mdns::Event::Discovered(list) => { - println!("Discovered: {:?}", list); - for (peer_id, _) in list { - self.swarm.behaviour_mut().gossipsub.add_explicit_peer(&peer_id); - } - }, - - mdns::Event::Expired(list) => { - println!("Expired: {:?}", list); - for (peer_id, _) in list { - self.swarm.behaviour_mut().gossipsub.remove_explicit_peer(&peer_id); - } - }, - } - } - _ => { - println!("Unhandled event: {:?}", ev); - } - } - }, - _ => { - println!( "Unhandled event: {:?}", event); - }, - }, - } - } - } - - async fn broadcast(&mut self, message: BlockchainMessage) { - let topics = self.topics.clone(); - - for topic in topics.clone() { - 
let topic = gossipsub::IdentTopic::new(topic); - match self - .swarm - .behaviour_mut() - .gossipsub - .publish(topic, serde_json::to_vec(&message).unwrap().as_slice()) - { - Ok(_) => {} - Err(e) => { - println!("Error: {:?}", e); - } - } - } - } -} diff --git a/libs/sangedama/Cargo.toml b/libs/sangedama/Cargo.toml index 6340b1a1..3c2c70ee 100644 --- a/libs/sangedama/Cargo.toml +++ b/libs/sangedama/Cargo.toml @@ -15,7 +15,15 @@ serde_json = "1.0.117" tokio = { version = "1.37.0", features = ["full"] } serde = { version = "1.0.203", features = ["derive"] } dotenvy = "0.15.7" -libp2p = { version = "0.53.2", features = ["tokio", "gossipsub", "mdns", "noise", "macros", "tcp", "yamux", "quic", "tls", "relay", "websocket", "dns"] } +libp2p = { version = "0.53.2", features = [ + "tokio", + "gossipsub", "mdns", "noise", "macros", "tcp", "yamux", "quic", "tls", "relay", "websocket", "dns", + "autonat", + "identify", + "rendezvous", + "ping", + "request-response" +] } libp2p-gossipsub = "0.46.1" libp2p-noise = "0.44.0" libp2p-tls = "0.4.0" @@ -27,3 +35,4 @@ env_logger = "0.11.3" sha2 = "0.10.8" chrono = "0.4.38" +uuid = "1.10.0" \ No newline at end of file diff --git a/libs/sangedama/src/lib.rs b/libs/sangedama/src/lib.rs index 65592fd4..0fea5ce3 100644 --- a/libs/sangedama/src/lib.rs +++ b/libs/sangedama/src/lib.rs @@ -1,3 +1,3 @@ mod common; -pub mod node; \ No newline at end of file +pub mod peer; \ No newline at end of file diff --git a/libs/sangedama/src/main.rs b/libs/sangedama/src/main.rs new file mode 100644 index 00000000..89f364e2 --- /dev/null +++ b/libs/sangedama/src/main.rs @@ -0,0 +1,148 @@ +use std::net::Ipv4Addr; +use std::str::FromStr; +use libp2p::{Multiaddr, PeerId}; +use libp2p::multiaddr::Protocol; +use tokio::select; +use tracing::{info}; +use tracing_subscriber::fmt::format; +use uuid::Uuid; +use sangedama::peer::node::create_key; +use crate::peer::message::data::NodeMessage; +use crate::peer::node::{AdminPeer, AdminPeerConfig, get_peer_id, MemberPeer, MemberPeerConfig}; + +mod p2p; +mod peer; +#[tokio::main] +async fn main() { + let subscriber = tracing_subscriber::FmtSubscriber::new(); + // use that subscriber to process traces emitted after this point + tracing::subscriber::set_global_default(subscriber).unwrap(); + let workspace_id = "workspace-test".to_string(); + info!("Starting {}", workspace_id); + + let admin_port = 7845; + let admin_config = AdminPeerConfig::new(admin_port, workspace_id.clone()); + let admin_key = create_key(); + let admin_id_from_key = get_peer_id(&admin_key); + let (mut admin_peer, mut admin_listener) = AdminPeer::create(admin_config.clone(), admin_key).await; + let admin_id = admin_peer.id.clone(); + + + if admin_id.to_string() == admin_id_from_key.to_string() { + info!("Admin peer created with id: {}", admin_id); + } + + + let admin_emitter = admin_peer.emitter(); + let task_admin = tokio::task::spawn(async move { + admin_peer.run(None).await; + }); + + let task_admin_listener = tokio::spawn(async move { + loop { + select! 
{ + event = admin_listener.recv() => { + if event.is_some() { + let event = event.unwrap(); + match event{ + NodeMessage::Message{ data,created_by, ..} => { + info!("Admin listener Message {:?} from {:?}",String::from_utf8(data),created_by); + } + _ => { + info!("peer1 listener {:?}", event); + } + } + } + } + } + } + }); + + let task_run_admin = tokio::task::spawn(async move { + loop { + admin_emitter.send("Admin Send regards".to_string().as_bytes().to_vec()).await; + tokio::time::sleep(std::time::Duration::from_millis(1000)).await; + } + }); + + // Here we create localhost address to connect peer with admin + let peer_dial_address = Multiaddr::empty() + .with(Protocol::Ip4(Ipv4Addr::LOCALHOST)) + .with(Protocol::Udp(admin_port)) + .with(Protocol::QuicV1); + + let peer_1 = create_client(workspace_id.clone(), admin_id.clone(), admin_port, peer_dial_address.clone(), "peer1".to_string()).await; + let peer_2 = create_client(workspace_id.clone(), admin_id.clone(), admin_port, peer_dial_address.clone(), "peer2".to_string()).await; + let peer_3 = create_client(workspace_id.clone(), admin_id.clone(), admin_port, peer_dial_address.clone(), "peer3".to_string()).await; + let peer_4 = create_client(workspace_id.clone(), admin_id.clone(), admin_port, peer_dial_address.clone(), "peer4".to_string()).await; + + + task_admin.await.unwrap(); + task_admin_listener.await.unwrap(); + task_run_admin.await.unwrap(); + + peer_1.await.unwrap(); + peer_2.await.unwrap(); + peer_3.await.unwrap(); + peer_4.await.unwrap(); +} + +async fn create_client(workspace_id: String, admin_id: String, admin_port: u16, peer_dial_address: Multiaddr, name: String) -> tokio::task::JoinHandle<()> { + let member_key = create_key(); + let member_id_from_key = get_peer_id(&member_key); + let (mut peer2, mut peer2_listener) = MemberPeer::create(MemberPeerConfig { + name: name.clone(), + workspace_id: workspace_id.clone(), + admin_peer: PeerId::from_str(&admin_id).unwrap(), + rendezvous_point_address: peer_dial_address.clone(), + }, member_key).await; + + + let peer2_emitter = peer2.emitter(); + let peer2_id = peer2.id.clone(); + + if member_id_from_key.to_string() == peer2_id.to_string() { + info!("{} {} created", name.clone(), peer2_id); + } + + let task_peer_2 = tokio::task::spawn(async move { + peer2.run().await; + }); + + let name_clone = name.clone(); + let task_peer_2_listener = tokio::spawn(async move { + loop { + select! 
{ + event = peer2_listener.recv() => { + if event.is_some() { + let event = event.unwrap(); + match event{ + NodeMessage::Message{ data,created_by, ..} => { + info!("{} {} listener Message {:?} from {:?}",name.clone(),peer2_id, String::from_utf8(data),created_by); + } + _ => { + info!("{} listener {:?}",name.clone(), event); + } + } + } + } + } + } + }); + + let task_run_peer_2 = tokio::task::spawn(async move { + loop { + peer2_emitter.send( + format!("{} Send regards", name_clone.clone()).as_bytes().to_vec() + ).await.expect("TODO: panic message"); + tokio::time::sleep(std::time::Duration::from_millis(3000)).await; + } + }); + + + tokio::spawn(async { + task_peer_2.await.unwrap(); + task_peer_2_listener.await.unwrap(); + task_run_peer_2.await.unwrap(); + }) +} \ No newline at end of file diff --git a/libs/sangedama/src/node/blockchain_node.rs b/libs/sangedama/src/node/blockchain_node.rs deleted file mode 100644 index c2285a52..00000000 --- a/libs/sangedama/src/node/blockchain_node.rs +++ /dev/null @@ -1,91 +0,0 @@ -use sha2::{Sha256, Digest}; -use chrono::Utc; -use serde::{Serialize, Deserialize}; - -#[derive(Debug, Clone, Serialize, Deserialize)] -struct Block { - index: u64, - timestamp: i64, - data: Vec, - previous_hash: String, - hash: String, -} - -#[derive(Debug)] -struct Blockchain { - chain: Vec, -} - -impl Block { - fn new(index: u64, data: Vec, previous_hash: String) -> Self { - let timestamp = Utc::now().timestamp(); - let mut block = Block { - index, - timestamp, - data, - previous_hash, - hash: String::new(), - }; - block.hash = block.calculate_hash(); - block - } - - fn calculate_hash(&self) -> String { - let mut hasher = Sha256::new(); - hasher.update(format!("{}{}{:?}{}", self.index, self.timestamp, self.data, self.previous_hash)); - format!("{:x}", hasher.finalize()) - } -} - -impl Blockchain { - fn new() -> Self { - let genesis_block = Block::new(0, String::from("Genesis Block").as_bytes().to_vec(), String::from("0")); - Blockchain { - chain: vec![genesis_block], - } - } - - fn add_block(&mut self, data: Vec) { - let previous_block = self.chain.last().unwrap(); - let new_block = Block::new( - previous_block.index + 1, - data, - previous_block.hash.clone(), - ); - self.chain.push(new_block); - } - - fn is_chain_valid(&self) -> bool { - for i in 1..self.chain.len() { - let current_block = &self.chain[i]; - let previous_block = &self.chain[i - 1]; - - if current_block.hash != current_block.calculate_hash() { - return false; - } - - if current_block.previous_hash != previous_block.hash { - return false; - } - } - true - } -} - -#[cfg(test)] -mod tests { - use super::*; - - - #[test] - fn test_block() { - let mut blockchain = Blockchain::new(); - - blockchain.add_block(String::from("First Block").as_bytes().to_vec()); - blockchain.add_block(String::from("Second Block").as_bytes().to_vec()); - blockchain.add_block(String::from("Third Block").as_bytes().to_vec()); - - println!("{:#?}", blockchain); - println!("Is blockchain valid? 
{}", blockchain.is_chain_valid()); - } -} \ No newline at end of file diff --git a/libs/sangedama/src/node/message.rs b/libs/sangedama/src/node/message.rs deleted file mode 100644 index 09071459..00000000 --- a/libs/sangedama/src/node/message.rs +++ /dev/null @@ -1,90 +0,0 @@ -use std::time::SystemTime; -use serde::{Deserialize, Serialize}; -use serde_json::json; - -pub enum EventType { - OnMessage, - OnSubscribe, - OnUnsubscribe, - OnListen, - OnExpired, - OnDiscovered, - OnConnectionClosed, - OnConnectionEstablished, -} - -impl EventType { - fn as_str(&self) -> &'static str { - match self { - EventType::OnMessage => "OnMessage", - EventType::OnSubscribe => "OnSubscribe", - EventType::OnUnsubscribe => "OnUnsubscribe", - EventType::OnListen => "OnListen", - EventType::OnExpired => "OnExpired", - EventType::OnDiscovered => "OnDiscovered", - EventType::OnConnectionClosed => "OnConnectionClosed", - EventType::OnConnectionEstablished => "OnConnectionEstablished", - } - } -} - -#[derive(Deserialize, Serialize, Debug, PartialEq, Eq)] -pub enum MessageType { - Message, - Event, -} - -#[derive(Deserialize, Serialize, Debug)] -pub struct NodeMessage { - pub data: Vec, - pub message: String, - pub time: u64, - pub originator: String, - pub originator_id: String, - pub r#type: MessageType, -} - -impl NodeMessage { - fn new( - originator: String, - originator_id: String, - message: String, - data: Vec, - message_type: MessageType, - ) -> Self { - Self { - data, - time: SystemTime::now() - .duration_since(SystemTime::UNIX_EPOCH) - .unwrap() - .as_millis() as u64, - originator, - originator_id, - r#type: message_type, - message, - } - } - pub(crate) fn event(originator: String, event: EventType) -> Self { - Self::new( - originator, - "SELF".to_string(), - event.as_str().to_string(), - vec![], - MessageType::Event, - ) - } - - pub fn data(from: String, originator_id: String, data: Vec) -> Self { - Self::new( - from, - originator_id, - "DATA-MESSAGE".to_string(), - data, - MessageType::Message, - ) - } - - pub(crate) fn to_json(&self) -> String { - json!(self).to_string() - } -} \ No newline at end of file diff --git a/libs/sangedama/src/node/mod.rs b/libs/sangedama/src/node/mod.rs deleted file mode 100644 index a259254c..00000000 --- a/libs/sangedama/src/node/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -pub mod node; -pub mod message; -mod blockchain_node; \ No newline at end of file diff --git a/libs/sangedama/src/node/node.rs b/libs/sangedama/src/node/node.rs deleted file mode 100644 index 61010bf1..00000000 --- a/libs/sangedama/src/node/node.rs +++ /dev/null @@ -1,370 +0,0 @@ -use std::hash::{DefaultHasher, Hash, Hasher}; -use std::time::{Duration}; - -use libp2p::{futures::StreamExt, gossipsub, mdns, swarm::{NetworkBehaviour, Swarm, SwarmEvent}, SwarmBuilder, tcp, noise, yamux, PeerId, tls}; -use libp2p::core::muxing::StreamMuxerBox; -use libp2p::core::transport::dummy::DummyTransport; -use libp2p_gossipsub::{MessageId, PublishError}; -use log::{debug, error, info}; - - -use tokio::sync::mpsc; -use tokio::{io, select, signal}; -use crate::node::message::{EventType, NodeMessage}; - -// We create a custom network behaviour that combines Gossipsub and Mdns. 
-#[derive(NetworkBehaviour)] -#[behaviour(to_swarm = "Event")] -struct NodeBehaviour { - gossipsub: gossipsub::Behaviour, - mdns: mdns::tokio::Behaviour, -} - -enum Event { - Gossipsub(gossipsub::Event), - Mdns(mdns::Event), -} - -impl From for Event { - fn from(event: gossipsub::Event) -> Self { - Event::Gossipsub(event) - } -} - -impl From for Event { - fn from(event: mdns::Event) -> Self { - Event::Mdns(event) - } -} - -pub struct Node { - name: String, - swarm: Swarm, - subscribed_topics: Vec, - - in_rx: mpsc::Receiver, - out_tx: mpsc::Sender, - pub id: String, -} - -impl Node { - pub fn connect(&mut self, url: &str, topic_str: &str) { - info!("Connecting to {} with topic {}", url, topic_str); - let topic = gossipsub::IdentTopic::new(topic_str); - match self.swarm - .behaviour_mut() - .gossipsub - .subscribe(&topic) { - Ok(_) => { - // self.subscribed_topics.push(topic_str.to_string()); - // debug!("{:?} Subscribed to topic {:?}", self.name, topic_str); - } - Err(e) => { - error!("{:?} Failed to subscribe to topic {:?}", self.name, e); - } - } - - self.swarm.listen_on(url.parse().unwrap()).unwrap(); - } - - pub fn broadcast(&mut self, message: NodeMessage) -> Result, PublishError> { - let mut message_ids = vec![]; - debug!("Broadcasting message: {:?}", self.subscribed_topics); - for topic in self.subscribed_topics.clone() { - let topic = gossipsub::IdentTopic::new(topic); - - match self - .swarm - .behaviour_mut() - .gossipsub - .publish(topic, message.to_json().as_bytes()) - { - Ok(id) => { - debug!("{:?} Broadcasted message: {:?}", self.name, id); - message_ids.push(id); - } - Err(e) => { - error!("{:?} Failed to broadcast message: {:?}", self.name, e); - return Err(e); - } - } - } - Ok(message_ids) - } - - async fn pass_message_to_node(&mut self, message: NodeMessage) { - match self.out_tx.clone().send(message).await { - Ok(_) => {} - Err(e) => { - error!("{:?} Failed to send message: {:?}", self.name, e.to_string()); - } - }; - } - - pub async fn stop(&mut self) { - for t in self.subscribed_topics.clone() { - self.swarm - .behaviour_mut() - .gossipsub - .unsubscribe(&gossipsub::IdentTopic::new(&t)); - } - } - - pub async fn run(mut self) { - loop { - select! { - message = self.in_rx.recv() => match message { - Some(message) => { - debug!("{:?} Received To Broadcast", self.name); - match self.broadcast(message){ - Ok(message_ids) => { - debug!("{:?} Broad casted message: {:?}", self.name, message_ids); - } - Err(e) => { - error!("{:?} Failed to broadcast message: {:?}", self.name, e); - } - }; - } - None => { - debug!("{:?} Received: None", self.name); - } - } , - - event = self.swarm.select_next_some() => match event { - SwarmEvent::NewListenAddr { address, .. } => { - debug!("{:?} NewListenAddr {:?}", self.name, address); - self.pass_message_to_node(NodeMessage::event(self.swarm.local_peer_id().to_string(),EventType::OnListen,)).await - - }, - SwarmEvent::ConnectionEstablished { peer_id, .. } => { - debug!("{:?} ConnectionEstablished {:?}", self.id, peer_id); - self.pass_message_to_node(NodeMessage::event(self.swarm.local_peer_id().to_string(),EventType::OnConnectionEstablished,)).await - }, - SwarmEvent::ConnectionClosed { peer_id, cause ,.. 
} => { - debug!("{:?} ConnectionClosed {:?} {:?}", self.id, peer_id, cause); - self.pass_message_to_node(NodeMessage::event(self.swarm.local_peer_id().to_string(),EventType::OnConnectionClosed ,)).await - }, - - SwarmEvent::Behaviour(Event::Gossipsub(event)) => { - debug!("GOSSIP {:?} {:?}", self.name, event); - - match event { - gossipsub::Event::Message { propagation_source, message_id, message } => { - debug!("{:?} Received message '{:?}' from {:?} on {:?}", self.name, String::from_utf8_lossy(&message.data), propagation_source, message_id); - let msg = serde_json::from_slice(message.data.as_slice()).unwrap(); - self.pass_message_to_node(msg).await - }, - - gossipsub::Event::Subscribed { peer_id, topic } => { - debug!("{:?} Subscribed to topic {:?}", self.name, topic.clone().into_string()); - self.subscribed_topics.push(topic.into_string()); - self.pass_message_to_node(NodeMessage::event( peer_id.to_string(),EventType::OnSubscribe,)).await - }, - - _ => { - debug!( "{:?}gossip WILD CARD {:?}", self.name, event); - } - } - }, - - SwarmEvent::Behaviour(Event::Mdns(event)) => { - debug!("MDNS {:?} {:?}", self.name, event); - - match event { - mdns::Event::Discovered(list) => { - for (peer_id, _) in list { - self.swarm.behaviour_mut().gossipsub.add_explicit_peer(&peer_id); - } - self.pass_message_to_node(NodeMessage::event( self.swarm.local_peer_id().to_string(),EventType::OnDiscovered,)).await - }, - - mdns::Event::Expired(list) => { - for (peer_id, _) in list { - self.swarm.behaviour_mut().gossipsub.remove_explicit_peer(&peer_id); - } - self.pass_message_to_node(NodeMessage::event( self.swarm.local_peer_id().to_string(),EventType::OnExpired,)).await - }, - } - }, - _ => { - debug!( "WILD CARD"); - }, // Wildcard pattern to cover all other cases - }, - - _= signal::ctrl_c() => { - debug!("Agent {:?} received exit signal", self.name); - // Perform any necessary cleanup here - self.stop().await; - break; - }, - } - } - } -} - - -impl NodeBehaviour { - fn new(key: &libp2p::identity::Keypair) -> Self { - let message_id_fn = |message: &gossipsub::Message| { - let mut s = DefaultHasher::new(); - message.data.hash(&mut s); - gossipsub::MessageId::from(s.finish().to_string()) - }; - - // Set a custom gossipsub configuration - let gossipsub_config = gossipsub::ConfigBuilder::default() - .history_length(10) - .history_gossip(10) - .heartbeat_interval(Duration::from_secs(1)) // This is set to aid debugging by not cluttering the log space - .validation_mode(gossipsub::ValidationMode::Strict) // This sets the kind of message validation. The default is Strict (enforce message signing) - .message_id_fn(message_id_fn) // content-address messages. No two messages of the same content will be propagated. - .build() - .map_err(|msg| io::Error::new(io::ErrorKind::Other, msg)) - .unwrap(); // Temporary hack because `build` does not return a proper `std::error::Error`. 
- - // build a gossipsub network behaviour - let gossipsub = gossipsub::Behaviour::new( - gossipsub::MessageAuthenticity::Signed(key.clone()), - gossipsub_config, - ).unwrap(); - - Self { - gossipsub, - mdns: mdns::tokio::Behaviour::new(mdns::Config::default(), key.public().to_peer_id()) - .unwrap(), - } - } -} - -pub async fn create_node( - name: String, - in_rx: mpsc::Receiver, -) -> (Node, mpsc::Receiver) { - let swarm = SwarmBuilder::with_new_identity() - .with_tokio() - .with_tcp( - tcp::Config::default(), - noise::Config::new, - yamux::Config::default, - ).unwrap() - .with_quic() - .with_other_transport(|_key| DummyTransport::<(PeerId, StreamMuxerBox)>::new()) - .unwrap() - .with_dns() - .unwrap() - .with_websocket( - (tls::Config::new, noise::Config::new), - yamux::Config::default, - ) - .await - .unwrap() - .with_relay_client( - (tls::Config::new, noise::Config::new), - yamux::Config::default, - ) - .unwrap() - .with_behaviour(|_key, relay| { - Ok(NodeBehaviour::new(_key)) - }) - - .unwrap() - .with_swarm_config(|cfg| { - // Edit cfg here. - cfg - .with_idle_connection_timeout(Duration::from_secs(240)) - }) - .build(); - - let (out_tx, _rx) = mpsc::channel(100); - - ( - Node { - name, - id: swarm.local_peer_id().to_string(), - swarm, - subscribed_topics: Vec::new(), - in_rx, - out_tx, - }, - _rx, - ) -} - -// Create test -#[cfg(test)] -mod tests { - use log::{debug, info, trace, warn}; - use serde_json::json; - - use crate::node::node::{create_node, NodeMessage}; - - - #[tokio::test] - async fn test_ping() { - env_logger::init(); - let port_id = 0; - let topic = "test_topic"; - - let url = format!("/ip4/0.0.0.0/udp/{}/quic-v1", port_id); - - let (tx_0, rx_0) = tokio::sync::mpsc::channel::(100); - let (tx_1, rx_1) = tokio::sync::mpsc::channel::(100); - - let (mut node_0, mut rx_o_0) = create_node("node_0".to_string(), rx_0).await; - let (mut node_1, mut rx_o_1) = create_node("node_1".to_string(), rx_1).await; - - let node_0_id = node_0.id.clone(); - let node_1_id = node_1.id.clone(); - - // let runtime = tokio::runtime::Builder::new_current_thread() - // .enable_all() - // .build() - // .unwrap(); - - tokio::spawn(async move { - while let Some(message_data) = rx_o_0.recv().await { - debug!("Node_0 Received: {:?}", message_data); - let msg = NodeMessage::data( - "node_0".to_string(), - node_0_id.clone(), - json!({ - "data": format!("Hi from Node_0: {}", message_data.message).as_str(), - }) - .to_string() - .as_bytes() - .to_vec(), - ); - tx_0.send(msg).await.unwrap(); - tokio::time::sleep(std::time::Duration::from_millis(3000)).await; - } - }); - - tokio::spawn(async move { - while let Some(message_data) = rx_o_1.recv().await { - debug!("Node_1 Received: {:?}", message_data); - let msg = NodeMessage::data( - "node_1".to_string(), - node_1_id.clone(), - json!({ - "data": format!("Hi from Node_1: {}", message_data.message).as_str(), - }) - .to_string() - .as_bytes() - .to_vec(), - ); - tx_1.send(msg).await.unwrap(); - tokio::time::sleep(std::time::Duration::from_millis(3000)).await; - } - }); - let url_ = url.clone(); - tokio::spawn(async move { - node_0.connect(url_.clone().as_str(), topic); - node_0.run().await; - }); - let url_ = url.clone(); - tokio::time::sleep(std::time::Duration::from_millis(10000)).await; - - node_1.connect(url_.clone().as_str(), topic); - node_1.run().await; - } -} diff --git a/libs/sangedama/src/peer.rs b/libs/sangedama/src/peer.rs new file mode 100644 index 00000000..5e155ffa --- /dev/null +++ b/libs/sangedama/src/peer.rs @@ -0,0 +1,4 @@ +mod behaviour; +mod 
peer_swarm;
+pub mod node;
+pub mod message;
\ No newline at end of file
diff --git a/libs/sangedama/src/peer/behaviour.rs b/libs/sangedama/src/peer/behaviour.rs
new file mode 100644
index 00000000..a236d761
--- /dev/null
+++ b/libs/sangedama/src/peer/behaviour.rs
@@ -0,0 +1,6 @@
+mod server;
+mod client;
+mod base;
+pub use base::PeerBehaviour;
+pub use server::{PeerAdminBehaviour, PeerAdminEvent};
+pub use client::{ClientPeerBehaviour, ClientPeerEvent};
\ No newline at end of file
diff --git a/libs/sangedama/src/peer/behaviour/base.rs b/libs/sangedama/src/peer/behaviour/base.rs
new file mode 100644
index 00000000..2fbdb7d6
--- /dev/null
+++ b/libs/sangedama/src/peer/behaviour/base.rs
@@ -0,0 +1,31 @@
+use std::hash::{DefaultHasher, Hash, Hasher};
+use std::time::Duration;
+use libp2p::gossipsub;
+use libp2p::swarm::NetworkBehaviour;
+use libp2p_gossipsub::Config;
+use tokio::io;
+
+pub trait PeerBehaviour
+where
+    Self: NetworkBehaviour,
+{
+    fn new(local_public_key: libp2p::identity::Keypair) -> Self;
+}
+
+pub fn message_id_fn(message: &gossipsub::Message) -> gossipsub::MessageId {
+    let mut s = DefaultHasher::new();
+    message.data.hash(&mut s);
+    gossipsub::MessageId::from(s.finish().to_string())
+}
+
+pub fn create_gossip_sub_config() -> Config {
+    gossipsub::ConfigBuilder::default()
+        .history_length(10)
+        .history_gossip(10)
+        .heartbeat_interval(Duration::from_secs(1)) // This is set to aid debugging by not cluttering the log space
+        .validation_mode(gossipsub::ValidationMode::Strict) // This sets the kind of message validation. The default is Strict (enforce message signing)
+        .message_id_fn(message_id_fn) // content-address messages. No two messages of the same content will be propagated.
+        .build()
+        .map_err(|msg| io::Error::new(io::ErrorKind::Other, msg))
+        .unwrap()
+}
\ No newline at end of file
diff --git a/libs/sangedama/src/peer/behaviour/client.rs b/libs/sangedama/src/peer/behaviour/client.rs
new file mode 100644
index 00000000..73e1ae28
--- /dev/null
+++ b/libs/sangedama/src/peer/behaviour/client.rs
@@ -0,0 +1,77 @@
+use std::time::Duration;
+
+use libp2p::{gossipsub, identify, identity, mdns, PeerId, ping, rendezvous};
+use libp2p::swarm::{NetworkBehaviour, SwarmEvent};
+use tracing::info;
+use crate::peer::behaviour::base::create_gossip_sub_config;
+use crate::peer::behaviour::PeerBehaviour;
+
+// We create a custom network behaviour that combines Gossipsub and Mdns.
+#[derive(NetworkBehaviour)]
+#[behaviour(to_swarm = "ClientPeerEvent")]
+pub struct ClientPeerBehaviour {
+    pub identify: identify::Behaviour,
+    pub rendezvous: rendezvous::client::Behaviour,
+    pub ping: ping::Behaviour,
+    pub gossip_sub: gossipsub::Behaviour,
+}
+
+#[derive(Debug)]
+pub enum ClientPeerEvent {
+    GossipSub(gossipsub::Event),
+    Mdns(mdns::Event),
+    Ping(ping::Event),
+    Identify(identify::Event),
+    Rendezvous(rendezvous::client::Event),
+}
+
+impl From<gossipsub::Event> for ClientPeerEvent {
+    fn from(event: gossipsub::Event) -> Self {
+        ClientPeerEvent::GossipSub(event)
+    }
+}
+
+impl From<mdns::Event> for ClientPeerEvent {
+    fn from(event: mdns::Event) -> Self {
+        ClientPeerEvent::Mdns(event)
+    }
+}
+
+impl From<ping::Event> for ClientPeerEvent {
+    fn from(event: ping::Event) -> Self {
+        ClientPeerEvent::Ping(event)
+    }
+}
+
+impl From<rendezvous::client::Event> for ClientPeerEvent {
+    fn from(event: rendezvous::client::Event) -> Self {
+        ClientPeerEvent::Rendezvous(event)
+    }
+}
+impl From<identify::Event> for ClientPeerEvent {
+    fn from(event: identify::Event) -> Self {
+        ClientPeerEvent::Identify(event)
+    }
+}
+
+
+impl PeerBehaviour for ClientPeerBehaviour {
+    fn new(local_public_key: identity::Keypair) -> Self {
+        // Set a custom gossip_sub_config configuration
+        let gossip_sub_config = create_gossip_sub_config();
+        let gossip_sub = gossipsub::Behaviour::new(
+            gossipsub::MessageAuthenticity::Signed(local_public_key.clone()),
+            gossip_sub_config,
+        ).unwrap();
+
+        Self {
+            gossip_sub,
+            identify: identify::Behaviour::new(identify::Config::new(
+                "/CEYLON-AI-IDENTITY/0.0.1".to_string(),
+                local_public_key.public(),
+            )),
+            rendezvous: rendezvous::client::Behaviour::new(local_public_key.clone()),
+            ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(10))),
+        }
+    }
+}
diff --git a/libs/sangedama/src/peer/behaviour/server.rs b/libs/sangedama/src/peer/behaviour/server.rs
new file mode 100644
index 00000000..0b99999b
--- /dev/null
+++ b/libs/sangedama/src/peer/behaviour/server.rs
@@ -0,0 +1,70 @@
+use std::time::Duration;
+
+use libp2p::{gossipsub, identify, ping, rendezvous};
+use libp2p::swarm::NetworkBehaviour;
+use tracing::info;
+use crate::peer::behaviour::base::create_gossip_sub_config;
+use crate::peer::behaviour::PeerBehaviour;
+
+// We create a custom network behaviour that combines Gossipsub and Mdns.
+#[derive(NetworkBehaviour)]
+#[behaviour(to_swarm = "PeerAdminEvent")]
+pub struct PeerAdminBehaviour {
+    pub rendezvous: rendezvous::server::Behaviour,
+    pub ping: ping::Behaviour,
+    pub identify: identify::Behaviour,
+    pub gossip_sub: gossipsub::Behaviour,
+}
+
+#[derive(Debug)]
+pub enum PeerAdminEvent {
+    Rendezvous(rendezvous::server::Event),
+    Ping(ping::Event),
+    Identify(identify::Event),
+    GossipSub(gossipsub::Event),
+}
+
+
+impl From<gossipsub::Event> for PeerAdminEvent {
+    fn from(event: gossipsub::Event) -> Self {
+        PeerAdminEvent::GossipSub(event)
+    }
+}
+
+impl From<ping::Event> for PeerAdminEvent {
+    fn from(event: ping::Event) -> Self {
+        PeerAdminEvent::Ping(event)
+    }
+}
+
+impl From<rendezvous::server::Event> for PeerAdminEvent {
+    fn from(event: rendezvous::server::Event) -> Self {
+        PeerAdminEvent::Rendezvous(event)
+    }
+}
+impl From<identify::Event> for PeerAdminEvent {
+    fn from(event: identify::Event) -> Self {
+        PeerAdminEvent::Identify(event)
+    }
+}
+
+impl PeerBehaviour for PeerAdminBehaviour {
+    fn new(local_public_key: libp2p::identity::Keypair) -> Self {
+        let rendezvous_server = rendezvous::server::Behaviour::new(rendezvous::server::Config::default());
+        let gossip_sub_config = create_gossip_sub_config();
+        let gossip_sub = gossipsub::Behaviour::new(
+            gossipsub::MessageAuthenticity::Signed(local_public_key.clone()),
+            gossip_sub_config,
+        ).unwrap();
+
+        Self {
+            gossip_sub,
+            rendezvous: rendezvous_server,
+            ping: ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(10))),
+            identify: identify::Behaviour::new(identify::Config::new(
+                "/CEYLON-AI-IDENTITY/0.0.1".to_string(),
+                local_public_key.public(),
+            )),
+        }
+    }
+}
diff --git a/libs/sangedama/src/peer/message.rs b/libs/sangedama/src/peer/message.rs
new file mode 100644
index 00000000..12e35bbf
--- /dev/null
+++ b/libs/sangedama/src/peer/message.rs
@@ -0,0 +1 @@
+pub mod data;
\ No newline at end of file
diff --git a/libs/sangedama/src/peer/message/data.rs b/libs/sangedama/src/peer/message/data.rs
new file mode 100644
index 00000000..b3ee98bd
--- /dev/null
+++ b/libs/sangedama/src/peer/message/data.rs
@@ -0,0 +1,41 @@
+use serde::{Deserialize, Serialize};
+use serde_json::json;
+
+#[derive(Debug, Serialize, Deserialize)]
+pub enum EventType {
+    Subscribe {
+        topic: String,
+        peer_id: String,
+    },
+    Unsubscribe {
+        topic: String,
+        peer_id: String,
+    },
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+pub enum NodeMessage {
+    Event {
+        time: u64,
+        created_by: String,
+        event: EventType,
+    },
+    Message {
+        time: u64,
+        created_by: String,
+        data: Vec<u8>,
+    },
+}
+
+impl NodeMessage {
+    pub fn from_bytes(bytes: Vec<u8>) -> Self {
+        serde_json::from_slice(&bytes).unwrap()
+    }
+    pub fn to_json(&self) -> String {
+        json!(self).to_string()
+    }
+
+    pub fn to_bytes(&self) -> Vec<u8> {
+        serde_json::to_vec(self).unwrap()
+    }
+}
\ No newline at end of file
diff --git a/libs/sangedama/src/peer/node.rs b/libs/sangedama/src/peer/node.rs
new file mode 100644
index 00000000..0969b36e
--- /dev/null
+++ b/libs/sangedama/src/peer/node.rs
@@ -0,0 +1,20 @@
+mod admin;
+mod member;
+mod peer_builder;
+
+pub use member::{
+    MemberPeer,
+    MemberPeerConfig,
+};
+pub use admin::{
+    AdminPeer,
+    AdminPeerConfig,
+};
+
+pub use peer_builder::{
+    create_key,
+    get_peer_id,
+    create_key_from_bytes
+};
+
+
diff --git a/libs/sangedama/src/peer/node/admin.rs b/libs/sangedama/src/peer/node/admin.rs
new file mode 100644
index 00000000..0394c3f9
--- /dev/null
+++ b/libs/sangedama/src/peer/node/admin.rs
@@ -0,0 +1,189 @@
+use std::collections::HashMap;
+use std::net::Ipv4Addr;
+use crate::peer::behaviour::{PeerAdminBehaviour, PeerAdminEvent};
+use crate::peer::peer_swarm::create_swarm;
+use futures::StreamExt;
+use libp2p::swarm::SwarmEvent;
+use libp2p::{gossipsub, identity, Multiaddr, PeerId, rendezvous, Swarm};
+use libp2p::multiaddr::Protocol;
+use libp2p_gossipsub::TopicHash;
+use tokio::select;
+use tracing::{debug, error, info};
+use crate::peer::message::data::{EventType, NodeMessage};
+
+#[derive(Default, Clone)]
+pub struct AdminPeerConfig {
+    pub workspace_id: String,
+    pub listen_port: Option<u16>,
+}
+
+impl AdminPeerConfig {
+    pub fn new(listen_port: u16, workspace_id: String) -> Self {
+        Self { listen_port: Some(listen_port), workspace_id }
+    }
+
+    pub fn get_listen_address(&self) -> Multiaddr {
+        Multiaddr::empty()
+            .with(Protocol::Ip4(Ipv4Addr::UNSPECIFIED))
+            .with(Protocol::Udp(self.listen_port.unwrap_or(0)))
+            .with(Protocol::QuicV1)
+    }
+}
+
+pub struct AdminPeer {
+    pub id: String,
+    swarm: Swarm<PeerAdminBehaviour>,
+    pub config: AdminPeerConfig,
+
+    connected_peers: HashMap<TopicHash, Vec<PeerId>>,
+
+    outside_tx: tokio::sync::mpsc::Sender<NodeMessage>,
+
+    inside_rx: tokio::sync::mpsc::Receiver<Vec<u8>>,
+    inside_tx: tokio::sync::mpsc::Sender<Vec<u8>>,
+}
+
+impl AdminPeer {
+    pub async fn create(config: AdminPeerConfig, key: identity::Keypair) -> (Self, tokio::sync::mpsc::Receiver<NodeMessage>) {
+        let swarm = create_swarm::<PeerAdminBehaviour>(
+            key.clone(),
+        ).await;
+        let (outside_tx, outside_rx) = tokio::sync::mpsc::channel::<NodeMessage>(100);
+
+        let (inside_tx, inside_rx) = tokio::sync::mpsc::channel::<Vec<u8>>(100);
+
+        (Self {
+            config,
+            id: swarm.local_peer_id().to_string(),
+            swarm,
+            connected_peers: HashMap::new(),
+            outside_tx,
+
+            inside_tx,
+            inside_rx,
+        },
+         outside_rx)
+    }
+
+    pub fn emitter(&self) -> tokio::sync::mpsc::Sender<Vec<u8>> {
+        self.inside_tx.clone()
+    }
+
+    pub async fn run(&mut self, address: Option<Multiaddr>) {
+        let address_ = if address.is_none() {
+            Multiaddr::empty()
+                .with(Protocol::Ip4(Ipv4Addr::UNSPECIFIED))
+                .with(Protocol::Udp(self.config.listen_port.unwrap_or(0)))
+                .with(Protocol::QuicV1)
+        } else { address.unwrap() };
+
+        self.swarm
+            .listen_on(address_.clone())
+            .unwrap();
+        info!("Listening on: {:?}", address_.to_string());
+
+        loop {
+            select! {
+                event = self.swarm.select_next_some() => {
+                    match event {
+                        SwarmEvent::ConnectionEstablished { peer_id, .. } => {
+                            info!("Connected to {}", peer_id);
+                        }
+                        SwarmEvent::ConnectionClosed { peer_id, .. } => {
+                            info!("Disconnected from {}", peer_id);
+                        }
+                        SwarmEvent::Behaviour(event) => {
+                            self.process_event(event).await;
+                        }
+                        other => {
+                            debug!("Unhandled {:?}", other);
+                        }
+                    }
+                }
+
+                message = self.inside_rx.recv() => {
+                    if let Some(message) = message {
+                        let topic = gossipsub::IdentTopic::new(self.config.workspace_id.clone());
+
+                        let distributed_message = NodeMessage::Message {
+                            data: message,
+                            time: std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap().as_secs_f64() as u64,
+                            created_by: self.id.clone(),
+                        };
+                        match self.swarm.behaviour_mut().gossip_sub.publish(topic, distributed_message.to_bytes()) {
+                            Ok(_) => {}
+                            Err(e) => {
+                                error!("Failed to broadcast message: {:?}", e);
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+
+    async fn process_event(&mut self, event: PeerAdminEvent) {
+        match event {
+            PeerAdminEvent::Rendezvous(event) => {
+                match event {
+                    rendezvous::server::Event::PeerRegistered { peer, .. } => {
+                        info!("RendezvousServerConnected: {:?}", peer);
+
+                        let topic = gossipsub::IdentTopic::new(self.config.workspace_id.clone());
+                        self.swarm.behaviour_mut().gossip_sub.subscribe(&topic).unwrap();
+                    }
+                    _ => {
+                        info!("RendezvousServer: {:?}", event);
+                    }
+                }
+            }
+            PeerAdminEvent::Ping(event) => {
+                // info!( "Ping: {:?}", event);
+            }
+            PeerAdminEvent::Identify(event) => {
+                // info!( "Identify: {:?}", event);
+            }
+
+            PeerAdminEvent::GossipSub(event) => {
+                match event {
+                    gossipsub::Event::Unsubscribed { topic, peer_id } => {
+                        info!("GossipSub: Unsubscribed to topic {:?} from peer: {:?}", topic, peer_id);
+                        self.outside_tx.send(NodeMessage::Event {
+                            time: std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap().as_secs_f64() as u64,
+                            created_by: peer_id.to_string(),
+                            event: EventType::Unsubscribe {
+                                topic: topic.to_string(),
+                                peer_id: peer_id.to_string(),
+                            },
+                        }).await.expect("Outside tx failed");
+
+                        let peers = self.connected_peers.get_mut(&topic);
+                        if let Some(peers) = peers {
+                            peers.retain(|p| p != &peer_id);
+                        }
+                    }
+                    gossipsub::Event::Subscribed { topic, peer_id } => {
+                        info!("GossipSub: Subscribed to topic {:?} from peer: {:?}", topic, peer_id);
+                        // Record the subscriber under its topic, creating the entry on first use.
+                        self.connected_peers.entry(topic.clone()).or_default().push(peer_id);
+                        self.outside_tx.send(NodeMessage::Event {
+                            time: std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap().as_secs_f64() as u64,
+                            created_by: peer_id.to_string(),
+                            event: EventType::Subscribe {
+                                topic: topic.to_string(),
+                                peer_id: peer_id.to_string(),
+                            },
+                        }).await.expect("Outside tx failed");
+                    }
+                    gossipsub::Event::Message { message, .. } => {
+                        let msg = NodeMessage::from_bytes(message.data);
+                        self.outside_tx.send(msg).await.unwrap();
+                    }
+                    _ => {
+                        info!("GossipSub: {:?}", event);
+                    }
+                }
+            }
+        }
+    }
+}
diff --git a/libs/sangedama/src/peer/node/member.rs b/libs/sangedama/src/peer/node/member.rs
new file mode 100644
index 00000000..5152b7e1
--- /dev/null
+++ b/libs/sangedama/src/peer/node/member.rs
@@ -0,0 +1,203 @@
+use std::net::Ipv4Addr;
+use std::str::FromStr;
+use futures::StreamExt;
+use libp2p::{gossipsub, identity, Multiaddr, PeerId, rendezvous, Swarm};
+use libp2p::multiaddr::Protocol;
+use libp2p::swarm::dial_opts::{DialOpts, PeerCondition};
+use libp2p::swarm::SwarmEvent;
+use tokio::select;
+use tracing::{debug, error, info};
+
+use crate::peer::behaviour::{ClientPeerBehaviour, ClientPeerEvent};
+use crate::peer::message::data::NodeMessage;
+use crate::peer::peer_swarm::create_swarm;
+
+#[derive(Debug, Clone)]
+pub struct MemberPeerConfig {
+    pub name: String,
+    pub workspace_id: String,
+    pub admin_peer: PeerId,
+    pub rendezvous_point_address: Multiaddr,
+}
+
+impl MemberPeerConfig {
+    pub fn new(name: String, workspace_id: String, admin_peer: String, rendezvous_point_admin_port: u16) -> Self {
+
+        let rendezvous_point_address = Multiaddr::empty()
+            .with(Protocol::Ip4(Ipv4Addr::LOCALHOST))
+            .with(Protocol::Udp(rendezvous_point_admin_port))
+            .with(Protocol::QuicV1);
+
+        Self {
+            name,
+            workspace_id,
+            admin_peer: PeerId::from_str(&admin_peer).unwrap(),
+            rendezvous_point_address,
+        }
+    }
+}
+
+pub struct MemberPeer {
+    config: MemberPeerConfig,
+    pub id: String,
+    swarm: Swarm<ClientPeerBehaviour>,
+
+    outside_tx: tokio::sync::mpsc::Sender<NodeMessage>,
+
+    inside_rx: tokio::sync::mpsc::Receiver<Vec<u8>>,
+    inside_tx: tokio::sync::mpsc::Sender<Vec<u8>>,
+}
+
+
+impl MemberPeer {
+    pub async fn create(config: MemberPeerConfig, key: identity::Keypair) -> (Self, tokio::sync::mpsc::Receiver<NodeMessage>) {
+        let swarm = create_swarm::<ClientPeerBehaviour>(key).await;
+
+
+        let (outside_tx, outside_rx) = tokio::sync::mpsc::channel::<NodeMessage>(100);
+
+        let (inside_tx, inside_rx) = tokio::sync::mpsc::channel::<Vec<u8>>(100);
+
+        (Self {
+            config,
+            id: swarm.local_peer_id().to_string(),
+            swarm,
+
+            outside_tx,
+
+            inside_tx,
+            inside_rx,
+        },
+         outside_rx)
+    }
+
+
+    pub fn emitter(&self) -> tokio::sync::mpsc::Sender<Vec<u8>> {
+        self.inside_tx.clone()
+    }
+    pub async fn run(&mut self) {
+        let name = self.config.name.clone();
+        info!("Peer {:?}: {:?} Starting..", name.clone(), self.id.clone());
+        let ext_address = Multiaddr::empty()
+            .with(Protocol::Ip4(Ipv4Addr::UNSPECIFIED))
+            .with(Protocol::Udp(0))
+            .with(Protocol::QuicV1);
+        self.swarm.add_external_address(ext_address.clone());
+
+        let admin_peer_id = self.config.admin_peer;
+        let rendezvous_point_address = self.config.rendezvous_point_address.clone();
+
+        let dial_opts = DialOpts::peer_id(admin_peer_id)
+            .addresses(
+                vec![rendezvous_point_address]
+            )
+            .condition(PeerCondition::Always)
+            .build();
+        self.swarm.dial(dial_opts).unwrap();
+
+
+        let name_copy = name.clone();
+        loop {
+            select! {
+                event = self.swarm.select_next_some() => {
+                    match event {
+                        SwarmEvent::ConnectionEstablished { peer_id, .. } => {
+                            if peer_id == self.config.admin_peer {
+                                if let Err(error) = self.swarm.behaviour_mut().rendezvous.register(
+                                    rendezvous::Namespace::from_static("CEYLON-AI-PEER"),
+                                    self.config.admin_peer,
+                                    None,
+                                ) {
+                                    error!("Failed to register: {error}");
+                                }
+                                info!("Connection established with rendezvous point {}", peer_id);
+                            }
+                        }
+                        SwarmEvent::ConnectionClosed { peer_id, cause, .. } => {
+                            if peer_id == self.config.admin_peer {
+                                error!("Lost connection to rendezvous point {:?}", cause);
+                            }
+                        }
+                        SwarmEvent::Behaviour(event) => {
+                            self.process_event(event).await;
+                        }
+                        other => {
+                            debug!("Unhandled {:?}", other);
+                        }
+                    }
+                }
+
+                message = self.inside_rx.recv() => {
+                    if let Some(message) = message {
+                        let topic = gossipsub::IdentTopic::new(self.config.workspace_id.clone());
+
+                        let distributed_message = NodeMessage::Message {
+                            data: message,
+                            time: std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap().as_secs_f64() as u64,
+                            created_by: self.id.clone(),
+                        };
+                        match self.swarm.behaviour_mut().gossip_sub.publish(topic.clone(), distributed_message.to_bytes()) {
+                            Ok(_) => {}
+                            Err(e) => {
+                                error!("Failed to broadcast message from {}: {:?} Topic {:?}", name_copy, e, topic.to_string());
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+
+    async fn process_event(&mut self, event: ClientPeerEvent) {
+        let name_ = self.config.name.clone();
+        match event {
+            ClientPeerEvent::Rendezvous(event) => {
+                match event {
+                    rendezvous::client::Event::Registered { namespace, ttl, rendezvous_node } => {
+                        info!(
+                            "Registered for namespace '{}' at rendezvous point {} for the next {} seconds",
+                            namespace, rendezvous_node, ttl
+                        );
+                        let topic = gossipsub::IdentTopic::new(self.config.workspace_id.clone());
+                        self.swarm.behaviour_mut().gossip_sub.subscribe(&topic).unwrap();
+                    }
+                    _ => {
+                        info!("Rendezvous: {:?}", event);
+                    }
+                }
+            }
+
+            ClientPeerEvent::GossipSub(event) => {
+                match event {
+                    gossipsub::Event::Subscribed { peer_id, topic } => {
+                        info!("Subscribed to topic: {:?} from peer: {:?}", topic, peer_id);
+                        if peer_id.to_string() == self.config.admin_peer.to_string() {
+                            info!("Member {} Subscribe with Admin", name_.clone());
+                        }
+                    }
+
+                    gossipsub::Event::Unsubscribed { peer_id, topic } => {
+                        info!("Unsubscribed from topic: {:?} from peer: {:?}", topic, peer_id);
+                        if peer_id.to_string() == self.config.admin_peer.to_string() {
+                            info!("Member {} Unsubscribe with Admin", name_.clone());
+                        }
+                    }
+
+                    gossipsub::Event::Message { message, .. } => {
+                        let msg = NodeMessage::from_bytes(message.data);
+                        self.outside_tx.send(msg).await.unwrap();
+                    }
+
+                    _ => {
+                        info!("GossipSub: {:?}", event);
+                    }
+                }
+            }
+
+            other => {
+                // tracing::info!("Unhandled {:?}", other);
+            }
+        }
+    }
+}
\ No newline at end of file
diff --git a/libs/sangedama/src/peer/node/peer_builder.rs b/libs/sangedama/src/peer/node/peer_builder.rs
new file mode 100644
index 00000000..cf9e305a
--- /dev/null
+++ b/libs/sangedama/src/peer/node/peer_builder.rs
@@ -0,0 +1,13 @@
+use libp2p::{identity, PeerId};
+
+pub fn create_key() -> identity::Keypair {
+    identity::Keypair::generate_ed25519()
+}
+
+pub fn create_key_from_bytes(bytes: Vec<u8>) -> identity::Keypair {
+    identity::Keypair::from_protobuf_encoding(&bytes).unwrap()
+}
+
+pub fn get_peer_id(key: &identity::Keypair) -> PeerId {
+    key.public().to_peer_id()
+}
\ No newline at end of file
diff --git a/libs/sangedama/src/peer/peer_swarm.rs b/libs/sangedama/src/peer/peer_swarm.rs
new file mode 100644
index 00000000..1e28d985
--- /dev/null
+++ b/libs/sangedama/src/peer/peer_swarm.rs
@@ -0,0 +1,40 @@
+use std::time::Duration;
+
+use libp2p::{identity, noise, PeerId, Swarm, SwarmBuilder, tcp, tls, yamux};
+use libp2p::core::muxing::StreamMuxerBox;
+use libp2p::core::transport::dummy::DummyTransport;
+use crate::peer::behaviour::PeerBehaviour;
+
+pub async fn create_swarm<B>(key: identity::Keypair) -> Swarm<B>
+where
+    B: PeerBehaviour + 'static,
+{
+    SwarmBuilder::with_existing_identity(key.clone())
+        .with_tokio()
+        .with_tcp(
+            tcp::Config::default(),
+            noise::Config::new,
+            yamux::Config::default,
+        ).unwrap()
+        .with_quic()
+        .with_other_transport(|_key| DummyTransport::<(PeerId, StreamMuxerBox)>::new())
+        .unwrap()
+        .with_dns()
+        .unwrap()
+        .with_websocket(
+            (tls::Config::new, noise::Config::new),
+            yamux::Config::default,
+        )
+        .await
+        .unwrap()
+        .with_behaviour(|_key| {
+            Ok(B::new(_key.clone()))
+        })
+        .unwrap()
+        .with_swarm_config(|cfg| {
+            // Edit cfg here.
+            cfg
+                .with_idle_connection_timeout(Duration::from_secs(240))
+        })
+        .build()
+}
\ No newline at end of file
diff --git a/libs/utils/utils-random-names/Cargo.toml b/libs/utils/utils-random-names/Cargo.toml
deleted file mode 100644
index 1f4f11de..00000000
--- a/libs/utils/utils-random-names/Cargo.toml
+++ /dev/null
@@ -1,9 +0,0 @@
-[package]
-name = "utils-random-names"
-version = "0.1.0"
-edition = "2021"
-
-# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
-
-[dependencies]
-rand = "0.8.5"
diff --git a/libs/utils/utils-random-names/src/lib.rs b/libs/utils/utils-random-names/src/lib.rs
deleted file mode 100644
index 2c02a968..00000000
--- a/libs/utils/utils-random-names/src/lib.rs
+++ /dev/null
@@ -1,1068 +0,0 @@
-//! This small library just generates random names.
-//!
-//! Its purpose is to create identifiers that do not have to be
-//! strictly unique like names for server nodes etc.
-//!
-//! The names are only usable for better reading where identifiers appear such as log files etc.
-//!
-//! This library is not meant to create fake data for testing.
-extern crate rand;
-
-use std::fmt::{Display, Formatter, Result};
-use std::convert::Into;
-use std::ops::Deref;
-use rand::{Rng};
-use rand::prelude::*;
-use rand::rngs::OsRng;
-
-
-/// A `RandomName` is just a container for a `String`.
-#[derive(Debug, Clone, PartialEq)] -pub struct RandomName { - pub name: String, -} - -impl RandomName { - /// Creates a new `RandomName` that contains a random name. - /// - /// This function just calls `create_name` with a freshly generated random number generator. - /// - /// Internally this function tries to create an `OsRng` which can fail. Then the random name is not - /// random anymore but "EDWIN ERROR". - pub fn new() -> RandomName { - let mut rng = OsRng; - RandomName { name: create_name(&mut rng) } - } -} - -impl Display for RandomName { - fn fmt(&self, f: &mut Formatter) -> Result { - write!(f, "{}", self.name) - } -} - -impl Deref for RandomName { - type Target = str; - fn deref(&self) -> &Self::Target { - self.name.as_ref() - } -} - -impl Into for RandomName { - fn into(self) -> String { - self.name - } -} - -/// Creates a new `RandomName` given a random number generator. -pub fn create_name(rng: &mut T) -> String { - let name: &str = NAMES.choose(rng).copied().unwrap_or(NO_NAMES_NAME); - let surname: &str = SURNAMES.choose(rng).copied().unwrap_or(NO_NAMES_SURNAME); - format!("{} {}", name, surname) -} - -const NO_NAMES_NAME: &'static str = "JOHN"; -const NO_NAMES_SURNAME: &'static str = "DOE"; - -const NAMES: &'static [&'static str] = &["Lyric", - "Bryce", - "Cole", - "Cristofer", - "Kyan", - "Enrique", - "Reid", - "Gage", - "Baron", - "Armando", - "Davin", - "Nash", - "Jaiden", - "Erick", - "Jase", - "Kamari", - "Jamarion", - "Adrian", - "Arjun", - "Donavan", - "Julius", - "Carson", - "Abram", - "Lewis", - "Oscar", - "Julien", - "Ellis", - "Enzo", - "Fletcher", - "Jamar", - "Tyrone", - "Aden", - "Zaiden", - "Chance", - "Jerimiah", - "Joey", - "Bo", - "Joel", - "Humberto", - "Zack", - "Kael", - "Jermaine", - "Adam", - "Clark", - "Rylan", - "Ross", - "Kieran", - "Brodie", - "Sidney", - "Sean", - "Arthur", - "Weston", - "Trenton", - "Conrad", - "Dangelo", - "Ian", - "Isaias", - "Soren", - "Kyle", - "Darien", - "Larry", - "Sage", - "Matteo", - "Jabari", - "Kash", - "Austin", - "Curtis", - "Jonathon", - "Israel", - "Octavio", - "Everett", - "Johnny", - "Gustavo", - "Brayden", - "Hassan", - "Dustin", - "Moses", - "Zayne", - "Kadyn", - "Albert", - "David", - "Zackary", - "Gilbert", - "Darrell", - "Efrain", - "Jon", - "Carmelo", - "Quinten", - "Donald", - "Keith", - "Dominique", - "Timothy", - "Broderick", - "Tyrell", - "Zachery", - "Wayne", - "Marvin", - "Cory", - "Kaeden", - "Gunnar", - "Waylon", - "Keyon", - "Angel", - "Abdiel", - "Dylan", - "Edward", - "Roy", - "Brock", - "Alfredo", - "Camren", - "Rhys", - "Jayce", - "Elliott", - "Felipe", - "Randy", - "Elijah", - "Slade", - "Wilson", - "Randall", - "Remington", - "Lennon", - "Richard", - "Nasir", - "Kellen", - "Carl", - "Kingston", - "Kymani", - "Ruben", - "Gianni", - "Ethan", - "Misael", - "Bradley", - "Paxton", - "Sebastian", - "Boston", - "Devan", - "Case", - "Conner", - "Antonio", - "Lukas", - "Nick", - "Alfonso", - "Lane", - "Andre", - "Thomas", - "Tanner", - "Leland", - "Kylan", - "Barrett", - "Jaden", - "Logan", - "Colton", - "Axel", - "Giovani", - "Royce", - "Caden", - "Titus", - "Vicente", - "Leonel", - "Jason", - "Silas", - "Damian", - "Rocco", - "Antony", - "Kameron", - "Chad", - "Kane", - "Beckett", - "Cedric", - "Shane", - "Mathias", - "Aaron", - "Kade", - "Luca", - "Reece", - "Seth", - "Noe", - "Ezequiel", - "Grady", - "Uriah", - "Russell", - "Emmanuel", - "Rene", - "Teagan", - "Gavyn", - "Jaxson", - "Antwan", - "Maurice", - "Maxwell", - "Brendon", - "Justice", - "Dax", - "Kenny", - "Damon", - "Drake", - "Tony", 
- "Pedro", - "Mathew", - "Dennis", - "Jorge", - "Ashlyn", - "Meadow", - "Ava", - "Ryan", - "Makayla", - "Livia", - "Giada", - "Vanessa", - "Trinity", - "Pamela", - "Kianna", - "Cristal", - "Natasha", - "Nadia", - "Skylar", - "Chloe", - "Crystal", - "Cherish", - "Victoria", - "Marisol", - "Britney", - "Micah", - "Isabela", - "Grace", - "Ryleigh", - "Jacquelyn", - "Alejandra", - "Kayley", - "Alexia", - "Cindy", - "Sophia", - "Renee", - "Nia", - "Neveah", - "Sherlyn", - "Josie", - "Mareli", - "Addisyn", - "Amaris", - "Madalynn", - "Valery", - "Olive", - "Kinley", - "Kyleigh", - "Paloma", - "Alice", - "Adriana", - "Emilee", - "Jaden", - "Isabella", - "Ximena", - "Chanel", - "Reina", - "Mattie", - "Melina", - "Jayla", - "Audrey", - "Maryjane", - "Maritza", - "Helen", - "Patricia", - "Sasha", - "Paityn", - "Alina", - "Charlize", - "Megan", - "Marlee", - "Jillian", - "Evie", - "Lily", - "Camila", - "Erika", - "Paige", - "Lina", - "Lucille", - "Alexus", - "Natalya", - "Jaylene", - "Isabelle", - "Elsa", - "Jaida", - "Lucia", - "Fatima", - "Elliana", - "Lucy", - "Kenley", - "Nevaeh", - "Jaiden", - "Deja", - "Justine", - "Lola", - "Angelina", - "Tanya", - "Princess", - "Celeste", - "Genesis", - "Carley", - "Emmalee", - "Tess", - "Hanna", - "Alyson", - "Karsyn", - "Virginia", - "Destiny", - "Mireya", - "Aniya", - "Kimberly", - "Ella", - "Sage", - "Cora", - "Kayla", - "Wendy", - "Jolie", - "Lila", - "Samantha", - "Riya", - "Aryanna", - "Krystal", - "Rosemary", - "Isabel", - "Damaris", - "Susan", - "Kara", - "Raelynn", - "Cassidy", - "Monica", - "Alexandra", - "Destiney", - "Alison", - "Johanna", - "Anya", - "Janessa", - "Baylee", - "Caitlyn", - "Sierra", - "Maribel", - "Angeline", - "Ashtyn", - "Anahi", - "Jaylynn", - "Cailyn", - "Giuliana", - "Cristina", - "Tania", - "Karli", - "Hailey", - "Emerson", - "Ellen", - "Teagan", - "Aaliyah", - "Jamiya", - "Giana", - "Maliyah", - "Emely", - "Valerie", - "Rachael", - "Jadyn", - "Kaia", - "June", - "Jenny", - "Hadley", - "Kennedi", - "Selena", - "Josephine", - "Jazmyn", - "Zoie", - "Logan", - "Lainey", - "Mallory", - "Maren", - "Andrea", - "Naima", - "Clarissa", - "Kaya", - "Dalia", - "Kailey", - "Braelyn", - "Faith", - "Daniela", - "Phoebe", - "Diamond", - "Deborah", - "Genevieve", - "Rayne", - "Jade", - "Reagan", - "Brenna", - "Michelle", - "Jaelyn", - "Jaqueline", - "Regina", - "Kylee", - "Evelyn", - "Valeria", - "Kenna", - "Amiyah", - "Zaria", - "Camilla", - "Kate", - "Laney", - "Whitney", - "Anika", - "Erin", - "Jayden", - "Madeline", - "Izabelle", - "Melanie", - "Kassidy", - "Judith", - "Harmony", - "Fernanda", - "Priscilla", - "Cali", - "Anabelle", - "Nathaly", - "Lana", - "Haven", - "Aliza", - "Rachel", - "Nayeli", - "Annabella", - "Yaritza", - "Sonia", - "Ariella", - "Lisa", - "Emmy", - "Kaylah", - "Kenya", - "Miranda", - "Chelsea", - "Taryn", - "Janelle", - "Angelique", - "Leilani", - "Kadence", - "Shyanne", - "Marie", - "Cameron", - "Leia", - "Jordyn", - "Aylin", - "Aimee", - "Kiley", - "Kamari", - "Aniyah", - "Kaylin", - "Sanai", - "Lilian", - "Carlie", - "Jayleen", - "Hannah", - "Dulce", - "Jessie", - "Natalie", - "Giovanna", - "Aryana", - "Nylah", - "Karley", - "Alani", - "Laurel", - "Iliana", - "Taniya", - "Mckenna", - "Rhianna", - "Kyra", - "Cynthia", - "Anabel", - "Salma", - "Melody", - "Armani", - "Lena", - "Hazel", - "Melissa", - "Layla", - "Elisa", - "Eleanor", - "Tamia", - "Linda", - "Penelope", - "Phoenix", - "Jayda", - "Melany", - "Aracely", - "Zoe", - "Kiera", - "Nataly", - "Marianna", - "Kamora", - "Kierra", - "Jocelynn", - "Joselyn", 
- "Shyla", - "Desiree", - "Liliana", - "Tori", - "Laura", - "Guadalupe", - "Iris", - "Cheyenne", - "Brisa", - "Jakayla", - "Summer", - "Kaylen", - "Irene", - "Leyla", - "Elaine", - "Alena", - "Liberty", - "McKayla", - "Mya", - "Gia", - "Jaslyn", - "Gracie", - "Haylie", - "Brylee", - "Amanda", - "Eileen", - "Kailee", - "Janiya", - "Anaya", - "Keyla", - "Amber", - "Kaitlin", - "Ashlee", - "Shannon", - "Alana", - "Lea", - "Perla", - "Madelynn", - "Kennedy", - "Adison", - "Kenzie", - "Rylie", - "Desirae", - "Jimena", - "Diya", - "Shea", - "Shania", - "Iyana", - "Aleena", - "Emery", - "Ayana", - "Kaiya", - "Justice", - "Sienna", - "Simone", - "Naomi", - "Saige", - "Emelia", - "Emilie", - "Danika", - "Juliana", - "Nyasia", - "Kiersten", - "Janiah", - "Nola", - "Adalynn", - "Reyna", - "Ada", - "Kaley", - "Gina", - "Aliya", - "Cheyanne", - "Felicity", - "Camryn", - "Madisyn", - "Addison", - "Cloe", - "Malia", - "Abigail", - "Lia", - "Macy", - "Abril", - "Kira", - "Lindsey", - "Sophie", - "Esperanza", - "Danna", - "Zariah", - "Laila", - "Molly", - "Jasmine", - "Mikaela", - "Audrina", - "Alisa", - "Gabriella", - "Sofia", - "Arielle", - "Madilynn", - "Angelica", - "Carolina", - "Scarlet", - "Mackenzie", - "Monique", - "Essence", - "Natalia", - "Ruth", - "Katrina", - "Shelby", - "Cara", - "Freddy", - "Gary", - "Lakeisha", - "Ione", - "Margot", - "Kary", - "Dirk", - "Rosella", - "Erinn", - "Mario", - "Dalila", - "Avis", - "Rupert", - "Dione", - "Giovanni", - "Ligia", - "Alysia", - "Coralie", - "Helen", - "Leigha", - "Kathe", - "Katrice", - "Hermila", - "Omar", - "Ivory", - "Deane", - "Odilia", - "Jaymie", - "Brittaney", - "Ofelia", - "Sharonda", - "Jayson", - "Truman", - "Lewis", - "Georgiana", - "Elanor", - "Erma", - "Riva", - "Laureen", - "Ouida", - "Katina", - "Mechelle", - "Lyndon", - "Gertie", - "Jon", - "Tisa", - "Hayden", - "Ty", - "Jacklyn", - "Mickie"]; - - -const SURNAMES: &'static [&'static str] = &["Soto", - "Maddox", - "Weber", - "Ward", - "Brandt", - "Shelton", - "Wilkerson", - "Schmitt", - "Riley", - "Leon", - "Benson", - "Everett", - "Aguilar", - "Brown", - "Dalton", - "Gonzalez", - "Zavala", - "Williamson", - "Dodson", - "Carlson", - "Castro", - "Christian", - "Huynh", - "Sanford", - "Copeland", - "Sharp", - "Freeman", - "Carpenter", - "Herring", - "Dillon", - "Rowland", - "Stanley", - "Shields", - "Newman", - "Gross", - "Harding", - "Duke", - "Higgins", - "McMillan", - "Floyd", - "Pena", - "Yates", - "Butler", - "Beard", - "Burgess", - "Fuller", - "Lawrence", - "Buchanan", - "David", - "Solis", - "Gilmore", - "Holden", - "Petersen", - "Schroeder", - "Duarte", - "Blevins", - "Lynch", - "Washington", - "Sandoval", - "Hunt", - "Hebert", - "Macdonald", - "Mack", - "Sheppard", - "McKenzie", - "Norris", - "Dennis", - "Tate", - "Lyons", - "Morgan", - "Barajas", - "Galvan", - "Patel", - "Santana", - "Meza", - "Martinez", - "Banks", - "Hughes", - "Harvey", - "Grant", - "Clayton", - "Coffey", - "Pittman", - "Hutchinson", - "Collier", - "Stevenson", - "Melendez", - "Murillo", - "Rodgers", - "Mercer", - "Armstrong", - "Morales", - "Douglas", - "Douven", - "Blanchard", - "Wallace", - "Mckinney", - "Bird", - "Bradford", - "Cervantes", - "Church", - "Gordon", - "George", - "Cochran", - "Ayala", - "Schaefer", - "Frey", - "French", - "Obrien", - "James", - "Terry", - "Leblanc", - "Lewis", - "Walton", - "Walters", - "Lloyd", - "Barker", - "Singh", - "Montes", - "Franklin", - "Pennington", - "Dunn", - "Russo", - "Austin", - "Carter", - "Reese", - "Mejia", - "Hurley", - "Krueger", - "Foley", - "Jacobs", 
- "Fox", - "Miranda", - "Woods", - "Glenn", - "Rich", - "Frank", - "Nash", - "Lawson", - "Keller", - "Novak", - "Holmes", - "Bowen", - "Casey", - "Burton", - "Guzman", - "Vargas", - "Riggs", - "Atkins", - "Lee", - "Becker", - "Potter", - "Gallegos", - "Hahn", - "Nielsen", - "Lane", - "Archer", - "Carr", - "Mason", - "Watson", - "Hardin", - "Fitzpatrick", - "Snyder", - "Webster", - "Rosario", - "Tanner", - "Deleon", - "Cross", - "Solomon", - "Beasley", - "Ibarra", - "Arellano", - "Carney", - "Garner", - "Sexton", - "Garrett", - "Walker", - "Gillespie", - "Hall", - "Maldonado", - "Winters", - "Barnes", - "Robles", - "Saunders", - "Strong", - "Cowan", - "Harrell", - "McGuire", - "Hernandez", - "Potts", - "Reed", - "Mooney", - "Carey", - "Gould", - "Patterson", - "Dawson", - "Horton", - "Farley", - "Callahan", - "Jensen", - "English", - "Abraham", - "Allan", - "Alsop", - "Anderson", - "Arnold", - "Avery", - "Bailey", - "Baker", - "Ball", - "Bell", - "Berry", - "Black", - "Blake", - "Bond", - "Bower", - "Brown", - "Buckland", - "Burgess", - "Butler", - "Cameron", - "Campbell", - "Carr", - "Chapman", - "Churchill", - "Clark", - "Clarkson", - "Coleman", - "Cornish", - "Davidson", - "Davies", - "Dickens", - "Dowd", - "Duncan", - "Dyer", - "Edmunds", - "Ellison", - "Ferguson", - "Fisher", - "Forsyth", - "Fraser", - "Gibson", - "Gill", - "Glover", - "Graham", - "Grant", - "Gray", - "Greene", - "Hamilton", - "Hardacre", - "Harris", - "Hart", - "Hemmings", - "Henderson", - "Hill", - "Hodges", - "Howard", - "Hudson", - "Hughes", - "Hunter", - "Ince", - "Jackson", - "James", - "Johnston", - "Jones", - "Kelly", - "Kerr", - "King", - "Knox", - "Lambert", - "Langdon", - "Lawrence", - "Lee", - "Lewis", - "Lyman", - "MacDonald", - "Mackay", - "Mackenzie", - "MacLeod", - "Manning", - "Marshall", - "Martin", - "Mathis", - "May", - "McDonald", - "McLean", - "McGrath", - "Metcalfe", - "Miller", - "Mills", - "Mitchell", - "Morgan", - "Morrison", - "Murray", - "Nash", - "Newman", - "Nolan", - "North", - "Ogden", - "Oliver", - "Paige", - "Parr", - "Parsons", - "Paterson", - "Payne", - "Peake", - "Peters", - "Piper", - "Poole", - "Powell", - "Pullman", - "Quinn", - "Rampling", - "Randall", - "Rees", - "Reid", - "Roberts", - "Robertson", - "Ross", - "Russell", - "Rutherford", - "Sanderson", - "Scott", - "Sharp", - "Short", - "Simpson", - "Skinner", - "Slater", - "Smith", - "Springer", - "Stewart", - "Sutherland", - "Taylor", - "Terry", - "Thomson", - "Tucker", - "Turner", - "Underwood", - "Vance", - "Vaughan", - "Walker", - "Wallace", - "Walsh", - "Watson", - "Welch", - "White", - "Wilkins", - "Wilson", - "Wright", - "Young"]; \ No newline at end of file