Skip to content

Commit

Permalink
feat: update streaming for steps and tokens (#53)
Browse files Browse the repository at this point in the history
  • Loading branch information
olbychos authored Nov 19, 2024
1 parent aa6a26d commit e6424de
Show file tree
Hide file tree
Showing 13 changed files with 126 additions and 57 deletions.
56 changes: 43 additions & 13 deletions dynamiq/nodes/agents/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,8 @@
class StreamChunkChoiceDelta(BaseModel):
    """Delta model for content chunks emitted by agent streaming.

    Mirrors the OpenAI-style streaming delta shape: each streamed chunk
    carries its payload plus metadata identifying where in the agent run
    it was produced.
    """
    # Chunk payload: plain text, or a structured dict (e.g. tool output).
    content: str | dict
    # Originator of the chunk — the agent's or tool's name.
    source: str
    # Logical step label, e.g. "answer", "reasoning", "reasoning_<n>", "tool_<n>".
    step: str


class StreamChunkChoice(BaseModel):
Expand Down Expand Up @@ -255,29 +256,58 @@ def _run_llm(self, prompt: str, config: RunnableConfig | None = None, **kwargs)
logger.error(f"Agent {self.name} - {self.id}: LLM execution failed: {str(e)}")
raise

def stream_content(self, content: str, source: str, step: str, config: RunnableConfig | None = None, **kwargs):
    """Streams ``content`` to the configured callbacks.

    Dispatches on ``self.streaming.by_tokens``: streams token by token via
    ``stream_by_tokens`` when enabled, otherwise emits the whole content as
    a single chunk via ``stream_response``.

    Args:
        content: Text to stream.
        source: Originator of the content (agent or tool name).
        step: Logical step label (e.g. "answer", "reasoning").
        config: Runnable configuration carrying the callbacks.

    Returns:
        The streamed content (re-joined from tokens when streaming by tokens).
    """
    if self.streaming.by_tokens:
        return self.stream_by_tokens(content=content, source=source, step=step, config=config, **kwargs)
    return self.stream_response(content=content, source=source, step=step, config=config, **kwargs)

def stream_by_tokens(self, content: str, source: str, step: str, config: RunnableConfig | None = None, **kwargs):
    """Streams ``content`` to the callbacks one space-delimited token at a time.

    Args:
        content: Text to stream; split on single spaces.
        source: Originator of the content (agent or tool name).
        step: Logical step label (e.g. "answer", "reasoning").
        config: Runnable configuration carrying the callbacks.

    Returns:
        The original content, re-joined from the streamed tokens.
    """
    tokens = content.split(" ")
    final_response = []
    for token in tokens:
        final_response.append(token)
        # NOTE(review): every token — including the first — is prefixed with a
        # space before streaming, so consumers concatenating deltas see a
        # leading space. Confirm this is the intended delimiter strategy.
        token_with_prefix = " " + token
        token_for_stream = StreamChunk(
            choices=[
                StreamChunkChoice(delta=StreamChunkChoiceDelta(content=token_with_prefix, source=source, step=step))
            ]
        )
        logger.debug(f"Agent {self.name} - {self.id}: Streaming token: {token_for_stream}")
        self.run_on_node_execute_stream(
            callbacks=config.callbacks,
            chunk=token_for_stream.model_dump(),
            **kwargs,
        )
    return " ".join(final_response)

def stream_response(self, content: str, source: str, step: str, config: RunnableConfig | None = None, **kwargs):
    """Streams ``content`` to the callbacks as one single chunk.

    Args:
        content: Text to stream in a single delta.
        source: Originator of the content (agent or tool name).
        step: Logical step label (e.g. "answer", "reasoning").
        config: Runnable configuration carrying the callbacks.

    Returns:
        The content, unchanged.
    """
    delta = StreamChunkChoiceDelta(content=content, source=source, step=step)
    response_for_stream = StreamChunk(choices=[StreamChunkChoice(delta=delta)])
    logger.debug(f"Agent {self.name} - {self.id}: Streaming response: {response_for_stream}")

    self.run_on_node_execute_stream(
        callbacks=config.callbacks,
        chunk=response_for_stream.model_dump(),
        **kwargs,
    )
    return content

def _run_agent(self, config: RunnableConfig | None = None, **kwargs) -> str:
"""Runs the agent with the generated prompt and handles exceptions."""
formatted_prompt = self.generate_prompt()
try:
logger.info(f"Streaming config {self.streaming}")
llm_result = self._run_llm(formatted_prompt, config=config, **kwargs)
if self.streaming.enabled:
return self.stream_chunk(llm_result, config=config, **kwargs)
return self.stream_content(
content=llm_result,
source=self.name,
step="answer",
config=config,
**kwargs,
)
return llm_result

except Exception as e:
Expand Down Expand Up @@ -477,21 +507,21 @@ def _plan(self, config: RunnableConfig, **kwargs) -> str:
prompt = self.generate_prompt(block_names=["plan"])
llm_result = self._run_llm(prompt, config, **kwargs)
if self.streaming.enabled and self.streaming.mode == StreamingMode.ALL:
return self.stream_chunk(input_chunk=llm_result, config=config, **kwargs)
return self.stream_content(content=llm_result, step="reasoning", source=self.name, config=config, **kwargs)
return llm_result

def _assign(self, config: RunnableConfig, **kwargs) -> str:
    """Executes the 'assign' action.

    Generates the 'assign' prompt, runs the LLM on it, and — when streaming
    is enabled in ALL mode — streams the result as a "reasoning" step.

    Returns:
        The LLM output for the 'assign' step.
    """
    prompt = self.generate_prompt(block_names=["assign"])
    llm_result = self._run_llm(prompt, config, **kwargs)
    # Intermediate steps are only streamed in ALL mode; FINAL mode streams
    # just the answer elsewhere.
    if self.streaming.enabled and self.streaming.mode == StreamingMode.ALL:
        return self.stream_content(content=llm_result, step="reasoning", source=self.name, config=config, **kwargs)
    return llm_result

def _final(self, config: RunnableConfig, **kwargs) -> str:
    """Executes the 'final' action.

    Generates the 'final' prompt, runs the LLM on it, and — when streaming
    is enabled (any mode) — streams the result as the "answer" step.

    Returns:
        The LLM output for the 'final' step.
    """
    prompt = self.generate_prompt(block_names=["final"])
    llm_result = self._run_llm(prompt, config, **kwargs)
    if self.streaming.enabled:
        return self.stream_content(content=llm_result, step="answer", source=self.name, config=config, **kwargs)
    return llm_result
70 changes: 45 additions & 25 deletions dynamiq/nodes/agents/react.py
Original file line number Diff line number Diff line change
Expand Up @@ -351,17 +351,21 @@ def _run_agent(self, config: RunnableConfig | None = None, **kwargs) -> str:
self.tracing_intermediate(loop_num, formatted_prompt, llm_generated_output)
if self.streaming.enabled and self.streaming.mode == StreamingMode.ALL:

self.stream_chunk(
input_chunk=llm_generated_output,
self.stream_content(
content=llm_generated_output,
source=self.name,
step=f"reasoning_{loop_num + 1}",
config=config,
**kwargs,
)
if "Answer:" in llm_generated_output:
final_answer = self._extract_final_answer(llm_generated_output)
self.tracing_final(loop_num, final_answer, config, kwargs)
if self.streaming.enabled and self.streaming.mode == StreamingMode.FINAL:
self.stream_chunk(
input_chunk=final_answer,
if self.streaming.enabled:
self.stream_content(
content=final_answer,
source=self.name,
step="answer",
config=config,
**kwargs,
)
Expand All @@ -377,18 +381,22 @@ def _run_agent(self, config: RunnableConfig | None = None, **kwargs) -> str:
llm_generated_output = json.dumps(llm_generated_output_json)
self.tracing_intermediate(loop_num, formatted_prompt, llm_generated_output)
if self.streaming.enabled and self.streaming.mode == StreamingMode.ALL:
self.stream_chunk(
input_chunk=llm_generated_output,
self.stream_content(
content=llm_generated_output,
source=self.name,
step=f"reasoning_{loop_num + 1}",
config=config,
**kwargs,
)

if action == "provide_final_answer":
final_answer = llm_generated_output_json["answer"]
self.tracing_final(loop_num, final_answer, config, kwargs)
if self.streaming.enabled and self.streaming.mode == StreamingMode.FINAL:
self.stream_chunk(
input_chunk=final_answer,
if self.streaming.enabled:
self.stream_content(
content=final_answer,
source=self.name,
step="answer",
config=config,
**kwargs,
)
Expand All @@ -405,18 +413,22 @@ def _run_agent(self, config: RunnableConfig | None = None, **kwargs) -> str:

self.tracing_intermediate(loop_num, formatted_prompt, llm_generated_output)
if self.streaming.enabled and self.streaming.mode == StreamingMode.ALL:
self.stream_chunk(
input_chunk=llm_generated_output,
self.stream_content(
content=llm_generated_output,
source=self.name,
step=f"reasoning_{loop_num + 1}",
config=config,
**kwargs,
)

if action == "finish":
final_answer = llm_generated_output_json["action_input"]
self.tracing_final(loop_num, final_answer, config, kwargs)
if self.streaming.enabled and self.streaming.mode == StreamingMode.FINAL:
self.stream_chunk(
input_chunk=final_answer,
if self.streaming.enabled:
self.stream_content(
content=final_answer,
source=self.name,
step="answer",
config=config,
**kwargs,
)
Expand All @@ -429,17 +441,21 @@ def _run_agent(self, config: RunnableConfig | None = None, **kwargs) -> str:
llm_generated_output = llm_result.output["content"]
self.tracing_intermediate(loop_num, formatted_prompt, llm_generated_output)
if self.streaming.enabled and self.streaming.mode == StreamingMode.ALL:
self.stream_chunk(
input_chunk=llm_generated_output,
self.stream_content(
content=llm_generated_output,
source=self.name,
step=f"reasoning_{loop_num + 1}",
config=config,
**kwargs,
)
if "<answer>" in llm_generated_output:
final_answer = self._extract_final_answer_xml(llm_generated_output)
self.tracing_final(loop_num, final_answer, config, kwargs)
if self.streaming.enabled and self.streaming.mode == StreamingMode.FINAL:
self.stream_chunk(
input_chunk=final_answer,
if self.streaming.enabled:
self.stream_content(
content=final_answer,
source=self.name,
step="answer",
config=config,
**kwargs,
)
Expand All @@ -465,8 +481,10 @@ def _run_agent(self, config: RunnableConfig | None = None, **kwargs) -> str:
observation = f"\nObservation: {tool_result}\n"
llm_generated_output += observation
if self.streaming.enabled and self.streaming.mode == StreamingMode.ALL:
self.stream_chunk(
input_chunk=observation,
self.stream_content(
content=observation,
source=tool.name,
step=f"tool_{loop_num}",
config=config,
**kwargs,
)
Expand Down Expand Up @@ -494,10 +512,12 @@ def _run_agent(self, config: RunnableConfig | None = None, **kwargs) -> str:
)
raise MaxLoopsExceededException(message=error_message)
else:
max_loop_final_answer = self._handle_max_loops_exceeded(previous_responses, config, kwargs)
max_loop_final_answer = self._handle_max_loops_exceeded(previous_responses, config, **kwargs)
if self.streaming.enabled:
self.stream_chunk(
input_chunk=max_loop_final_answer,
self.stream_content(
content=max_loop_final_answer,
source=self.name,
step="answer",
config=config,
**kwargs,
)
Expand Down
12 changes: 10 additions & 2 deletions dynamiq/nodes/agents/reflection.py
Original file line number Diff line number Diff line change
Expand Up @@ -84,10 +84,18 @@ def _run_agent(self, config: RunnableConfig | None = None, **kwargs) -> str:
if not output_content:
logger.warning("No output content extracted.")
return ""
return self.stream_chunk(output_content[-1], config=config, **kwargs)
return self.stream_content(
content=output_content[-1],
step="answer",
source=self.name,
config=config,
**kwargs,
)
elif self.streaming.mode == StreamingMode.ALL:
logger.debug("Streaming mode set to ALL. Returning all output.")
return self.stream_chunk(result, config=config, **kwargs)
return self.stream_content(
content=result, step="reasoning", source=self.name, config=config, **kwargs
)

if not output_content:
logger.warning("No output content extracted.")
Expand Down
4 changes: 3 additions & 1 deletion dynamiq/types/streaming.py
Original file line number Diff line number Diff line change
Expand Up @@ -74,14 +74,16 @@ class StreamingConfig(BaseModel):
timeout (float | None): Timeout for streaming. Defaults to None.
input_queue (Queue | None): Input queue for streaming. Defaults to None.
input_queue_done_event (Event | None): Event to signal input queue completion. Defaults to None.
mode (StreamingMode): Streaming mode. Defaults to StreamingMode.FINAL.
mode (StreamingMode): Streaming mode. Defaults to StreamingMode.FINAL.
by_tokens (bool): Whether to stream by tokens. Defaults to True.
"""
enabled: bool = False
event: str = STREAMING_EVENT
timeout: float | None = None
input_queue: Queue | None = None
input_queue_done_event: Event | None = None
mode: StreamingMode = StreamingMode.FINAL
by_tokens: bool = True

model_config = ConfigDict(arbitrary_types_allowed=True)

Expand Down
2 changes: 1 addition & 1 deletion examples/agents/use_streaming.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ def run_agent(event: str = "data") -> str:
id="agent",
llm=llm,
tools=[e2b_tool],
streaming=StreamingConfig(enabled=True, event=event, mode=StreamingMode.ALL),
streaming=StreamingConfig(enabled=True, event=event, mode=StreamingMode.ALL, by_tokens=True),
)

streaming_handler = StreamingIteratorCallbackHandler()
Expand Down
2 changes: 1 addition & 1 deletion examples/use_case_streaming_agents/orchestrator/app.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
st.sidebar.title("Agent Configuration")
streaming_enabled = st.sidebar.checkbox("Enable Streaming", value=False)

streaming_mode = st.sidebar.radio("Streaming Mode", options=["Final", "All"], index=0)
streaming_mode = st.sidebar.radio("Streaming Mode", options=["Steps", "Answer"], index=0)
if "agent" not in st.session_state or st.sidebar.button("Apply Changes"):
st.session_state.agent = setup_agent(streaming_enabled, streaming_mode)
st.session_state.messages = []
Expand Down
2 changes: 1 addition & 1 deletion examples/use_case_streaming_agents/orchestrator/backend.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ def setup_agent(streaming_enabled: bool, streaming_mode: str) -> ReActAgent:
"""
llm = setup_llm()
memory = Memory(backend=InMemory())
mode_mapping = {"Final": StreamingMode.FINAL, "All": StreamingMode.ALL}
mode_mapping = {"Answer": StreamingMode.FINAL, "Steps": StreamingMode.ALL}
mode = mode_mapping.get(streaming_mode, StreamingMode.FINAL)
streaming_config = StreamingConfig(enabled=streaming_enabled, mode=mode)
tool_code = E2BInterpreterTool(connection=E2B())
Expand Down
5 changes: 3 additions & 2 deletions examples/use_case_streaming_agents/react/app.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,11 +6,12 @@
st.sidebar.title("Agent Configuration")
agent_role = st.sidebar.text_input("Agent Role", "helpful assistant")
streaming_enabled = st.sidebar.checkbox("Enable Streaming", value=False)
streaming_tokens = st.sidebar.checkbox("Enable Streaming Tokens", value=False)

streaming_mode = st.sidebar.radio("Streaming Mode", options=["Final", "All"], index=0) # Default to "Final"
streaming_mode = st.sidebar.radio("Streaming Mode", options=["Steps", "Answer"], index=0) # Default to "Answer"

if "agent" not in st.session_state or st.sidebar.button("Apply Changes"):
st.session_state.agent = setup_agent(agent_role, streaming_enabled, streaming_mode)
st.session_state.agent = setup_agent(agent_role, streaming_enabled, streaming_mode, streaming_tokens)
st.session_state.messages = []

st.title("React Agent Chat")
Expand Down
7 changes: 4 additions & 3 deletions examples/use_case_streaming_agents/react/backend.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,15 +10,15 @@
from examples.llm_setup import setup_llm


def setup_agent(agent_role: str, streaming_enabled: bool, streaming_mode: str) -> ReActAgent:
def setup_agent(agent_role: str, streaming_enabled: bool, streaming_mode: str, streaming_tokens: bool) -> ReActAgent:
"""
Initializes an AI agent with a specified role and streaming configuration.
"""
llm = setup_llm()
memory = Memory(backend=InMemory())
mode_mapping = {"Final": StreamingMode.FINAL, "All": StreamingMode.ALL}
mode_mapping = {"Answer": StreamingMode.FINAL, "Steps": StreamingMode.ALL}
mode = mode_mapping.get(streaming_mode, StreamingMode.FINAL)
streaming_config = StreamingConfig(enabled=streaming_enabled, mode=mode)
streaming_config = StreamingConfig(enabled=streaming_enabled, mode=mode, by_tokens=streaming_tokens)
tool_search = ScaleSerpTool(connection=ScaleSerp())
tool_code = E2BInterpreterTool(connection=E2B())
agent = ReActAgent(
Expand All @@ -44,6 +44,7 @@ def generate_agent_response(agent: ReActAgent, user_input: str):
response_text = ""

for chunk in streaming_handler:
print(chunk)
content = chunk.data.get("choices", [{}])[0].get("delta", {}).get("content", "")
if content:
response_text += " " + content
Expand Down
2 changes: 1 addition & 1 deletion examples/use_case_streaming_agents/reflection/app.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
agent_role = st.sidebar.text_input("Agent Role", "helpful assistant")
streaming_enabled = st.sidebar.checkbox("Enable Streaming", value=False)

streaming_mode = st.sidebar.radio("Streaming Mode", options=["Final", "All"], index=0) # Default to "Final"
streaming_mode = st.sidebar.radio("Streaming Mode", options=["Answer", "Steps"], index=0) # Default to "Final"

if "agent" not in st.session_state or st.sidebar.button("Apply Changes"):
st.session_state.agent = setup_agent(agent_role, streaming_enabled, streaming_mode)
Expand Down
2 changes: 1 addition & 1 deletion examples/use_case_streaming_agents/reflection/backend.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ def setup_agent(agent_role: str, streaming_enabled: bool, streaming_mode: str) -
llm = setup_llm()
memory = Memory(backend=InMemory())

mode_mapping = {"Final": StreamingMode.FINAL, "All": StreamingMode.ALL}
mode_mapping = {"Answer": StreamingMode.FINAL, "Steps": StreamingMode.ALL}
mode = mode_mapping.get(streaming_mode, StreamingMode.FINAL)
streaming_config = StreamingConfig(enabled=streaming_enabled, mode=mode)

Expand Down
Loading

1 comment on commit e6424de

@github-actions
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Coverage

Coverage Report
FileStmtsMissCoverMissing
dynamiq
   __init__.py30100% 
dynamiq/cache
   __init__.py10100% 
   codecs.py11281%16, 27
   config.py14192%34
   utils.py260100% 
dynamiq/cache/backends
   __init__.py20100% 
   base.py23578%34, 47, 59, 72, 84
   redis.py16475%20, 22, 48, 59
dynamiq/cache/managers
   __init__.py20100% 
   base.py39489%117–118, 120, 146
   workflow.py28292%73–74
dynamiq/callbacks
   __init__.py30100% 
   base.py491275%19, 31, 43, 79, 115, 132, 168, 187, 205, 209–210, 212
   streaming.py551572%96, 126–127, 135, 160–161, 169–174, 182, 190–191
   tracing.py169497%64, 120, 485, 507
dynamiq/clients
   __init__.py10100% 
   base.py8275%5, 21
dynamiq/components
   __init__.py00100% 
   serializers.py27966%25, 40, 58, 70, 101, 120, 122, 134, 136
dynamiq/components/converters
   __init__.py00100% 
   base.py491569%43, 48–50, 53–54, 61–62, 64–65, 98, 102, 107, 118, 122
   pptx.py34973%59–61, 66, 96–97, 101–102, 104
   pypdf.py602066%71–72, 77, 100–102, 125–135, 137–138, 140
   unstructured.py1164858%63, 69–70, 82–83, 90, 109, 143, 194–195, 200, 220–221, 228–229, 232, 259–260, 263, 283, 292–302, 304–305, 307, 312–325
   utils.py11463%23–25, 27
dynamiq/components/embedders
   __init__.py00100% 
   base.py71790%76, 80, 137–138, 159, 163, 167
   bedrock.py18477%19, 26–28
   cohere.py18288%21, 29
   huggingface.py10190%19
   mistral.py10190%19
   openai.py16287%44, 51
   watsonx.py10190%19
dynamiq/components/retrievers
   __init__.py00100% 
   chroma.py19289%33–34
   milvus.py19194%31
   pinecone.py17288%34–35
   qdrant.py17288%31–32
   weaviate.py19289%31–32
dynamiq/components/splitters
   __init__.py00100% 
   document.py55787%60, 63, 83, 88, 97, 118, 124
dynamiq/connections
   __init__.py20100% 
   connections.py3684587%14–18, 102, 110, 121, 144, 531–532, 534–535, 539–540, 542–543, 556–557, 559, 587, 589, 601, 603–605, 646–647, 669–670, 676–678, 680–683, 685, 702–703, 740–741, 749, 857, 912
   managers.py60591%70–71, 107, 167, 179
   storages.py14192%51
dynamiq/executors
   __init__.py00100% 
   base.py12283%35, 58
   pool.py601181%90, 111–112, 115, 163–164, 179, 192, 203–204, 215
dynamiq/flows
   __init__.py20100% 
   base.py25196%35
   flow.py1273175%68, 75, 82, 116, 159–161, 246–247, 249, 268–269, 271, 274, 293–294, 297–298, 301–302, 304–305, 308–310, 312–316, 318
dynamiq/loaders
   __init__.py00100% 
   yaml.py3009070%55, 58, 62–63, 66–69, 72, 97, 101, 112–113, 137–138, 157, 186, 213, 237, 240, 243, 246, 266–270, 273, 293–297, 300–301, 327–328, 333–334, 339–340, 382, 385–386, 397–400, 413–414, 452, 455, 458, 492, 496, 514–515, 599–600, 602–603, 607–610, 612, 617, 656, 659, 666, 674, 687–688, 720, 722, 725, 730, 757, 761, 765, 769–770, 823–824, 853, 902–906
dynamiq/memory
   __init__.py10100% 
   memory.py592852%17, 33–35, 50, 53, 56, 60–61, 67–77, 79, 83, 87–92
dynamiq/memory/backend
   __init__.py50100% 
   base.py19573%15, 20, 25, 30, 35
   in_memory.py704634%19–22, 26–27, 31, 35–39, 41–49, 51, 67–68, 76–81, 83–84, 88–89, 91–100, 104, 108
   pinecone.py765527%34–37, 39–40, 54–55, 57–58, 62, 71–73, 77–80, 82, 84–85, 89–94, 98–99, 101–106, 110–111, 113–115, 122–124, 132, 134–136, 140–143, 147–150
   qdrant.py856325%41–43, 45–46, 55–59, 63, 72–74, 85–88, 90, 93–94, 105–110, 123–127, 133–136, 138, 140–142, 146–147, 149–156, 158, 160–161, 165–170, 174–177
   sqlite.py1199421%50–51, 53–56, 60–61, 63–67, 69–71, 73, 75–76, 80–87, 91–96, 106, 108–109, 113–119, 124–125, 129–135, 137–138, 142–149, 153–156, 158–160, 162–164, 166–168, 171–173, 175–176, 178–182, 184–187, 189, 191–192, 194–195
dynamiq/nodes
   __init__.py20100% 
   exceptions.py15193%4
   managers.py12741%28–34
   node.py3454686%249, 260, 275, 280–281, 285–286, 309–310, 312, 327, 345–346, 376, 378, 385–386, 587–589, 658–664, 666–671, 675, 677, 679, 854, 872, 880, 883, 901–903, 981, 1006, 1012
   types.py230100% 
dynamiq/nodes/agents
   __init__.py40100% 
   base.py30213455%100, 104–109, 115, 118–120, 124–126, 152, 156, 160, 181–186, 188–189, 197–198, 224–225, 231–235, 251, 255–257, 260–262, 266–271, 276–277, 282, 285, 288, 290, 295, 304, 313–315, 319–320, 325–329, 331–332, 334–337, 349–350, 354–356, 362, 366–369, 371, 377–381, 383–384, 401–406, 412, 417, 450–451, 455–456, 460, 464, 468, 474–477, 481–483, 487, 489–490, 492–493, 495, 500, 503, 507–511, 515–519, 523–527
   exceptions.py15380%7–8, 61
   react.py22417621%232–235, 237–240, 242, 246–247, 251–253, 255–258, 266, 270–271, 273–274, 276, 279, 282, 290–291, 306–307, 309–310, 317, 319, 321, 323, 332, 334–335, 339–340, 342–347, 351–352, 354, 361–365, 372, 374, 376–378, 381–384, 392–396, 403, 405–406, 410–412, 414–416, 424–428, 435, 437–438, 440–444, 451–455, 462–463, 465–467, 469–472, 474, 478–479, 481–484, 492, 501, 503–509, 513, 515–517, 524, 532–535, 537, 541–543, 546–548, 554–555, 558, 560, 587, 592–599, 602–609, 611, 635, 639, 641, 649–661, 663
   reflection.py473427%42–43, 46–47, 50, 63–65, 67–70, 72, 75–76, 79–87, 94–96, 100–102, 104–107
   simple.py30100% 
dynamiq/nodes/audio
   __init__.py20100% 
   elevenlabs.py76692%39, 41, 89, 127, 172, 214
   whisper.py47882%46, 72, 79–80, 82, 108, 124, 126
dynamiq/nodes/converters
   __init__.py40100% 
   llm_text_extractor.py17312130%14, 49–53, 107–108, 114, 124, 132–134, 145–147, 175–177, 179, 187, 210–211, 213, 215–218, 225–226, 233–234, 236–239, 241–243, 245–246, 248–253, 255, 267, 269, 290, 292, 294–296, 298, 302, 326–327, 332, 334–335, 344, 348, 363, 370, 372–375, 389–390, 392–397, 410, 433–439, 442–443, 496, 498–500, 527–529, 531, 539, 562–563, 565, 567–570, 577–578, 585–586, 588–591, 593–594, 596–601, 603
   pptx.py31196%33
   pypdf.py32196%34
   unstructured.py39197%58
dynamiq/nodes/embedders
   __init__.py60100% 
   bedrock.py53296%48, 135
   cohere.py53296%48, 136
   huggingface.py53296%48, 137
   mistral.py53296%49, 137
   openai.py55296%53, 151
   watsonx.py53296%46, 130
dynamiq/nodes/llms
   __init__.py200100% 
   ai21.py9188%24
   anthropic.py8187%22
   anyscale.py9188%24
   azureai.py9188%24
   base.py104892%13, 173, 231–233, 235–237
   bedrock.py9188%24
   cerebras.py9188%24
   cohere.py8362%21–23
   custom_llm.py40100% 
   deepinfra.py9188%24
   gemini.py21195%43
   groq.py9188%24
   huggingface.py9188%24
   mistral.py9188%24
   openai.py8187%22
   perplexity.py9188%25
   replicate.py9188%24
   sambanova.py9366%23–25
   togetherai.py9188%24
   watsonx.py9188%24
dynamiq/nodes/operators
   __init__.py10100% 
   operators.py1552981%158, 174, 176, 178, 180, 184–191, 193, 245–246, 258–260, 287, 305–309, 311, 313–314, 316
dynamiq/nodes/retrievers
   __init__.py50100% 
   chroma.py370100% 
   milvus.py370100% 
   pinecone.py410100% 
   qdrant.py370100% 
   weaviate.py370100% 
dynamiq/nodes/splitters
   __init__.py10100% 
   document.py300100% 
dynamiq/nodes/tools
   __init__.py80100% 
   e2b_sandbox.py17311831%50, 64–65, 67–68, 75–78, 80–82, 90, 113–115, 122, 160, 163–165, 167, 177, 189–192, 196–200, 204–206, 210–212, 216–219, 226, 228–229, 233–234, 237–238, 241–242, 244, 248–252, 257, 261–268, 272–274, 278–279, 281–285, 291–292, 294–295, 297–301, 303–315, 321–323, 325–335, 337–338, 342–345
   firecrawl.py713649%63–64, 68–69, 77–78, 80, 86, 88–89, 91–94, 96, 107–108, 112–113, 119–122, 125, 127, 129–136, 140, 152, 154
   http_api_call.py651380%28–34, 36, 90, 104, 113, 122–123
   llm_summarizer.py552750%95–97, 103, 113, 125–127, 144–145, 151–154, 176–178, 180–181, 186–188, 192–193, 195, 197, 201
   python.py661872%46–47, 52–54, 101, 105–106, 169, 173–177, 193, 207–209
   scale_serp.py714733%23–25, 70–71, 73–78, 80–81, 90, 105, 107–108, 110–112, 114–115, 119, 121, 123–124, 129–132, 135, 137, 139, 141–147, 149, 154–155, 163, 165, 172, 174
   tavily.py613345%83–91, 93, 107, 109–110, 112–113, 126, 128–129, 134–137, 140, 142, 144, 146, 150–151, 158–159, 162, 172, 174
   zenrows.py351654%52, 55–56, 58, 63, 65, 70–73, 75, 77–78, 83–84, 86
dynamiq/nodes/utils
   __init__.py10100% 
   utils.py110100% 
dynamiq/nodes/validators
   __init__.py50100% 
   base.py22577%37–40, 45
   regex_match.py14192%37
   valid_choices.py9188%31
   valid_json.py10280%27–28
   valid_python.py8275%23–24
dynamiq/nodes/writers
   __init__.py50100% 
   chroma.py271062%37–39, 43, 47, 69–70, 72, 74–75
   milvus.py29196%39
   pinecone.py40490%41, 54, 57, 60
   qdrant.py28196%38
   weaviate.py291162%38–40, 44, 48, 69–70, 72, 74–75, 77
dynamiq/prompts
   __init__.py10100% 
   prompts.py841582%98, 141, 154, 197–201, 206–207, 216–217, 221, 223, 230
dynamiq/runnables
   __init__.py10100% 
   base.py45197%145
dynamiq/storages
   __init__.py00100% 
dynamiq/storages/vector
   __init__.py50100% 
   base.py50100% 
   exceptions.py60100% 
   policies.py60100% 
   utils.py20100% 
dynamiq/storages/vector/chroma
   __init__.py10100% 
   chroma.py18614919%10–11, 56–57, 60, 71, 89–91, 94, 96, 98–99, 101–102, 104, 106, 117–119, 123–124, 128, 140–141, 143–144, 154–155, 176–177, 183–184, 192, 237–239, 241–244, 246, 248, 252, 261–262, 284–285, 288–289, 292, 294–295, 298–300, 302–304, 306, 308–315, 317–319, 321–323, 325–327, 329, 338–341, 343–358, 360–361, 363–364, 366, 382–384, 387, 389, 402–403, 407, 423–425, 427, 429–430, 432, 446–449, 451–454, 459–460, 462–463, 465–466, 468–469, 471, 485–488, 490–491, 496–497, 499–500, 502–503, 505–506
dynamiq/storages/vector/milvus
   __init__.py10100% 
   filter.py34294%49, 102
   milvus.py108991%12, 55–57, 61, 97, 178, 299–300
dynamiq/storages/vector/pinecone
   __init__.py10100% 
   filters.py958015%22–24, 26–28, 45–50, 52–53, 55–56, 58–59, 75, 78, 80–88, 93, 95, 97, 114–116, 120, 122, 139–141, 145, 147, 164–166, 170, 172, 189–191, 195, 197, 214–216, 220, 222, 239–241, 245, 247, 264–265, 268, 270–273, 277, 279, 296–298, 300–303, 307, 309
   pinecone.py1645964%17, 81–83, 117, 131, 143, 145–146, 148, 161–163, 170, 172, 190–192, 197, 201–202, 212–214, 216–217, 221, 233, 237, 243–244, 246–247, 257–258, 271–273, 275–277, 279–281, 283, 290, 292–293, 302–303, 306–308, 324–325, 354, 357, 396–397
dynamiq/storages/vector/qdrant
   __init__.py10100% 
   converters.py411368%26, 28–30, 32–35, 67–68, 70–71, 75
   filters.py1333375%61–62, 65–66, 80–81, 91–92, 99, 111, 177–178, 186–187, 217–218, 227, 233–234, 261–262, 277, 282–283, 288, 293–294, 299, 304–305, 310, 315–316
   qdrant.py2677571%29, 240, 256, 280, 284, 302–303, 306, 335–336, 339–340, 380–381, 445, 485, 518–519, 523, 525–528, 540, 544–549, 591–594, 596–597, 633–634, 638, 640, 642–643, 666–668, 670, 672, 688–689, 694, 747–748, 761, 763, 768–769, 777, 779–780, 785, 787–789, 791–792, 794–795, 801, 803–804, 810, 843, 845, 897–898
dynamiq/storages/vector/weaviate
   __init__.py10100% 
   filters.py12610516%22–24, 26–28, 56–63, 65, 87–92, 94–99, 101–105, 107–108, 121–126, 140–142, 156–157, 159, 178–184, 188–189, 206–212, 216–217, 234–240, 244–245, 262–268, 272–273, 290–292, 294, 311–315, 343, 345, 351, 353–360, 362, 377–378
   weaviate.py16512723%17–18, 57–59, 67–68, 70, 80–81, 90–91, 103–105, 107–108, 110–111, 113, 125–126, 128, 130–133, 135, 137–139, 141–145, 147–148, 150, 158, 160, 172–174, 177–180, 195, 197–200, 203–204, 211–216, 228–229, 231–232, 244–245, 249–251, 267–271, 273, 279–284, 286, 292, 294, 311–316, 318, 321, 323–324, 330–337, 352–353, 355, 368–371, 373–375, 386–387, 389, 398–399, 418–419, 429, 457–459, 461–462, 473
dynamiq/types
   __init__.py10100% 
   document.py160100% 
   streaming.py37391%57, 65, 97
dynamiq/utils
   __init__.py20100% 
   duration.py11190%45
   env.py80100% 
   jsonpath.py461469%18–19, 41, 43, 50–51, 55, 59–61, 84, 94–96
   logger.py100100% 
   utils.py61788%32, 34, 94–96, 149–150
dynamiq/workflow
   __init__.py10100% 
   workflow.py671774%15, 31, 34, 55, 57–58, 61–63, 65, 82–83, 86, 91–94
examples
   __init__.py00100% 
examples/rag
   __init__.py00100% 
   dag_yaml.py37878%65–67, 74–75, 81, 86, 88
   utils.py190100% 
tests
   __init__.py00100% 
   conftest.py78198%83
tests/integration
   __init__.py00100% 
tests/integration/flows
   __init__.py00100% 
tests/integration/nodes
   __init__.py00100% 
tests/integration/nodes/audio
   __init__.py00100% 
tests/integration/nodes/llms
   __init__.py00100% 
tests/integration/nodes/operators
   __init__.py00100% 
tests/integration/nodes/tools
   __init__.py00100% 
tests/integration/nodes/validators
   __init__.py00100% 
tests/integration_with_creds
   __init__.py00100% 
TOTAL7568234269% 

Tests Skipped Failures Errors Time
245 0 💤 0 ❌ 0 🔥 2m 47s ⏱️

Please sign in to comment.