Skip to content

Commit

Permalink
make the tests independent of the log level
Browse files Browse the repository at this point in the history
  • Loading branch information
masci committed Apr 12, 2024
1 parent 5e13577 commit 14f796d
Show file tree
Hide file tree
Showing 4 changed files with 9 additions and 5 deletions.
2 changes: 1 addition & 1 deletion haystack/core/pipeline/pipeline.py
Original file line number Diff line number Diff line change
Expand Up @@ -849,7 +849,7 @@ def run(self, word: str):
) as span:
span.set_content_tag("haystack.component.input", last_inputs[name])

logger.info("Running component {name}", name=name)
logger.info("Running component {component_name}", component_name=name)
res = comp.run(**last_inputs[name])
self.graph.nodes[name]["visits"] += 1

Expand Down
4 changes: 3 additions & 1 deletion test/components/generators/chat/test_openai.py
Original file line number Diff line number Diff line change
@@ -1,12 +1,13 @@
import logging
import os

import pytest
from openai import OpenAIError
from haystack.utils.auth import Secret

from haystack.components.generators.chat import OpenAIChatGenerator
from haystack.components.generators.utils import print_streaming_chunk
from haystack.dataclasses import ChatMessage, StreamingChunk
from haystack.utils.auth import Secret


@pytest.fixture
Expand Down Expand Up @@ -191,6 +192,7 @@ def streaming_callback(chunk: StreamingChunk) -> None:
assert "Hello" in response["replies"][0].content # see mock_chat_completion_chunk

def test_check_abnormal_completions(self, caplog):
caplog.set_level(logging.INFO)
component = OpenAIChatGenerator(api_key=Secret.from_token("test-api-key"))
messages = [
ChatMessage.from_assistant(
Expand Down
6 changes: 4 additions & 2 deletions test/components/generators/test_openai.py
Original file line number Diff line number Diff line change
@@ -1,13 +1,14 @@
import logging
import os
from typing import List
from haystack.utils.auth import Secret

import pytest
from openai import OpenAIError

from haystack.components.generators import OpenAIGenerator
from haystack.components.generators.utils import print_streaming_chunk
from haystack.dataclasses import StreamingChunk, ChatMessage
from haystack.dataclasses import ChatMessage, StreamingChunk
from haystack.utils.auth import Secret


class TestOpenAIGenerator:
Expand Down Expand Up @@ -181,6 +182,7 @@ def test_run_with_params(self, mock_chat_completion):
assert [isinstance(reply, str) for reply in response["replies"]]

def test_check_abnormal_completions(self, caplog):
caplog.set_level(logging.INFO)
component = OpenAIGenerator(api_key=Secret.from_token("test-api-key"))

# underlying implementation uses ChatMessage objects so we have to use them here
Expand Down
2 changes: 1 addition & 1 deletion test/test_logging.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,7 @@ def test_log_filtering_when_using_debug(self, capfd: CaptureFixture) -> None:
haystack_logging.configure_logging(use_json=False)

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.debug("Hello, structured logging!", extra={"key1": "value1", "key2": "value2"})

# Use `capfd` to capture the output of the final structlog rendering result
Expand Down Expand Up @@ -372,7 +373,6 @@ def test_correct_stack_level_with_stdlib_rendering(
# the pytest fixture caplog only captures logs being rendered from the stdlib logging module
assert caplog.messages == ["Hello, structured logging!"]
assert caplog.records[0].name == "test.test_logging"
assert caplog.records[0].lineno == 370

# Nothing should be captured by capfd since structlog is not configured
assert capfd.readouterr().err == ""
Expand Down

0 comments on commit 14f796d

Please sign in to comment.