feat: support metadata output from LLM #685

GitHub Actions / JUnit Test Report failed Jan 15, 2025 in 0s

245 tests run, 230 passed, 9 skipped, 6 failed.
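
All six failures appear to trace back to the new metadata output: generate_raw now returns a dict carrying the response text alongside token counts, and the two sides of the codebase disagree about that shape. The four compressor failures happen because MockLLM still returns a bare string that LLM.generate then tries to index with "response"; the two LiteLLM failures happen because the tests still compare the new dict against the bare string.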

Annotations

Check failure on line 21 in packages/ragbits-conversations/tests/unit/history/test_llm_compressor.py

test_llm_compressor.test_messages_included

TypeError: string indices must be integers
Raw output
async def test_messages_included():
        conversation: ChatFormat = [
            {"role": "user", "content": "foo1"},
            {"role": "assistant", "content": "foo2"},
            {"role": "user", "content": "foo3"},
        ]
        llm = MockLLM(default_options=MockLLMOptions(response="some answer"))
        compressor = StandaloneMessageCompressor(llm)
>       answer = await compressor.compress(conversation)

packages/ragbits-conversations/tests/unit/history/test_llm_compressor.py:21: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
packages/ragbits-conversations/src/ragbits/conversations/history/compressors/llm.py:85: in compress
    response = await self._llm.generate(prompt)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <ragbits.core.llms.mock.MockLLM object at 0x7fb7576bc6d0>
prompt = <ragbits.conversations.history.compressors.llm.StandaloneMessageCompressorPrompt object at 0x7fb7576a8bb0>

    async def generate(
        self,
        prompt: BasePrompt,
        *,
        options: LLMClientOptionsT | None = None,
    ) -> OutputT:
        """
        Prepares and sends a prompt to the LLM and returns response parsed to the
        output type of the prompt (if available).
    
        Args:
            prompt: Formatted prompt template with conversation and optional response parsing configuration.
            options: Options to use for the LLM client.
    
        Returns:
            Text response from LLM.
        """
        response = await self.generate_raw(prompt, options=options)
>       response_str = cast(str, response["response"])
E       TypeError: string indices must be integers

packages/ragbits-core/src/ragbits/core/llms/base.py:126: TypeError
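
This failure repeats in the next three compressor tests. Below is a minimal, self-contained sketch of what goes wrong at base.py:126, using only the dict shape visible in the LiteLLM assertion errors further down (illustrative code, not the ragbits source; the metadata values are placeholders):

from typing import cast

def mock_generate_raw_old() -> str:
    # MockLLM's generate_raw still returns a plain string
    return "some answer"

raw = mock_generate_raw_old()
try:
    # base.py:126 now does: response_str = cast(str, response["response"])
    cast(str, raw["response"])  # type: ignore[index]
except TypeError as exc:
    print(exc)  # string indices must be integers

# The dict shape generate() now expects, per the LiteLLM failures below:
raw_new = {
    "response": "some answer",
    "prompt_tokens": 10,
    "completion_tokens": 20,
    "total_tokens": 30,
}
print(cast(str, raw_new["response"]))  # some answer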

Check failure on line 59 in packages/ragbits-conversations/tests/unit/history/test_llm_compressor.py

test_llm_compressor.test_history_len

TypeError: string indices must be integers
Raw output
async def test_history_len():
        conversation: ChatFormat = [
            {"role": "user", "content": "foo1"},
            {"role": "assistant", "content": "foo2"},
            {"role": "user", "content": "foo3"},
            {"role": "user", "content": "foo4"},
            {"role": "user", "content": "foo5"},
        ]
        llm = MockLLM()
        compressor = StandaloneMessageCompressor(llm, history_len=3)
>       await compressor.compress(conversation)

packages/ragbits-conversations/tests/unit/history/test_llm_compressor.py:59: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
packages/ragbits-conversations/src/ragbits/conversations/history/compressors/llm.py:85: in compress
    response = await self._llm.generate(prompt)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <ragbits.core.llms.mock.MockLLM object at 0x7fb7576097e0>
prompt = <ragbits.conversations.history.compressors.llm.StandaloneMessageCompressorPrompt object at 0x7fb75760ad70>

    async def generate(
        self,
        prompt: BasePrompt,
        *,
        options: LLMClientOptionsT | None = None,
    ) -> OutputT:
        """
        Prepares and sends a prompt to the LLM and returns response parsed to the
        output type of the prompt (if available).
    
        Args:
            prompt: Formatted prompt template with conversation and optional response parsing configuration.
            options: Options to use for the LLM client.
    
        Returns:
            Text response from LLM.
        """
        response = await self.generate_raw(prompt, options=options)
>       response_str = cast(str, response["response"])
E       TypeError: string indices must be integers

packages/ragbits-core/src/ragbits/core/llms/base.py:126: TypeError

Check failure on line 88 in packages/ragbits-conversations/tests/unit/history/test_llm_compressor.py

test_llm_compressor.test_only_user_and_assistant_messages_in_history

TypeError: string indices must be integers
Raw output
async def test_only_user_and_assistant_messages_in_history():
        conversation: ChatFormat = [
            {"role": "user", "content": "foo4"},
            {"role": "system", "content": "foo1"},
            {"role": "unknown", "content": "foo2"},
            {"role": "assistant", "content": "foo3"},
            {"role": "user", "content": "foo4"},
            {"role": "assistant", "content": "foo5"},
            {"role": "user", "content": "foo6"},
        ]
        llm = MockLLM()
        compressor = StandaloneMessageCompressor(llm, history_len=4)
>       await compressor.compress(conversation)

packages/ragbits-conversations/tests/unit/history/test_llm_compressor.py:88: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
packages/ragbits-conversations/src/ragbits/conversations/history/compressors/llm.py:85: in compress
    response = await self._llm.generate(prompt)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <ragbits.core.llms.mock.MockLLM object at 0x7fb757683f40>
prompt = <ragbits.conversations.history.compressors.llm.StandaloneMessageCompressorPrompt object at 0x7fb7576815d0>

    async def generate(
        self,
        prompt: BasePrompt,
        *,
        options: LLMClientOptionsT | None = None,
    ) -> OutputT:
        """
        Prepares and sends a prompt to the LLM and returns response parsed to the
        output type of the prompt (if available).
    
        Args:
            prompt: Formatted prompt template with conversation and optional response parsing configuration.
            options: Options to use for the LLM client.
    
        Returns:
            Text response from LLM.
        """
        response = await self.generate_raw(prompt, options=options)
>       response_str = cast(str, response["response"])
E       TypeError: string indices must be integers

packages/ragbits-core/src/ragbits/core/llms/base.py:126: TypeError

Check failure on line 108 in packages/ragbits-conversations/tests/unit/history/test_llm_compressor.py

test_llm_compressor.test_changing_prompt

TypeError: string indices must be integers
Raw output
async def test_changing_prompt():
        conversation: ChatFormat = [
            {"role": "user", "content": "foo1"},
            {"role": "assistant", "content": "foo2"},
            {"role": "user", "content": "foo3"},
        ]
        llm = MockLLM()
        compressor = StandaloneMessageCompressor(llm, prompt=MockPrompt)
>       await compressor.compress(conversation)

packages/ragbits-conversations/tests/unit/history/test_llm_compressor.py:108: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
packages/ragbits-conversations/src/ragbits/conversations/history/compressors/llm.py:85: in compress
    response = await self._llm.generate(prompt)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <ragbits.core.llms.mock.MockLLM object at 0x7fb75776e4d0>
prompt = <test_llm_compressor.MockPrompt object at 0x7fb75776efe0>

    async def generate(
        self,
        prompt: BasePrompt,
        *,
        options: LLMClientOptionsT | None = None,
    ) -> OutputT:
        """
        Prepares and sends a prompt to the LLM and returns response parsed to the
        output type of the prompt (if available).
    
        Args:
            prompt: Formatted prompt template with conversation and optional response parsing configuration.
            options: Options to use for the LLM client.
    
        Returns:
            Text response from LLM.
        """
        response = await self.generate_raw(prompt, options=options)
>       response_str = cast(str, response["response"])
E       TypeError: string indices must be integers

packages/ragbits-core/src/ragbits/core/llms/base.py:126: TypeError

Check failure on line 88 in packages/ragbits-core/tests/unit/llms/test_litellm.py

test_litellm.test_generation_with_parser

assert {'completion_tokens': 20, 'prompt_tokens': 10, 'response': "I'm fine, thank you.", 'total_tokens': 30} == "I'm fine, thank you."
Raw output
async def test_generation_with_parser():
        """Test generation of a response with a parser."""
        llm = LiteLLM(api_key="test_key")
        prompt = MockPromptWithParser("Hello, how are you?")
        options = LiteLLMOptions(mock_response="I'm fine, thank you.")
        output = await llm.generate(prompt, options=options)
        assert output == 42
        raw_output = await llm.generate_raw(prompt, options=options)
>       assert raw_output == "I'm fine, thank you."
E       assert {'completion_tokens': 20, 'prompt_tokens': 10, 'response': "I'm fine, thank you.", 'total_tokens': 30} == "I'm fine, thank you."

packages/ragbits-core/tests/unit/llms/test_litellm.py:88: AssertionError
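
The two LiteLLM failures are the mirror image of the compressor ones: generate_raw now returns the full dict (response text plus token counts), while the tests still compare it to the bare string. A hedged sketch of how this test's tail could be adapted, assuming only the dict shape printed in the assertion error above:

async def test_generation_with_parser():
    """Test generation of a response with a parser."""
    llm = LiteLLM(api_key="test_key")
    prompt = MockPromptWithParser("Hello, how are you?")
    options = LiteLLMOptions(mock_response="I'm fine, thank you.")
    output = await llm.generate(prompt, options=options)
    assert output == 42
    raw_output = await llm.generate_raw(prompt, options=options)
    assert raw_output["response"] == "I'm fine, thank you."  # text payload
    assert raw_output["total_tokens"] == 30                  # usage metadata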

Check failure on line 120 in packages/ragbits-core/tests/unit/llms/test_litellm.py

test_litellm.test_generation_with_static_prompt_with_parser

AssertionError: assert {'completion_tokens': 20, 'prompt_tokens': 10, 'response': '42', 'total_tokens': 30} == '42'
Raw output
async def test_generation_with_static_prompt_with_parser():
        """Test generation of a response with a static prompt with a parser."""
    
        class StaticPromptWithParser(Prompt[None, int]):
            """A static prompt."""
    
            user_prompt = "Hello, how are you?"
    
        llm = LiteLLM(api_key="test_key")
        prompt = StaticPromptWithParser()
        options = LiteLLMOptions(mock_response="42")
        output = await llm.generate(prompt, options=options)
        assert output == 42
        raw_output = await llm.generate_raw(prompt, options=options)
>       assert raw_output == "42"
E       AssertionError: assert {'completion_tokens': 20, 'prompt_tokens': 10, 'response': '42', 'total_tokens': 30} == '42'

packages/ragbits-core/tests/unit/llms/test_litellm.py:120: AssertionError
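
test_generation_with_static_prompt_with_parser fails for the same reason and would take the same fix: with the dict-shaped return value, the final assertion should compare raw_output["response"] (here "42") rather than the whole dict against the string.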