feat: Added support for moderations in generation_info (#42)
MateuszOssGit authored Nov 19, 2024
1 parent 3b34b94 commit ec5b5f0
Showing 4 changed files with 160 additions and 131 deletions.
56 changes: 33 additions & 23 deletions libs/ibm/langchain_ibm/llms.py
@@ -318,19 +318,24 @@ def _get_chat_params(
 
     def _create_llm_result(self, response: List[dict]) -> LLMResult:
         """Create the LLMResult from the choices and prompts."""
-        generations = []
-        for res in response:
-            results = res.get("results")
-            if results:
-                finish_reason = results[0].get("stop_reason")
-                gen = Generation(
-                    text=results[0].get("generated_text"),
-                    generation_info={"finish_reason": finish_reason},
+        generations = [
+            [
+                Generation(
+                    text=result.get("generated_text", ""),
+                    generation_info={"finish_reason": result.get("stop_reason")}
+                    | (
+                        {"moderations": moderations}
+                        if (moderations := result.get("moderations"))
+                        else {}
+                    ),
                 )
-                generations.append([gen])
-        final_token_usage = self._extract_token_usage(response)
+            ]
+            for res in response
+            if (results := res.get("results"))
+            for result in results
+        ]
         llm_output = {
-            "token_usage": final_token_usage,
+            "token_usage": self._extract_token_usage(response),
             "model_id": self.model_id,
             "deployment_id": self.deployment_id,
         }
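The new comprehension builds each Generation's generation_info by merging two dicts with the PEP 584 | operator (Python 3.9+), attaching a moderations key only when the walrus assignment finds one in the result. A minimal standalone sketch of that pattern, with an illustrative result payload (not the actual watsonx.ai schema):

result = {
    "generated_text": "Hello",
    "stop_reason": "eos_token",
    "moderations": {"hap": [{"score": 0.01}]},  # illustrative shape only
}

# Merge the base info with an optional "moderations" entry.
info = {"finish_reason": result.get("stop_reason")} | (
    {"moderations": moderations}
    if (moderations := result.get("moderations"))
    else {}
)
# info == {"finish_reason": "eos_token", "moderations": {"hap": [{"score": 0.01}]}}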
@@ -341,22 +346,27 @@ def _stream_response_to_generation_chunk(
         stream_response: Dict[str, Any],
     ) -> GenerationChunk:
         """Convert a stream response to a generation chunk."""
-        if not stream_response["results"]:
+        result = stream_response.get("results", [{}])[0]
+        if not result:
             return GenerationChunk(text="")
 
-        finish_reason = stream_response["results"][0].get("stop_reason", None)
+        finish_reason = result.get("stop_reason")
+        finish_reason = None if finish_reason == "not_finished" else finish_reason
+
+        generation_info = {
+            "finish_reason": finish_reason,
+            "llm_output": {
+                "model_id": self.model_id,
+                "deployment_id": self.deployment_id,
+            },
+        }
+
+        if moderations := result.get("moderations"):
+            generation_info["moderations"] = moderations
 
         return GenerationChunk(
-            text=stream_response["results"][0]["generated_text"],
-            generation_info=dict(
-                finish_reason=(
-                    None if finish_reason == "not_finished" else finish_reason
-                ),
-                llm_output={
-                    "model_id": self.model_id,
-                    "deployment_id": self.deployment_id,
-                },
-            ),
+            text=result.get("generated_text", ""),
+            generation_info=generation_info,
         )
 
     def _call(
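A hypothetical end-to-end sketch of where the new field surfaces for callers, assuming a watsonx.ai account; the model_id, url, project_id, and apikey values are placeholders, and "moderations" appears only when the service returns moderation results:

# Placeholder credentials and model; not part of this commit.
from langchain_ibm import WatsonxLLM

llm = WatsonxLLM(
    model_id="ibm/granite-13b-instruct-v2",
    url="https://us-south.ml.cloud.ibm.com",
    project_id="YOUR_PROJECT_ID",
    apikey="YOUR_API_KEY",
)

result = llm.generate(["Tell me about IBM watsonx.ai"])
info = result.generations[0][0].generation_info or {}
print(info.get("finish_reason"))
# With this change, moderation results (when present in the response)
# are surfaced alongside the finish reason:
print(info.get("moderations"))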
