
Commit

fixing an issue with sentence construction
iterix committed Oct 23, 2023
1 parent 9aca3c8 commit 7acab79
Showing 3 changed files with 30 additions and 2 deletions.
15 changes: 14 additions & 1 deletion auditor/evaluation/expected_behavior.py
@@ -11,6 +11,7 @@
from auditor.utils.progress_logger import ProgressLogger
from auditor.utils.similarity import compute_similarity
from auditor.utils.logging import get_logger
from auditor.utils.format import construct_llm_input

FAILED_TEST = 0
PASSED_TEST = 1
@@ -228,6 +229,10 @@ def check(
post_context: Optional[str],
) -> List[Tuple[bool, Dict[str, float]]]:
test_results = []
progress_bar = ProgressLogger(
total_steps=len(perturbed_generations),
description=f"Grading responses with {self.grading_model}"
)
for peturbed_gen in perturbed_generations:
try:
rationale, test_status = self._grade(
@@ -241,9 +246,13 @@ def check(
self.metric_key: rationale,
}
test_results.append((test_status, score_dict))
progress_bar.update()
except Exception as e:
                # LOG.error('Unable to complete semantic similarity checks')
progress_bar.close()
raise e

progress_bar.close()
return test_results

def _grade(
@@ -254,7 +263,11 @@ def _grade(
pre_context: Optional[str],
post_context: Optional[str],
):
query = pre_context + prompt + post_context
query = construct_llm_input(
prompt=prompt,
pre_context=pre_context,
post_context=post_context,
)
grading_str = (
f'Given the following context and question are the following two answers factually same?' # noqa: E501
            f'If the responses provide different details when asked a question they must be flagged as different.\n'  # noqa: E501
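For context, the change above wraps the per-response grading loop in a progress logger that is closed on both the success and the failure path. A minimal sketch of that pattern, assuming only the ProgressLogger constructor and the update/close calls visible in the diff (items and grade_one are hypothetical placeholders, not repository code):

from auditor.utils.progress_logger import ProgressLogger

def grade_all(items, grade_one):
    # One progress bar sized to the number of responses to grade.
    progress_bar = ProgressLogger(
        total_steps=len(items),
        description="Grading responses",
    )
    results = []
    for item in items:
        try:
            results.append(grade_one(item))
            progress_bar.update()  # advance after each successfully graded item
        except Exception:
            progress_bar.close()  # clean up the bar before re-raising
            raise
    progress_bar.close()
    return results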
15 changes: 15 additions & 0 deletions auditor/utils/format.py
@@ -0,0 +1,15 @@
from typing import Optional

def construct_llm_input(
prompt: str,
pre_context: Optional[str],
post_context: Optional[str],
delimiter: str = " ",
) -> str:
if pre_context is not None:
full_prompt = pre_context + delimiter + prompt
else:
full_prompt = prompt
if post_context is not None:
full_prompt += delimiter + post_context
return full_prompt
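The new construct_llm_input helper joins the optional pre- and post-context around the prompt with a single delimiter, so callers no longer fail when either context is None (the old pre_context + prompt + post_context concatenation raised a TypeError in that case). A minimal usage sketch with hypothetical values:

from auditor.utils.format import construct_llm_input

# Both contexts present: all three parts are joined with the default " " delimiter.
query = construct_llm_input(
    prompt="What is the capital of France?",
    pre_context="You are a helpful assistant.",
    post_context="Answer in one sentence.",
)
# -> "You are a helpful assistant. What is the capital of France? Answer in one sentence."

# Missing contexts are simply skipped instead of raising a TypeError.
query = construct_llm_input(
    prompt="What is the capital of France?",
    pre_context=None,
    post_context=None,
)
# -> "What is the capital of France?"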
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

[project]
name = "fiddler-auditor"
version = "0.0.3"
version = "0.0.4.rc0"
authors = [
{ name="Fiddler Labs", email="[email protected]" },
]
