
Commit 053b71f: Mock OpenAI API keys

shadeMe committed Feb 6, 2024
1 parent 248dfb0

Showing 2 changed files with 33 additions and 21 deletions.
25 changes: 20 additions & 5 deletions integrations/deepeval/tests/test_evaluator.py
@@ -68,7 +68,10 @@ def eval(self, test_cases, metric):
         return out


-def test_evaluator_metric_init_params():
+@patch("os.environ.get")
+def test_evaluator_metric_init_params(os_environ_get):
+    os_environ_get.return_value = "abacab"
+
     eval = DeepEvalEvaluator(DeepEvalMetric.ANSWER_RELEVANCY, metric_params={"model": "gpt-4-32k"})
     assert eval._backend_metric.evaluation_model == "gpt-4-32k"

@@ -79,7 +82,10 @@ def test_evaluator_metric_init_params():
         DeepEvalEvaluator(DeepEvalMetric.CONTEXTUAL_RECALL)


-def test_evaluator_serde():
+@patch("os.environ.get")
+def test_evaluator_serde(os_environ_get):
+    os_environ_get.return_value = "abacab"
+
     init_params = {
         "metric": DeepEvalMetric.ANSWER_RELEVANCY,
         "metric_params": {"model": "gpt-4-32k"},
@@ -127,7 +133,10 @@ def test_evaluator_serde():
         ),
     ],
 )
-def test_evaluator_valid_inputs(metric, inputs, params):
+@patch("os.environ.get")
+def test_evaluator_valid_inputs(os_environ_get, metric, inputs, params):
+    os_environ_get.return_value = "abacab"
+
     init_params = {
         "metric": metric,
         "metric_params": params,
@@ -177,7 +186,10 @@ def test_evaluator_valid_inputs(metric, inputs, params):
         ),
     ],
 )
-def test_evaluator_invalid_inputs(metric, inputs, error_string, params):
+@patch("os.environ.get")
+def test_evaluator_invalid_inputs(os_environ_get, metric, inputs, error_string, params):
+    os_environ_get.return_value = "abacab"
+
     with pytest.raises(ValueError, match=error_string):
         init_params = {
             "metric": metric,
@@ -236,7 +248,10 @@ def test_evaluator_invalid_inputs(metric, inputs, error_string, params):
         ),
     ],
 )
-def test_evaluator_outputs(metric, inputs, expected_outputs, metric_params):
+@patch("os.environ.get")
+def test_evaluator_outputs(os_environ_get, metric, inputs, expected_outputs, metric_params):
+    os_environ_get.return_value = "abacab"
+
     init_params = {
         "metric": metric,
         "metric_params": metric_params,
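For readers unfamiliar with the pattern used in the deepeval tests above: patching os.environ.get makes every environment lookup that goes through that function return the fake key, so the evaluator can be constructed without a real OpenAI credential. A minimal, self-contained sketch of the same idea; the read_api_key helper is hypothetical and only stands in for whatever code actually reads the key:

import os
from unittest.mock import patch


def read_api_key():
    # Hypothetical stand-in for library code that looks up the OpenAI key.
    return os.environ.get("OPENAI_API_KEY")


@patch("os.environ.get")
def test_key_lookup_is_mocked(os_environ_get):
    # While the patch is active, every call to os.environ.get() returns the
    # configured value, regardless of which variable name is requested.
    os_environ_get.return_value = "abacab"
    assert read_api_key() == "abacab"

The trade-off is that the patch is coarse: any environment variable read through os.environ.get during the test yields the fake value, not just OPENAI_API_KEY.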
29 changes: 13 additions & 16 deletions integrations/uptrain/tests/test_evaluator.py
@@ -118,10 +118,8 @@ def test_evaluator_api(os_environ_get):
         UpTrainEvaluator(UpTrainMetric.CONTEXT_RELEVANCE, api="uptrain")


-@patch("os.environ.get")
-def test_evaluator_metric_init_params(os_environ_get):
-    api_key = "test-api-key"
-    os_environ_get.return_value = api_key
+def test_evaluator_metric_init_params(monkeypatch):
+    monkeypatch.setenv("OPENAI_API_KEY", "test-api-key")

     eval = UpTrainEvaluator(UpTrainMetric.CRITIQUE_TONE, metric_params={"llm_persona": "village idiot"})
     assert eval._backend_metric.llm_persona == "village idiot"
@@ -136,9 +134,8 @@ def test_evaluator_metric_init_params(os_environ_get):
         UpTrainEvaluator(UpTrainMetric.RESPONSE_MATCHING)


-@patch("os.environ.get")
-def test_evaluator_serde(os_environ_get):
-    os_environ_get.return_value = "abacab"
+def test_evaluator_serde(monkeypatch):
+    monkeypatch.setenv("OPENAI_API_KEY", "test-api-key")

     init_params = {
         "metric": UpTrainMetric.RESPONSE_MATCHING,
@@ -186,9 +183,9 @@ def test_evaluator_serde(os_environ_get):
         (UpTrainMetric.RESPONSE_MATCHING, {"ground_truths": [], "responses": []}, {"method": "llm"}),
     ],
 )
-@patch("os.environ.get")
-def test_evaluator_valid_inputs(os_environ_get, metric, inputs, params):
-    os_environ_get.return_value = "abacab"
+def test_evaluator_valid_inputs(metric, inputs, params, monkeypatch):
+    monkeypatch.setenv("OPENAI_API_KEY", "test-api-key")
+
     init_params = {
         "metric": metric,
         "metric_params": params,
@@ -215,9 +212,9 @@ def test_evaluator_valid_inputs(os_environ_get, metric, inputs, params):
         (UpTrainMetric.RESPONSE_RELEVANCE, {"responses": []}, "expected input parameter ", None),
     ],
 )
-@patch("os.environ.get")
-def test_evaluator_invalid_inputs(os_environ_get, metric, inputs, error_string, params):
-    os_environ_get.return_value = "abacab"
+def test_evaluator_invalid_inputs(metric, inputs, error_string, params, monkeypatch):
+    monkeypatch.setenv("OPENAI_API_KEY", "test-api-key")
+
     with pytest.raises(ValueError, match=error_string):
         init_params = {
             "metric": metric,
@@ -294,9 +291,9 @@ def test_evaluator_invalid_inputs(os_environ_get, metric, inputs, error_string,
         ),
     ],
 )
-@patch("os.environ.get")
-def test_evaluator_outputs(os_environ_get, metric, inputs, expected_outputs, metric_params):
-    os_environ_get.return_value = "abacab"
+def test_evaluator_outputs(metric, inputs, expected_outputs, metric_params, monkeypatch):
+    monkeypatch.setenv("OPENAI_API_KEY", "test-api-key")
+
     init_params = {
         "metric": metric,
         "metric_params": metric_params,
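The uptrain tests above switch from patching os.environ.get to pytest's built-in monkeypatch fixture, which sets the real environment variable for the duration of a single test and restores the previous environment afterwards. A minimal sketch of that approach, independent of the integration code:

import os


def test_key_is_set_for_this_test_only(monkeypatch):
    # setenv affects only OPENAI_API_KEY and is undone automatically when the
    # test finishes, so other environment lookups behave normally.
    monkeypatch.setenv("OPENAI_API_KEY", "test-api-key")
    assert os.environ.get("OPENAI_API_KEY") == "test-api-key"

Because only the one variable is touched and the change is rolled back automatically, this is the more targeted of the two mocking styles.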
