Skip to content

Commit

Permalink
fmt
Browse files Browse the repository at this point in the history
  • Loading branch information
baskaryan committed Jan 22, 2025
1 parent 6124968 commit 6111ce7
Show file tree
Hide file tree
Showing 2 changed files with 9 additions and 15 deletions.
4 changes: 3 additions & 1 deletion python/docs/create_api_rst.py
Original file line number Diff line number Diff line change
Expand Up @@ -111,7 +111,9 @@ def _load_module_members(module_path: str, namespace: str) -> ModuleMembers:
else (
"enum"
if issubclass(type_, Enum)
else "Pydantic" if issubclass(type_, BaseModel) else "Regular"
else "Pydantic"
if issubclass(type_, BaseModel)
else "Regular"
)
)
classes_.append(
Expand Down
20 changes: 6 additions & 14 deletions python/tests/evaluation/test_evaluation.py
Original file line number Diff line number Diff line change
Expand Up @@ -242,32 +242,24 @@ async def apredict(inputs: dict) -> dict:
if _has_pandas():
df = results.to_pandas()
assert len(df) == 10
examples = client.list_examples(dataset_name=dataset.name)
all_examples = list(client.list_examples(dataset_name=dataset.name))
all_results = [r async for r in results]
all_examples = []
for example in examples:
count = 0
for r in all_results:
if r["run"].reference_example_id == example.id:
count += 1
assert count == 2
all_examples.append(example)

# Wait for there to be 2x runs vs. examples

    # Wait for the number of runs to equal the number of examples
def check_run_count():
current_runs = list(
client.list_runs(project_name=results.experiment_name, is_root=True)
)
for r in current_runs:
assert "accuracy" in r.feedback_stats
assert "slow_accuracy" in r.feedback_stats
return current_runs, len(current_runs) == 2 * len(all_examples)
return current_runs, len(current_runs) == len(all_examples)

final_runs = wait_for(check_run_count, max_sleep_time=60, sleep_time=2)

assert len(final_runs) == 2 * len(
assert len(final_runs) == len(
all_examples
), f"Expected {2 * len(all_examples)} runs, but got {len(final_runs)}"
), f"Expected {len(all_examples)} runs, but got {len(final_runs)}"

# Run it again with the existing project
results2 = await aevaluate(
Expand Down

0 comments on commit 6111ce7

Please sign in to comment.