fix: Reduced warnings in distance calculations

- added np.errstate context
- added clips for non-negative sqrt
- added clips for positive log
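The changes to the distance functions themselves are not reproduced in the hunk below, so here is a rough illustration of the three fixes named above. The function bodies are assumptions, not the repository's actual code: wrap warning-prone array math in an np.errstate context, clip sqrt arguments so round-off cannot push them negative, and clip log arguments to a small positive floor.

```python
import numpy as np

# Hedged sketch of the warning fixes; the real implementations live in
# distances.py, which this commit patches but which is not shown here.

def cosine(u, v):
    # Clip the squared norms so floating-point round-off can never make
    # the sqrt argument negative (the "non-negative sqrt" fix).
    nu = np.sqrt(np.clip(np.dot(u, u), 0.0, None))
    nv = np.sqrt(np.clip(np.dot(v, v), 0.0, None))
    # errstate suppresses the divide-by-zero warning for zero vectors.
    with np.errstate(divide="ignore", invalid="ignore"):
        sim = np.dot(u, v) / (nu * nv)
    return 1.0 - float(np.clip(np.nan_to_num(sim), -1.0, 1.0))

def canberra(u, v):
    # 0/0 terms raise "invalid value" warnings; errstate silences them and
    # nan_to_num maps the indeterminate terms to 0, the usual convention.
    with np.errstate(divide="ignore", invalid="ignore"):
        terms = np.abs(u - v) / (np.abs(u) + np.abs(v))
    return float(np.sum(np.nan_to_num(terms)))
```

The "positive log" fix follows the same pattern: `np.clip(x, eps, None)` with a tiny `eps` before any `np.log` call.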
Showing 4 changed files with 1,579 additions and 35 deletions.
@@ -0,0 +1,47 @@
```python
import numpy as np
import pandas as pd
from .distances import Distance


class MetricEvaluator:
    def __init__(self, metrics=None):
        self.metrics = metrics or [
            "euclidean",
            "manhattan",
            "canberra",
            "chebyshev",
            "cosine",
        ]
        self.distance_calculator = Distance()

    def evaluate_across_quantiles(self, X, y, quantiles=4):
        # Order samples by their first feature and split them into
        # roughly equal quantile bins.
        order = np.argsort(X[:, 0])
        quantile_indices = np.array_split(order, quantiles)
        best_metrics = {}

        for q, indices in enumerate(quantile_indices):
            X_q, y_q = X[indices], y[indices]
            results = self.evaluate(X_q, y_q)
            # Record the metric with the highest score in this bin.
            best_metrics[q] = max(results, key=results.get)

        return best_metrics

    def evaluate(self, X, y):
        results = {}
        for metric in self.metrics:
            # Example: calculate some performance measure for each distance.
            # This could be accuracy, computation time, etc.
            results[metric] = self._evaluate_metric(X, y, metric)
        return results

    def _evaluate_metric(self, X, y, metric):
        # Placeholder evaluation for demonstration purposes: the mean
        # pairwise distance between all sample pairs under the given metric.
        distances = []
        for i in range(len(X)):
            for j in range(i + 1, len(X)):
                dist = getattr(self.distance_calculator, metric)(X[i], X[j])
                distances.append(dist)
        return np.mean(distances)
```
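A minimal usage sketch, assuming the class above is importable (the module path is an assumption; the commit does not show the file's location, and the relative `.distances` import means it must be run from inside the package):

```python
import numpy as np
from metric_evaluator import MetricEvaluator  # module path is an assumption

rng = np.random.default_rng(0)
X = rng.normal(size=(40, 3))     # 40 samples, 3 features
y = rng.integers(0, 2, size=40)  # labels are unused by the placeholder evaluator

evaluator = MetricEvaluator(metrics=["euclidean", "manhattan", "cosine"])
print(evaluator.evaluate(X, y))                   # mean pairwise distance per metric
print(evaluator.evaluate_across_quantiles(X, y))  # best-scoring metric per quantile bin
```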