diff --git a/.gitignore b/.gitignore index 4ff2fd406..78deb1a61 100644 --- a/.gitignore +++ b/.gitignore @@ -212,3 +212,4 @@ ee/experimental/ai_temp/* oauth2.cfg scripts/keep_slack_bot.py keepnew.db +providers_cache.json diff --git a/docker/Dockerfile.api b/docker/Dockerfile.api index 9e000384e..8342b29e8 100644 --- a/docker/Dockerfile.api +++ b/docker/Dockerfile.api @@ -24,12 +24,15 @@ COPY examples examples COPY README.md README.md RUN poetry build && /venv/bin/pip install --use-deprecated=legacy-resolver dist/*.whl + FROM base as final ENV PATH="/venv/bin:${PATH}" ENV VIRTUAL_ENV="/venv" ENV EE_PATH="ee" COPY --from=builder /venv /venv COPY --from=builder /app/examples /examples +# Build the providers cache +RUN keep provider build_cache # as per Openshift guidelines, https://docs.openshift.com/container-platform/4.11/openshift_images/create-images.html#use-uid_create-images RUN chgrp -R 0 /app && chmod -R g=u /app RUN chown -R keep:keep /app diff --git a/ee/experimental/__init__.py b/ee/experimental/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/ee/experimental/ai_temp/.gitkeep b/ee/experimental/ai_temp/.gitkeep deleted file mode 100644 index e69de29bb..000000000 diff --git a/ee/experimental/generative_utils.py b/ee/experimental/generative_utils.py deleted file mode 100644 index 5689eb7c0..000000000 --- a/ee/experimental/generative_utils.py +++ /dev/null @@ -1,239 +0,0 @@ -import logging -import os - -import numpy as np -from openai import OpenAI - -from keep.api.core.db import get_incident_by_id - -from keep.api.models.db.alert import Incident - -logger = logging.getLogger(__name__) - -SUMMARY_GENERATOR_VERBOSE_NAME = "Summary generator v0.1" -NAME_GENERATOR_VERBOSE_NAME = "Name generator v0.1" -MAX_SUMMARY_LENGTH = 900 -MAX_NAME_LENGTH = 75 - -def generate_incident_summary( - incident: Incident, - use_n_alerts_for_summary: int = -1, - generate_summary: str = None, - max_summary_length: int = None, -) -> str: - if "OPENAI_API_KEY" not in os.environ: - logger.error( - "OpenAI API key is not set. Incident summary generation is not available.", - extra={"algorithm": SUMMARY_GENERATOR_VERBOSE_NAME, - "incident_id": incident.id, "tenant_id": incident.tenant_id} - ) - return "" - - if not generate_summary: - generate_summary = os.environ.get("GENERATE_INCIDENT_SUMMARY", "True") - - if generate_summary == "False": - logger.info(f"Incident summary generation is disabled. 
Aborting.", - extra={"algorithm": SUMMARY_GENERATOR_VERBOSE_NAME, "incident_id": incident.id, "tenant_id": incident.tenant_id}) - return "" - - if incident.user_summary: - return "" - - if not max_summary_length: - max_summary_length = os.environ.get( - "MAX_SUMMARY_LENGTH", MAX_SUMMARY_LENGTH) - - try: - client = OpenAI(api_key=os.environ["OPENAI_API_KEY"]) - - incident = get_incident_by_id(incident.tenant_id, incident.id) - - description_strings = np.unique( - [f'{alert.event["name"]}' for alert in incident.alerts] - ).tolist() - - if use_n_alerts_for_summary > 0: - incident_description = "\n".join( - description_strings[:use_n_alerts_for_summary] - ) - else: - incident_description = "\n".join(description_strings) - - timestamps = [alert.timestamp for alert in incident.alerts] - incident_start = min(timestamps).replace(microsecond=0) - incident_end = max(timestamps).replace(microsecond=0) - - model = os.environ.get("OPENAI_MODEL", "gpt-4o-mini") - - summary = ( - client.chat.completions.create( - model=model, - messages=[ - { - "role": "system", - "content": f"""You are a very skilled DevOps specialist who can summarize any incident based on alert descriptions. - When provided with information, summarize it in a 2-3 sentences explaining what happened and when. - ONLY SUMMARIZE WHAT YOU SEE. In the end add information about potential scenario of the incident. - When provided with information, answer with max a {int(max_summary_length * 0.9)} symbols excerpt - describing incident thoroughly. - - EXAMPLE: - An incident occurred between 2022-11-17 14:11:04 and 2022-11-22 22:19:04, involving a - total of 200 alerts. The alerts indicated critical and warning issues such as high CPU and memory - usage in pods and nodes, as well as stuck Kubernetes Daemonset rollout. Potential incident scenario: - Kubernetes Daemonset rollout stuck due to high CPU and memory usage in pods and nodes. This caused a - long tail of alerts on various topics.""", - }, - { - "role": "user", - "content": f"""Here are alerts of an incident for summarization:\n{incident_description}\n This incident started on - {incident_start}, ended on {incident_end}, included {incident.alerts_count} alerts.""", - }, - ], - ) - .choices[0] - .message.content - ) - - logger.info(f"Generated incident summary with length {len(summary)} symbols", - extra={"algorithm": SUMMARY_GENERATOR_VERBOSE_NAME, "incident_id": incident.id, "tenant_id": incident.tenant_id}) - - if len(summary) > max_summary_length: - logger.info(f"Generated incident summary is too long. Applying smart truncation", - extra={"algorithm": SUMMARY_GENERATOR_VERBOSE_NAME, "incident_id": incident.id, "tenant_id": incident.tenant_id}) - - summary = ( - client.chat.completions.create( - model=model, - messages=[ - { - "role": "system", - "content": f"""You are a very skilled DevOps specialist who can summarize any incident based on a description. - When provided with information, answer with max a {int(max_summary_length * 0.9)} symbols excerpt describing - incident thoroughly. - """, - }, - { - "role": "user", - "content": f"""Here is the description of an incident for summarization:\n{summary}""", - }, - ], - ) - .choices[0] - .message.content - ) - - logger.info(f"Generated new incident summary with length {len(summary)} symbols", - extra={"algorithm": SUMMARY_GENERATOR_VERBOSE_NAME, "incident_id": incident.id, "tenant_id": incident.tenant_id}) - - if len(summary) > max_summary_length: - logger.info(f"Generated incident summary is too long. 
Applying hard truncation", - extra={"algorithm": SUMMARY_GENERATOR_VERBOSE_NAME, "incident_id": incident.id, "tenant_id": incident.tenant_id}) - summary = summary[: max_summary_length] - - return summary - except Exception as e: - logger.error(f"Error in generating incident summary: {e}", - extra={"algorithm": SUMMARY_GENERATOR_VERBOSE_NAME, "incident_id": incident.id, "tenant_id": incident.tenant_id}) - return "" - - -def generate_incident_name(incident: Incident, generate_name: str = None, max_name_length: int = None, use_n_alerts_for_name: int = -1) -> str: - if "OPENAI_API_KEY" not in os.environ: - logger.error( - "OpenAI API key is not set. Incident name generation is not available.", - extra={"algorithm": NAME_GENERATOR_VERBOSE_NAME, - "incident_id": incident.id, "tenant_id": incident.tenant_id} - ) - return "" - - if not generate_name: - generate_name = os.environ.get("GENERATE_INCIDENT_NAME", "True") - - if generate_name == "False": - logger.info(f"Incident name generation is disabled. Aborting.", - extra={"algorithm": NAME_GENERATOR_VERBOSE_NAME, "incident_id": incident.id, "tenant_id": incident.tenant_id}) - return "" - - if incident.user_generated_name: - return "" - - if not max_name_length: - max_name_length = os.environ.get( - "MAX_NAME_LENGTH", MAX_NAME_LENGTH) - - try: - client = OpenAI(api_key=os.environ["OPENAI_API_KEY"]) - - incident = get_incident_by_id(incident.tenant_id, incident.id) - - description_strings = np.unique( - [f'{alert.event["name"]}' for alert in incident.alerts]).tolist() - - if use_n_alerts_for_name > 0: - incident_description = "\n".join( - description_strings[:use_n_alerts_for_name]) - else: - incident_description = "\n".join(description_strings) - - timestamps = [alert.timestamp for alert in incident.alerts] - incident_start = min(timestamps).replace(microsecond=0) - - model = os.environ.get("OPENAI_MODEL", "gpt-4o-mini") - - name = client.chat.completions.create(model=model, messages=[ - { - "role": "system", - "content": f"""You are a very skilled DevOps specialist who can name any incident based on alert descriptions. - When provided with information, output a short descriptive name of incident that could cause these alerts. - Add information about start time to the name. ONLY USE WHAT YOU SEE. Answer with max a {int(max_name_length * 0.9)} - symbols excerpt. - - EXAMPLE: - Kubernetes rollout stuck (started on 2022.11.17 14:11)""" - }, - { - "role": "user", - "content": f"""This incident started on {incident_start}. - Here are alerts of an incident:\n{incident_description}\n""" - } - ]).choices[0].message.content - - logger.info(f"Generated incident name with length {len(name)} symbols", - extra={"incident_id": incident.id, "tenant_id": incident.tenant_id}) - - if len(name) > max_name_length: - logger.info(f"Generated incident name is too long. Applying smart truncation", - extra={"algorithm": NAME_GENERATOR_VERBOSE_NAME, "incident_id": incident.id, "tenant_id": incident.tenant_id}) - - name = client.chat.completions.create(model=model, messages=[ - { - "role": "system", - "content": f"""You are a very skilled DevOps specialist who can name any incident based on a description. - Add information about start time to the name.When provided with information, answer with max a - {int(max_name_length * 0.9)} symbols. - - EXAMPLE: - Kubernetes rollout stuck (started on 2022.11.17 14:11)""" - }, - { - "role": "user", - "content": f"""This incident started on {incident_start}. 
- Here is the description of an incident to name:\n{name}.""" - } - ]).choices[0].message.content - - logger.info(f"Generated new incident name with length {len(name)} symbols", - extra={"algorithm": NAME_GENERATOR_VERBOSE_NAME, "incident_id": incident.id, "tenant_id": incident.tenant_id}) - - if len(name) > max_name_length: - logger.info(f"Generated incident name is too long. Applying hard truncation", - extra={"algorithm": NAME_GENERATOR_VERBOSE_NAME, "incident_id": incident.id, "tenant_id": incident.tenant_id}) - name = name[: max_name_length] - - return name - except Exception as e: - logger.error(f"Error in generating incident name: {e}", - extra={"algorithm": NAME_GENERATOR_VERBOSE_NAME, "incident_id": incident.id, "tenant_id": incident.tenant_id}) - return "" diff --git a/ee/experimental/graph_utils.py b/ee/experimental/graph_utils.py deleted file mode 100644 index 8916a69a1..000000000 --- a/ee/experimental/graph_utils.py +++ /dev/null @@ -1,122 +0,0 @@ -import logging -from typing import List, Tuple - -import networkx as nx -import numpy as np - -logger = logging.getLogger(__name__) - - -def detect_knee_1d_auto_increasing(y: List[float]) -> Tuple[int, float]: - """ - This function detects the knee point in an increasing 1D curve. Knee point is the point where a curve - starts to flatten out (https://en.wikipedia.org/wiki/Knee_of_a_curve). - - Parameters: - y (List[float]): a list of float values - - Returns: - tuple: knee_index, knee_y - """ - - def detect_knee_1d( - y: List[float], curve: str, direction: str = "increasing" - ) -> Tuple[int, float, List[float]]: - x = np.arange(len(y)) - - x_norm = (x - np.min(x)) / (np.max(x) - np.min(x)) - y_norm = (y - np.min(y)) / (np.max(y) - np.min(y)) - - diff_curve = y_norm - x_norm - - if curve == "concave": - knee_index = np.argmax(diff_curve) - else: - knee_index = np.argmin(diff_curve) - - knee_y = y[knee_index] - - return knee_index, knee_y, diff_curve - - knee_index_concave, knee_y_concave, diff_curve_concave = detect_knee_1d( - y, "concave" - ) - knee_index_convex, knee_y_convex, diff_curve_convex = detect_knee_1d(y, "convex") - max_diff_concave = np.max(np.abs(diff_curve_concave)) - max_diff_convex = np.max(np.abs(diff_curve_convex)) - - if max_diff_concave > max_diff_convex: - return knee_index_concave, knee_y_concave - else: - return knee_index_convex, knee_y_convex - - -def create_graph( - tenant_id: str, - fingerprints: List[str], - pmi_values: np.ndarray, - fingerprint2idx: dict, - pmi_threshold: float = 0.0, - delete_nodes: bool = False, - knee_threshold: float = 0.8, -) -> nx.Graph: - """ - This function creates a graph from a list of fingerprints. The graph is created based on the PMI values between - the fingerprints. The edges are created between the fingerprints that have a PMI value greater than the threshold. - The nodes are removed if the knee point of the PMI values of the edges connected to the node is less than the threshold. 
- - Parameters: - tenant_id (str): tenant id - fingerprints (List[str]): a list of fingerprints - pmi_threshold (float): PMI threshold - knee_threshold (float): knee threshold - - Returns: - nx.Graph: a graph - """ - graph = nx.Graph() - - if len(fingerprints) == 1: - graph.add_node(fingerprints[0]) - return graph - - logger.info("Creating alert graph edges", extra={"tenant_id": tenant_id}) - - for idx_i, fingerprint_i in enumerate(fingerprints): - if fingerprint_i not in fingerprint2idx: - continue - - for idx_j in range(idx_i + 1, len(fingerprints)): - fingerprint_j = fingerprints[idx_j] - - if fingerprint_j not in fingerprint2idx: - continue - - weight = pmi_values[ - fingerprint2idx[fingerprint_i], fingerprint2idx[fingerprint_j] - ] - - if weight > pmi_threshold: - graph.add_edge(fingerprint_i, fingerprint_j, weight=weight) - - if delete_nodes: - nodes_to_delete = [] - logger.info( - "Preparing candidate nodes for deletion", extra={"tenant_id": tenant_id} - ) - - for node in graph.nodes: - weights = sorted([edge["weight"] for edge in graph[node].values()]) - - knee_index, knee_statistic = detect_knee_1d_auto_increasing(weights) - - if knee_statistic < knee_threshold: - nodes_to_delete.append(node) - - logger.info( - f"Removing nodes from graph, {len(nodes_to_delete)} nodes will be removed, {len(graph.nodes) - len(nodes_to_delete)} nodes will be left", - extra={"tenant_id": tenant_id}, - ) - graph.remove_nodes_from(nodes_to_delete) - - return graph diff --git a/ee/experimental/incident_utils.py b/ee/experimental/incident_utils.py deleted file mode 100644 index fd8420a15..000000000 --- a/ee/experimental/incident_utils.py +++ /dev/null @@ -1,250 +0,0 @@ -import logging -from datetime import datetime, timedelta -from typing import Any, Dict, List, Set, Tuple - -from arq.connections import ArqRedis - -from ee.experimental.generative_utils import ( - NAME_GENERATOR_VERBOSE_NAME, - SUMMARY_GENERATOR_VERBOSE_NAME, - generate_incident_name, - generate_incident_summary, -) -from keep.api.core.db import ( - add_alerts_to_incident_by_incident_id, - create_incident_from_dict, - get_incident_by_id, - update_incident_name, - update_incident_summary, -) -from keep.api.models.db.alert import Alert, Incident - -logger = logging.getLogger(__name__) - -ALGORITHM_VERBOSE_NAME = "Correlation algorithm v0.2" -USE_N_HISTORICAL_ALERTS_MINING = 10e4 -USE_N_HISTORICAL_ALERTS_PMI = 10e4 -USE_N_HISTORICAL_INCIDENTS = 10e4 -MIN_ALERT_NUMBER = 100 -INCIDENT_VALIDITY_THRESHOLD = 3600 -ALERT_VALIDITY_THRESHOLD = 3600 -# We assume that incident / alert validity threshold is greater than a size of a batch -STRIDE_DENOMINATOR = 4 -DEFAULT_TEMP_DIR_LOCATION = "./ee/experimental/ai_temp" -PMI_SLIDING_WINDOW = 3600 - - -def update_existing_incident( - incident: Incident, alerts: List[Alert] -) -> Tuple[str, bool]: - add_alerts_to_incident_by_incident_id(incident.tenant_id, incident.id, alerts) - return incident.id, True - - -def create_new_incident( - component: Set[str], alerts: List[Alert], tenant_id: str -) -> Tuple[str, bool]: - incident_start_time = min( - alert.timestamp for alert in alerts if alert.fingerprint in component - ) - incident_start_time = incident_start_time.replace(microsecond=0) - - incident = create_incident_from_dict( - tenant_id, - { - "ai_generated_name": f"Incident started at {incident_start_time}", - "generated_summary": "Summarization is Disabled", - "is_predicted": True, - }, - ) - add_alerts_to_incident_by_incident_id( - tenant_id, - incident.id, - [alert.id for alert in alerts if 
alert.fingerprint in component], - ) - return incident.id, False - - -async def schedule_incident_processing( - pool: ArqRedis, tenant_id: str, incident_id: str -) -> None: - job_summary = await pool.enqueue_job( - "process_summary_generation", - tenant_id=tenant_id, - incident_id=incident_id, - ) - logger.info( - f"Summary generation for incident {incident_id} scheduled, job: {job_summary}", - extra={ - "algorithm": SUMMARY_GENERATOR_VERBOSE_NAME, - "tenant_id": tenant_id, - "incident_id": incident_id, - }, - ) - - job_name = await pool.enqueue_job( - "process_name_generation", tenant_id=tenant_id, incident_id=incident_id - ) - logger.info( - f"Name generation for incident {incident_id} scheduled, job: {job_name}", - extra={ - "algorithm": NAME_GENERATOR_VERBOSE_NAME, - "tenant_id": tenant_id, - "incident_id": incident_id, - }, - ) - - -def is_incident_accepting_updates( - incident: Incident, current_time: datetime, incident_validity_threshold: timedelta -) -> bool: - return current_time - incident.last_seen_time < incident_validity_threshold - - -def get_component_first_seen_time(component: Set[str], alerts: List[Alert]) -> datetime: - return min(alert.timestamp for alert in alerts if alert.fingerprint in component) - - -def process_graph_component( - component: Set[str], - batch_incidents: List[Incident], - batch_alerts: List[Alert], - batch_fingerprints: Set[str], - tenant_id: str, - min_incident_size: int, - incident_validity_threshold: timedelta, -) -> Tuple[str, bool]: - is_component_merged = False - for incident in batch_incidents: - incident_fingerprints = set(alert.fingerprint for alert in incident.alerts) - if incident_fingerprints.issubset(component): - if not incident_fingerprints.intersection(batch_fingerprints): - continue - logger.info( - f"Found possible extension for incident {incident.id}", - extra={"tenant_id": tenant_id, "algorithm": ALGORITHM_VERBOSE_NAME}, - ) - - amendment_time = get_component_first_seen_time(component, batch_alerts) - if is_incident_accepting_updates( - incident, amendment_time, incident_validity_threshold - ): - logger.info( - f"Incident {incident.id} is accepting updates.", - extra={"tenant_id": tenant_id, "algorithm": ALGORITHM_VERBOSE_NAME}, - ) - - existing_alert_ids = set([alert.id for alert in incident.alerts]) - appendable_alerts = [ - alert - for alert in batch_alerts - if alert.fingerprint in component - and alert.id not in existing_alert_ids - ] - - logger.info( - f"Appending {len(appendable_alerts)} alerts to incident {incident.id}", - extra={"tenant_id": tenant_id, "algorithm": ALGORITHM_VERBOSE_NAME}, - ) - is_component_merged = True - return update_existing_incident_inmem(incident, appendable_alerts) - else: - logger.info( - f"Incident {incident.id} is not accepting updates. 
Aborting merge operation.", - extra={"tenant_id": tenant_id, "algorithm": ALGORITHM_VERBOSE_NAME}, - ) - - if not is_component_merged: - if len(component) >= min_incident_size: - logger.info( - f"Creating new incident with {len(component)} alerts", - extra={"tenant_id": tenant_id, "algorithm": ALGORITHM_VERBOSE_NAME}, - ) - return create_new_incident_inmem(component, batch_alerts, tenant_id) - else: - return None, False - - -async def generate_update_incident_summary(ctx, tenant_id: str, incident_id: str): - incident = get_incident_by_id(tenant_id, incident_id) - summary = generate_incident_summary(incident) - - if summary: - update_incident_summary(tenant_id, incident_id, summary) - - return summary - - -async def generate_update_incident_name(ctx, tenant_id: str, incident_id: str): - incident = get_incident_by_id(tenant_id, incident_id) - name = generate_incident_name(incident) - - if name: - update_incident_name(tenant_id, incident_id, name) - - return name - - -def get_last_incidents_inmem( - incidents: List[Incident], upper_timestamp: datetime, lower_timestamp: datetime -) -> List[Incident]: - return [ - incident - for incident in incidents - if lower_timestamp < incident.last_seen_time < upper_timestamp - ] - - -def add_alerts_to_incident_by_incident_id_inmem(incident: Incident, alerts: List[str]): - incident.alerts.extend(alerts) - return incident - - -def create_incident_from_dict_inmem( - tenant_id: str, incident_dict: Dict[str, Any] -) -> Incident: - return Incident(tenant_id=tenant_id, **incident_dict) - - -def create_new_incident_inmem( - component: Set[str], alerts: List[Alert], tenant_id: str -) -> Tuple[Incident, bool]: - incident_start_time = min( - alert.timestamp for alert in alerts if alert.fingerprint in component - ) - incident_start_time = incident_start_time.replace(microsecond=0) - - incident = create_incident_from_dict_inmem( - tenant_id, - { - "name": f"Incident started at {incident_start_time}", - "description": "Summarization is Disabled", - "is_predicted": True, - }, - ) - - incident = add_alerts_to_incident_by_incident_id_inmem( - incident, - [alert for alert in alerts if alert.fingerprint in component], - ) - incident.last_seen_time = max([alert.timestamp for alert in incident.alerts]) - - return incident, False - - -def update_existing_incident_inmem( - incident: Incident, alerts: List[str] -) -> Tuple[str, bool]: - incident = add_alerts_to_incident_by_incident_id_inmem(incident, alerts) - incident.last_seen_time = max([alert.timestamp for alert in incident.alerts]) - return incident, True - - -def update_incident_summary_inmem(incident: Incident, summary: str): - incident.summary = summary - return incident - - -def update_incident_name_inmem(incident: Incident, name: str): - incident.name = name - return incident diff --git a/keep/api/bl/incidents_bl.py b/keep/api/bl/incidents_bl.py index 855966bb1..c1ff21a00 100644 --- a/keep/api/bl/incidents_bl.py +++ b/keep/api/bl/incidents_bl.py @@ -12,13 +12,13 @@ from keep.api.arq_pool import get_pool from keep.api.core.db import ( add_alerts_to_incident_by_incident_id, + create_incident_from_dto, delete_incident_by_id, get_incident_alerts_by_incident_id, get_incident_by_id, get_incident_unique_fingerprint_count, remove_alerts_to_incident_by_incident_id, update_incident_from_dto_by_id, - create_incident_from_dto, ) from keep.api.core.elastic import ElasticClient from keep.api.models.alert import IncidentDto, IncidentDtoIn @@ -36,7 +36,6 @@ str(pathlib.Path(__file__).parent.resolve()) + "/../../../ee/experimental" ) 
sys.path.insert(0, path_with_ee) - from ee.experimental.incident_utils import ALGORITHM_VERBOSE_NAME # noqa else: ALGORITHM_VERBOSE_NAME = NotImplemented diff --git a/keep/cli/cli.py b/keep/cli/cli.py index e0c0ad87f..de2ddec83 100644 --- a/keep/cli/cli.py +++ b/keep/cli/cli.py @@ -6,6 +6,7 @@ import typing import uuid from collections import OrderedDict +from dataclasses import _MISSING_TYPE from importlib import metadata import click @@ -16,11 +17,12 @@ from keep.api.core.db_on_start import try_create_single_tenant from keep.api.core.dependencies import SINGLE_TENANT_UUID +from keep.api.core.posthog import posthog_client from keep.cli.click_extensions import NotRequiredIf +from keep.providers.models.provider_config import ProviderScope from keep.providers.providers_factory import ProvidersFactory from keep.workflowmanager.workflowmanager import WorkflowManager from keep.workflowmanager.workflowstore import WorkflowStore -from keep.api.core.posthog import posthog_client load_dotenv(find_dotenv()) @@ -134,6 +136,7 @@ def set_config(self, keep_config: str): or "api" in arguments or "config" in arguments or "version" in arguments + or "build_cache" in arguments ): return @@ -311,9 +314,19 @@ def whoami(info: Info): @cli.command() @click.option("--multi-tenant", is_flag=True, help="Enable multi-tenant mode") -@click.option("--port", "-p", type=int, default=int(os.environ.get("PORT", 8080)), help="The port to run the API on") @click.option( - "--host", "-h", type=str, default=os.environ.get("HOST", "0.0.0.0"), help="The host to run the API on" + "--port", + "-p", + type=int, + default=int(os.environ.get("PORT", 8080)), + help="The port to run the API on", +) +@click.option( + "--host", + "-h", + type=str, + default=os.environ.get("HOST", "0.0.0.0"), + help="The host to run the API on", ) def api(multi_tenant: bool, port: int, host: str): """Start the API.""" @@ -1088,6 +1101,27 @@ def provider(info: Info): pass +@provider.command(name="build_cache", help="Output providers cache for future use") +def build_cache(): + class ProviderEncoder(json.JSONEncoder): + def default(self, o): + if isinstance(o, ProviderScope): + dct = o.__dict__ + dct.pop("__pydantic_initialised__", None) + return dct + elif isinstance(o, _MISSING_TYPE): + return None + return o.dict() + + logger.info("Building providers cache") + providers_cache = ProvidersFactory.get_all_providers(ignore_cache_file=True) + with open("providers_cache.json", "w") as f: + json.dump(providers_cache, f, cls=ProviderEncoder) + logger.info( + "Providers cache built successfully", extra={"file": "providers_cache.json"} + ) + + @provider.command(name="list") @click.option( "--available", @@ -1512,6 +1546,7 @@ def simulate(info: Info, provider_type: str, params: list[str]): else: click.echo(click.style("Alert simulated successfully", bold=True)) + @cli.group() @pass_info def auth(info: Info): diff --git a/keep/providers/models/provider_config.py b/keep/providers/models/provider_config.py index 341c16485..11060041e 100644 --- a/keep/providers/models/provider_config.py +++ b/keep/providers/models/provider_config.py @@ -1,11 +1,12 @@ """ Provider configuration model. 
""" + import os -from dataclasses import dataclass from typing import Optional import chevron +from pydantic.dataclasses import dataclass @dataclass diff --git a/keep/providers/providers_factory.py b/keep/providers/providers_factory.py index b33e00a1e..b233d0354 100644 --- a/keep/providers/providers_factory.py +++ b/keep/providers/providers_factory.py @@ -9,7 +9,6 @@ import json import logging import os -import sys import types import typing from dataclasses import fields @@ -32,6 +31,8 @@ from keep.providers.models.provider_method import ProviderMethodDTO, ProviderMethodParam from keep.secretmanager.secretmanagerfactory import SecretManagerFactory +PROVIDERS_CACHE_FILE = os.environ.get("PROVIDERS_CACHE_FILE", "providers_cache.json") + logger = logging.getLogger(__name__) @@ -222,7 +223,7 @@ def __get_methods(provider_class: BaseProvider) -> list[ProviderMethodDTO]: return methods @staticmethod - def get_all_providers() -> list[Provider]: + def get_all_providers(ignore_cache_file: bool = False) -> list[Provider]: """ Get all the providers. @@ -235,6 +236,22 @@ def get_all_providers() -> list[Provider]: logger.debug("Using cached providers") return ProvidersFactory._loaded_providers_cache + if os.path.exists(PROVIDERS_CACHE_FILE) and not ignore_cache_file: + logger.info( + "Loading providers from cache file", + extra={"file": PROVIDERS_CACHE_FILE}, + ) + with open(PROVIDERS_CACHE_FILE, "r") as f: + providers_cache = json.load(f) + ProvidersFactory._loaded_providers_cache = [ + Provider(**provider) for provider in providers_cache + ] + logger.info( + "Providers loaded from cache file", + extra={"file": PROVIDERS_CACHE_FILE}, + ) + return ProvidersFactory._loaded_providers_cache + logger.info("Loading providers") providers = [] blacklisted_providers = [ @@ -386,11 +403,6 @@ def get_all_providers() -> list[Provider]: default_fingerprint_fields=default_fingerprint_fields, ) ) - - # Unload the module - del sys.modules[ - f"keep.providers.{provider_directory}.{provider_directory}" - ] except ModuleNotFoundError: logger.error( f"Cannot import provider {provider_directory}, module not found." 
@@ -403,7 +415,6 @@ def get_all_providers() -> list[Provider]: ) continue - importlib.invalidate_caches() ProvidersFactory._loaded_providers_cache = providers return providers diff --git a/poetry.lock b/poetry.lock index c8a7d2034..e14193a4f 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2722,56 +2722,66 @@ files = [ [[package]] name = "numpy" -version = "2.0.0" +version = "2.1.3" description = "Fundamental package for array computing in Python" optional = false -python-versions = ">=3.9" +python-versions = ">=3.10" files = [ - {file = "numpy-2.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:04494f6ec467ccb5369d1808570ae55f6ed9b5809d7f035059000a37b8d7e86f"}, - {file = "numpy-2.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2635dbd200c2d6faf2ef9a0d04f0ecc6b13b3cad54f7c67c61155138835515d2"}, - {file = "numpy-2.0.0-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:0a43f0974d501842866cc83471bdb0116ba0dffdbaac33ec05e6afed5b615238"}, - {file = "numpy-2.0.0-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:8d83bb187fb647643bd56e1ae43f273c7f4dbcdf94550d7938cfc32566756514"}, - {file = "numpy-2.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79e843d186c8fb1b102bef3e2bc35ef81160ffef3194646a7fdd6a73c6b97196"}, - {file = "numpy-2.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d7696c615765091cc5093f76fd1fa069870304beaccfd58b5dcc69e55ef49c1"}, - {file = "numpy-2.0.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b4c76e3d4c56f145d41b7b6751255feefae92edbc9a61e1758a98204200f30fc"}, - {file = "numpy-2.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:acd3a644e4807e73b4e1867b769fbf1ce8c5d80e7caaef0d90dcdc640dfc9787"}, - {file = "numpy-2.0.0-cp310-cp310-win32.whl", hash = "sha256:cee6cc0584f71adefe2c908856ccc98702baf95ff80092e4ca46061538a2ba98"}, - {file = "numpy-2.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:ed08d2703b5972ec736451b818c2eb9da80d66c3e84aed1deeb0c345fefe461b"}, - {file = "numpy-2.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ad0c86f3455fbd0de6c31a3056eb822fc939f81b1618f10ff3406971893b62a5"}, - {file = "numpy-2.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e7f387600d424f91576af20518334df3d97bc76a300a755f9a8d6e4f5cadd289"}, - {file = "numpy-2.0.0-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:34f003cb88b1ba38cb9a9a4a3161c1604973d7f9d5552c38bc2f04f829536609"}, - {file = "numpy-2.0.0-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:b6f6a8f45d0313db07d6d1d37bd0b112f887e1369758a5419c0370ba915b3871"}, - {file = "numpy-2.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f64641b42b2429f56ee08b4f427a4d2daf916ec59686061de751a55aafa22e4"}, - {file = "numpy-2.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a7039a136017eaa92c1848152827e1424701532ca8e8967fe480fe1569dae581"}, - {file = "numpy-2.0.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:46e161722e0f619749d1cd892167039015b2c2817296104487cd03ed4a955995"}, - {file = "numpy-2.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0e50842b2295ba8414c8c1d9d957083d5dfe9e16828b37de883f51fc53c4016f"}, - {file = "numpy-2.0.0-cp311-cp311-win32.whl", hash = "sha256:2ce46fd0b8a0c947ae047d222f7136fc4d55538741373107574271bc00e20e8f"}, - {file = "numpy-2.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:fbd6acc766814ea6443628f4e6751d0da6593dae29c08c0b2606164db026970c"}, - {file = "numpy-2.0.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = 
"sha256:354f373279768fa5a584bac997de6a6c9bc535c482592d7a813bb0c09be6c76f"}, - {file = "numpy-2.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4d2f62e55a4cd9c58c1d9a1c9edaedcd857a73cb6fda875bf79093f9d9086f85"}, - {file = "numpy-2.0.0-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:1e72728e7501a450288fc8e1f9ebc73d90cfd4671ebbd631f3e7857c39bd16f2"}, - {file = "numpy-2.0.0-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:84554fc53daa8f6abf8e8a66e076aff6ece62de68523d9f665f32d2fc50fd66e"}, - {file = "numpy-2.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c73aafd1afca80afecb22718f8700b40ac7cab927b8abab3c3e337d70e10e5a2"}, - {file = "numpy-2.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49d9f7d256fbc804391a7f72d4a617302b1afac1112fac19b6c6cec63fe7fe8a"}, - {file = "numpy-2.0.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0ec84b9ba0654f3b962802edc91424331f423dcf5d5f926676e0150789cb3d95"}, - {file = "numpy-2.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:feff59f27338135776f6d4e2ec7aeeac5d5f7a08a83e80869121ef8164b74af9"}, - {file = "numpy-2.0.0-cp312-cp312-win32.whl", hash = "sha256:c5a59996dc61835133b56a32ebe4ef3740ea5bc19b3983ac60cc32be5a665d54"}, - {file = "numpy-2.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:a356364941fb0593bb899a1076b92dfa2029f6f5b8ba88a14fd0984aaf76d0df"}, - {file = "numpy-2.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e61155fae27570692ad1d327e81c6cf27d535a5d7ef97648a17d922224b216de"}, - {file = "numpy-2.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4554eb96f0fd263041baf16cf0881b3f5dafae7a59b1049acb9540c4d57bc8cb"}, - {file = "numpy-2.0.0-cp39-cp39-macosx_14_0_arm64.whl", hash = "sha256:903703372d46bce88b6920a0cd86c3ad82dae2dbef157b5fc01b70ea1cfc430f"}, - {file = "numpy-2.0.0-cp39-cp39-macosx_14_0_x86_64.whl", hash = "sha256:3e8e01233d57639b2e30966c63d36fcea099d17c53bf424d77f088b0f4babd86"}, - {file = "numpy-2.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1cde1753efe513705a0c6d28f5884e22bdc30438bf0085c5c486cdaff40cd67a"}, - {file = "numpy-2.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:821eedb7165ead9eebdb569986968b541f9908979c2da8a4967ecac4439bae3d"}, - {file = "numpy-2.0.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9a1712c015831da583b21c5bfe15e8684137097969c6d22e8316ba66b5baabe4"}, - {file = "numpy-2.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:9c27f0946a3536403efb0e1c28def1ae6730a72cd0d5878db38824855e3afc44"}, - {file = "numpy-2.0.0-cp39-cp39-win32.whl", hash = "sha256:63b92c512d9dbcc37f9d81b123dec99fdb318ba38c8059afc78086fe73820275"}, - {file = "numpy-2.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:3f6bed7f840d44c08ebdb73b1825282b801799e325bcbdfa6bc5c370e5aecc65"}, - {file = "numpy-2.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9416a5c2e92ace094e9f0082c5fd473502c91651fb896bc17690d6fc475128d6"}, - {file = "numpy-2.0.0-pp39-pypy39_pp73-macosx_14_0_x86_64.whl", hash = "sha256:17067d097ed036636fa79f6a869ac26df7db1ba22039d962422506640314933a"}, - {file = "numpy-2.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38ecb5b0582cd125f67a629072fed6f83562d9dd04d7e03256c9829bdec027ad"}, - {file = "numpy-2.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:cef04d068f5fb0518a77857953193b6bb94809a806bd0a14983a8f12ada060c9"}, - {file = "numpy-2.0.0.tar.gz", hash = 
"sha256:cf5d1c9e6837f8af9f92b6bd3e86d513cdc11f60fd62185cc49ec7d1aba34864"}, + {file = "numpy-2.1.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c894b4305373b9c5576d7a12b473702afdf48ce5369c074ba304cc5ad8730dff"}, + {file = "numpy-2.1.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b47fbb433d3260adcd51eb54f92a2ffbc90a4595f8970ee00e064c644ac788f5"}, + {file = "numpy-2.1.3-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:825656d0743699c529c5943554d223c021ff0494ff1442152ce887ef4f7561a1"}, + {file = "numpy-2.1.3-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:6a4825252fcc430a182ac4dee5a505053d262c807f8a924603d411f6718b88fd"}, + {file = "numpy-2.1.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e711e02f49e176a01d0349d82cb5f05ba4db7d5e7e0defd026328e5cfb3226d3"}, + {file = "numpy-2.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78574ac2d1a4a02421f25da9559850d59457bac82f2b8d7a44fe83a64f770098"}, + {file = "numpy-2.1.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c7662f0e3673fe4e832fe07b65c50342ea27d989f92c80355658c7f888fcc83c"}, + {file = "numpy-2.1.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:fa2d1337dc61c8dc417fbccf20f6d1e139896a30721b7f1e832b2bb6ef4eb6c4"}, + {file = "numpy-2.1.3-cp310-cp310-win32.whl", hash = "sha256:72dcc4a35a8515d83e76b58fdf8113a5c969ccd505c8a946759b24e3182d1f23"}, + {file = "numpy-2.1.3-cp310-cp310-win_amd64.whl", hash = "sha256:ecc76a9ba2911d8d37ac01de72834d8849e55473457558e12995f4cd53e778e0"}, + {file = "numpy-2.1.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4d1167c53b93f1f5d8a139a742b3c6f4d429b54e74e6b57d0eff40045187b15d"}, + {file = "numpy-2.1.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c80e4a09b3d95b4e1cac08643f1152fa71a0a821a2d4277334c88d54b2219a41"}, + {file = "numpy-2.1.3-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:576a1c1d25e9e02ed7fa5477f30a127fe56debd53b8d2c89d5578f9857d03ca9"}, + {file = "numpy-2.1.3-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:973faafebaae4c0aaa1a1ca1ce02434554d67e628b8d805e61f874b84e136b09"}, + {file = "numpy-2.1.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:762479be47a4863e261a840e8e01608d124ee1361e48b96916f38b119cfda04a"}, + {file = "numpy-2.1.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc6f24b3d1ecc1eebfbf5d6051faa49af40b03be1aaa781ebdadcbc090b4539b"}, + {file = "numpy-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:17ee83a1f4fef3c94d16dc1802b998668b5419362c8a4f4e8a491de1b41cc3ee"}, + {file = "numpy-2.1.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:15cb89f39fa6d0bdfb600ea24b250e5f1a3df23f901f51c8debaa6a5d122b2f0"}, + {file = "numpy-2.1.3-cp311-cp311-win32.whl", hash = "sha256:d9beb777a78c331580705326d2367488d5bc473b49a9bc3036c154832520aca9"}, + {file = "numpy-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:d89dd2b6da69c4fff5e39c28a382199ddedc3a5be5390115608345dec660b9e2"}, + {file = "numpy-2.1.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f55ba01150f52b1027829b50d70ef1dafd9821ea82905b63936668403c3b471e"}, + {file = "numpy-2.1.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:13138eadd4f4da03074851a698ffa7e405f41a0845a6b1ad135b81596e4e9958"}, + {file = "numpy-2.1.3-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:a6b46587b14b888e95e4a24d7b13ae91fa22386c199ee7b418f449032b2fa3b8"}, + {file = "numpy-2.1.3-cp312-cp312-macosx_14_0_x86_64.whl", hash = 
"sha256:0fa14563cc46422e99daef53d725d0c326e99e468a9320a240affffe87852564"}, + {file = "numpy-2.1.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8637dcd2caa676e475503d1f8fdb327bc495554e10838019651b76d17b98e512"}, + {file = "numpy-2.1.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2312b2aa89e1f43ecea6da6ea9a810d06aae08321609d8dc0d0eda6d946a541b"}, + {file = "numpy-2.1.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a38c19106902bb19351b83802531fea19dee18e5b37b36454f27f11ff956f7fc"}, + {file = "numpy-2.1.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:02135ade8b8a84011cbb67dc44e07c58f28575cf9ecf8ab304e51c05528c19f0"}, + {file = "numpy-2.1.3-cp312-cp312-win32.whl", hash = "sha256:e6988e90fcf617da2b5c78902fe8e668361b43b4fe26dbf2d7b0f8034d4cafb9"}, + {file = "numpy-2.1.3-cp312-cp312-win_amd64.whl", hash = "sha256:0d30c543f02e84e92c4b1f415b7c6b5326cbe45ee7882b6b77db7195fb971e3a"}, + {file = "numpy-2.1.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:96fe52fcdb9345b7cd82ecd34547fca4321f7656d500eca497eb7ea5a926692f"}, + {file = "numpy-2.1.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f653490b33e9c3a4c1c01d41bc2aef08f9475af51146e4a7710c450cf9761598"}, + {file = "numpy-2.1.3-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:dc258a761a16daa791081d026f0ed4399b582712e6fc887a95af09df10c5ca57"}, + {file = "numpy-2.1.3-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:016d0f6f5e77b0f0d45d77387ffa4bb89816b57c835580c3ce8e099ef830befe"}, + {file = "numpy-2.1.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c181ba05ce8299c7aa3125c27b9c2167bca4a4445b7ce73d5febc411ca692e43"}, + {file = "numpy-2.1.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5641516794ca9e5f8a4d17bb45446998c6554704d888f86df9b200e66bdcce56"}, + {file = "numpy-2.1.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ea4dedd6e394a9c180b33c2c872b92f7ce0f8e7ad93e9585312b0c5a04777a4a"}, + {file = "numpy-2.1.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b0df3635b9c8ef48bd3be5f862cf71b0a4716fa0e702155c45067c6b711ddcef"}, + {file = "numpy-2.1.3-cp313-cp313-win32.whl", hash = "sha256:50ca6aba6e163363f132b5c101ba078b8cbd3fa92c7865fd7d4d62d9779ac29f"}, + {file = "numpy-2.1.3-cp313-cp313-win_amd64.whl", hash = "sha256:747641635d3d44bcb380d950679462fae44f54b131be347d5ec2bce47d3df9ed"}, + {file = "numpy-2.1.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:996bb9399059c5b82f76b53ff8bb686069c05acc94656bb259b1d63d04a9506f"}, + {file = "numpy-2.1.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:45966d859916ad02b779706bb43b954281db43e185015df6eb3323120188f9e4"}, + {file = "numpy-2.1.3-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:baed7e8d7481bfe0874b566850cb0b85243e982388b7b23348c6db2ee2b2ae8e"}, + {file = "numpy-2.1.3-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:a9f7f672a3388133335589cfca93ed468509cb7b93ba3105fce780d04a6576a0"}, + {file = "numpy-2.1.3-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7aac50327da5d208db2eec22eb11e491e3fe13d22653dce51b0f4109101b408"}, + {file = "numpy-2.1.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4394bc0dbd074b7f9b52024832d16e019decebf86caf909d94f6b3f77a8ee3b6"}, + {file = "numpy-2.1.3-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:50d18c4358a0a8a53f12a8ba9d772ab2d460321e6a93d6064fc22443d189853f"}, + {file = "numpy-2.1.3-cp313-cp313t-musllinux_1_2_aarch64.whl", 
hash = "sha256:14e253bd43fc6b37af4921b10f6add6925878a42a0c5fe83daee390bca80bc17"}, + {file = "numpy-2.1.3-cp313-cp313t-win32.whl", hash = "sha256:08788d27a5fd867a663f6fc753fd7c3ad7e92747efc73c53bca2f19f8bc06f48"}, + {file = "numpy-2.1.3-cp313-cp313t-win_amd64.whl", hash = "sha256:2564fbdf2b99b3f815f2107c1bbc93e2de8ee655a69c261363a1172a79a257d4"}, + {file = "numpy-2.1.3-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:4f2015dfe437dfebbfce7c85c7b53d81ba49e71ba7eadbf1df40c915af75979f"}, + {file = "numpy-2.1.3-pp310-pypy310_pp73-macosx_14_0_x86_64.whl", hash = "sha256:3522b0dfe983a575e6a9ab3a4a4dfe156c3e428468ff08ce582b9bb6bd1d71d4"}, + {file = "numpy-2.1.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c006b607a865b07cd981ccb218a04fc86b600411d83d6fc261357f1c0966755d"}, + {file = "numpy-2.1.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:e14e26956e6f1696070788252dcdff11b4aca4c3e8bd166e0df1bb8f315a67cb"}, + {file = "numpy-2.1.3.tar.gz", hash = "sha256:aa08e04e08aaf974d4458def539dece0d28146d866a39da5639596f4921fd761"}, ] [[package]] @@ -5413,4 +5423,4 @@ test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", [metadata] lock-version = "2.0" python-versions = ">=3.11,<3.12" -content-hash = "46a4b79a2154eb1ccee2c9b15164b8c6fda8764fc7df9123775467f43ec50e67" +content-hash = "1212598e892a0f23e91d42a773e303313bc870998e88a6c6726d77d592cffbac" diff --git a/pyproject.toml b/pyproject.toml index f01565bb8..060c5bb69 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -87,7 +87,6 @@ arq = "^0.26.0" alembic = "^1.13.2" -numpy = "^2.0.0" quickchart-io = "^2.0.0" scipy = "^1.14.1" networkx = "^3.3" diff --git a/tests/test_alert_correlation.py b/tests/test_alert_correlation.py deleted file mode 100644 index aaa06be16..000000000 --- a/tests/test_alert_correlation.py +++ /dev/null @@ -1,133 +0,0 @@ -import os -import pytest -import random -import numpy as np - -from datetime import datetime, timedelta -from unittest.mock import patch, MagicMock, AsyncMock -from keep.api.models.db.alert import Alert -from keep.api.models.db.tenant import Tenant -from ee.experimental.incident_utils import mine_incidents_and_create_objects, calculate_pmi_matrix, DEFAULT_TEMP_DIR_LOCATION - -random.seed(42) - -@pytest.mark.asyncio -async def test_mine_incidents_and_create_objects(db_session, tenant_id='test', n_alerts=10000, n_fingerprints=50): - # Add alerts - current_time = datetime.now() - time_lags = [int(round(random.normalvariate(mu=60*24*30/2, sigma=60*24*30/6))) for _ in range(n_alerts)] - alerts = [ - Alert( - tenant_id=tenant_id, - provider_type="test", - provider_id="test", - event={ - "id": f"test-{i}", - "name": f"Test Alert {i}", - "fingerprint": f"fp-{i % n_fingerprints}", - "lastReceived": (current_time - timedelta(minutes=time_lags[i])).isoformat(), - "severity": "critical", - "source": ["test-source"], - }, - fingerprint=f"fp-{i % n_fingerprints}", - timestamp=current_time - timedelta(minutes=time_lags[i]) - ) - for i in range(n_alerts) - ] - db_session.add_all(alerts) - db_session.commit() - - # add Tenant - tenant = Tenant( - id=tenant_id, - name=tenant_id, - configuration={ - "ee_enabled": True, - } - ) - db_session.add(tenant) - db_session.commit() - - # Mock dependencies and call the function - with patch('ee.experimental.incident_utils.get_pusher_client') as mock_pusher, \ - patch('ee.experimental.incident_utils.get_pool') as mock_get_pool: - - mock_pusher.return_value = MagicMock() - mock_pool = AsyncMock() - 
mock_get_pool.return_value = mock_pool - - result = await mine_incidents_and_create_objects(None, tenant_id) - - assert result is not None - assert mock_pusher.called - assert mock_get_pool.called - -def test_calculate_pmi_matrix(db_session, tenant_id='test', n_alerts=10000, n_fingerprints=50): - # Add Alerts - current_time = datetime.now() - time_lags = [int(round(random.normalvariate(mu=60*24*30/2, sigma=60*24*30/6))) for _ in range(n_alerts)] - alerts = [ - Alert( - tenant_id=tenant_id, - provider_type="test", - provider_id="test", - event={ - "id": f"test-{i}", - "name": f"Test Alert {i}", - "fingerprint": f"fp-{i % n_fingerprints}", - "lastReceived": (current_time - timedelta(minutes=time_lags[i])).isoformat(), - "severity": "critical", - "source": ["test-source"], - }, - fingerprint=f"fp-{i % n_fingerprints}", - timestamp=current_time - timedelta(minutes=time_lags[i]) - ) - for i in range(n_alerts) - ] - db_session.add_all(alerts) - db_session.commit() - - # add Tenant - tenant = Tenant( - id=tenant_id, - name=tenant_id, - configuration={ - "ee_enabled": True, - } - ) - db_session.add(tenant) - db_session.commit() - - # Call the function - result = calculate_pmi_matrix(None, tenant_id) - - assert result["status"] == "success" - pmi_matrix = result["pmi_matrix"] - fingerprints = result["pmi_columns"] - assert (np.unique(fingerprints) == np.unique([f"fp-{i % n_fingerprints}" for i in range(n_fingerprints)])).all() - assert pmi_matrix.shape == (n_fingerprints, n_fingerprints) - - -@pytest.mark.asyncio -async def test_mine_incidents_and_create_objects_with_no_alerts(db_session, tenant_id='test'): - # add Tenant - tenant = Tenant( - id=tenant_id, - name=tenant_id, - configuration={ - "ee_enabled": True, - } - ) - - with patch('ee.experimental.incident_utils.get_pusher_client') as mock_pusher, \ - patch('ee.experimental.incident_utils.get_pool') as mock_get_pool: - - mock_pusher.return_value = MagicMock() - mock_pool = AsyncMock() - mock_get_pool.return_value = mock_pool - - result = await mine_incidents_and_create_objects(None, tenant_id) - - assert result=={"incidents": []} - -
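
For context on the behavior these hunks introduce: `keep provider build_cache` serializes the discovered provider list to providers_cache.json at image build time (see the new Dockerfile.api step), and `ProvidersFactory.get_all_providers()` now prefers that file, whose path can be overridden via the PROVIDERS_CACHE_FILE environment variable, unless called with `ignore_cache_file=True`. The snippet below is a minimal standalone sketch of that flow under those assumptions, not the Keep implementation; `DemoProvider` and `discover_providers` are illustrative stand-ins for the real Provider model and the dynamic-import discovery loop.

    # Sketch of the providers-cache flow added in this diff (illustrative only).
    import json
    import os
    from dataclasses import asdict, dataclass

    PROVIDERS_CACHE_FILE = os.environ.get("PROVIDERS_CACHE_FILE", "providers_cache.json")


    @dataclass
    class DemoProvider:
        # Stand-in for the real Provider model serialized into the cache file.
        type: str
        can_notify: bool = False


    def discover_providers() -> list[DemoProvider]:
        # Stand-in for the expensive dynamic-import discovery of provider modules.
        return [DemoProvider(type="demo", can_notify=True)]


    def build_cache() -> None:
        # Mirrors `keep provider build_cache`: always rediscover, then persist to JSON.
        providers = discover_providers()
        with open(PROVIDERS_CACHE_FILE, "w") as f:
            json.dump([asdict(p) for p in providers], f)


    def get_all_providers(ignore_cache_file: bool = False) -> list[DemoProvider]:
        # Mirrors the new fast path: load from the JSON cache when it exists,
        # unless the caller explicitly bypasses it (as build_cache does).
        if os.path.exists(PROVIDERS_CACHE_FILE) and not ignore_cache_file:
            with open(PROVIDERS_CACHE_FILE) as f:
                return [DemoProvider(**provider) for provider in json.load(f)]
        return discover_providers()


    if __name__ == "__main__":
        build_cache()               # done once, e.g. during the Docker image build
        print(get_all_providers())  # later calls are served from providers_cache.json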