diff --git a/docs/deployment/configuration.mdx b/docs/deployment/configuration.mdx
index f4c1a1714..abcbb00fc 100644
--- a/docs/deployment/configuration.mdx
+++ b/docs/deployment/configuration.mdx
@@ -69,7 +69,7 @@ Resource provisioning settings control how Keep sets up initial resources. This
Authentication configuration determines how Keep verifies user identities and manages access control. These settings are essential for securing your Keep instance and integrating with various authentication providers.
-For specifc authentication type configuration, please see [authentication docs](/deployment/authentication/overview).
+For specific authentication type configuration, please see [authentication docs](/deployment/authentication/overview).
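+
+For example, a minimal Docker Compose snippet that selects an authentication mode could look like the following sketch (the service name and value are illustrative; the table below and the authentication docs are the authoritative reference):
+
+```yaml
+services:
+  keep-backend:
+    environment:
+      # AUTH_TYPE selects the authentication provider Keep uses
+      - AUTH_TYPE=DB
+```
+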
| Env var | Purpose | Required | Default Value | Valid options |
diff --git a/docs/deployment/ecs.mdx b/docs/deployment/ecs.mdx
index 097092e1a..61289afcd 100644
--- a/docs/deployment/ecs.mdx
+++ b/docs/deployment/ecs.mdx
@@ -102,7 +102,7 @@ sidebarTitle: "AWS ECS"
- Configuration Type: Configure at task definition creation
- Volume type: EFS
- Storage configurations:
- - File system ID: Select an exisiting EFS filesystem or create a new one
+ - File system ID: Select an existing EFS filesystem or create a new one
- Root Directory: /
![Volume Configuration](/images/ecs-task-def-backend5.png)
- Container mount points:
diff --git a/docs/deployment/stress-testing.mdx b/docs/deployment/stress-testing.mdx
index 9f9dc9716..14ebb48c2 100644
--- a/docs/deployment/stress-testing.mdx
+++ b/docs/deployment/stress-testing.mdx
@@ -27,7 +27,7 @@ The primary parameters that affect the specification requirements for Keep are:
3. **Number of Workflows**: How many automations run as a result of an alert.
### Main Components:
-- **Keep Backend** - API and buisness logic. A container that serves FastAPI on top of gunicorn.
+- **Keep Backend** - API and business logic. A container that serves FastAPI on top of gunicorn.
- **Keep Frontend** - Web app. A container that serves the React app.
- **Database** - Stores the alerts and any other operational data.
- **Elasticsearch** (disabled by default) - Stores alerts as documents for better search performance.
diff --git a/docs/overview/comparisons.mdx b/docs/overview/comparisons.mdx
index 2329579f3..e26f70623 100644
--- a/docs/overview/comparisons.mdx
+++ b/docs/overview/comparisons.mdx
@@ -17,7 +17,7 @@ Keep is different because it’s able to correlate alerts between different obse
| | Keep | Alternative |
| ------------------------------------- | -------------------------------------------------------------- | ---------------------------- |
| Aggregates alerts from one platform | ✅ | ✅ |
-| Aggregates alerts from mutliple platforms | ✅ | ❌ |
+| Aggregates alerts from multiple platforms | ✅ | ❌ |
| Correlates alerts between multiple sources | ✅ | ❌ |
| Alerts enrichment | ✅ | ❌ |
| Open source | ✅ | ❌ |
diff --git a/docs/overview/deduplication.mdx b/docs/overview/deduplication.mdx
index 00d755ec5..a0e3eabb6 100644
--- a/docs/overview/deduplication.mdx
+++ b/docs/overview/deduplication.mdx
@@ -20,7 +20,7 @@ Alert deduplication is a crucial feature in Keep that helps reduce noise and str
Partial deduplication allows you to specify certain fields (fingerprint fields) that are used to identify similar alerts. Alerts with matching values in these specified fields are considered duplicates and are grouped together. This method is flexible and allows for fine-tuned control over how alerts are deduplicated.
Every provider integrated with Keep comes with a pre-built partial deduplication rule tailored to that provider's specific alert format and common use cases.
-The default fingerprint fields defined using `FINGERPRINT_FIELDS` attributes in the provider code (e.g. [datadog provider](https://github.com/keephq/keep/blob/main/keep/providers/datadog_provider/datadog_provider.py#L188) or [gcp monitoring provder](https://github.com/keephq/keep/blob/main/keep/providers/gcpmonitoring_provider/gcpmonitoring_provider.py#L52)).
+The default fingerprint fields are defined using the `FINGERPRINT_FIELDS` attribute in the provider code (e.g. the [datadog provider](https://github.com/keephq/keep/blob/main/keep/providers/datadog_provider/datadog_provider.py#L188) or the [gcp monitoring provider](https://github.com/keephq/keep/blob/main/keep/providers/gcpmonitoring_provider/gcpmonitoring_provider.py#L52)).
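+
+For example, if a provider's fingerprint fields were `monitor_id` and `group` (illustrative names), the two events below would produce the same fingerprint and be grouped into a single alert, even though their timestamps differ:
+
+```yaml
+# first occurrence
+- monitor_id: 1234
+  group: payments
+  status: firing
+  lastReceived: "2024-11-01T10:00:00Z"
+# second occurrence: same fingerprint field values, so it is deduplicated into the alert above
+- monitor_id: 1234
+  group: payments
+  status: firing
+  lastReceived: "2024-11-01T10:05:00Z"
+```
+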
### Full Deduplication
When full deduplication is enabled, Keep will also discard events that are exactly identical (excluding ignored fields). This mode considers all fields of an alert when determining duplicates, except for explicitly ignored fields.
diff --git a/examples/workflows/cron-digest-alerts.yml b/examples/workflows/cron-digest-alerts.yml
index 10baa9ecc..a4bcd786b 100644
--- a/examples/workflows/cron-digest-alerts.yml
+++ b/examples/workflows/cron-digest-alerts.yml
@@ -2,6 +2,7 @@ workflow:
id: alerts-daily-digest
description: run alerts digest twice a day (on 11:00 and 14:00)
triggers:
+ - type: manual
- type: interval
cron: 0 11,14 * * *
steps:
@@ -10,18 +11,16 @@ workflow:
provider:
type: keep
with:
- filters:
- # filter out alerts that are closed
- - key: status
- value: open
+ version: 2
+ filter: "status == 'firing'"
timerange:
- from: "{{ state.workflows.alerts-daily-digest.last_run_time }}"
+ from: "{{ last_workflow_run_time }}"
to: now
actions:
- name: send-digest
foreach: "{{ steps.get-alerts.results }}"
provider:
- type: slack
- config: "{{ providers.slack }}"
+ type: console
+ config: "{{ providers.console }}"
with:
- message: "Open alert: {{ foreach.value.name }}"
+      message: "Firing alert: {{ foreach.value.name }}"
diff --git a/keep-ui/app/incidents/[id]/incident-activity.tsx b/keep-ui/app/incidents/[id]/incident-activity.tsx
index 2ab11e50f..9cdf91f26 100644
--- a/keep-ui/app/incidents/[id]/incident-activity.tsx
+++ b/keep-ui/app/incidents/[id]/incident-activity.tsx
@@ -15,7 +15,7 @@ import {
import { AuditEvent, useAlerts } from "@/utils/hooks/useAlerts";
import Loading from "@/app/loading";
import { useCallback, useState, useEffect } from "react";
-import { getApiURL } from "@/utils/apiUrl";
+import { useApiUrl } from "@/utils/hooks/useConfig";
import { useSession } from "next-auth/react";
import { KeyedMutator } from "swr";
import { toast } from "react-toastify";
@@ -59,7 +59,6 @@ export function IncidentActivityChronoItem({ activity }: { activity: any }) {
);
}
-
export function IncidentActivityChronoItemComment({
incident,
mutator,
@@ -68,7 +67,7 @@ export function IncidentActivityChronoItemComment({
mutator: KeyedMutator;
}) {
const [comment, setComment] = useState("");
- const apiUrl = getApiURL();
+ const apiUrl = useApiUrl();
const { data: session } = useSession();
const onSubmit = useCallback(async () => {
diff --git a/keep/api/routes/alerts.py b/keep/api/routes/alerts.py
index c14c092f9..8a35b8f9d 100644
--- a/keep/api/routes/alerts.py
+++ b/keep/api/routes/alerts.py
@@ -4,7 +4,7 @@
import json
import logging
import os
-from typing import Optional, List
+from typing import List, Optional
import celpy
from arq import ArqRedis
@@ -25,7 +25,12 @@
from keep.api.consts import KEEP_ARQ_QUEUE_BASIC
from keep.api.core.config import config
from keep.api.core.db import get_alert_audit as get_alert_audit_db
-from keep.api.core.db import get_alerts_by_fingerprint, get_enrichment, get_last_alerts, get_alerts_metrics_by_provider
+from keep.api.core.db import (
+ get_alerts_by_fingerprint,
+ get_alerts_metrics_by_provider,
+ get_enrichment,
+ get_last_alerts,
+)
from keep.api.core.dependencies import extract_generic_body, get_pusher_client
from keep.api.core.elastic import ElasticClient
from keep.api.models.alert import (
@@ -37,15 +42,15 @@
from keep.api.models.alert_audit import AlertAuditDto
from keep.api.models.db.alert import AlertActionType
from keep.api.models.search_alert import SearchAlertsRequest
+from keep.api.models.time_stamp import TimeStampFilter
from keep.api.tasks.process_event_task import process_event
from keep.api.utils.email_utils import EmailTemplates, send_email
from keep.api.utils.enrichment_helpers import convert_db_alerts_to_dto_alerts
+from keep.api.utils.time_stamp_helpers import get_time_stamp_filter
from keep.identitymanager.authenticatedentity import AuthenticatedEntity
from keep.identitymanager.identitymanagerfactory import IdentityManagerFactory
from keep.providers.providers_factory import ProvidersFactory
from keep.searchengine.searchengine import SearchEngine
-from keep.api.utils.time_stamp_helpers import get_time_stamp_filter
-from keep.api.models.time_stamp import TimeStampFilter
router = APIRouter()
logger = logging.getLogger(__name__)
@@ -447,19 +452,23 @@ def enrich_alert(
authenticated_entity: AuthenticatedEntity = Depends(
IdentityManagerFactory.get_auth_verifier(["write:alert"])
),
+ dispose_on_new_alert: Optional[bool] = Query(
+ False, description="Dispose on new alert"
+ ),
) -> dict[str, str]:
- return _enrich_alert(enrich_data, authenticated_entity=authenticated_entity)
+ return _enrich_alert(
+ enrich_data,
+ authenticated_entity=authenticated_entity,
+ dispose_on_new_alert=dispose_on_new_alert,
+ )
def _enrich_alert(
enrich_data: EnrichAlertRequestBody,
- pusher_client: Pusher = Depends(get_pusher_client),
authenticated_entity: AuthenticatedEntity = Depends(
IdentityManagerFactory.get_auth_verifier(["write:alert"])
),
- dispose_on_new_alert: Optional[bool] = Query(
- False, description="Dispose on new alert"
- ),
+ dispose_on_new_alert: Optional[bool] = False,
) -> dict[str, str]:
tenant_id = authenticated_entity.tenant_id
logger.info(
@@ -469,7 +478,6 @@ def _enrich_alert(
"tenant_id": tenant_id,
},
)
-
try:
enrichement_bl = EnrichmentsBl(tenant_id)
# Shahar: TODO, change to the specific action type, good enough for now
@@ -530,6 +538,7 @@ def _enrich_alert(
logger.exception("Failed to push alert to elasticsearch")
pass
# use pusher to push the enriched alert to the client
+ pusher_client = get_pusher_client()
if pusher_client:
logger.info("Telling client to poll alerts")
try:
@@ -770,17 +779,15 @@ def get_alert_quality(
):
logger.info(
"Fetching alert quality metrics per provider",
- extra={
- "tenant_id": authenticated_entity.tenant_id,
- "fields": fields
- },
-
+ extra={"tenant_id": authenticated_entity.tenant_id, "fields": fields},
)
start_date = time_stamp.lower_timestamp if time_stamp else None
end_date = time_stamp.upper_timestamp if time_stamp else None
db_alerts_quality = get_alerts_metrics_by_provider(
- tenant_id=authenticated_entity.tenant_id, start_date=start_date, end_date=end_date,
- fields=fields
+ tenant_id=authenticated_entity.tenant_id,
+ start_date=start_date,
+ end_date=end_date,
+ fields=fields,
)
-
+
return db_alerts_quality
diff --git a/keep/contextmanager/contextmanager.py b/keep/contextmanager/contextmanager.py
index b044962c4..59ca134bc 100644
--- a/keep/contextmanager/contextmanager.py
+++ b/keep/contextmanager/contextmanager.py
@@ -35,6 +35,7 @@ def __init__(self, tenant_id, workflow_id=None, workflow_execution_id=None):
self.click_context = {}
# last workflow context
self.last_workflow_execution_results = {}
+ self.last_workflow_run_time = None
if self.workflow_id:
try:
last_workflow_execution = get_last_workflow_execution_by_workflow_id(
@@ -44,6 +45,7 @@ def __init__(self, tenant_id, workflow_id=None, workflow_execution_id=None):
self.last_workflow_execution_results = (
last_workflow_execution.results
)
+ self.last_workflow_run_time = last_workflow_execution.started
except Exception:
self.logger.exception("Failed to get last workflow execution")
pass
@@ -130,6 +132,7 @@ def get_full_context(self, exclude_providers=False, exclude_env=False):
"foreach": self.foreach_context,
"event": self.event_context,
"last_workflow_results": self.last_workflow_execution_results,
+ "last_workflow_run_time": self.last_workflow_run_time,
"alert": self.event_context, # this is an alias so workflows will be able to use alert.source
"incident": self.incident_context, # this is an alias so workflows will be able to use alert.source
"consts": self.consts_context,
diff --git a/keep/providers/keep_provider/keep_provider.py b/keep/providers/keep_provider/keep_provider.py
index c06f251f0..6a7137b59 100644
--- a/keep/providers/keep_provider/keep_provider.py
+++ b/keep/providers/keep_provider/keep_provider.py
@@ -3,6 +3,7 @@
"""
import logging
+from datetime import datetime, timezone
from keep.api.core.db import get_alerts_with_filters
from keep.api.models.alert import AlertDto
@@ -28,6 +29,31 @@ def dispose(self):
"""
pass
+ def _calculate_time_delta(self, timerange=None, default_time_range=1):
+        """Calculate the time delta in days (possibly fractional) between the timerange 'from' and 'to' values."""
+ if not timerange or "from" not in timerange:
+ return default_time_range # default value
+
+ from_time_str = timerange["from"]
+ to_time_str = timerange.get("to", "now")
+
+ # Parse from_time and ensure it's timezone-aware
+ from_time = datetime.fromisoformat(from_time_str.replace("Z", "+00:00"))
+ if from_time.tzinfo is None:
+ from_time = from_time.replace(tzinfo=timezone.utc)
+
+ # Handle 'to' time
+ if to_time_str == "now":
+ to_time = datetime.now(timezone.utc)
+ else:
+ to_time = datetime.fromisoformat(to_time_str.replace("Z", "+00:00"))
+ if to_time.tzinfo is None:
+ to_time = to_time.replace(tzinfo=timezone.utc)
+
+ # Calculate difference in days
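+        # e.g. from=2024-11-01T00:00:00Z, to=2024-11-02T12:00:00Z -> 1.5 days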
+ delta = (to_time - from_time).total_seconds() / (24 * 3600) # convert to days
+ return delta
+
def _query(self, filters=None, version=1, distinct=True, time_delta=1, **kwargs):
"""
Query Keep for alerts.
@@ -40,6 +66,11 @@ def _query(self, filters=None, version=1, distinct=True, time_delta=1, **kwargs)
"time_delta": time_delta,
},
)
+ # if timerange is provided, calculate time delta
+ if kwargs.get("timerange"):
+ time_delta = self._calculate_time_delta(
+ timerange=kwargs.get("timerange"), default_time_range=time_delta
+ )
if version == 1:
# filters are mandatory for version 1
if not filters:
diff --git a/keep/providers/zabbix_provider/zabbix_provider.py b/keep/providers/zabbix_provider/zabbix_provider.py
index b7bca0ad3..9ca8d7b14 100644
--- a/keep/providers/zabbix_provider/zabbix_provider.py
+++ b/keep/providers/zabbix_provider/zabbix_provider.py
@@ -594,6 +594,8 @@ def _format_alert(
event_id = event.get("id")
trigger_id = event.get("triggerId")
zabbix_url = event.pop("ZABBIX.URL", None)
+ hostname = event.get("HOST.NAME")
+ ip_address = event.get("HOST.IP")
if zabbix_url == "{$ZABBIX.URL}":
# This means user did not configure $ZABBIX.URL in Zabbix probably
@@ -638,6 +640,9 @@ def _format_alert(
url=url,
lastReceived=last_received,
tags=tags,
+ hostname=hostname,
+ service=hostname,
+ ip_address=ip_address,
)
diff --git a/pyproject.toml b/pyproject.toml
index e1de24eae..6d44fbede 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "keep"
-version = "0.27.3"
+version = "0.27.6"
description = "Alerting. for developers, by developers."
authors = ["Keep Alerting LTD"]
readme = "README.md"