From d29f06d5b7f9c1834be9fb2e6e2e13b005d1c0b5 Mon Sep 17 00:00:00 2001 From: Kunal Gupta <39487888+iamKunalGupta@users.noreply.github.com> Date: Wed, 13 Mar 2024 01:10:15 +0530 Subject: [PATCH 1/7] feat(alerting): add email sender (#1433) New ENV Vars: - `PEERDB_ALERTING_EMAIL_SENDER_SOURCE_EMAIL` - `PEERDB_ALERTING_EMAIL_SENDER_CONFIGURATION_SET` - `PEERDB_ALERTING_EMAIL_SENDER_REGION` - `PEERDB_ALERTING_EMAIL_SENDER_REPLY_TO_ADDRESSES` - [x] test aws cross region sending - config set is per region, have added env for setting a specific region UI: image --------- Co-authored-by: Amogh-Bharadwaj --- flow/alerting/alerting.go | 161 ++++++++----- flow/alerting/email_alert_sender.go | 104 ++++++++ flow/alerting/interface.go | 9 + flow/alerting/slack_alert_sender.go | 21 +- flow/alerting/types.go | 8 + flow/go.mod | 1 + flow/go.sum | 2 + flow/peerdbenv/config.go | 17 ++ flow/shared/aws_common/config.go | 21 ++ flow/shared/telemetry/sns_message_sender.go | 12 +- .../V21__alert_constraint_update.sql | 6 + .../V22__alert_column_add_config_id.sql | 2 + ui/app/alert-config/new.tsx | 226 +++++++++++++----- ui/app/alert-config/page.tsx | 84 +++++-- ui/app/alert-config/validation.ts | 86 +++++-- ui/app/api/alert-config/route.ts | 1 + ui/components/AlertDropdown.tsx | 23 +- ui/public/images/email.png | Bin 0 -> 18231 bytes ui/public/images/slack.png | Bin 6109 -> 6999 bytes ui/public/images/slack_full.png | Bin 0 -> 6109 bytes 20 files changed, 592 insertions(+), 192 deletions(-) create mode 100644 flow/alerting/email_alert_sender.go create mode 100644 flow/alerting/interface.go create mode 100644 flow/alerting/types.go create mode 100644 flow/shared/aws_common/config.go create mode 100644 nexus/catalog/migrations/V21__alert_constraint_update.sql create mode 100644 nexus/catalog/migrations/V22__alert_column_add_config_id.sql create mode 100644 ui/public/images/email.png create mode 100644 ui/public/images/slack_full.png diff --git a/flow/alerting/alerting.go b/flow/alerting/alerting.go index 93cb946f78..acdbe6c290 100644 --- a/flow/alerting/alerting.go +++ b/flow/alerting/alerting.go @@ -3,8 +3,10 @@ package alerting import ( "context" "encoding/json" + "errors" "fmt" "log/slog" + "strings" "time" "github.com/jackc/pgx/v5" @@ -23,32 +25,67 @@ type Alerter struct { telemetrySender telemetry.Sender } -func (a *Alerter) registerSendersFromPool(ctx context.Context) ([]*slackAlertSender, error) { +type AlertSenderConfig struct { + Id int64 + Sender AlertSender +} + +func (a *Alerter) registerSendersFromPool(ctx context.Context) ([]AlertSenderConfig, error) { rows, err := a.catalogPool.Query(ctx, - "SELECT service_type,service_config FROM peerdb_stats.alerting_config") + "SELECT id,service_type,service_config FROM peerdb_stats.alerting_config") if err != nil { return nil, fmt.Errorf("failed to read alerter config from catalog: %w", err) } - var slackAlertSenders []*slackAlertSender - var serviceType, serviceConfig string - _, err = pgx.ForEachRow(rows, []any{&serviceType, &serviceConfig}, func() error { + var alertSenderConfigs []AlertSenderConfig + var serviceType ServiceType + var serviceConfig string + var id int64 + _, err = pgx.ForEachRow(rows, []any{&id, &serviceType, &serviceConfig}, func() error { switch serviceType { - case "slack": + case SLACK: var slackServiceConfig slackAlertConfig err = json.Unmarshal([]byte(serviceConfig), &slackServiceConfig) if err != nil { - return fmt.Errorf("failed to unmarshal Slack service config: %w", err) + return fmt.Errorf("failed to unmarshal %s service config: 
%w", serviceType, err) + } + + alertSenderConfigs = append(alertSenderConfigs, AlertSenderConfig{Id: id, Sender: newSlackAlertSender(&slackServiceConfig)}) + case EMAIL: + var replyToAddresses []string + if replyToEnvString := strings.TrimSpace( + peerdbenv.PeerDBAlertingEmailSenderReplyToAddresses()); replyToEnvString != "" { + replyToAddresses = strings.Split(replyToEnvString, ",") + } + emailServiceConfig := EmailAlertSenderConfig{ + sourceEmail: peerdbenv.PeerDBAlertingEmailSenderSourceEmail(), + configurationSetName: peerdbenv.PeerDBAlertingEmailSenderConfigurationSet(), + replyToAddresses: replyToAddresses, + } + if emailServiceConfig.sourceEmail == "" { + return errors.New("missing sourceEmail for Email alerting service") + } + err = json.Unmarshal([]byte(serviceConfig), &emailServiceConfig) + if err != nil { + return fmt.Errorf("failed to unmarshal %s service config: %w", serviceType, err) + } + var region *string + if envRegion := peerdbenv.PeerDBAlertingEmailSenderRegion(); envRegion != "" { + region = &envRegion } - slackAlertSenders = append(slackAlertSenders, newSlackAlertSender(&slackServiceConfig)) + alertSender, alertSenderErr := NewEmailAlertSenderWithNewClient(ctx, region, &emailServiceConfig) + if alertSenderErr != nil { + return fmt.Errorf("failed to initialize email alerter: %w", alertSenderErr) + } + alertSenderConfigs = append(alertSenderConfigs, AlertSenderConfig{Id: id, Sender: alertSender}) default: return fmt.Errorf("unknown service type: %s", serviceType) } return nil }) - return slackAlertSenders, nil + return alertSenderConfigs, nil } // doesn't take care of closing pool, needs to be done externally. @@ -75,9 +112,9 @@ func NewAlerter(ctx context.Context, catalogPool *pgxpool.Pool) *Alerter { } func (a *Alerter) AlertIfSlotLag(ctx context.Context, peerName string, slotInfo *protos.SlotInfo) { - slackAlertSenders, err := a.registerSendersFromPool(ctx) + alertSenderConfigs, err := a.registerSendersFromPool(ctx) if err != nil { - logger.LoggerFromCtx(ctx).Warn("failed to set Slack senders", slog.Any("error", err)) + logger.LoggerFromCtx(ctx).Warn("failed to set alert senders", slog.Any("error", err)) return } @@ -89,29 +126,30 @@ func (a *Alerter) AlertIfSlotLag(ctx context.Context, peerName string, slotInfo defaultSlotLagMBAlertThreshold := dynamicconf.PeerDBSlotLagMBAlertThreshold(ctx) // catalog cannot use default threshold to space alerts properly, use the lowest set threshold instead lowestSlotLagMBAlertThreshold := defaultSlotLagMBAlertThreshold - for _, slackAlertSender := range slackAlertSenders { - if slackAlertSender.slotLagMBAlertThreshold > 0 { - lowestSlotLagMBAlertThreshold = min(lowestSlotLagMBAlertThreshold, slackAlertSender.slotLagMBAlertThreshold) + for _, alertSender := range alertSenderConfigs { + if alertSender.Sender.getSlotLagMBAlertThreshold() > 0 { + lowestSlotLagMBAlertThreshold = min(lowestSlotLagMBAlertThreshold, alertSender.Sender.getSlotLagMBAlertThreshold()) } } - alertKey := peerName + "-slot-lag-threshold-exceeded" + alertKey := fmt.Sprintf("%s Slot Lag Threshold Exceeded for Peer %s", deploymentUIDPrefix, peerName) alertMessageTemplate := fmt.Sprintf("%sSlot `%s` on peer `%s` has exceeded threshold size of %%dMB, "+ - `currently at %.2fMB! 
- cc: `, deploymentUIDPrefix, slotInfo.SlotName, peerName, slotInfo.LagInMb) - - if slotInfo.LagInMb > float32(lowestSlotLagMBAlertThreshold) && - a.checkAndAddAlertToCatalog(ctx, alertKey, fmt.Sprintf(alertMessageTemplate, lowestSlotLagMBAlertThreshold)) { - for _, slackAlertSender := range slackAlertSenders { - if slackAlertSender.slotLagMBAlertThreshold > 0 { - if slotInfo.LagInMb > float32(slackAlertSender.slotLagMBAlertThreshold) { - a.alertToSlack(ctx, slackAlertSender, alertKey, - fmt.Sprintf(alertMessageTemplate, slackAlertSender.slotLagMBAlertThreshold)) - } - } else { - if slotInfo.LagInMb > float32(defaultSlotLagMBAlertThreshold) { - a.alertToSlack(ctx, slackAlertSender, alertKey, - fmt.Sprintf(alertMessageTemplate, defaultSlotLagMBAlertThreshold)) + `currently at %.2fMB!`, deploymentUIDPrefix, slotInfo.SlotName, peerName, slotInfo.LagInMb) + + if slotInfo.LagInMb > float32(lowestSlotLagMBAlertThreshold) { + for _, alertSenderConfig := range alertSenderConfigs { + if a.checkAndAddAlertToCatalog(ctx, + alertSenderConfig.Id, alertKey, fmt.Sprintf(alertMessageTemplate, lowestSlotLagMBAlertThreshold)) { + if alertSenderConfig.Sender.getSlotLagMBAlertThreshold() > 0 { + if slotInfo.LagInMb > float32(alertSenderConfig.Sender.getSlotLagMBAlertThreshold()) { + a.alertToProvider(ctx, alertSenderConfig, alertKey, + fmt.Sprintf(alertMessageTemplate, alertSenderConfig.Sender.getSlotLagMBAlertThreshold())) + } + } else { + if slotInfo.LagInMb > float32(defaultSlotLagMBAlertThreshold) { + a.alertToProvider(ctx, alertSenderConfig, alertKey, + fmt.Sprintf(alertMessageTemplate, defaultSlotLagMBAlertThreshold)) + } } } } @@ -121,7 +159,7 @@ func (a *Alerter) AlertIfSlotLag(ctx context.Context, peerName string, slotInfo func (a *Alerter) AlertIfOpenConnections(ctx context.Context, peerName string, openConnections *protos.GetOpenConnectionsForUserResult, ) { - slackAlertSenders, err := a.registerSendersFromPool(ctx) + alertSenderConfigs, err := a.registerSendersFromPool(ctx) if err != nil { logger.LoggerFromCtx(ctx).Warn("failed to set Slack senders", slog.Any("error", err)) return @@ -129,44 +167,45 @@ func (a *Alerter) AlertIfOpenConnections(ctx context.Context, peerName string, deploymentUIDPrefix := "" if peerdbenv.PeerDBDeploymentUID() != "" { - deploymentUIDPrefix = fmt.Sprintf("[%s] ", peerdbenv.PeerDBDeploymentUID()) + deploymentUIDPrefix = fmt.Sprintf("[%s] - ", peerdbenv.PeerDBDeploymentUID()) } // same as with slot lag, use lowest threshold for catalog defaultOpenConnectionsThreshold := dynamicconf.PeerDBOpenConnectionsAlertThreshold(ctx) lowestOpenConnectionsThreshold := defaultOpenConnectionsThreshold - for _, slackAlertSender := range slackAlertSenders { - if slackAlertSender.openConnectionsAlertThreshold > 0 { - lowestOpenConnectionsThreshold = min(lowestOpenConnectionsThreshold, slackAlertSender.openConnectionsAlertThreshold) + for _, alertSender := range alertSenderConfigs { + if alertSender.Sender.getOpenConnectionsAlertThreshold() > 0 { + lowestOpenConnectionsThreshold = min(lowestOpenConnectionsThreshold, alertSender.Sender.getOpenConnectionsAlertThreshold()) } } - alertKey := peerName + "-max-open-connections-threshold-exceeded" + alertKey := fmt.Sprintf("%s Max Open Connections Threshold Exceeded for Peer %s", deploymentUIDPrefix, peerName) alertMessageTemplate := fmt.Sprintf("%sOpen connections from PeerDB user `%s` on peer `%s`"+ - ` has exceeded threshold size of %%d connections, currently at %d connections! 
- cc: `, deploymentUIDPrefix, openConnections.UserName, peerName, openConnections.CurrentOpenConnections) - - if openConnections.CurrentOpenConnections > int64(lowestOpenConnectionsThreshold) && - a.checkAndAddAlertToCatalog(ctx, alertKey, fmt.Sprintf(alertMessageTemplate, lowestOpenConnectionsThreshold)) { - for _, slackAlertSender := range slackAlertSenders { - if slackAlertSender.openConnectionsAlertThreshold > 0 { - if openConnections.CurrentOpenConnections > int64(slackAlertSender.openConnectionsAlertThreshold) { - a.alertToSlack(ctx, slackAlertSender, alertKey, - fmt.Sprintf(alertMessageTemplate, slackAlertSender.openConnectionsAlertThreshold)) - } - } else { - if openConnections.CurrentOpenConnections > int64(defaultOpenConnectionsThreshold) { - a.alertToSlack(ctx, slackAlertSender, alertKey, - fmt.Sprintf(alertMessageTemplate, defaultOpenConnectionsThreshold)) + ` has exceeded threshold size of %%d connections, currently at %d connections!`, + deploymentUIDPrefix, openConnections.UserName, peerName, openConnections.CurrentOpenConnections) + + if openConnections.CurrentOpenConnections > int64(lowestOpenConnectionsThreshold) { + for _, alertSenderConfig := range alertSenderConfigs { + if a.checkAndAddAlertToCatalog(ctx, + alertSenderConfig.Id, alertKey, fmt.Sprintf(alertMessageTemplate, lowestOpenConnectionsThreshold)) { + if alertSenderConfig.Sender.getOpenConnectionsAlertThreshold() > 0 { + if openConnections.CurrentOpenConnections > int64(alertSenderConfig.Sender.getOpenConnectionsAlertThreshold()) { + a.alertToProvider(ctx, alertSenderConfig, alertKey, + fmt.Sprintf(alertMessageTemplate, alertSenderConfig.Sender.getOpenConnectionsAlertThreshold())) + } + } else { + if openConnections.CurrentOpenConnections > int64(defaultOpenConnectionsThreshold) { + a.alertToProvider(ctx, alertSenderConfig, alertKey, + fmt.Sprintf(alertMessageTemplate, defaultOpenConnectionsThreshold)) + } } } } } } -func (a *Alerter) alertToSlack(ctx context.Context, slackAlertSender *slackAlertSender, alertKey string, alertMessage string) { - err := slackAlertSender.sendAlert(ctx, - ":rotating_light:Alert:rotating_light:: "+alertKey, alertMessage) +func (a *Alerter) alertToProvider(ctx context.Context, alertSenderConfig AlertSenderConfig, alertKey string, alertMessage string) { + err := alertSenderConfig.Sender.sendAlert(ctx, alertKey, alertMessage) if err != nil { logger.LoggerFromCtx(ctx).Warn("failed to send alert", slog.Any("error", err)) return @@ -176,7 +215,7 @@ func (a *Alerter) alertToSlack(ctx context.Context, slackAlertSender *slackAlert // Only raises an alert if another alert with the same key hasn't been raised // in the past X minutes, where X is configurable and defaults to 15 minutes // returns true if alert added to catalog, so proceed with processing alerts to slack -func (a *Alerter) checkAndAddAlertToCatalog(ctx context.Context, alertKey string, alertMessage string) bool { +func (a *Alerter) checkAndAddAlertToCatalog(ctx context.Context, alertConfigId int64, alertKey string, alertMessage string) bool { dur := dynamicconf.PeerDBAlertingGapMinutesAsDuration(ctx) if dur == 0 { logger.LoggerFromCtx(ctx).Warn("Alerting disabled via environment variable, returning") @@ -184,9 +223,9 @@ func (a *Alerter) checkAndAddAlertToCatalog(ctx context.Context, alertKey string } row := a.catalogPool.QueryRow(ctx, - `SELECT created_timestamp FROM peerdb_stats.alerts_v1 WHERE alert_key=$1 + `SELECT created_timestamp FROM peerdb_stats.alerts_v1 WHERE alert_key=$1 AND alert_config_id=$2 ORDER BY 
created_timestamp DESC LIMIT 1`, - alertKey) + alertKey, alertConfigId) var createdTimestamp time.Time err := row.Scan(&createdTimestamp) if err != nil && err != pgx.ErrNoRows { @@ -196,14 +235,18 @@ func (a *Alerter) checkAndAddAlertToCatalog(ctx context.Context, alertKey string if time.Since(createdTimestamp) >= dur { _, err = a.catalogPool.Exec(ctx, - "INSERT INTO peerdb_stats.alerts_v1(alert_key,alert_message) VALUES($1,$2)", - alertKey, alertMessage) + "INSERT INTO peerdb_stats.alerts_v1(alert_key,alert_message,alert_config_id) VALUES($1,$2,$3)", + alertKey, alertMessage, alertConfigId) if err != nil { logger.LoggerFromCtx(ctx).Warn("failed to insert alert", slog.Any("error", err)) return false } return true } + + logger.LoggerFromCtx(ctx).Info( + fmt.Sprintf("Skipped sending alerts: last alert was sent at %s, which was >=%s ago", + createdTimestamp.String(), dur.String())) return false } diff --git a/flow/alerting/email_alert_sender.go b/flow/alerting/email_alert_sender.go new file mode 100644 index 0000000000..2a534318c0 --- /dev/null +++ b/flow/alerting/email_alert_sender.go @@ -0,0 +1,104 @@ +package alerting + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ses" + "github.com/aws/aws-sdk-go-v2/service/ses/types" + + "github.com/PeerDB-io/peer-flow/logger" + "github.com/PeerDB-io/peer-flow/peerdbenv" + "github.com/PeerDB-io/peer-flow/shared/aws_common" +) + +type EmailAlertSender struct { + AlertSender + client *ses.Client + sourceEmail string + configurationSetName string + replyToAddresses []string + slotLagMBAlertThreshold uint32 + openConnectionsAlertThreshold uint32 + emailAddresses []string +} + +func (e *EmailAlertSender) getSlotLagMBAlertThreshold() uint32 { + return e.slotLagMBAlertThreshold +} + +func (e *EmailAlertSender) getOpenConnectionsAlertThreshold() uint32 { + return e.openConnectionsAlertThreshold +} + +type EmailAlertSenderConfig struct { + sourceEmail string + configurationSetName string + replyToAddresses []string + SlotLagMBAlertThreshold uint32 `json:"slot_lag_mb_alert_threshold"` + OpenConnectionsAlertThreshold uint32 `json:"open_connections_alert_threshold"` + EmailAddresses []string `json:"email_addresses"` +} + +func (e *EmailAlertSender) sendAlert(ctx context.Context, alertTitle string, alertMessage string) error { + _, err := e.client.SendEmail(ctx, &ses.SendEmailInput{ + Destination: &types.Destination{ + ToAddresses: e.emailAddresses, + }, + Message: &types.Message{ + Body: &types.Body{ + Text: &types.Content{ + Data: aws.String(alertMessage), + Charset: aws.String("utf-8"), + }, + }, + Subject: &types.Content{ + Data: aws.String(alertTitle), + Charset: aws.String("utf-8"), + }, + }, + Source: aws.String(e.sourceEmail), + ConfigurationSetName: aws.String(e.configurationSetName), + ReplyToAddresses: e.replyToAddresses, + Tags: []types.MessageTag{ + {Name: aws.String("DeploymentUUID"), Value: aws.String(peerdbenv.PeerDBDeploymentUID())}, + }, + }) + if err != nil { + logger.LoggerFromCtx(ctx).Warn(fmt.Sprintf( + "Error sending email alert from %v to %s subject=[%s], body=[%s], configurationSet=%s, replyToAddresses=[%v]", + e.sourceEmail, e.emailAddresses, alertTitle, alertMessage, e.configurationSetName, e.replyToAddresses)) + return err + } + return nil +} + +func NewEmailAlertSenderWithNewClient(ctx context.Context, region *string, config *EmailAlertSenderConfig) (*EmailAlertSender, error) { + client, err := newSesClient(ctx, region) + if err != nil { + return nil, err + } + return 
NewEmailAlertSender(client, config), nil +} + +func NewEmailAlertSender(client *ses.Client, config *EmailAlertSenderConfig) *EmailAlertSender { + return &EmailAlertSender{ + client: client, + sourceEmail: config.sourceEmail, + configurationSetName: config.configurationSetName, + replyToAddresses: config.replyToAddresses, + slotLagMBAlertThreshold: config.SlotLagMBAlertThreshold, + openConnectionsAlertThreshold: config.OpenConnectionsAlertThreshold, + emailAddresses: config.EmailAddresses, + } +} + +func newSesClient(ctx context.Context, region *string) (*ses.Client, error) { + sdkConfig, err := aws_common.LoadSdkConfig(ctx, region) + if err != nil { + return nil, err + } + snsClient := ses.NewFromConfig(*sdkConfig) + return snsClient, nil +} diff --git a/flow/alerting/interface.go b/flow/alerting/interface.go new file mode 100644 index 0000000000..a9dd2d51b9 --- /dev/null +++ b/flow/alerting/interface.go @@ -0,0 +1,9 @@ +package alerting + +import "context" + +type AlertSender interface { + sendAlert(ctx context.Context, alertTitle string, alertMessage string) error + getSlotLagMBAlertThreshold() uint32 + getOpenConnectionsAlertThreshold() uint32 +} diff --git a/flow/alerting/slack_alert_sender.go b/flow/alerting/slack_alert_sender.go index 1ad007536b..85f0657d11 100644 --- a/flow/alerting/slack_alert_sender.go +++ b/flow/alerting/slack_alert_sender.go @@ -7,13 +7,22 @@ import ( "github.com/slack-go/slack" ) -type slackAlertSender struct { +type SlackAlertSender struct { + AlertSender client *slack.Client channelIDs []string slotLagMBAlertThreshold uint32 openConnectionsAlertThreshold uint32 } +func (s *SlackAlertSender) getSlotLagMBAlertThreshold() uint32 { + return s.slotLagMBAlertThreshold +} + +func (s *SlackAlertSender) getOpenConnectionsAlertThreshold() uint32 { + return s.openConnectionsAlertThreshold +} + type slackAlertConfig struct { AuthToken string `json:"auth_token"` ChannelIDs []string `json:"channel_ids"` @@ -21,8 +30,8 @@ type slackAlertConfig struct { OpenConnectionsAlertThreshold uint32 `json:"open_connections_alert_threshold"` } -func newSlackAlertSender(config *slackAlertConfig) *slackAlertSender { - return &slackAlertSender{ +func newSlackAlertSender(config *slackAlertConfig) *SlackAlertSender { + return &SlackAlertSender{ client: slack.New(config.AuthToken), channelIDs: config.ChannelIDs, slotLagMBAlertThreshold: config.SlotLagMBAlertThreshold, @@ -30,11 +39,11 @@ func newSlackAlertSender(config *slackAlertConfig) *slackAlertSender { } } -func (s *slackAlertSender) sendAlert(ctx context.Context, alertTitle string, alertMessage string) error { +func (s *SlackAlertSender) sendAlert(ctx context.Context, alertTitle string, alertMessage string) error { for _, channelID := range s.channelIDs { _, _, _, err := s.client.SendMessageContext(ctx, channelID, slack.MsgOptionBlocks( - slack.NewHeaderBlock(slack.NewTextBlockObject("plain_text", alertTitle, true, false)), - slack.NewSectionBlock(slack.NewTextBlockObject("mrkdwn", alertMessage, false, false), nil, nil), + slack.NewHeaderBlock(slack.NewTextBlockObject("plain_text", ":rotating_light:Alert:rotating_light:: "+alertTitle, true, false)), + slack.NewSectionBlock(slack.NewTextBlockObject("mrkdwn", alertMessage+"\ncc: ", false, false), nil, nil), )) if err != nil { return fmt.Errorf("failed to send message to Slack channel %s: %w", channelID, err) diff --git a/flow/alerting/types.go b/flow/alerting/types.go new file mode 100644 index 0000000000..6277010ee0 --- /dev/null +++ b/flow/alerting/types.go @@ -0,0 +1,8 @@ +package 
alerting + +type ServiceType string + +const ( + SLACK ServiceType = "slack" + EMAIL ServiceType = "email" +) diff --git a/flow/go.mod b/flow/go.mod index 6c319239ae..5126e3146d 100644 --- a/flow/go.mod +++ b/flow/go.mod @@ -15,6 +15,7 @@ require ( github.com/aws/aws-sdk-go-v2/credentials v1.17.7 github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.9 github.com/aws/aws-sdk-go-v2/service/s3 v1.51.4 + github.com/aws/aws-sdk-go-v2/service/ses v1.22.2 github.com/aws/aws-sdk-go-v2/service/sns v1.29.2 github.com/cockroachdb/pebble v1.1.0 github.com/google/uuid v1.6.0 diff --git a/flow/go.sum b/flow/go.sum index acb83d06ba..377b2c2a16 100644 --- a/flow/go.sum +++ b/flow/go.sum @@ -92,6 +92,8 @@ github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.3 h1:4t+QEX7BsXz98W github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.3/go.mod h1:oFcjjUq5Hm09N9rpxTdeMeLeQcxS7mIkBkL8qUKng+A= github.com/aws/aws-sdk-go-v2/service/s3 v1.51.4 h1:lW5xUzOPGAMY7HPuNF4FdyBwRc3UJ/e8KsapbesVeNU= github.com/aws/aws-sdk-go-v2/service/s3 v1.51.4/go.mod h1:MGTaf3x/+z7ZGugCGvepnx2DS6+caCYYqKhzVoLNYPk= +github.com/aws/aws-sdk-go-v2/service/ses v1.22.2 h1:cW5JtW23Lio3KDJ4l3jqRiOcCPKxJg7ooRA1SpIiuMo= +github.com/aws/aws-sdk-go-v2/service/ses v1.22.2/go.mod h1:MLj/NROJoperecxBME2zMN/O8Zrm0wv+6ah1Uqwpa1E= github.com/aws/aws-sdk-go-v2/service/sns v1.29.2 h1:kHm1SYs/NkxZpKINc4zOXOLJHVMzKtU4d7FlAMtDm50= github.com/aws/aws-sdk-go-v2/service/sns v1.29.2/go.mod h1:ZIs7/BaYel9NODoYa8PW39o15SFAXDEb4DxOG2It15U= github.com/aws/aws-sdk-go-v2/service/sso v1.20.2 h1:XOPfar83RIRPEzfihnp+U6udOveKZJvPQ76SKWrLRHc= diff --git a/flow/peerdbenv/config.go b/flow/peerdbenv/config.go index 9a59bad5ef..4903bec091 100644 --- a/flow/peerdbenv/config.go +++ b/flow/peerdbenv/config.go @@ -95,3 +95,20 @@ func PeerDBEnableParallelSyncNormalize() bool { func PeerDBTelemetryAWSSNSTopicArn() string { return getEnvString("PEERDB_TELEMETRY_AWS_SNS_TOPIC_ARN", "") } + +func PeerDBAlertingEmailSenderSourceEmail() string { + return getEnvString("PEERDB_ALERTING_EMAIL_SENDER_SOURCE_EMAIL", "") +} + +func PeerDBAlertingEmailSenderConfigurationSet() string { + return getEnvString("PEERDB_ALERTING_EMAIL_SENDER_CONFIGURATION_SET", "") +} + +func PeerDBAlertingEmailSenderRegion() string { + return getEnvString("PEERDB_ALERTING_EMAIL_SENDER_REGION", "") +} + +// Comma-separated reply-to addresses +func PeerDBAlertingEmailSenderReplyToAddresses() string { + return getEnvString("PEERDB_ALERTING_EMAIL_SENDER_REPLY_TO_ADDRESSES", "") +} diff --git a/flow/shared/aws_common/config.go b/flow/shared/aws_common/config.go new file mode 100644 index 0000000000..6eced96f05 --- /dev/null +++ b/flow/shared/aws_common/config.go @@ -0,0 +1,21 @@ +package aws_common + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" +) + +func LoadSdkConfig(ctx context.Context, region *string) (*aws.Config, error) { + sdkConfig, err := config.LoadDefaultConfig(ctx, func(options *config.LoadOptions) error { + if region != nil { + options.Region = *region + } + return nil + }) + if err != nil { + return nil, err + } + return &sdkConfig, nil +} diff --git a/flow/shared/telemetry/sns_message_sender.go b/flow/shared/telemetry/sns_message_sender.go index 9d32dcf8f9..218b693b38 100644 --- a/flow/shared/telemetry/sns_message_sender.go +++ b/flow/shared/telemetry/sns_message_sender.go @@ -8,10 +8,11 @@ import ( "unicode" "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/config" "github.com/aws/aws-sdk-go-v2/service/sns" 
"github.com/aws/aws-sdk-go-v2/service/sns/types" "go.temporal.io/sdk/activity" + + "github.com/PeerDB-io/peer-flow/shared/aws_common" ) type SNSMessageSender interface { @@ -109,15 +110,10 @@ func NewSNSMessageSender(client *sns.Client, config *SNSMessageSenderConfig) SNS } func newSnsClient(ctx context.Context, region *string) (*sns.Client, error) { - sdkConfig, err := config.LoadDefaultConfig(ctx, func(options *config.LoadOptions) error { - if region != nil { - options.Region = *region - } - return nil - }) + sdkConfig, err := aws_common.LoadSdkConfig(ctx, region) if err != nil { return nil, err } - snsClient := sns.NewFromConfig(sdkConfig) + snsClient := sns.NewFromConfig(*sdkConfig) return snsClient, nil } diff --git a/nexus/catalog/migrations/V21__alert_constraint_update.sql b/nexus/catalog/migrations/V21__alert_constraint_update.sql new file mode 100644 index 0000000000..18ddb7d9f8 --- /dev/null +++ b/nexus/catalog/migrations/V21__alert_constraint_update.sql @@ -0,0 +1,6 @@ +ALTER TABLE peerdb_stats.alerting_config +DROP CONSTRAINT alerting_config_service_type_check; + +ALTER TABLE peerdb_stats.alerting_config +ADD CONSTRAINT alerting_config_service_type_check +CHECK (service_type IN ('slack', 'email')); \ No newline at end of file diff --git a/nexus/catalog/migrations/V22__alert_column_add_config_id.sql b/nexus/catalog/migrations/V22__alert_column_add_config_id.sql new file mode 100644 index 0000000000..35c1147bf0 --- /dev/null +++ b/nexus/catalog/migrations/V22__alert_column_add_config_id.sql @@ -0,0 +1,2 @@ +ALTER TABLE peerdb_stats.alerts_v1 +ADD COLUMN alert_config_id BIGINT DEFAULT NULL; diff --git a/ui/app/alert-config/new.tsx b/ui/app/alert-config/new.tsx index 6a399e4c81..12dfbc9cf5 100644 --- a/ui/app/alert-config/new.tsx +++ b/ui/app/alert-config/new.tsx @@ -1,21 +1,27 @@ import { Button } from '@/lib/Button'; import { TextField } from '@/lib/TextField'; import Image from 'next/image'; -import { useState } from 'react'; +import { Dispatch, SetStateAction, useState } from 'react'; import ReactSelect from 'react-select'; import { PulseLoader } from 'react-spinners'; import { ToastContainer, toast } from 'react-toastify'; import 'react-toastify/dist/ReactToastify.css'; import SelectTheme from '../styles/select'; -import { alertConfigReqSchema, alertConfigType } from './validation'; +import { + alertConfigReqSchema, + alertConfigType, + emailConfigType, + serviceConfigType, + serviceTypeSchemaMap, + slackConfigType, +} from './validation'; + +export type ServiceType = 'slack' | 'email'; export interface AlertConfigProps { id?: bigint; - serviceType: string; - authToken: string; - channelIdString: string; - slotLagGBAlertThreshold: number; - openConnectionsAlertThreshold: number; + serviceType: ServiceType; + alertConfig: serviceConfigType; forEdit?: boolean; } @@ -25,45 +31,152 @@ const notifyErr = (errMsg: string) => { }); }; -function ConfigLabel() { +function ConfigLabel(data: { label: string; value: string }) { return (
- slack + {data.value} + {data.label}
); } -const NewAlertConfig = (alertProps: AlertConfigProps) => { - const [serviceType, setServiceType] = useState('slack'); - const [authToken, setAuthToken] = useState(alertProps.authToken); - const [channelIdString, setChannelIdString] = useState( - alertProps.channelIdString +function getSlackProps( + config: slackConfigType, + setConfig: Dispatch> +) { + return ( + <> +
+

Authorisation Token

+ { + setConfig((previous) => ({ + ...previous, + auth_token: e.target.value, + })); + }} + /> +
+
+

Channel IDs

+ { + setConfig((previous) => ({ + ...previous, + channel_ids: e.target.value.split(','), + })); + }} + /> +
+ + ); +} + +function getEmailProps( + config: emailConfigType, + setConfig: Dispatch> +) { + return ( + <> +
+

Email Addresses

+ { + setConfig((previous) => ({ + ...previous, + email_addresses: e.target.value.split(','), + })); + }} + /> +
+ + ); +} +function getServiceFields( + serviceType: ServiceType, + config: T, + setConfig: Dispatch> +) { + switch (serviceType) { + case 'email': + return getEmailProps( + config as emailConfigType, + setConfig as Dispatch> + ); + case 'slack': { + return getSlackProps( + config as slackConfigType, + setConfig as Dispatch> + ); + } + } +} + +export function NewConfig(alertProps: AlertConfigProps) { + const [serviceType, setServiceType] = useState( + alertProps.serviceType ); - const [slotLagGBAlertThreshold, setSlotLagGBAlertThreshold] = - useState(alertProps.slotLagGBAlertThreshold); - const [openConnectionsAlertThreshold, setOpenConnectionsAlertThreshold] = - useState(alertProps.openConnectionsAlertThreshold); + + const [config, setConfig] = useState( + alertProps.alertConfig + ); + const [loading, setLoading] = useState(false); + const handleAdd = async () => { - if (serviceType !== 'slack') { - notifyErr('Service Type must be selected'); + if (!serviceType) { + notifyErr('Service type must be selected'); + return; + } + + const serviceSchema = serviceTypeSchemaMap[serviceType]; + const serviceValidity = serviceSchema.safeParse(config); + if (!serviceValidity?.success) { + notifyErr( + 'Invalid alert service configuration for ' + + serviceType + + '. ' + + serviceValidity.error.issues[0].message + ); return; } + const serviceConfig = serviceValidity.data; const alertConfigReq: alertConfigType = { + id: Number(alertProps.id || -1), serviceType: serviceType, - serviceConfig: { - auth_token: authToken ?? '', - channel_ids: channelIdString?.split(',')!, - slot_lag_mb_alert_threshold: slotLagGBAlertThreshold * 1000 || 20000, - open_connections_alert_threshold: openConnectionsAlertThreshold || 5, - }, + serviceConfig, }; + const alertReqValidity = alertConfigReqSchema.safeParse(alertConfigReq); if (!alertReqValidity.success) { notifyErr(alertReqValidity.error.issues[0].message); return; } + setLoading(true); if (alertProps.forEdit) { alertConfigReq.id = Number(alertProps.id); @@ -72,6 +185,7 @@ const NewAlertConfig = (alertProps: AlertConfigProps) => { method: alertProps.forEdit ? 'PUT' : 'POST', body: JSON.stringify(alertConfigReq), }); + const createStatus = await createRes.text(); setLoading(false); if (createStatus !== 'success') { @@ -81,7 +195,7 @@ const NewAlertConfig = (alertProps: AlertConfigProps) => { window.location.reload(); }; - + const ServiceFields = getServiceFields(serviceType, config, setConfig); return (
{

Alert Provider

val && setServiceType(val.value)} + onChange={(val, _) => val && setServiceType(val.value as ServiceType)} theme={SelectTheme} />
-
-

Authorisation Token

- setAuthToken(e.target.value)} - /> -
- -
-

Channel IDs

- setChannelIdString(e.target.value)} - /> -
-

Slot Lag Alert Threshold (in GB)

setSlotLagGBAlertThreshold(e.target.valueAsNumber)} + value={config.slot_lag_mb_alert_threshold / 1000} + onChange={(e) => + setConfig((previous) => ({ + ...previous, + slot_lag_mb_alert_threshold: e.target.valueAsNumber * 1000, + })) + } />
-

Open Connections Alert Threshold

- setOpenConnectionsAlertThreshold(e.target.valueAsNumber) + setConfig((previous) => ({ + ...previous, + open_connections_alert_threshold: e.target.valueAsNumber, + })) } />
- + {ServiceFields}
); -}; +} -export default NewAlertConfig; +export default NewConfig; diff --git a/ui/app/alert-config/page.tsx b/ui/app/alert-config/page.tsx index 514f586155..25a92338b9 100644 --- a/ui/app/alert-config/page.tsx +++ b/ui/app/alert-config/page.tsx @@ -12,13 +12,25 @@ import useSWR from 'swr'; import { UAlertConfigResponse } from '../dto/AlertDTO'; import { tableStyle } from '../peers/[peerName]/style'; import { fetcher } from '../utils/swr'; -import NewAlertConfig, { AlertConfigProps } from './new'; -const ServiceIcon = (serviceType: string) => { - switch (serviceType.toLowerCase()) { - default: - return alt; - } +import { AlertConfigProps, NewConfig, ServiceType } from './new'; + +const ServiceIcon = ({ + serviceType, + size, +}: { + serviceType: string; + size: number; +}) => { + return ( + {serviceType} + ); }; + const AlertConfigPage: React.FC = () => { const { data: alerts, @@ -28,14 +40,19 @@ const AlertConfigPage: React.FC = () => { error: any; isLoading: boolean; } = useSWR('/api/alert-config', fetcher); + const blankAlert: AlertConfigProps = { - serviceType: '', - authToken: '', - channelIdString: '', - slotLagGBAlertThreshold: 20, - openConnectionsAlertThreshold: 5, + serviceType: 'slack', + alertConfig: { + email_addresses: [''], + auth_token: '', + channel_ids: [''], + open_connections_alert_threshold: 20, + slot_lag_mb_alert_threshold: 5000, + }, forEdit: false, }; + const [inEditOrAddMode, setInEditOrAddMode] = useState(false); const [editAlertConfig, setEditAlertConfig] = useState(blankAlert); @@ -43,19 +60,15 @@ const AlertConfigPage: React.FC = () => { const onEdit = (alertConfig: UAlertConfigResponse, id: bigint) => { setInEditOrAddMode(true); const configJSON = JSON.stringify(alertConfig.service_config); - const channelIds: string[] = JSON.parse(configJSON)?.channel_ids; + setEditAlertConfig({ id, - serviceType: alertConfig.service_type, - authToken: JSON.parse(configJSON)?.auth_token, - channelIdString: channelIds.join(','), - slotLagGBAlertThreshold: - (JSON.parse(configJSON)?.slot_lag_mb_alert_threshold as number) / 1000, - openConnectionsAlertThreshold: - JSON.parse(configJSON)?.open_connections_alert_threshold, + serviceType: alertConfig.service_type as ServiceType, + alertConfig: JSON.parse(configJSON), forEdit: true, }); }; + return (
@@ -79,11 +92,32 @@ const AlertConfigPage: React.FC = () => { {alerts?.length ? ( alerts.map((alertConfig: UAlertConfigResponse, index) => ( - - {alertConfig.id} - - - {ServiceIcon(alertConfig.service_type)} + +
+
+ + +
+
@@ -141,7 +175,7 @@ const AlertConfigPage: React.FC = () => { {inEditOrAddMode ? 'Cancel' : 'Add Configuration'} - {inEditOrAddMode && } + {inEditOrAddMode && }
); }; diff --git a/ui/app/alert-config/validation.ts b/ui/app/alert-config/validation.ts index 3256c51aa0..9b1a14b2b3 100644 --- a/ui/app/alert-config/validation.ts +++ b/ui/app/alert-config/validation.ts @@ -1,33 +1,77 @@ import z from 'zod'; -export const alertConfigReqSchema = z.object({ - id: z.optional(z.number()), - serviceType: z.enum(['slack'], { - errorMap: (issue, ctx) => ({ message: 'Invalid service type' }), - }), - serviceConfig: z.object({ +const baseServiceConfigSchema = z.object({ + slot_lag_mb_alert_threshold: z + .number({ + invalid_type_error: 'Slot threshold must be a number', + }) + .min(0, 'Slot threshold must be non-negative'), + open_connections_alert_threshold: z + .number({ + invalid_type_error: 'Threshold must be a number', + }) + .int({ message: 'Connections threshold must be a valid integer' }) + .min(0, 'Connections threshold must be non-negative'), +}); + +export const slackServiceConfigSchema = z.intersection( + baseServiceConfigSchema, + z.object({ auth_token: z .string({ required_error: 'Auth Token is needed.' }) .min(1, { message: 'Auth Token cannot be empty' }) .max(256, { message: 'Auth Token is too long' }), channel_ids: z - .array(z.string().min(1, { message: 'Channel IDs cannot be empty' }), { - required_error: 'We need a channel ID', - }) + .array( + z.string().trim().min(1, { message: 'Channel IDs cannot be empty' }), + { + required_error: 'We need a channel ID', + } + ) .min(1, { message: 'Atleast one channel ID is needed' }), - slot_lag_mb_alert_threshold: z - .number({ - invalid_type_error: 'Threshold must be a number', - }) - .int() - .min(0, 'Threshold must be non-negative'), - open_connections_alert_threshold: z - .number({ - invalid_type_error: 'Threshold must be a number', - }) - .int() - .min(0, 'Threshold must be non-negative'), + }) +); + +export const emailServiceConfigSchema = z.intersection( + baseServiceConfigSchema, + z.object({ + email_addresses: z + .array( + z + .string() + .trim() + .min(1, { message: 'Email Addresses cannot be empty' }) + .includes('@'), + { + required_error: 'We need an Email Address', + } + ) + .min(1, { message: 'Atleast one email address is needed' }), + }) +); + +export const serviceConfigSchema = z.union([ + slackServiceConfigSchema, + emailServiceConfigSchema, +]); +export const alertConfigReqSchema = z.object({ + id: z.optional(z.number({ invalid_type_error: 'ID must be a valid number' })), + serviceType: z.enum(['slack', 'email'], { + errorMap: (issue, ctx) => ({ message: 'Invalid service type' }), }), + serviceConfig: serviceConfigSchema, }); +export type baseServiceConfigType = z.infer; + +export type slackConfigType = z.infer; +export type emailConfigType = z.infer; + +export type serviceConfigType = z.infer; + export type alertConfigType = z.infer; + +export const serviceTypeSchemaMap = { + slack: slackServiceConfigSchema, + email: emailServiceConfigSchema, +}; diff --git a/ui/app/api/alert-config/route.ts b/ui/app/api/alert-config/route.ts index 7f66da191f..9b8ff019ed 100644 --- a/ui/app/api/alert-config/route.ts +++ b/ui/app/api/alert-config/route.ts @@ -23,6 +23,7 @@ export async function POST(request: Request) { if (createRes.id) { createStatus = 'success'; } + return new Response(createStatus); } diff --git a/ui/components/AlertDropdown.tsx b/ui/components/AlertDropdown.tsx index dd3ae482e3..bfc30648a7 100644 --- a/ui/components/AlertDropdown.tsx +++ b/ui/components/AlertDropdown.tsx @@ -40,19 +40,16 @@ const AlertDropdown = ({ borderRadius: '0.5rem', }} > - - - - - - + + + diff --git 
a/ui/public/images/email.png b/ui/public/images/email.png new file mode 100644 index 0000000000000000000000000000000000000000..0d302af32a0e57210a55831232e060bf016bd8a1 GIT binary patch literal 18231 zcmeI4e{2)i9l&3H5Lh6hl#NhS!OiKfZFTO4eX)IW+=LjKOf`){Ho}@Vx!j$TQ*58H zFR_EP%w0FN)uyeA)NPtr8%;y&2qQ?-enLrTMb)XAs&z#z1GGpJn;NyFonX+E;=OnF zvoFShM*&Tho~7h_cklb-z0ZBW_ujid?&;2L57yOgs09Gjb+iY&0H}b66<|dT6upTT z!ccrB-o7&lz{<7guLAsJ&w2nhx9Z(JW>4rLNim|0vT6)yjp=9{Y6IYZARU*LVa;R) zv_U=A>^OU9#=+=nvtx%C;zIE@ZAfq5ozS-L-qx+`9#(v+ve&T?K> z;GCRK5?vA}F!`S&Pz#Uz2{kNr1-ItI!Lw$^kZHyxmQAHnjVV{7kr-ropU=l~0xJkk zsNqbG#Y{Qvj3pa#POzV#mQ)gY+|-R2gZ#<^#)#SMaG*eiUw&NCcp*?MnP&$nvS~Ta z@{Jr@&PY`XHt~@}#Oj)=uv$cmYB4hj?f9~G5NRk>FfAJ{8ZB#?G`Ebx5#))KwM=%8 z#Wl7|OBy2yMcXn8d(u#@^GUNyE5apk1=Y(siVqp4ksLCX09a^V5~Hbym*@y;S<*~P z>r0rSmYkWDQ@LC1ht#FEgeIF>5dH;VDvC~?cM5zrFG_+>;$2_&vn5K!OtH*EhN_3h z7Mly*JSVwb67T*JCrU0Z=fkF1>H}7dDw{Hq7rSnWA8RN^qT4Vcfn4Qv;v2!VwON(M z+!vA+J%;Yi7i>ez8`=D+iWD{yQP~XWQF%~f5{CV;teQ9Y*b5_n(Aj4T`zX5V4F9D zab);>(h|u)IAKJY!nhdIVB~l_qAN0T&5p*@e6<%66$i2TC=KLi(eost--59w<5L&>C#>h)ul59rUy|;o;d#ONUpy z0jVJ^RxA}O*Evpdxp1!MZil6{v=e1Wjty#RfW@Klg;JASh?S;Lp+xXOzEo^^P)MQ8 zG=&hQ#ZRFdu1ue-3<%)?uhZk!IH%j|_By>Hr#eNKTXU(tCXV-txI}DsrL;P#A8GbJaq)g(Y_me!R0%qU$zyFQ0+7=c*Glz!(EcYfV(o7{ z0tbIAkN)RP?aig8JFjkPZ!R@0T7EUAn*kvot>m)J3hkvO3&wW%{zwZniJ}WGQc5qE zT6F4ZEz%RzVL>I)4o(o^zQZyqZ7FNHb4g2EOWrcKX|TQPv2s6m6~0M=FR|FdTP#~y z;78akki{Y00_WHz4T4D!Ns5aI6IPs{N)#6+K_n?IB1~9`;=&||B*jI9 z2`f=tm;{lexQH-eC5j7^Ad(ap5hkoeabXfflHwx5gq0{ROoB*KTtt|#62*l{5J`%Q z2oqMKxG)JKNpTTj!b%huCP5@AE+R}=iQ>W}h$O{Dgb6EAT$lurq_~JMVI_(SlOU25 z7ZE0`L~&sfM3Ukn!i1G5E=+<*Qd~rsuoA_ENf1ejiwF}|qPQ>#B1v%(VZurj7bZa@ zDJ~*RSc&4oB#7iyan%;T3!%l}#~@Phs}JeZceKJUMlecyR|tUJcLR`l0)P**P`(4e z2nWD?KLFB?0jM`7`i^Y{pgP_WZ0%0}?cGyHLk+F#zx|gDw=>RH*F5z^I^#XnyXrq{ z9$%h%_{Dp7J^1p)spn50k15{+KmE7A@;1l%4@mpmz0U= z-`%x({LVX1z5L;$&!0(p{_GJZ_DuNi{=7>w;RUJ*0L>%m*#YctK@ zK*jze;P*8L7v8Ig*Y%&jw{BLMSPh%9eLJ%MxOC#s7&x+V_PfJiD$Dk<^;O<2;0V(G zX!`QrUsld-n4Ld6v#@>nEcE#I)cGH+$Ux1R*XP-_$bEKUYNi*&M(Pe9s`1Xvvv*`3 zd9Am8&#(tg|1_Z@LHdW_%3vR`qFx1;l=Dh*z)erdL9CYFK)gMRL-?Ah>_5M(1=LPgpm3eL7e%;#yl#)BbwPYTjMV-({rE37_2$C9p9{Me zv8kIK{m$wS{yBK?ocPRL(;JVyQE~CFPwoJdZjgFz)4AJkT5S|+ta`TL`it)ElY~|E z#gpqVe;LFpKqXiK7?1%s=OFLt&5P9lWV2OZ=#iSKbrWy?96qN&$Chou1DpH5^*`|L B-!=dM literal 0 HcmV?d00001 diff --git a/ui/public/images/slack.png b/ui/public/images/slack.png index 32fe41912abaf6f81b1fef4d155fbf5609208325..a045ed1962aa6ffaf6f131e067ac8d1f077fe39a 100644 GIT binary patch literal 6999 zcmeH~c{G&o`^U$UCHt0&GzLW)GlQ{AmTWN?P4*=*W}abU%$OO*PPT{=3WZ7`SyPde zrA@Z7M^Q>72`O1xK7J4N=~L(TJ?D2m=ls6^HFIX3=f3Xie%;sgzOVZ}&y$3=F&A3D zc|8OI5yD!S+JaZ>)gK=Z`2QelpDK8{6Ke0wvL&#gbOwz~@dKc&5IO(_f+=JOB={*6 zZzO&f%6IF_5d$M`A%h2+lh#pP6KtvUBpkbWoeSCQ&R-#ocP=QTaknOCx>|ApJ5FaTtY=w!(*eC#Mr zd)Ogio)m8@-R|DemSCx8CTnMZ^ussZEU8Csx%~(8s%Kk0Hmdljw(q~FnU@X`d)B9WlK~!e@A5c^EW;#H3Es|1*ip6>0s4CAbN(ubOO;AU_reAZ;HP@Z1!d&3`!yC!<^7K zB#v$Z_)siD8Gv1=jXg2cm#9mE85*qD3&wx|D!?K@gQkTn?^n$T7EEXMuK(N_tHMWKt zjp2FVktkm?9^bvWn&XNLH*2*GfFrot-XJHr%U5*ZXai$e2h0QEow6X*z(8WKUJB7PmgWSIqlkRJj4#}Q0>aE~Hv0VXYwK?KZ# z0DqRkuLyH24*!#|nvyq#N?%I~6z{jmNW`Ch^gxE+nh%MH0Q>+d2xWrtp?>pcQON&@ z^;>MKJ!|Rw8VESuPyTP-f9JmD40_>k7*iTCaCK6wsXlBqUJQvwq>wOcw!yMKD@ zDOA9YMOal0rH$0pLTPHEQAiD44fXHotqpes7)-F@R-q`QnugZea1s%-2SgIU%ArsR z-T;E`@4eQsx-J+n8&FxoYN3MUH9MFL#)JV7STu$`jpnBhTV+F6kw3d};C>blp%&D&v~^L$ z)n!8=b>W)YT3}UZdm-U~CejN@(9%SzlQe#(GihWNo4^2!yg?m7t-$(R(+aBmeF;^5 zcW3(mtLp6xG0#8H4&*Sii zXsZX$fARBUG5(7)fapJy{40I`$n{6Af2F{`a{g0Yf8_dC3j8bQKh^dBCfE9ZUZ?i1mz#SGBGPq%PkLLdSXt3M9N>D-N=k(Y(Vneo2i 
zTF0#=;1TMQ0`_rWO^xh>pHA-&qf7N~T8Z!;k-3v&9R6NZwDg2Ir>Rn(qYBkK{7?+u zd*|DhzM$sq8!qpF_KZTxZGgVS%I>NQfo0_>Qc`WDdKvG#YV3`~c~z7<6&Gh_o;~~c zDRp?a{|v6Ber%g`Yu1yFhcnM+!zb7ci%*2|g?u`iUuDRTEAGA_X}sC9h zqB$l_D|RgBR^nO_&9uv2*9PVIxND#H$Ge@qwQYh!=4RH#!xg0k-tV}*Vp0f^=_5~T z#%v~+DZ4~Cq~5if>56Pb`P&SknrX+6d1T+|#kJaR*!0nSya6ko00b$R)a^@ocDG*U zW4oq07Lg$4CcW4exRp!_Rgf?eNKff(3!mnTv3N*n%C6$QVV2t)t-=}Q>n_Gjb{$dt zSY_=T#Onz!46HA9-1yXPn8Uf9@(N$);k@AZ=!o?422LJ`+xkylIP{2U`fdKi015LU zO>V{pBxKOp@A<*JfxzOP@~QRtIS{L8v+%*OSe@v~rVgN~&rjri>Y;fAWPqb-oOe)> z*R|df1(Df3f5P?2imZjl+1_Y$!NZ9(fhOu})#`~dktqvRjB+*hSScxE?+cD&%h%%$ zu;|KhcRn2|lNy!IP`rOlL?v?}P5%91w8@){1#Zv2Gr;nP(hFB#EK3wh-{k9(BwXTc z=^B|(d);${usBOqkMo%0UrE=GIyHYjY1z(~H8toe_&!_ujScqlKgH2z~de>PZ!prg}^PbO&8xMS;ysX)7buatq zgCYywKlChxU5;r6ZIXWffdDe?$csLeubalswX9% zRXjx|W@aoW#-}J}T zQ?`U4%RWv2aRs^hw^5$iH=p!98llT?@wa*JKG3nHl2yH8QIv4+(QdAWokj_BDwx2T ztJFag^PVdslA*@N#?|wFunDydG@GdB1xmQ*<@ExknQ?7M?e->w0{8u7SvzG$)DL zR5KTw_{Xpp#o0W2tm$}7)yG@f^AY3~)YEg!6Qr}yL+x|@xt58Sm&kyykz

;RZCMCI+L2yl==_`*q*qOJsOPm^{kd_66^|$6z#AFKz1j z=i+JE4IGJeSHh;n*zMRR*~y*W$EuI1FE*Mdw|ef&dtU8Fz89_V&8UilZM}EFCBA5) zb=~kWcN~wT?9SrcxNWakL!(kjbE5KC;kH_#%%|3Bt@g^= zf}+z=VQ2STJUrH>5p=9w^j?gHLm-SN8TvAp(>a`HO4-?J(-9_&qLdJQV$))W66zLS zzbjO{@E)060VT_DavpK!czrD^jU&52uCsKfej1=VQY5=i-R`3M`KW#wRcd}h3*<_; z^2aM`g%yUrn>ahBeRBizoT_$1qE*T$!*`q*ItO%|3uZbi!#_=yeK~pucZmAZAirjo z=O{IivDs~|1`_?~i{*+R->1d&u9!T9J#wX0XO5tY9_CIjvJI$|;EW-1;f2;eo>HA2fO5sr=S9 zVkG(DLhW4+_Ldd(6jr{VqdoG%^%exO)KWxix!(N(hKYX2$a_(gC?hQ2Qb*#s(lE(= znVX-}ipN6nfYnmUp2c|2ZG$|*LzzpWB{gx)nN(WM)}W#j)TXC7m^0^RAUkgsysnGB zb+edP*sAf+RcwW`Wz7H!;<)!gz>Ah{Jk8l&Yaw;y)-KK(8;2TfRdR#6zAJmn$e5<5Y>(S1 zRtA6crQ&fForY7Cp{~dGU*|3oT!G1{kb=*Y91#vocbOtIS@l2_E-llgAH7k@vzPYh zh%zNjHy(>^XS?7H)EDbdeAudg`bc(LWN4-7_$jY-Qch(Cs*v`|@R#lx^X5t_I?tFZ zb3!LO*Owf~>FQ&jn3JLn)j21g14PPQ5(cSz2Ls(}(nsH+H-8=pydWJ)!;DP@R?5;Z z!1l$j`xGBjlI49Z&ZD-(=lm2e&5USUUWrusEBW?iA&w!@&Tf&Kk-UdHU{mw=*V)lT z@fF_kjjB+;-H!21t`4saN7zvmMfb?VBQl{}*noP;+7v}WY%%P8$(iK^#^;wCw&yUH Z4f%DeE5@xUiQw}w1Z!qvT4C%N^FOXGSH}PV literal 6109 zcmXYVbyO7I^Y(%)2uLcOiu584QlfyQbS=GfcXxMp3Mdl70*iDnu#|L5!_u{MDe?0C zecnIj#+*6ldFHwIoO@@YRg`4#a42yA005qxtdtr6fChS6e|e7iG|oN5_# z008dG{}~z}CyyKeAWM{!`lR8Nck~t8@0DhL|LW*E@@Q8dwYeKLjK#l)a@*Pc2q3VS zKFy;@#Arq*PBf@C5NE?lz#yRxDP!QRNZ_TVvvl(?4=1P33afbeTrLRm29|=Woe-8q zC@Y&2)_A-+L(@`m;8Sg=DQJKHpxzj)tspn=vn0TL|V$c`$VqZI|OBuNKoEc&Tg=Hkh$VNr#dV?zy32fte4xc zJzu)~2)=kGMK+qksE`r$Jj%rrDXM%=&lJ?Hvr^c4|2X>g%5J|)Xc2R@wMFxpxole; z9`3Po8rgc?%(=7fo}V4vsi>WiqP|+#@>c9Bh2D9~>Cr!rJzvCP^c| zDLlhOuYk}|BKcYw^?;(MB}xB+M*_DwfUAu$p(#zGNWai;&j*|-D5s*=$%%epVM30?M|NEa5|5M+(3S7T7KXpnIKnc5%{-v6{A zVTzmJAGu*ue);Zn8b3*}(lIKL`2FCY#qj>XXmtrmFg%9hKNcxd?pR!B)bJvgoYPyx zPOuna4)fu~T_atq)Md>RT-<+(_&=N&21s(%{8;0~cO!r9AcxIe zDvyGX*Y=YSpj=GQ^^2X2t>K~h>;KR_2uXB#BqGwTXdX3}lZB;yOfoG*}!=TRDG`8iS zUqOZ6r(ECj@8T^U98@Eh$TK1kerM*5KavS)`07@LhQWdH9Ya9$%^X5}+q> z+LU*CcTD^UezUC<0iMrWct#nD0r+ZSH;0C7My$?cM`puqjNfaQRg2QZz}b8c;ENQw zA+FzF1f|8@rz#xy8*E&!x&F|##()8|Mt=ht8In?jNCjGC8MqkM7IIrmSvUMAD@&6) zs5o};k?1j^!Ae8RqQGa<$QBHBqO}hu#v>V9L{a&aIepUfE8mzEJr?JlQ`qGD;SJ0}$d7 zPs)yEw<%%$y%uB{;^ZrVXKFyEwrRl!3n;1;i+%IPLeq2A+(S@hbc6EL&^e$jcx$C& zv$;;~)}b)v>k658QIig|g)|$tT!oo!vzItgnyYNVT7t^>&XT>y83fO9l<-B+N-XrV z#AD2A3NO~E@z`WE?)Xb$&d#@ldrhvK?Bt2S_6;4rF9%y9ws4(L6a19NWT|V7o$~}< z2c6=)%sN>N|@6ISDlmFq*4`geKc z%GtH3t+ZWi_ITgB4w)1?q#F7se7_D_txTa(_)=+dYh&499aPVPc?PWTXW_yDj|jV< zq~(poYum+EhRMBqLIm5LM~Jq}g~7(aTotoJ7Sd-)6anMu!)1CF(}?EP@4I4uO8GYX ziM3-5V@+DEp(NlbQ~J5_e?HL?Rnh?4$^^g2^Vo~Q@}WncYD9xG%?^3}cW}^n3z4C%0PxbsC z6`z5I&&Ve?!E1pC*SOE!hc?boG0H?s@8{`m?51CY=$KJRYTjuZc+SYFK66x})2NY+ zeiRvON!Zm_>EV~Wl|Dm2gd1MGPDGLQVL6qrjlcaIO^NkAeFy{UeG=}J8*KFaG43Y#qvFW~z1x!Pft_atma5!iKRK@zc|kCCi7BH=kKenj zXBt1xsrle%dvT9sNKkYWAV(ysf?}iNdlD}Jq~-!&;yzg-V6x>Wun7A1B|A@#JZY|w z$CgO)Kaz)U`T={nnGC+YAuf-1tTU|G=T1JKC!&W96y9P{SqE>pA?bV@s1Y?g>)TJe%(=jru8kM>G<{0H6VliXolPqNX$u| z@QMJkr?cAf*|e1bI8Hei7Atni{Mk`F@96l!J@I{o1H{wgWe1p}bC8Dajpa;ib^Ndz zf&T`Z*s{H=2Ix+-eVb%{z9R*WqjN$nVAXj=k!Pr*@pY={5VVb3p-4rz93L>U=Jx~f z5`4_v7us0%PDB0&AEyWOGZ}R@bu4G+&h6ZZxX41L! 
zlxp6cS@>S%=LfvZFUNw$Fyp_1!5O8aJ5;M-?96-{RK}4sl;#4UcE0w%#f>xvWFGSh z1(%^isydqr)rJ~-RR%sA9Bf9XcW|8UpHdj*YTyuFIwW3_!){C5R|Tf;Ch9h(HBjCP zMkQW`<{&Th*!q8N*&19pie{qK#IS%qa>rkz4R@Na2v*zzK2d}M!Jpc@ob1M(9yN{B z$}s2$?cr7?BI3&Q9SzxSgK{935=~V9V{p}96K)qr-3Wv_Sz6liNnhDTU$5-w{AKL8 znPW2Qj-}Z09dR5`OyQ0bb7ue-j2%}q&D3BsvmCdR`D|7+0?g@Rqu}Ix=O5Q!i+*4j zu^};1bQWIBf`fCo=M4yn*dl!VD51S!H!5_7u7xH^eN}^R&Ck6_j|Ezo!S-vB2uWks zDUSt$Qz;U#FDr#z8(uLW(LgoEhKmwEI@x=33evzW#A-%k7g1ZWxzsSLjp6MjQZW;> zjQr|v%9OLjaVrx2)p?;xhWU|xJhYUl$}4h##jJ+FoNzx=%Oo$)B6aQkW0&=EEW)ab zLNrHG%FV-drb^Qc{YzFJOx;gCJ=n`+gA&n{2wZSo@3|7scE#Ha^Z+$@OVVp4(BLp| z$B|+ZU9{w6X(ersljfJ=@@igMq~6OjXNB%pa*d2iU4OK8HR1%X)?L@lf3Lc@Yp1tk z>Ggdmx~U9e|IG@aC;g(@<#WLoac2AQkll{2E##GmHMnPNMq3A0R^RNy1*_#+u-tmS z60#xTgc!(BY;@vT#nNV@K`l@^lGBoMysLH$5VWK^o1k<+z8_$gB>5DF5N#qxfT--P z;;2mFY+~wh=KXN=qT^?O^DB~oEa;0rDiC1XBnnI0D7y%E&KrDSF||)(_O8x`f4dhK zj5IKwwnJnQa6;}wc%=EZ$5;nnK)U*v%Q@d_>?^W@tQ7Lsv64ttexg-~6t4|7Y_jHT zbUjx02y6`93+$oRf8y<(-#Lm|1j}!Zrd!{Dvum*ha|9ZC&lnXG7n5AP%@E|;^8Ku) zqTf=;V!hB}8`3WSVb*<49Ns7Pa2je7-=mdUaf(2r6b%ZuCkUi#3GS*YC;@LOLciYa zDP;4j=h0Pp#tW-%pY*Wl!ifvb`#ScqzYtH;ZkAtG>@sTMpYg#R zX+j6{Cb#{+xH^#EV0R~4*V_yc1ix(0ozF@lJoH}MNqe>u?ZdOq-0DH(nClNO|&d2=H`?-MxM z3>}(a70L$H1E*JddT?`g`LSj3-^skyNZ$l4~M7dj!UcTrL&$*Yx3GUUM6@&_$Mey_aXpzGr)VD8SZrnhKV%M|5a1#hkfp&byss0rx#$)S?*yyZ_O zFYOpQ^cW+-KV$umitYRivzk4&cOV3)rQkw~swIg?cbNb}5q_)?`?#wc&GGwjM(8D% z!4AvwDuy_U@ei`;Jf}~Wf`68%e0C_d-lKyW#zV2Oci+qWkn$om*ji9_LeH=yZv94O zk>#8bE}!qx9{51=41)is#KUwV&i7f+r3y68_^a-*U10}&Vh%S7^*ZLyumF8N{BIUF zty||qcKKx~&{50Briy*0N4S@epbxixdNOlTBfN=I1+lIQuTFn0J+6Y2?XT`#1fYhz*YG1w$To=lKGDx%cR*?LXl}A-09unpSI56q+z*?4{@lkP< zU;oMx+^nt(M~4Zgi5{{0?R9=eAn`ziE1Ox zWnNx$5oZt#M$a+55)xiK)*Rqgb3)9q0u}U9N+I=Id=EN%(Nma_@7UWk*fNOVe@zBC z$^s1*BEJ@7&GEmQCC1q~V0R4&ZC9FQ!_wW7NERf>S&2hm z(*!KTANvxOXPPFSJeYYcT~4<<>{=yZXT6T-R+W(%%l7=EH7?Z>EqN5}ed9DaU1ciA ztMymSLK)G>o>|Y017^5A`rWjaCU_qHFgAJZmO)sS_Jdd?Y6{)s=F{}0H9#Ku$Z>d# z+)%qW)%3TW5-$mID4Fks3)&}r*J=+GLzm$OG`$4_^JMWtVo#GoL`VKN`Y@bo7K5e~J17%DG< zyPre4#W zx0Yn8+fQXIL_TSS`}Z8{*iQpYKt-g-Br|BMOOJO{$|{)Y^+vxqcaLHNqi{>~JQ96RI(GT}=JCVgoYML54+c>A7sZ(;|jFH{uOm~FO&g%HP zOSD=C7H6uSX2f=9h9GGW$%Rw%fhr0wE+5iPdUw*@#t8mZbjj3%F||YJJMyCt5}72z zPf2>gmabI^E&rglty{ji+iJ=k%0H+p~&e^|^1nHwWLex;D{h|XKYtpZ=zh;NE6k`4pxNfw6Q~uO5t=l*Iop%q))xCM0nS_ty7AG=3+m;us>DT$CC)0ydmalQsrFgR28nbP9MM%pgff^n~JO9lDZ zX1r?61}7VJneD^X!wr7yt^V?{2f8D@n)>)t-TN)uKKLu}RRuo*juNghDNzm!JMs)vR$LfMRqw)GI8{VYV8@#22+Y16BFjQm#XTe+s0{z`>|3>D|>jR zL?ea_Jlz5gr`rs~02P5>+cA9Ov#D*L!r!L^#?;tfiK6M=f~~)>!@l(z=G_(gi4w!A z5tn{DIFZ7z=g@>|cT}H<`iL#;e_ZU)>U;)rXV(#Xp`Rewrh27#A4RhCPfDgyB1EUM z@O7N)>VLDsO5NLp4DNHS>T)^#rHn5@bN)fbtj-@inMMxOjVti`hq gdtzcD@ksF5cbVOS_U*<~zaAj>SxKr&0uu8700L0;!vFvP diff --git a/ui/public/images/slack_full.png b/ui/public/images/slack_full.png new file mode 100644 index 0000000000000000000000000000000000000000..32fe41912abaf6f81b1fef4d155fbf5609208325 GIT binary patch literal 6109 zcmXYVbyO7I^Y(%)2uLcOiu584QlfyQbS=GfcXxMp3Mdl70*iDnu#|L5!_u{MDe?0C zecnIj#+*6ldFHwIoO@@YRg`4#a42yA005qxtdtr6fChS6e|e7iG|oN5_# z008dG{}~z}CyyKeAWM{!`lR8Nck~t8@0DhL|LW*E@@Q8dwYeKLjK#l)a@*Pc2q3VS zKFy;@#Arq*PBf@C5NE?lz#yRxDP!QRNZ_TVvvl(?4=1P33afbeTrLRm29|=Woe-8q zC@Y&2)_A-+L(@`m;8Sg=DQJKHpxzj)tspn=vn0TL|V$c`$VqZI|OBuNKoEc&Tg=Hkh$VNr#dV?zy32fte4xc zJzu)~2)=kGMK+qksE`r$Jj%rrDXM%=&lJ?Hvr^c4|2X>g%5J|)Xc2R@wMFxpxole; z9`3Po8rgc?%(=7fo}V4vsi>WiqP|+#@>c9Bh2D9~>Cr!rJzvCP^c| zDLlhOuYk}|BKcYw^?;(MB}xB+M*_DwfUAu$p(#zGNWai;&j*|-D5s*=$%%epVM30?M|NEa5|5M+(3S7T7KXpnIKnc5%{-v6{A zVTzmJAGu*ue);Zn8b3*}(lIKL`2FCY#qj>XXmtrmFg%9hKNcxd?pR!B)bJvgoYPyx 
zPOuna4)fu~T_atq)Md>RT-<+(_&=N&21s(%{8;0~cO!r9AcxIe zDvyGX*Y=YSpj=GQ^^2X2t>K~h>;KR_2uXB#BqGwTXdX3}lZB;yOfoG*}!=TRDG`8iS zUqOZ6r(ECj@8T^U98@Eh$TK1kerM*5KavS)`07@LhQWdH9Ya9$%^X5}+q> z+LU*CcTD^UezUC<0iMrWct#nD0r+ZSH;0C7My$?cM`puqjNfaQRg2QZz}b8c;ENQw zA+FzF1f|8@rz#xy8*E&!x&F|##()8|Mt=ht8In?jNCjGC8MqkM7IIrmSvUMAD@&6) zs5o};k?1j^!Ae8RqQGa<$QBHBqO}hu#v>V9L{a&aIepUfE8mzEJr?JlQ`qGD;SJ0}$d7 zPs)yEw<%%$y%uB{;^ZrVXKFyEwrRl!3n;1;i+%IPLeq2A+(S@hbc6EL&^e$jcx$C& zv$;;~)}b)v>k658QIig|g)|$tT!oo!vzItgnyYNVT7t^>&XT>y83fO9l<-B+N-XrV z#AD2A3NO~E@z`WE?)Xb$&d#@ldrhvK?Bt2S_6;4rF9%y9ws4(L6a19NWT|V7o$~}< z2c6=)%sN>N|@6ISDlmFq*4`geKc z%GtH3t+ZWi_ITgB4w)1?q#F7se7_D_txTa(_)=+dYh&499aPVPc?PWTXW_yDj|jV< zq~(poYum+EhRMBqLIm5LM~Jq}g~7(aTotoJ7Sd-)6anMu!)1CF(}?EP@4I4uO8GYX ziM3-5V@+DEp(NlbQ~J5_e?HL?Rnh?4$^^g2^Vo~Q@}WncYD9xG%?^3}cW}^n3z4C%0PxbsC z6`z5I&&Ve?!E1pC*SOE!hc?boG0H?s@8{`m?51CY=$KJRYTjuZc+SYFK66x})2NY+ zeiRvON!Zm_>EV~Wl|Dm2gd1MGPDGLQVL6qrjlcaIO^NkAeFy{UeG=}J8*KFaG43Y#qvFW~z1x!Pft_atma5!iKRK@zc|kCCi7BH=kKenj zXBt1xsrle%dvT9sNKkYWAV(ysf?}iNdlD}Jq~-!&;yzg-V6x>Wun7A1B|A@#JZY|w z$CgO)Kaz)U`T={nnGC+YAuf-1tTU|G=T1JKC!&W96y9P{SqE>pA?bV@s1Y?g>)TJe%(=jru8kM>G<{0H6VliXolPqNX$u| z@QMJkr?cAf*|e1bI8Hei7Atni{Mk`F@96l!J@I{o1H{wgWe1p}bC8Dajpa;ib^Ndz zf&T`Z*s{H=2Ix+-eVb%{z9R*WqjN$nVAXj=k!Pr*@pY={5VVb3p-4rz93L>U=Jx~f z5`4_v7us0%PDB0&AEyWOGZ}R@bu4G+&h6ZZxX41L! zlxp6cS@>S%=LfvZFUNw$Fyp_1!5O8aJ5;M-?96-{RK}4sl;#4UcE0w%#f>xvWFGSh z1(%^isydqr)rJ~-RR%sA9Bf9XcW|8UpHdj*YTyuFIwW3_!){C5R|Tf;Ch9h(HBjCP zMkQW`<{&Th*!q8N*&19pie{qK#IS%qa>rkz4R@Na2v*zzK2d}M!Jpc@ob1M(9yN{B z$}s2$?cr7?BI3&Q9SzxSgK{935=~V9V{p}96K)qr-3Wv_Sz6liNnhDTU$5-w{AKL8 znPW2Qj-}Z09dR5`OyQ0bb7ue-j2%}q&D3BsvmCdR`D|7+0?g@Rqu}Ix=O5Q!i+*4j zu^};1bQWIBf`fCo=M4yn*dl!VD51S!H!5_7u7xH^eN}^R&Ck6_j|Ezo!S-vB2uWks zDUSt$Qz;U#FDr#z8(uLW(LgoEhKmwEI@x=33evzW#A-%k7g1ZWxzsSLjp6MjQZW;> zjQr|v%9OLjaVrx2)p?;xhWU|xJhYUl$}4h##jJ+FoNzx=%Oo$)B6aQkW0&=EEW)ab zLNrHG%FV-drb^Qc{YzFJOx;gCJ=n`+gA&n{2wZSo@3|7scE#Ha^Z+$@OVVp4(BLp| z$B|+ZU9{w6X(ersljfJ=@@igMq~6OjXNB%pa*d2iU4OK8HR1%X)?L@lf3Lc@Yp1tk z>Ggdmx~U9e|IG@aC;g(@<#WLoac2AQkll{2E##GmHMnPNMq3A0R^RNy1*_#+u-tmS z60#xTgc!(BY;@vT#nNV@K`l@^lGBoMysLH$5VWK^o1k<+z8_$gB>5DF5N#qxfT--P z;;2mFY+~wh=KXN=qT^?O^DB~oEa;0rDiC1XBnnI0D7y%E&KrDSF||)(_O8x`f4dhK zj5IKwwnJnQa6;}wc%=EZ$5;nnK)U*v%Q@d_>?^W@tQ7Lsv64ttexg-~6t4|7Y_jHT zbUjx02y6`93+$oRf8y<(-#Lm|1j}!Zrd!{Dvum*ha|9ZC&lnXG7n5AP%@E|;^8Ku) zqTf=;V!hB}8`3WSVb*<49Ns7Pa2je7-=mdUaf(2r6b%ZuCkUi#3GS*YC;@LOLciYa zDP;4j=h0Pp#tW-%pY*Wl!ifvb`#ScqzYtH;ZkAtG>@sTMpYg#R zX+j6{Cb#{+xH^#EV0R~4*V_yc1ix(0ozF@lJoH}MNqe>u?ZdOq-0DH(nClNO|&d2=H`?-MxM z3>}(a70L$H1E*JddT?`g`LSj3-^skyNZ$l4~M7dj!UcTrL&$*Yx3GUUM6@&_$Mey_aXpzGr)VD8SZrnhKV%M|5a1#hkfp&byss0rx#$)S?*yyZ_O zFYOpQ^cW+-KV$umitYRivzk4&cOV3)rQkw~swIg?cbNb}5q_)?`?#wc&GGwjM(8D% z!4AvwDuy_U@ei`;Jf}~Wf`68%e0C_d-lKyW#zV2Oci+qWkn$om*ji9_LeH=yZv94O zk>#8bE}!qx9{51=41)is#KUwV&i7f+r3y68_^a-*U10}&Vh%S7^*ZLyumF8N{BIUF zty||qcKKx~&{50Briy*0N4S@epbxixdNOlTBfN=I1+lIQuTFn0J+6Y2?XT`#1fYhz*YG1w$To=lKGDx%cR*?LXl}A-09unpSI56q+z*?4{@lkP< zU;oMx+^nt(M~4Zgi5{{0?R9=eAn`ziE1Ox zWnNx$5oZt#M$a+55)xiK)*Rqgb3)9q0u}U9N+I=Id=EN%(Nma_@7UWk*fNOVe@zBC z$^s1*BEJ@7&GEmQCC1q~V0R4&ZC9FQ!_wW7NERf>S&2hm z(*!KTANvxOXPPFSJeYYcT~4<<>{=yZXT6T-R+W(%%l7=EH7?Z>EqN5}ed9DaU1ciA ztMymSLK)G>o>|Y017^5A`rWjaCU_qHFgAJZmO)sS_Jdd?Y6{)s=F{}0H9#Ku$Z>d# z+)%qW)%3TW5-$mID4Fks3)&}r*J=+GLzm$OG`$4_^JMWtVo#GoL`VKN`Y@bo7K5e~J17%DG< zyPre4#W zx0Yn8+fQXIL_TSS`}Z8{*iQpYKt-g-Br|BMOOJO{$|{)Y^+vxqcaLHNqi{>~JQ96RI(GT}=JCVgoYML54+c>A7sZ(;|jFH{uOm~FO&g%HP zOSD=C7H6uSX2f=9h9GGW$%Rw%fhr0wE+5iPdUw*@#t8mZbjj3%F||YJJMyCt5}72z 
zPf2>gmabI^E&rglty{ji+iJ=k%0H+p~&e^|^1nHwWLex;D{h|XKYtpZ=zh;NE6k`4pxNfw6Q~uO5t=l*Iop%q))xCM0nS_ty7AG=3+m;us>DT$CC)0ydmalQsrFgR28nbP9MM%pgff^n~JO9lDZ zX1r?61}7VJneD^X!wr7yt^V?{2f8D@n)>)t-TN)uKKLu}RRuo*juNghDNzm!JMs)vR$LfMRqw)GI8{VYV8@#22+Y16BFjQm#XTe+s0{z`>|3>D|>jR zL?ea_Jlz5gr`rs~02P5>+cA9Ov#D*L!r!L^#?;tfiK6M=f~~)>!@l(z=G_(gi4w!A z5tn{DIFZ7z=g@>|cT}H<`iL#;e_ZU)>U;)rXV(#Xp`Rewrh27#A4RhCPfDgyB1EUM z@O7N)>VLDsO5NLp4DNHS>T)^#rHn5@bN)fbtj-@inMMxOjVti`hq gdtzcD@ksF5cbVOS_U*<~zaAj>SxKr&0uu8700L0;!vFvP literal 0 HcmV?d00001 From ad65c28496553337182e65de14ab1a0de3b11292 Mon Sep 17 00:00:00 2001 From: Kaushik Iska Date: Tue, 12 Mar 2024 15:54:23 -0400 Subject: [PATCH 2/7] For replica identity full don't create pkey on snowflake (#1478) We create primary key indexes on the destination table in Snowflake CDC. For Replica Identity Full on a table, we assume all columns to be primary keys and replay that to the destination. But if one of those columns on the source have null values, our normalize records step fails saying `NULL result in a non-nullable column` This PR fixes this by not creating Primary Keys on the target for replica identity full Co-authored-by: Amogh Bharadwaj --- flow/connectors/postgres/client.go | 2 +- flow/connectors/snowflake/snowflake.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/flow/connectors/postgres/client.go b/flow/connectors/postgres/client.go index 22ac244aa9..898f962aec 100644 --- a/flow/connectors/postgres/client.go +++ b/flow/connectors/postgres/client.go @@ -458,7 +458,7 @@ func generateCreateTableSQLForNormalizedTable( } // add composite primary key to the table - if len(sourceTableSchema.PrimaryKeyColumns) > 0 { + if len(sourceTableSchema.PrimaryKeyColumns) > 0 && !sourceTableSchema.IsReplicaIdentityFull { primaryKeyColsQuoted := make([]string, 0, len(sourceTableSchema.PrimaryKeyColumns)) for _, primaryKeyCol := range sourceTableSchema.PrimaryKeyColumns { primaryKeyColsQuoted = append(primaryKeyColsQuoted, QuoteIdentifier(primaryKeyCol)) diff --git a/flow/connectors/snowflake/snowflake.go b/flow/connectors/snowflake/snowflake.go index 775c130665..4ba626a053 100644 --- a/flow/connectors/snowflake/snowflake.go +++ b/flow/connectors/snowflake/snowflake.go @@ -727,7 +727,7 @@ func generateCreateTableSQLForNormalizedTable( } // add composite primary key to the table - if len(sourceTableSchema.PrimaryKeyColumns) > 0 { + if len(sourceTableSchema.PrimaryKeyColumns) > 0 && !sourceTableSchema.IsReplicaIdentityFull { normalizedPrimaryKeyCols := make([]string, 0, len(sourceTableSchema.PrimaryKeyColumns)) for _, primaryKeyCol := range sourceTableSchema.PrimaryKeyColumns { normalizedPrimaryKeyCols = append(normalizedPrimaryKeyCols, From d9995a7347502997dba6f87ffe3913926b4847c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philip=20Dub=C3=A9?= Date: Wed, 13 Mar 2024 21:14:10 +0000 Subject: [PATCH 3/7] PullRecords: update replState.Offset in defer (#1481) When an error happens we must update replState.Offset still, as with activity retries it's possible to end up in a scenario: replState.Offset is 1, last offset in catalog is 1, repl connection is at 3 PullRecords will go ahead reading from 4, skipping LSN 2 & 3 --- flow/activities/flowable.go | 1 + flow/connectors/core.go | 3 +++ flow/connectors/postgres/postgres.go | 18 ++++++++++++++---- 3 files changed, 18 insertions(+), 4 deletions(-) diff --git a/flow/activities/flowable.go b/flow/activities/flowable.go index 7ad57fd12d..55710ae642 100644 --- a/flow/activities/flowable.go +++ b/flow/activities/flowable.go 
@@ -410,6 +410,7 @@ func (a *FlowableActivity) SyncFlow( logger.Info(fmt.Sprintf("pushed %d records in %d seconds", numRecords, int(syncDuration.Seconds()))) lastCheckpoint := recordBatch.GetLastCheckpoint() + srcConn.UpdateReplStateLastOffset(lastCheckpoint) err = monitoring.UpdateNumRowsAndEndLSNForCDCBatch( ctx, diff --git a/flow/connectors/core.go b/flow/connectors/core.go index b374ff70e4..a8e39b67b1 100644 --- a/flow/connectors/core.go +++ b/flow/connectors/core.go @@ -54,6 +54,9 @@ type CDCPullConnector interface { // This method should be idempotent, and should be able to be called multiple times with the same request. PullRecords(ctx context.Context, catalogPool *pgxpool.Pool, req *model.PullRecordsRequest) error + // Called when offset has been confirmed to destination + UpdateReplStateLastOffset(lastOffset int64) + // PullFlowCleanup drops both the Postgres publication and replication slot, as a part of DROP MIRROR PullFlowCleanup(ctx context.Context, jobName string) error diff --git a/flow/connectors/postgres/postgres.go b/flow/connectors/postgres/postgres.go index f82b867f2b..1bba4e8eca 100644 --- a/flow/connectors/postgres/postgres.go +++ b/flow/connectors/postgres/postgres.go @@ -8,6 +8,7 @@ import ( "regexp" "strings" "sync" + "sync/atomic" "time" "github.com/google/uuid" @@ -49,6 +50,7 @@ type ReplState struct { Slot string Publication string Offset int64 + LastOffset atomic.Int64 } func NewPostgresConnector(ctx context.Context, pgConfig *protos.PostgresConfig) (*PostgresConnector, error) { @@ -133,7 +135,7 @@ func (c *PostgresConnector) ReplPing(ctx context.Context) error { return pglogrepl.SendStandbyStatusUpdate( ctx, c.replConn.PgConn(), - pglogrepl.StandbyStatusUpdate{WALWritePosition: pglogrepl.LSN(c.replState.Offset)}, + pglogrepl.StandbyStatusUpdate{WALWritePosition: pglogrepl.LSN(c.replState.LastOffset.Load())}, ) } } @@ -184,7 +186,9 @@ func (c *PostgresConnector) MaybeStartReplication( Slot: slotName, Publication: publicationName, Offset: req.LastOffset, + LastOffset: atomic.Int64{}, } + c.replState.LastOffset.Store(req.LastOffset) } return nil } @@ -308,6 +312,9 @@ func (c *PostgresConnector) SetLastOffset(ctx context.Context, jobName string, l func (c *PostgresConnector) PullRecords(ctx context.Context, catalogPool *pgxpool.Pool, req *model.PullRecordsRequest) error { defer func() { req.RecordStream.Close() + if c.replState != nil { + c.replState.Offset = req.RecordStream.GetLastCheckpoint() + } }() // Slotname would be the job name prefixed with "peerflow_slot_" @@ -371,9 +378,6 @@ func (c *PostgresConnector) PullRecords(ctx context.Context, catalogPool *pgxpoo return err } - req.RecordStream.Close() - c.replState.Offset = req.RecordStream.GetLastCheckpoint() - latestLSN, err := c.getCurrentLSN(ctx) if err != nil { c.logger.Error("error getting current LSN", slog.Any("error", err)) @@ -389,6 +393,12 @@ func (c *PostgresConnector) PullRecords(ctx context.Context, catalogPool *pgxpoo return nil } +func (c *PostgresConnector) UpdateReplStateLastOffset(lastOffset int64) { + if c.replState != nil { + c.replState.LastOffset.Store(lastOffset) + } +} + // SyncRecords pushes records to the destination. 
func (c *PostgresConnector) SyncRecords(ctx context.Context, req *model.SyncRecordsRequest) (*model.SyncResponse, error) { rawTableIdentifier := getRawTableIdentifier(req.FlowJobName) From 864978f462c137ff7cbc829b37c13b13e1e3c71f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philip=20Dub=C3=A9?= Date: Wed, 13 Mar 2024 21:49:56 +0000 Subject: [PATCH 4/7] SyncFlow: fix NonRetryableError handling from PullRecords (#1482) Wrapping an application error prevents preventing retries --- flow/activities/flowable.go | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/flow/activities/flowable.go b/flow/activities/flowable.go index 55710ae642..17187a35c6 100644 --- a/flow/activities/flowable.go +++ b/flow/activities/flowable.go @@ -346,7 +346,11 @@ func (a *FlowableActivity) SyncFlow( err = errGroup.Wait() if err != nil { a.Alerter.LogFlowError(ctx, flowName, err) - return nil, fmt.Errorf("failed in pull records when: %w", err) + if temporal.IsApplicationError(err) { + return nil, err + } else { + return nil, fmt.Errorf("failed in pull records when: %w", err) + } } logger.Info("no records to push") @@ -401,7 +405,11 @@ func (a *FlowableActivity) SyncFlow( err = errGroup.Wait() if err != nil { a.Alerter.LogFlowError(ctx, flowName, err) - return nil, fmt.Errorf("failed to pull records: %w", err) + if temporal.IsApplicationError(err) { + return nil, err + } else { + return nil, fmt.Errorf("failed to pull records: %w", err) + } } numRecords := res.NumRecordsSynced From 0759db5237efc6255ca32da65e749cc81e9027f1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philip=20Dub=C3=A9?= Date: Wed, 13 Mar 2024 22:03:15 +0000 Subject: [PATCH 5/7] Log LastOffset when logging start of SyncFlow (#1483) --- flow/activities/flowable.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/activities/flowable.go b/flow/activities/flowable.go index 17187a35c6..afd7b3704e 100644 --- a/flow/activities/flowable.go +++ b/flow/activities/flowable.go @@ -287,7 +287,6 @@ func (a *FlowableActivity) SyncFlow( } defer connectors.CloseConnector(ctx, dstConn) - logger.Info("pulling records...") tblNameMapping := make(map[string]model.NameAndExclude, len(options.TableMappings)) for _, v := range options.TableMappings { tblNameMapping[v.SourceTableIdentifier] = model.NewNameAndExclude(v.DestinationTableIdentifier, v.Exclude) @@ -315,6 +314,7 @@ func (a *FlowableActivity) SyncFlow( if err != nil { return nil, err } + logger.Info("pulling records...", slog.Int64("LastOffset", lastOffset)) // start a goroutine to pull records from the source recordBatch := model.NewCDCRecordStream() From 22d652839cb98303b27d4ffff6af9bb6c0e4f812 Mon Sep 17 00:00:00 2001 From: Kunal Gupta <39487888+iamKunalGupta@users.noreply.github.com> Date: Thu, 14 Mar 2024 08:53:48 +0530 Subject: [PATCH 6/7] fix(alerting): update usage (#1484) --- ui/app/alert-config/page.tsx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ui/app/alert-config/page.tsx b/ui/app/alert-config/page.tsx index 25a92338b9..f67bb21b0e 100644 --- a/ui/app/alert-config/page.tsx +++ b/ui/app/alert-config/page.tsx @@ -83,8 +83,8 @@ const AlertConfigPage: React.FC = () => {

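A note on the guard introduced in PATCH 4/7 above: PullRecords can surface a Temporal application error (the patch checks for this with temporal.IsApplicationError), and wrapping such an error in fmt.Errorf was defeating its retry semantics, so SyncFlow now returns it unchanged. The same check can be factored into a tiny helper; the sketch below is illustrative only and is not part of the patch — it assumes it would live next to SyncFlow in flow/activities and uses only the temporal package the patch already relies on.

package activities

import (
	"fmt"

	"go.temporal.io/sdk/temporal"
)

// wrapUnlessApplicationError restates the pattern PATCH 4/7 applies after each
// errGroup.Wait() in SyncFlow: Temporal application errors (including the
// non-retryable ones PullRecords can raise) are returned as-is so the SDK can
// honor their retry policy, while any other error keeps a descriptive wrapper.
// Illustrative sketch only; this helper does not exist in the patch.
func wrapUnlessApplicationError(err error, msg string) error {
	if err == nil {
		return nil
	}
	if temporal.IsApplicationError(err) {
		return err
	}
	return fmt.Errorf("%s: %w", msg, err)
}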
From 578b1ed6f01260a3a92e8b65ed32665c9e9d4098 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philip=20Dub=C3=A9?= Date: Thu, 14 Mar 2024 04:27:14 +0000 Subject: [PATCH 7/7] Move PG/SF simple schema changes to generic test (#1477) BQ would need to implement GetTableSchema interface --- flow/connectors/core.go | 10 +- .../snowflake/get_schema_for_tests.go | 2 - flow/e2e/bigquery/bigquery.go | 6 + flow/e2e/bigquery/peer_flow_bq_test.go | 3 +- flow/e2e/generic/generic_test.go | 304 ++++++++++++++++++ flow/e2e/generic/peer_flow_test.go | 82 ----- flow/e2e/postgres/peer_flow_pg_test.go | 209 ------------ flow/e2e/postgres/postgres.go | 5 + flow/e2e/snowflake/peer_flow_sf_test.go | 214 ------------ flow/e2e/snowflake/snowflake.go | 5 + flow/e2e/test_utils.go | 23 +- 11 files changed, 352 insertions(+), 511 deletions(-) create mode 100644 flow/e2e/generic/generic_test.go delete mode 100644 flow/e2e/generic/peer_flow_test.go diff --git a/flow/connectors/core.go b/flow/connectors/core.go index a8e39b67b1..6adb3a2429 100644 --- a/flow/connectors/core.go +++ b/flow/connectors/core.go @@ -27,11 +27,16 @@ type Connector interface { ConnectionActive(context.Context) error } -type CDCPullConnector interface { +type GetTableSchemaConnector interface { Connector // GetTableSchema returns the schema of a table. GetTableSchema(ctx context.Context, req *protos.GetTableSchemaBatchInput) (*protos.GetTableSchemaBatchOutput, error) +} + +type CDCPullConnector interface { + Connector + GetTableSchemaConnector // EnsurePullability ensures that the connector is pullable. EnsurePullability(ctx context.Context, req *protos.EnsurePullabilityBatchInput) ( @@ -255,6 +260,9 @@ var ( _ CDCNormalizeConnector = &connsnowflake.SnowflakeConnector{} _ CDCNormalizeConnector = &connclickhouse.ClickhouseConnector{} + _ GetTableSchemaConnector = &connpostgres.PostgresConnector{} + _ GetTableSchemaConnector = &connsnowflake.SnowflakeConnector{} + _ NormalizedTablesConnector = &connpostgres.PostgresConnector{} _ NormalizedTablesConnector = &connbigquery.BigQueryConnector{} _ NormalizedTablesConnector = &connsnowflake.SnowflakeConnector{} diff --git a/flow/connectors/snowflake/get_schema_for_tests.go b/flow/connectors/snowflake/get_schema_for_tests.go index 05631e635f..476f16f165 100644 --- a/flow/connectors/snowflake/get_schema_for_tests.go +++ b/flow/connectors/snowflake/get_schema_for_tests.go @@ -3,7 +3,6 @@ package connsnowflake import ( "context" - "github.com/PeerDB-io/peer-flow/connectors/utils" "github.com/PeerDB-io/peer-flow/generated/protos" "github.com/PeerDB-io/peer-flow/model/qvalue" ) @@ -47,7 +46,6 @@ func (c *SnowflakeConnector) GetTableSchema( return nil, err } res[tableName] = tableSchema - utils.RecordHeartbeat(ctx, "fetched schema for table "+tableName) } return &protos.GetTableSchemaBatchOutput{ diff --git a/flow/e2e/bigquery/bigquery.go b/flow/e2e/bigquery/bigquery.go index 73f5c38d6e..1e2a3842ee 100644 --- a/flow/e2e/bigquery/bigquery.go +++ b/flow/e2e/bigquery/bigquery.go @@ -8,6 +8,7 @@ import ( "github.com/jackc/pgx/v5" + "github.com/PeerDB-io/peer-flow/connectors" connpostgres "github.com/PeerDB-io/peer-flow/connectors/postgres" "github.com/PeerDB-io/peer-flow/e2e" "github.com/PeerDB-io/peer-flow/generated/protos" @@ -35,6 +36,11 @@ func (s PeerFlowE2ETestSuiteBQ) Connector() *connpostgres.PostgresConnector { return s.conn } +func (s PeerFlowE2ETestSuiteBQ) DestinationConnector() connectors.Connector { + // TODO have BQ connector + return nil +} + func (s PeerFlowE2ETestSuiteBQ) Suffix() string { return s.bqSuffix 
} diff --git a/flow/e2e/bigquery/peer_flow_bq_test.go b/flow/e2e/bigquery/peer_flow_bq_test.go index ec28b5f97b..3dff6e310e 100644 --- a/flow/e2e/bigquery/peer_flow_bq_test.go +++ b/flow/e2e/bigquery/peer_flow_bq_test.go @@ -643,7 +643,8 @@ func (s PeerFlowE2ETestSuiteBQ) Test_Multi_Table_BQ() { e2e.RequireEnvCanceled(s.t, env) } -// TODO: not checking schema exactly, add later +// TODO: not checking schema exactly +// write a GetTableSchemaConnector for BQ to enable generic_test func (s PeerFlowE2ETestSuiteBQ) Test_Simple_Schema_Changes_BQ() { tc := e2e.NewTemporalClient(s.t) diff --git a/flow/e2e/generic/generic_test.go b/flow/e2e/generic/generic_test.go new file mode 100644 index 0000000000..97e64f5ed3 --- /dev/null +++ b/flow/e2e/generic/generic_test.go @@ -0,0 +1,304 @@ +package e2e_generic + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/PeerDB-io/peer-flow/connectors" + "github.com/PeerDB-io/peer-flow/e2e" + "github.com/PeerDB-io/peer-flow/e2e/bigquery" + "github.com/PeerDB-io/peer-flow/e2e/postgres" + "github.com/PeerDB-io/peer-flow/e2e/snowflake" + "github.com/PeerDB-io/peer-flow/e2eshared" + "github.com/PeerDB-io/peer-flow/generated/protos" + "github.com/PeerDB-io/peer-flow/model/qvalue" + peerflow "github.com/PeerDB-io/peer-flow/workflows" +) + +func TestGenericPG(t *testing.T) { + e2eshared.RunSuite(t, SetupGenericSuite(e2e_postgres.SetupSuite)) +} + +func TestGenericSF(t *testing.T) { + e2eshared.RunSuite(t, SetupGenericSuite(e2e_snowflake.SetupSuite)) +} + +func TestGenericBQ(t *testing.T) { + e2eshared.RunSuite(t, SetupGenericSuite(e2e_bigquery.SetupSuite)) +} + +type Generic struct { + e2e.GenericSuite +} + +func SetupGenericSuite[T e2e.GenericSuite](f func(t *testing.T) T) func(t *testing.T) Generic { + return func(t *testing.T) Generic { + t.Helper() + return Generic{f(t)} + } +} + +func (s Generic) Test_Simple_Flow() { + t := s.T() + srcTable := "test_simple" + dstTable := "test_simple_dst" + srcSchemaTable := e2e.AttachSchema(s, srcTable) + + _, err := s.Connector().Conn().Exec(context.Background(), fmt.Sprintf(` + CREATE TABLE IF NOT EXISTS %s ( + id SERIAL PRIMARY KEY, + key TEXT NOT NULL, + value TEXT NOT NULL, + myh HSTORE NOT NULL + ); + `, srcSchemaTable)) + require.NoError(t, err) + + connectionGen := e2e.FlowConnectionGenerationConfig{ + FlowJobName: e2e.AddSuffix(s, "test_simple"), + TableMappings: e2e.TableMappings(s, srcTable, dstTable), + Destination: s.Peer(), + } + flowConnConfig := connectionGen.GenerateFlowConnectionConfigs() + + tc := e2e.NewTemporalClient(t) + env := e2e.ExecutePeerflow(tc, peerflow.CDCFlowWorkflow, flowConnConfig, nil) + + e2e.SetupCDCFlowStatusQuery(t, env, connectionGen) + // insert 10 rows into the source table + for i := range 10 { + testKey := fmt.Sprintf("test_key_%d", i) + testValue := fmt.Sprintf("test_value_%d", i) + _, err = s.Connector().Conn().Exec(context.Background(), fmt.Sprintf(` + INSERT INTO %s(key, value, myh) VALUES ($1, $2, '"a"=>"b"') + `, srcSchemaTable), testKey, testValue) + e2e.EnvNoError(t, env, err) + } + t.Log("Inserted 10 rows into the source table") + + e2e.EnvWaitForEqualTablesWithNames(env, s, "normalizing 10 rows", srcTable, dstTable, `id,key,value,myh`) + env.Cancel() + e2e.RequireEnvCanceled(t, env) +} + +func (s Generic) Test_Simple_Schema_Changes() { + t := s.T() + + destinationSchemaConnector, ok := s.DestinationConnector().(connectors.GetTableSchemaConnector) + if !ok { + t.SkipNow() + } + + srcTable := "test_simple_schema_changes" + dstTable 
:= "test_simple_schema_changes_dst" + srcTableName := e2e.AttachSchema(s, srcTable) + dstTableName := s.DestinationTable(dstTable) + + _, err := s.Connector().Conn().Exec(context.Background(), fmt.Sprintf(` + CREATE TABLE IF NOT EXISTS %s ( + id BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY, + c1 BIGINT + ); + `, srcTableName)) + require.NoError(t, err) + + connectionGen := e2e.FlowConnectionGenerationConfig{ + FlowJobName: e2e.AddSuffix(s, srcTable), + TableMappings: e2e.TableMappings(s, srcTable, dstTable), + Destination: s.Peer(), + } + + flowConnConfig := connectionGen.GenerateFlowConnectionConfigs() + + // wait for PeerFlowStatusQuery to finish setup + // and then insert and mutate schema repeatedly. + tc := e2e.NewTemporalClient(t) + env := e2e.ExecutePeerflow(tc, peerflow.CDCFlowWorkflow, flowConnConfig, nil) + e2e.SetupCDCFlowStatusQuery(t, env, connectionGen) + _, err = s.Connector().Conn().Exec(context.Background(), fmt.Sprintf(` + INSERT INTO %s(c1) VALUES ($1)`, srcTableName), 1) + e2e.EnvNoError(t, env, err) + t.Log("Inserted initial row in the source table") + + e2e.EnvWaitForEqualTablesWithNames(env, s, "normalize reinsert", srcTable, dstTable, "id,c1") + + expectedTableSchema := &protos.TableSchema{ + TableIdentifier: e2e.ExpectedDestinationTableName(s, dstTable), + Columns: []*protos.FieldDescription{ + { + Name: e2e.ExpectedDestinationIdentifier(s, "id"), + Type: string(qvalue.QValueKindNumeric), + TypeModifier: -1, + }, + { + Name: e2e.ExpectedDestinationIdentifier(s, "c1"), + Type: string(qvalue.QValueKindNumeric), + TypeModifier: -1, + }, + { + Name: "_PEERDB_IS_DELETED", + Type: string(qvalue.QValueKindBoolean), + TypeModifier: -1, + }, + { + Name: "_PEERDB_SYNCED_AT", + Type: string(qvalue.QValueKindTimestamp), + TypeModifier: -1, + }, + }, + } + output, err := destinationSchemaConnector.GetTableSchema(context.Background(), &protos.GetTableSchemaBatchInput{ + TableIdentifiers: []string{dstTableName}, + }) + e2e.EnvNoError(t, env, err) + e2e.EnvTrue(t, env, e2e.CompareTableSchemas(expectedTableSchema, output.TableNameSchemaMapping[dstTableName])) + + // alter source table, add column c2 and insert another row. + _, err = s.Connector().Conn().Exec(context.Background(), fmt.Sprintf(` + ALTER TABLE %s ADD COLUMN c2 BIGINT`, srcTableName)) + e2e.EnvNoError(t, env, err) + t.Log("Altered source table, added column c2") + _, err = s.Connector().Conn().Exec(context.Background(), fmt.Sprintf(` + INSERT INTO %s(c1,c2) VALUES ($1,$2)`, srcTableName), 2, 2) + e2e.EnvNoError(t, env, err) + t.Log("Inserted row with added c2 in the source table") + + // verify we got our two rows, if schema did not match up it will error. 
+ e2e.EnvWaitForEqualTablesWithNames(env, s, "normalize altered row", srcTable, dstTable, "id,c1,c2") + expectedTableSchema = &protos.TableSchema{ + TableIdentifier: e2e.ExpectedDestinationTableName(s, dstTable), + Columns: []*protos.FieldDescription{ + { + Name: e2e.ExpectedDestinationIdentifier(s, "id"), + Type: string(qvalue.QValueKindNumeric), + TypeModifier: -1, + }, + { + Name: e2e.ExpectedDestinationIdentifier(s, "c1"), + Type: string(qvalue.QValueKindNumeric), + TypeModifier: -1, + }, + { + Name: "_PEERDB_SYNCED_AT", + Type: string(qvalue.QValueKindTimestamp), + TypeModifier: -1, + }, + { + Name: e2e.ExpectedDestinationIdentifier(s, "c2"), + Type: string(qvalue.QValueKindNumeric), + TypeModifier: -1, + }, + }, + } + output, err = destinationSchemaConnector.GetTableSchema(context.Background(), &protos.GetTableSchemaBatchInput{ + TableIdentifiers: []string{dstTableName}, + }) + e2e.EnvNoError(t, env, err) + e2e.EnvTrue(t, env, e2e.CompareTableSchemas(expectedTableSchema, output.TableNameSchemaMapping[dstTableName])) + e2e.EnvEqualTablesWithNames(env, s, srcTable, dstTable, "id,c1,c2") + + // alter source table, add column c3, drop column c2 and insert another row. + _, err = s.Connector().Conn().Exec(context.Background(), fmt.Sprintf(` + ALTER TABLE %s DROP COLUMN c2, ADD COLUMN c3 BIGINT`, srcTableName)) + e2e.EnvNoError(t, env, err) + t.Log("Altered source table, dropped column c2 and added column c3") + _, err = s.Connector().Conn().Exec(context.Background(), fmt.Sprintf(` + INSERT INTO %s(c1,c3) VALUES ($1,$2)`, srcTableName), 3, 3) + e2e.EnvNoError(t, env, err) + t.Log("Inserted row with added c3 in the source table") + + // verify we got our two rows, if schema did not match up it will error. + e2e.EnvWaitForEqualTablesWithNames(env, s, "normalize dropped c2 column", srcTable, dstTable, "id,c1,c3") + expectedTableSchema = &protos.TableSchema{ + TableIdentifier: e2e.ExpectedDestinationTableName(s, dstTable), + Columns: []*protos.FieldDescription{ + { + Name: e2e.ExpectedDestinationIdentifier(s, "id"), + Type: string(qvalue.QValueKindNumeric), + TypeModifier: -1, + }, + { + Name: e2e.ExpectedDestinationIdentifier(s, "c1"), + Type: string(qvalue.QValueKindNumeric), + TypeModifier: -1, + }, + { + Name: "_PEERDB_SYNCED_AT", + Type: string(qvalue.QValueKindTimestamp), + TypeModifier: -1, + }, + { + Name: e2e.ExpectedDestinationIdentifier(s, "c2"), + Type: string(qvalue.QValueKindNumeric), + TypeModifier: -1, + }, + { + Name: e2e.ExpectedDestinationIdentifier(s, "c3"), + Type: string(qvalue.QValueKindNumeric), + TypeModifier: -1, + }, + }, + } + output, err = destinationSchemaConnector.GetTableSchema(context.Background(), &protos.GetTableSchemaBatchInput{ + TableIdentifiers: []string{dstTableName}, + }) + e2e.EnvNoError(t, env, err) + e2e.EnvTrue(t, env, e2e.CompareTableSchemas(expectedTableSchema, output.TableNameSchemaMapping[dstTableName])) + e2e.EnvEqualTablesWithNames(env, s, srcTable, dstTable, "id,c1,c3") + + // alter source table, drop column c3 and insert another row. + _, err = s.Connector().Conn().Exec(context.Background(), fmt.Sprintf(` + ALTER TABLE %s DROP COLUMN c3`, srcTableName)) + e2e.EnvNoError(t, env, err) + t.Log("Altered source table, dropped column c3") + _, err = s.Connector().Conn().Exec(context.Background(), fmt.Sprintf(` + INSERT INTO %s(c1) VALUES ($1)`, srcTableName), 4) + e2e.EnvNoError(t, env, err) + t.Log("Inserted row after dropping all columns in the source table") + + // verify we got our two rows, if schema did not match up it will error. 
+ e2e.EnvWaitForEqualTablesWithNames(env, s, "normalize dropped c3 column", srcTable, dstTable, "id,c1") + expectedTableSchema = &protos.TableSchema{ + TableIdentifier: e2e.ExpectedDestinationTableName(s, dstTable), + Columns: []*protos.FieldDescription{ + { + Name: e2e.ExpectedDestinationIdentifier(s, "id"), + Type: string(qvalue.QValueKindNumeric), + TypeModifier: -1, + }, + { + Name: e2e.ExpectedDestinationIdentifier(s, "c1"), + Type: string(qvalue.QValueKindNumeric), + TypeModifier: -1, + }, + { + Name: "_PEERDB_SYNCED_AT", + Type: string(qvalue.QValueKindTimestamp), + TypeModifier: -1, + }, + { + Name: e2e.ExpectedDestinationIdentifier(s, "c2"), + Type: string(qvalue.QValueKindNumeric), + TypeModifier: -1, + }, + { + Name: e2e.ExpectedDestinationIdentifier(s, "c3"), + Type: string(qvalue.QValueKindNumeric), + TypeModifier: -1, + }, + }, + } + output, err = destinationSchemaConnector.GetTableSchema(context.Background(), &protos.GetTableSchemaBatchInput{ + TableIdentifiers: []string{dstTableName}, + }) + e2e.EnvNoError(t, env, err) + e2e.EnvTrue(t, env, e2e.CompareTableSchemas(expectedTableSchema, output.TableNameSchemaMapping[dstTableName])) + e2e.EnvEqualTablesWithNames(env, s, srcTable, dstTable, "id,c1") + + env.Cancel() + + e2e.RequireEnvCanceled(t, env) +} diff --git a/flow/e2e/generic/peer_flow_test.go b/flow/e2e/generic/peer_flow_test.go deleted file mode 100644 index 20c5847df4..0000000000 --- a/flow/e2e/generic/peer_flow_test.go +++ /dev/null @@ -1,82 +0,0 @@ -package e2e_generic - -import ( - "context" - "fmt" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/PeerDB-io/peer-flow/e2e" - "github.com/PeerDB-io/peer-flow/e2e/bigquery" - "github.com/PeerDB-io/peer-flow/e2e/postgres" - "github.com/PeerDB-io/peer-flow/e2e/snowflake" - "github.com/PeerDB-io/peer-flow/e2eshared" - peerflow "github.com/PeerDB-io/peer-flow/workflows" -) - -func TestGenericPG(t *testing.T) { - e2eshared.RunSuite(t, SetupGenericSuite(e2e_postgres.SetupSuite)) -} - -func TestGenericSF(t *testing.T) { - e2eshared.RunSuite(t, SetupGenericSuite(e2e_snowflake.SetupSuite)) -} - -func TestGenericBQ(t *testing.T) { - e2eshared.RunSuite(t, SetupGenericSuite(e2e_bigquery.SetupSuite)) -} - -type Generic struct { - e2e.GenericSuite -} - -func SetupGenericSuite[T e2e.GenericSuite](f func(t *testing.T) T) func(t *testing.T) Generic { - return func(t *testing.T) Generic { - t.Helper() - return Generic{f(t)} - } -} - -func (s Generic) Test_Simple_Flow() { - t := s.T() - srcTable := "test_simple" - dstTable := "test_simple_dst" - srcSchemaTable := e2e.AttachSchema(s, srcTable) - - _, err := s.Connector().Conn().Exec(context.Background(), fmt.Sprintf(` - CREATE TABLE IF NOT EXISTS %s ( - id SERIAL PRIMARY KEY, - key TEXT NOT NULL, - value TEXT NOT NULL, - myh HSTORE NOT NULL - ); - `, srcSchemaTable)) - require.NoError(t, err) - - connectionGen := e2e.FlowConnectionGenerationConfig{ - FlowJobName: e2e.AddSuffix(s, "test_simple"), - TableMappings: e2e.TableMappings(s, srcTable, dstTable), - Destination: s.Peer(), - } - flowConnConfig := connectionGen.GenerateFlowConnectionConfigs() - - tc := e2e.NewTemporalClient(t) - env := e2e.ExecutePeerflow(tc, peerflow.CDCFlowWorkflow, flowConnConfig, nil) - - e2e.SetupCDCFlowStatusQuery(t, env, connectionGen) - // insert 10 rows into the source table - for i := range 10 { - testKey := fmt.Sprintf("test_key_%d", i) - testValue := fmt.Sprintf("test_value_%d", i) - _, err = s.Connector().Conn().Exec(context.Background(), fmt.Sprintf(` - INSERT INTO %s(key, value, myh) 
VALUES ($1, $2, '"a"=>"b"') - `, srcSchemaTable), testKey, testValue) - e2e.EnvNoError(t, env, err) - } - t.Log("Inserted 10 rows into the source table") - - e2e.EnvWaitForEqualTablesWithNames(env, s, "normalizing 10 rows", srcTable, dstTable, `id,key,value,myh`) - env.Cancel() - e2e.RequireEnvCanceled(t, env) -} diff --git a/flow/e2e/postgres/peer_flow_pg_test.go b/flow/e2e/postgres/peer_flow_pg_test.go index 2e69376b01..eeec5e373d 100644 --- a/flow/e2e/postgres/peer_flow_pg_test.go +++ b/flow/e2e/postgres/peer_flow_pg_test.go @@ -17,7 +17,6 @@ import ( "github.com/PeerDB-io/peer-flow/e2e" "github.com/PeerDB-io/peer-flow/generated/protos" "github.com/PeerDB-io/peer-flow/model" - "github.com/PeerDB-io/peer-flow/model/qvalue" "github.com/PeerDB-io/peer-flow/shared" peerflow "github.com/PeerDB-io/peer-flow/workflows" ) @@ -51,32 +50,6 @@ func (s PeerFlowE2ETestSuitePG) checkPeerdbColumns(dstSchemaQualified string, ro return nil } -func (s PeerFlowE2ETestSuitePG) WaitForSchema( - env e2e.WorkflowRun, - reason string, - srcTableName string, - dstTableName string, - cols string, - expectedSchema *protos.TableSchema, -) { - s.t.Helper() - e2e.EnvWaitFor(s.t, env, 3*time.Minute, reason, func() bool { - s.t.Helper() - output, err := s.conn.GetTableSchema(context.Background(), &protos.GetTableSchemaBatchInput{ - TableIdentifiers: []string{dstTableName}, - }) - if err != nil { - return false - } - tableSchema := output.TableNameSchemaMapping[dstTableName] - if !e2e.CompareTableSchemas(expectedSchema, tableSchema) { - s.t.Log("schemas unequal", expectedSchema, tableSchema) - return false - } - return s.comparePGTables(srcTableName, dstTableName, cols) == nil - }) -} - func (s PeerFlowE2ETestSuitePG) Test_Geospatial_PG() { srcTableName := s.attachSchemaSuffix("test_geospatial_pg") dstTableName := s.attachSchemaSuffix("test_geospatial_pg_dst") @@ -224,188 +197,6 @@ func (s PeerFlowE2ETestSuitePG) Test_Enums_PG() { e2e.RequireEnvCanceled(s.t, env) } -func (s PeerFlowE2ETestSuitePG) Test_Simple_Schema_Changes_PG() { - tc := e2e.NewTemporalClient(s.t) - - srcTableName := s.attachSchemaSuffix("test_simple_schema_changes") - dstTableName := s.attachSchemaSuffix("test_simple_schema_changes_dst") - - _, err := s.Conn().Exec(context.Background(), fmt.Sprintf(` - CREATE TABLE IF NOT EXISTS %s ( - id BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY, - c1 BIGINT - ); - `, srcTableName)) - require.NoError(s.t, err) - - connectionGen := e2e.FlowConnectionGenerationConfig{ - FlowJobName: s.attachSuffix("test_simple_schema_changes"), - TableNameMapping: map[string]string{srcTableName: dstTableName}, - Destination: s.peer, - } - - flowConnConfig := connectionGen.GenerateFlowConnectionConfigs() - flowConnConfig.MaxBatchSize = 1 - - // wait for PeerFlowStatusQuery to finish setup - // and then insert and mutate schema repeatedly. - env := e2e.ExecutePeerflow(tc, peerflow.CDCFlowWorkflow, flowConnConfig, nil) - e2e.SetupCDCFlowStatusQuery(s.t, env, connectionGen) - - // insert first row. 
- _, err = s.Conn().Exec(context.Background(), fmt.Sprintf(` - INSERT INTO %s(c1) VALUES ($1)`, srcTableName), 1) - e2e.EnvNoError(s.t, env, err) - s.t.Log("Inserted initial row in the source table") - - s.WaitForSchema(env, "normalizing first row", srcTableName, dstTableName, "id,c1", &protos.TableSchema{ - TableIdentifier: dstTableName, - PrimaryKeyColumns: []string{"id"}, - Columns: []*protos.FieldDescription{ - { - Name: "id", - Type: string(qvalue.QValueKindInt64), - TypeModifier: -1, - }, - { - Name: "c1", - Type: string(qvalue.QValueKindInt64), - TypeModifier: -1, - }, - { - Name: "_PEERDB_SYNCED_AT", - Type: string(qvalue.QValueKindTimestamp), - TypeModifier: -1, - }, - }, - }) - - // alter source table, add column c2 and insert another row. - _, err = s.Conn().Exec(context.Background(), fmt.Sprintf(` - ALTER TABLE %s ADD COLUMN c2 BIGINT`, srcTableName)) - e2e.EnvNoError(s.t, env, err) - s.t.Log("Altered source table, added column c2") - _, err = s.Conn().Exec(context.Background(), fmt.Sprintf(` - INSERT INTO %s(c1,c2) VALUES ($1,$2)`, srcTableName), 2, 2) - e2e.EnvNoError(s.t, env, err) - s.t.Log("Inserted row with added c2 in the source table") - - s.WaitForSchema(env, "normalizing altered row", srcTableName, dstTableName, "id,c1,c2", &protos.TableSchema{ - TableIdentifier: dstTableName, - PrimaryKeyColumns: []string{"id"}, - Columns: []*protos.FieldDescription{ - { - Name: "id", - Type: string(qvalue.QValueKindInt64), - TypeModifier: -1, - }, - { - Name: "c1", - Type: string(qvalue.QValueKindInt64), - TypeModifier: -1, - }, - { - Name: "_PEERDB_SYNCED_AT", - Type: string(qvalue.QValueKindTimestamp), - TypeModifier: -1, - }, - { - Name: "c2", - Type: string(qvalue.QValueKindInt64), - TypeModifier: -1, - }, - }, - }) - - // alter source table, add column c3, drop column c2 and insert another row. - _, err = s.Conn().Exec(context.Background(), fmt.Sprintf(` - ALTER TABLE %s DROP COLUMN c2, ADD COLUMN c3 BIGINT`, srcTableName)) - e2e.EnvNoError(s.t, env, err) - s.t.Log("Altered source table, dropped column c2 and added column c3") - _, err = s.Conn().Exec(context.Background(), fmt.Sprintf(` - INSERT INTO %s(c1,c3) VALUES ($1,$2)`, srcTableName), 3, 3) - e2e.EnvNoError(s.t, env, err) - s.t.Log("Inserted row with added c3 in the source table") - - s.WaitForSchema(env, "normalizing dropped column row", srcTableName, dstTableName, "id,c1,c3", &protos.TableSchema{ - TableIdentifier: dstTableName, - PrimaryKeyColumns: []string{"id"}, - Columns: []*protos.FieldDescription{ - { - Name: "id", - Type: string(qvalue.QValueKindInt64), - TypeModifier: -1, - }, - { - Name: "c1", - Type: string(qvalue.QValueKindInt64), - TypeModifier: -1, - }, - { - Name: "c2", - Type: string(qvalue.QValueKindInt64), - TypeModifier: -1, - }, - { - Name: "_PEERDB_SYNCED_AT", - Type: string(qvalue.QValueKindTimestamp), - TypeModifier: -1, - }, - { - Name: "c3", - Type: string(qvalue.QValueKindInt64), - TypeModifier: -1, - }, - }, - }) - - // alter source table, drop column c3 and insert another row. 
- _, err = s.Conn().Exec(context.Background(), fmt.Sprintf(` - ALTER TABLE %s DROP COLUMN c3`, srcTableName)) - e2e.EnvNoError(s.t, env, err) - s.t.Log("Altered source table, dropped column c3") - _, err = s.Conn().Exec(context.Background(), fmt.Sprintf(` - INSERT INTO %s(c1) VALUES ($1)`, srcTableName), 4) - e2e.EnvNoError(s.t, env, err) - s.t.Log("Inserted row after dropping all columns in the source table") - - s.WaitForSchema(env, "normalizing 2nd dropped column row", srcTableName, dstTableName, "id,c1", &protos.TableSchema{ - TableIdentifier: dstTableName, - PrimaryKeyColumns: []string{"id"}, - Columns: []*protos.FieldDescription{ - { - Name: "id", - Type: string(qvalue.QValueKindInt64), - TypeModifier: -1, - }, - { - Name: "c1", - Type: string(qvalue.QValueKindInt64), - TypeModifier: -1, - }, - { - Name: "_PEERDB_SYNCED_AT", - Type: string(qvalue.QValueKindTimestamp), - TypeModifier: -1, - }, - { - Name: "c2", - Type: string(qvalue.QValueKindInt64), - TypeModifier: -1, - }, - { - Name: "c3", - Type: string(qvalue.QValueKindInt64), - TypeModifier: -1, - }, - }, - }) - - env.Cancel() - - e2e.RequireEnvCanceled(s.t, env) -} - func (s PeerFlowE2ETestSuitePG) Test_Composite_PKey_PG() { tc := e2e.NewTemporalClient(s.t) diff --git a/flow/e2e/postgres/postgres.go b/flow/e2e/postgres/postgres.go index 23ca778c8d..8eafd6ade0 100644 --- a/flow/e2e/postgres/postgres.go +++ b/flow/e2e/postgres/postgres.go @@ -9,6 +9,7 @@ import ( "github.com/jackc/pgx/v5" "github.com/stretchr/testify/require" + "github.com/PeerDB-io/peer-flow/connectors" connpostgres "github.com/PeerDB-io/peer-flow/connectors/postgres" "github.com/PeerDB-io/peer-flow/e2e" "github.com/PeerDB-io/peer-flow/generated/protos" @@ -32,6 +33,10 @@ func (s PeerFlowE2ETestSuitePG) Connector() *connpostgres.PostgresConnector { return s.conn } +func (s PeerFlowE2ETestSuitePG) DestinationConnector() connectors.Connector { + return s.conn +} + func (s PeerFlowE2ETestSuitePG) Conn() *pgx.Conn { return s.conn.Conn() } diff --git a/flow/e2e/snowflake/peer_flow_sf_test.go b/flow/e2e/snowflake/peer_flow_sf_test.go index 525d2c7256..56084a1a27 100644 --- a/flow/e2e/snowflake/peer_flow_sf_test.go +++ b/flow/e2e/snowflake/peer_flow_sf_test.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "strings" "testing" "time" @@ -16,7 +15,6 @@ import ( "github.com/PeerDB-io/peer-flow/e2e" "github.com/PeerDB-io/peer-flow/e2eshared" "github.com/PeerDB-io/peer-flow/generated/protos" - "github.com/PeerDB-io/peer-flow/model/qvalue" peerflow "github.com/PeerDB-io/peer-flow/workflows" ) @@ -516,218 +514,6 @@ func (s PeerFlowE2ETestSuiteSF) Test_Multi_Table_SF() { e2e.RequireEnvCanceled(s.t, env) } -func (s PeerFlowE2ETestSuiteSF) Test_Simple_Schema_Changes_SF() { - tc := e2e.NewTemporalClient(s.t) - - srcTableName := s.attachSchemaSuffix("test_simple_schema_changes") - dstTableName := fmt.Sprintf("%s.%s", s.sfHelper.testSchemaName, "test_simple_schema_changes") - - _, err := s.Conn().Exec(context.Background(), fmt.Sprintf(` - CREATE TABLE IF NOT EXISTS %s ( - id BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY, - c1 BIGINT - ); - `, srcTableName)) - require.NoError(s.t, err) - - connectionGen := e2e.FlowConnectionGenerationConfig{ - FlowJobName: s.attachSuffix("test_simple_schema_changes"), - TableNameMapping: map[string]string{srcTableName: dstTableName}, - Destination: s.sfHelper.Peer, - } - - flowConnConfig := connectionGen.GenerateFlowConnectionConfigs() - flowConnConfig.MaxBatchSize = 100 - - // wait for PeerFlowStatusQuery to finish setup - // and then insert 
and mutate schema repeatedly. - env := e2e.ExecutePeerflow(tc, peerflow.CDCFlowWorkflow, flowConnConfig, nil) - e2e.SetupCDCFlowStatusQuery(s.t, env, connectionGen) - _, err = s.Conn().Exec(context.Background(), fmt.Sprintf(` - INSERT INTO %s(c1) VALUES ($1)`, srcTableName), 1) - e2e.EnvNoError(s.t, env, err) - s.t.Log("Inserted initial row in the source table") - - e2e.EnvWaitForEqualTables(env, s, "normalize reinsert", "test_simple_schema_changes", "id,c1") - - expectedTableSchema := &protos.TableSchema{ - TableIdentifier: strings.ToUpper(dstTableName), - Columns: []*protos.FieldDescription{ - { - Name: "ID", - Type: string(qvalue.QValueKindNumeric), - TypeModifier: -1, - }, - { - Name: "C1", - Type: string(qvalue.QValueKindNumeric), - TypeModifier: -1, - }, - { - Name: "_PEERDB_IS_DELETED", - Type: string(qvalue.QValueKindBoolean), - TypeModifier: -1, - }, - { - Name: "_PEERDB_SYNCED_AT", - Type: string(qvalue.QValueKindTimestamp), - TypeModifier: -1, - }, - }, - } - output, err := s.connector.GetTableSchema(context.Background(), &protos.GetTableSchemaBatchInput{ - TableIdentifiers: []string{dstTableName}, - }) - e2e.EnvNoError(s.t, env, err) - e2e.EnvTrue(s.t, env, e2e.CompareTableSchemas(expectedTableSchema, output.TableNameSchemaMapping[dstTableName])) - - // alter source table, add column c2 and insert another row. - _, err = s.Conn().Exec(context.Background(), fmt.Sprintf(` - ALTER TABLE %s ADD COLUMN c2 BIGINT`, srcTableName)) - e2e.EnvNoError(s.t, env, err) - s.t.Log("Altered source table, added column c2") - _, err = s.Conn().Exec(context.Background(), fmt.Sprintf(` - INSERT INTO %s(c1,c2) VALUES ($1,$2)`, srcTableName), 2, 2) - e2e.EnvNoError(s.t, env, err) - s.t.Log("Inserted row with added c2 in the source table") - - // verify we got our two rows, if schema did not match up it will error. - e2e.EnvWaitForEqualTables(env, s, "normalize altered row", "test_simple_schema_changes", "id,c1,c2") - expectedTableSchema = &protos.TableSchema{ - TableIdentifier: strings.ToUpper(dstTableName), - Columns: []*protos.FieldDescription{ - { - Name: "ID", - Type: string(qvalue.QValueKindNumeric), - TypeModifier: -1, - }, - { - Name: "C1", - Type: string(qvalue.QValueKindNumeric), - TypeModifier: -1, - }, - { - Name: "_PEERDB_SYNCED_AT", - Type: string(qvalue.QValueKindTimestamp), - TypeModifier: -1, - }, - { - Name: "C2", - Type: string(qvalue.QValueKindNumeric), - TypeModifier: -1, - }, - }, - } - output, err = s.connector.GetTableSchema(context.Background(), &protos.GetTableSchemaBatchInput{ - TableIdentifiers: []string{dstTableName}, - }) - e2e.EnvNoError(s.t, env, err) - e2e.EnvTrue(s.t, env, e2e.CompareTableSchemas(expectedTableSchema, output.TableNameSchemaMapping[dstTableName])) - e2e.EnvEqualTables(env, s, "test_simple_schema_changes", "id,c1,c2") - - // alter source table, add column c3, drop column c2 and insert another row. - _, err = s.Conn().Exec(context.Background(), fmt.Sprintf(` - ALTER TABLE %s DROP COLUMN c2, ADD COLUMN c3 BIGINT`, srcTableName)) - e2e.EnvNoError(s.t, env, err) - s.t.Log("Altered source table, dropped column c2 and added column c3") - _, err = s.Conn().Exec(context.Background(), fmt.Sprintf(` - INSERT INTO %s(c1,c3) VALUES ($1,$2)`, srcTableName), 3, 3) - e2e.EnvNoError(s.t, env, err) - s.t.Log("Inserted row with added c3 in the source table") - - // verify we got our two rows, if schema did not match up it will error. 
- e2e.EnvWaitForEqualTables(env, s, "normalize dropped c2 column", "test_simple_schema_changes", "id,c1,c3") - expectedTableSchema = &protos.TableSchema{ - TableIdentifier: strings.ToUpper(dstTableName), - Columns: []*protos.FieldDescription{ - { - Name: "ID", - Type: string(qvalue.QValueKindNumeric), - TypeModifier: -1, - }, - { - Name: "C1", - Type: string(qvalue.QValueKindNumeric), - TypeModifier: -1, - }, - { - Name: "_PEERDB_SYNCED_AT", - Type: string(qvalue.QValueKindTimestamp), - TypeModifier: -1, - }, - { - Name: "C2", - Type: string(qvalue.QValueKindNumeric), - TypeModifier: -1, - }, - { - Name: "C3", - Type: string(qvalue.QValueKindNumeric), - TypeModifier: -1, - }, - }, - } - output, err = s.connector.GetTableSchema(context.Background(), &protos.GetTableSchemaBatchInput{ - TableIdentifiers: []string{dstTableName}, - }) - e2e.EnvNoError(s.t, env, err) - e2e.EnvTrue(s.t, env, e2e.CompareTableSchemas(expectedTableSchema, output.TableNameSchemaMapping[dstTableName])) - e2e.EnvEqualTables(env, s, "test_simple_schema_changes", "id,c1,c3") - - // alter source table, drop column c3 and insert another row. - _, err = s.Conn().Exec(context.Background(), fmt.Sprintf(` - ALTER TABLE %s DROP COLUMN c3`, srcTableName)) - e2e.EnvNoError(s.t, env, err) - s.t.Log("Altered source table, dropped column c3") - _, err = s.Conn().Exec(context.Background(), fmt.Sprintf(` - INSERT INTO %s(c1) VALUES ($1)`, srcTableName), 4) - e2e.EnvNoError(s.t, env, err) - s.t.Log("Inserted row after dropping all columns in the source table") - - // verify we got our two rows, if schema did not match up it will error. - e2e.EnvWaitForEqualTables(env, s, "normalize dropped c3 column", "test_simple_schema_changes", "id,c1") - expectedTableSchema = &protos.TableSchema{ - TableIdentifier: strings.ToUpper(dstTableName), - Columns: []*protos.FieldDescription{ - { - Name: "ID", - Type: string(qvalue.QValueKindNumeric), - TypeModifier: -1, - }, - { - Name: "C1", - Type: string(qvalue.QValueKindNumeric), - TypeModifier: -1, - }, - { - Name: "_PEERDB_SYNCED_AT", - Type: string(qvalue.QValueKindTimestamp), - TypeModifier: -1, - }, - { - Name: "C2", - Type: string(qvalue.QValueKindNumeric), - TypeModifier: -1, - }, - { - Name: "C3", - Type: string(qvalue.QValueKindNumeric), - TypeModifier: -1, - }, - }, - } - output, err = s.connector.GetTableSchema(context.Background(), &protos.GetTableSchemaBatchInput{ - TableIdentifiers: []string{dstTableName}, - }) - e2e.EnvNoError(s.t, env, err) - e2e.EnvTrue(s.t, env, e2e.CompareTableSchemas(expectedTableSchema, output.TableNameSchemaMapping[dstTableName])) - e2e.EnvEqualTables(env, s, "test_simple_schema_changes", "id,c1") - - env.Cancel() - - e2e.RequireEnvCanceled(s.t, env) -} - func (s PeerFlowE2ETestSuiteSF) Test_Composite_PKey_SF() { tc := e2e.NewTemporalClient(s.t) diff --git a/flow/e2e/snowflake/snowflake.go b/flow/e2e/snowflake/snowflake.go index 45132ef601..06c46d1046 100644 --- a/flow/e2e/snowflake/snowflake.go +++ b/flow/e2e/snowflake/snowflake.go @@ -10,6 +10,7 @@ import ( "github.com/jackc/pgx/v5" "github.com/stretchr/testify/require" + "github.com/PeerDB-io/peer-flow/connectors" connpostgres "github.com/PeerDB-io/peer-flow/connectors/postgres" connsnowflake "github.com/PeerDB-io/peer-flow/connectors/snowflake" "github.com/PeerDB-io/peer-flow/e2e" @@ -35,6 +36,10 @@ func (s PeerFlowE2ETestSuiteSF) Connector() *connpostgres.PostgresConnector { return s.conn } +func (s PeerFlowE2ETestSuiteSF) DestinationConnector() connectors.Connector { + return s.connector +} + func (s 
PeerFlowE2ETestSuiteSF) Conn() *pgx.Conn { return s.Connector().Conn() } diff --git a/flow/e2e/test_utils.go b/flow/e2e/test_utils.go index c018f32df2..dcaef74291 100644 --- a/flow/e2e/test_utils.go +++ b/flow/e2e/test_utils.go @@ -22,6 +22,7 @@ import ( "go.temporal.io/sdk/converter" "go.temporal.io/sdk/temporal" + "github.com/PeerDB-io/peer-flow/connectors" connpostgres "github.com/PeerDB-io/peer-flow/connectors/postgres" connsnowflake "github.com/PeerDB-io/peer-flow/connectors/snowflake" "github.com/PeerDB-io/peer-flow/connectors/utils" @@ -55,6 +56,7 @@ type RowSource interface { type GenericSuite interface { RowSource Peer() *protos.Peer + DestinationConnector() connectors.Connector DestinationTable(table string) string } @@ -112,13 +114,17 @@ func RequireEqualTables(suite RowSource, table string, cols string) { } func EnvEqualTables(env WorkflowRun, suite RowSource, table string, cols string) { + EnvEqualTablesWithNames(env, suite, table, table, cols) +} + +func EnvEqualTablesWithNames(env WorkflowRun, suite RowSource, srcTable string, dstTable string, cols string) { t := suite.T() t.Helper() - pgRows, err := GetPgRows(suite.Connector(), suite.Suffix(), table, cols) + pgRows, err := GetPgRows(suite.Connector(), suite.Suffix(), srcTable, cols) EnvNoError(t, env, err) - rows, err := suite.GetRows(table, cols) + rows, err := suite.GetRows(dstTable, cols) EnvNoError(t, env, err) EnvEqualRecordBatches(t, env, pgRows, rows) @@ -519,6 +525,19 @@ func GetOwnersSelectorStringsSF() [2]string { return [2]string{strings.Join(pgFields, ","), strings.Join(sfFields, ",")} } +func ExpectedDestinationIdentifier(s GenericSuite, ident string) string { + switch s.DestinationConnector().(type) { + case *connsnowflake.SnowflakeConnector: + return strings.ToUpper(ident) + default: + return ident + } +} + +func ExpectedDestinationTableName(s GenericSuite, table string) string { + return ExpectedDestinationIdentifier(s, s.DestinationTable(table)) +} + type testWriter struct { *testing.T }
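Following up on the PATCH 7/7 note that BigQuery would need to implement the GetTableSchema interface before TestGenericBQ can run Test_Simple_Schema_Changes: below is a rough, hypothetical sketch of what that connector method could look like. The method name and signature come from the GetTableSchemaConnector interface added in flow/connectors/core.go; the body, and the idea of mapping BigQuery column metadata into protos.FieldDescription values, are assumptions rather than code from this series.

package connbigquery

import (
	"context"

	"github.com/PeerDB-io/peer-flow/generated/protos"
)

// GetTableSchema is a hypothetical BigQuery implementation of the
// GetTableSchemaConnector interface from PATCH 7/7. A real version would read
// each table's column metadata from BigQuery and translate it into
// protos.FieldDescription entries, much like the Snowflake connector does in
// get_schema_for_tests.go; here every schema is left empty apart from the
// table identifier so the sketch stays self-contained.
func (c *BigQueryConnector) GetTableSchema(
	ctx context.Context,
	req *protos.GetTableSchemaBatchInput,
) (*protos.GetTableSchemaBatchOutput, error) {
	res := make(map[string]*protos.TableSchema, len(req.TableIdentifiers))
	for _, tableName := range req.TableIdentifiers {
		res[tableName] = &protos.TableSchema{TableIdentifier: tableName}
	}
	return &protos.GetTableSchemaBatchOutput{TableNameSchemaMapping: res}, nil
}

With such a method in place, PeerFlowE2ETestSuiteBQ.DestinationConnector() could return the BigQuery connector instead of nil, and the type assertion at the top of the generic Test_Simple_Schema_Changes would no longer cause the BigQuery suite to skip.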