diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index dad2551196..5a5291f822 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -4,9 +4,7 @@ on: push: branches: [main, release/*] pull_request: - branches: - - "main" - - "release/*" + branches: [main, release/*] jobs: build: diff --git a/.github/workflows/flow.yml b/.github/workflows/flow.yml index 2ddff33bd0..70f65df13f 100644 --- a/.github/workflows/flow.yml +++ b/.github/workflows/flow.yml @@ -2,8 +2,7 @@ name: Flow build and test on: pull_request: - branches: - - "main" + branches: [main] push: branches: [main] diff --git a/.github/workflows/golang-lint.yml b/.github/workflows/golang-lint.yml index 48a0510b20..5b4b31441d 100644 --- a/.github/workflows/golang-lint.yml +++ b/.github/workflows/golang-lint.yml @@ -2,8 +2,8 @@ name: GolangCI-Lint on: pull_request: - branches: - - "main" + branches: [main] + paths: [flow/**] jobs: golangci-lint: diff --git a/.github/workflows/rust-lint.yml b/.github/workflows/rust-lint.yml index 8a56cac05e..c4a43ad791 100644 --- a/.github/workflows/rust-lint.yml +++ b/.github/workflows/rust-lint.yml @@ -2,8 +2,8 @@ name: clippy-action on: pull_request: - branches: - - "main" + branches: [main] + paths: [nexus/**] jobs: clippy: diff --git a/.github/workflows/ui-build.yml b/.github/workflows/ui-build.yml index ec59e13969..752bb9bd7f 100644 --- a/.github/workflows/ui-build.yml +++ b/.github/workflows/ui-build.yml @@ -2,11 +2,10 @@ name: Build & Test UI on: push: - branches: - - main + branches: [main] pull_request: - branches: - - main + branches: [main] + paths: [ui/**] jobs: build-test: diff --git a/.github/workflows/ui-lint.yml b/.github/workflows/ui-lint.yml index 84fe76b0bc..48428e1572 100644 --- a/.github/workflows/ui-lint.yml +++ b/.github/workflows/ui-lint.yml @@ -2,11 +2,10 @@ name: Lint UI on: push: - branches: - - main + branches: [main] pull_request: - branches: - - main + branches: [main] + paths: [ui/**] permissions: checks: write diff --git a/flow/cmd/handler.go b/flow/cmd/handler.go index 9efdc59fb5..7b03c9de67 100644 --- a/flow/cmd/handler.go +++ b/flow/cmd/handler.go @@ -268,6 +268,13 @@ func (h *FlowRequestHandler) CreateQRepFlow( } else { workflowFn = peerflow.QRepFlowWorkflow } + + if req.QrepConfig.SyncedAtColName == "" { + cfg.SyncedAtColName = "_PEERDB_SYNCED_AT" + } else { + // make them all uppercase + cfg.SyncedAtColName = strings.ToUpper(req.QrepConfig.SyncedAtColName) + } _, err := h.temporalClient.ExecuteWorkflow(ctx, workflowOptions, workflowFn, cfg, state) if err != nil { slog.Error("unable to start QRepFlow workflow", diff --git a/flow/connectors/bigquery/bigquery.go b/flow/connectors/bigquery/bigquery.go index 6806445f3d..5f966ecf7f 100644 --- a/flow/connectors/bigquery/bigquery.go +++ b/flow/connectors/bigquery/bigquery.go @@ -793,6 +793,11 @@ func (c *BigQueryConnector) NormalizeRecords(req *model.NormalizeRecordsRequest) SyncBatchID: syncBatchID, NormalizeBatchID: normalizeBatchID, UnchangedToastColumns: tableNametoUnchangedToastCols[tableName], + peerdbCols: &protos.PeerDBColumns{ + SoftDeleteColName: req.SoftDeleteColName, + SyncedAtColName: req.SyncedAtColName, + SoftDelete: req.SoftDelete, + }, } // normalize anything between last normalized batch id to last sync batchid mergeStmts := mergeGen.generateMergeStmts() @@ -961,7 +966,7 @@ func (c *BigQueryConnector) SetupNormalizedTables( } // convert the column names and types to bigquery types - columns := make([]*bigquery.FieldSchema, len(tableSchema.Columns)) + columns := 
make([]*bigquery.FieldSchema, len(tableSchema.Columns), len(tableSchema.Columns)+2) idx := 0 for colName, genericColType := range tableSchema.Columns { columns[idx] = &bigquery.FieldSchema{ @@ -972,6 +977,22 @@ func (c *BigQueryConnector) SetupNormalizedTables( idx++ } + if req.SoftDeleteColName != "" { + columns = append(columns, &bigquery.FieldSchema{ + Name: req.SoftDeleteColName, + Type: bigquery.BooleanFieldType, + Repeated: false, + }) + } + + if req.SyncedAtColName != "" { + columns = append(columns, &bigquery.FieldSchema{ + Name: req.SyncedAtColName, + Type: bigquery.TimestampFieldType, + Repeated: false, + }) + } + // create the table using the columns schema := bigquery.Schema(columns) err = table.Create(c.ctx, &bigquery.TableMetadata{Schema: schema}) diff --git a/flow/connectors/bigquery/merge_statement_generator.go b/flow/connectors/bigquery/merge_statement_generator.go index 2a37ef5ecb..149825c2cf 100644 --- a/flow/connectors/bigquery/merge_statement_generator.go +++ b/flow/connectors/bigquery/merge_statement_generator.go @@ -26,6 +26,8 @@ type mergeStmtGenerator struct { NormalizedTableSchema *protos.TableSchema // array of toast column combinations that are unchanged UnchangedToastColumns []string + // _PEERDB_IS_DELETED and _SYNCED_AT columns + peerdbCols *protos.PeerDBColumns } // GenerateMergeStmt generates a merge statements. @@ -39,7 +41,7 @@ func (m *mergeStmtGenerator) generateMergeStmts() []string { "CREATE TEMP TABLE %s AS (%s, %s);", tempTable, flattenedCTE, deDupedCTE) - mergeStmt := m.generateMergeStmt(tempTable) + mergeStmt := m.generateMergeStmt(tempTable, m.peerdbCols) dropTempTableStmt := fmt.Sprintf("DROP TABLE %s;", tempTable) @@ -127,7 +129,7 @@ func (m *mergeStmtGenerator) generateDeDupedCTE() string { } // generateMergeStmt generates a merge statement. -func (m *mergeStmtGenerator) generateMergeStmt(tempTable string) string { +func (m *mergeStmtGenerator) generateMergeStmt(tempTable string, peerdbCols *protos.PeerDBColumns) string { // comma separated list of column names backtickColNames := make([]string, 0, len(m.NormalizedTableSchema.Columns)) pureColNames := make([]string, 0, len(m.NormalizedTableSchema.Columns)) @@ -136,8 +138,19 @@ func (m *mergeStmtGenerator) generateMergeStmt(tempTable string) string { pureColNames = append(pureColNames, colName) } csep := strings.Join(backtickColNames, ", ") - - updateStatementsforToastCols := m.generateUpdateStatements(pureColNames, m.UnchangedToastColumns) + insertColumnsSQL := csep + fmt.Sprintf(", `%s`", peerdbCols.SyncedAtColName) + insertValuesSQL := csep + ",CURRENT_TIMESTAMP" + + updateStatementsforToastCols := m.generateUpdateStatements(pureColNames, + m.UnchangedToastColumns, peerdbCols) + if m.peerdbCols.SoftDelete { + softDeleteInsertColumnsSQL := insertColumnsSQL + fmt.Sprintf(", `%s`", peerdbCols.SoftDeleteColName) + softDeleteInsertValuesSQL := insertValuesSQL + ", TRUE" + + updateStatementsforToastCols = append(updateStatementsforToastCols, + fmt.Sprintf("WHEN NOT MATCHED AND (_peerdb_deduped._PEERDB_RECORD_TYPE = 2) THEN INSERT (%s) VALUES(%s)", + softDeleteInsertColumnsSQL, softDeleteInsertValuesSQL)) + } updateStringToastCols := strings.Join(updateStatementsforToastCols, " ") pkeySelectSQLArray := make([]string, 0, len(m.NormalizedTableSchema.PrimaryKeyColumns)) @@ -148,6 +161,16 @@ func (m *mergeStmtGenerator) generateMergeStmt(tempTable string) string { // _peerdb_target. = _peerdb_deduped. AND _peerdb_target. = _peerdb_deduped. ... 
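// Illustrative only, not emitted verbatim by this generator: with SoftDelete enabled and
// hypothetical columns `id` and `val`, the resulting MERGE is shaped roughly like
//   MERGE dataset.tbl _peerdb_target USING tmp _peerdb_deduped ON _peerdb_target.id = _peerdb_deduped.id
//   WHEN NOT MATCHED THEN
//     INSERT (`id`, `val`, `_PEERDB_SYNCED_AT`) VALUES (`id`, `val`, CURRENT_TIMESTAMP)
//   WHEN NOT MATCHED AND (_peerdb_deduped._PEERDB_RECORD_TYPE = 2) THEN
//     INSERT (`id`, `val`, `_PEERDB_SYNCED_AT`, `_PEERDB_IS_DELETED`) VALUES (`id`, `val`, CURRENT_TIMESTAMP, TRUE)
//   WHEN MATCHED AND (_peerdb_deduped._peerdb_record_type = 2) THEN
//     UPDATE SET `_PEERDB_IS_DELETED` = TRUE, `_PEERDB_SYNCED_AT` = CURRENT_TIMESTAMP;
// (toast-column UPDATE branches omitted; actual column names come from peerdbCols)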
pkeySelectSQL := strings.Join(pkeySelectSQLArray, " AND ") + deletePart := "DELETE" + if peerdbCols.SoftDelete { + colName := peerdbCols.SoftDeleteColName + deletePart = fmt.Sprintf("UPDATE SET %s = TRUE", colName) + if peerdbCols.SyncedAtColName != "" { + deletePart = fmt.Sprintf("%s, %s = CURRENT_TIMESTAMP", + deletePart, peerdbCols.SyncedAtColName) + } + } + return fmt.Sprintf(` MERGE %s.%s _peerdb_target USING %s _peerdb_deduped ON %s @@ -155,8 +178,9 @@ func (m *mergeStmtGenerator) generateMergeStmt(tempTable string) string { INSERT (%s) VALUES (%s) %s WHEN MATCHED AND (_peerdb_deduped._peerdb_record_type = 2) THEN - DELETE; - `, m.Dataset, m.NormalizedTable, tempTable, pkeySelectSQL, csep, csep, updateStringToastCols) + %s; + `, m.Dataset, m.NormalizedTable, tempTable, pkeySelectSQL, insertColumnsSQL, insertValuesSQL, + updateStringToastCols, deletePart) } /* @@ -174,7 +198,11 @@ and updating the other columns (not the unchanged toast columns) 6. Repeat steps 1-5 for each unique unchanged toast column group. 7. Return the list of generated update statements. */ -func (m *mergeStmtGenerator) generateUpdateStatements(allCols []string, unchangedToastCols []string) []string { +func (m *mergeStmtGenerator) generateUpdateStatements( + allCols []string, + unchangedToastCols []string, + peerdbCols *protos.PeerDBColumns, +) []string { updateStmts := make([]string, 0, len(unchangedToastCols)) for _, cols := range unchangedToastCols { @@ -184,6 +212,18 @@ func (m *mergeStmtGenerator) generateUpdateStatements(allCols []string, unchange for _, colName := range otherCols { tmpArray = append(tmpArray, fmt.Sprintf("`%s` = _peerdb_deduped.%s", colName, colName)) } + + // set the synced at column to the current timestamp + if peerdbCols.SyncedAtColName != "" { + tmpArray = append(tmpArray, fmt.Sprintf("`%s` = CURRENT_TIMESTAMP", + peerdbCols.SyncedAtColName)) + } + // set soft-deleted to false, tackles insert after soft-delete + if peerdbCols.SoftDeleteColName != "" { + tmpArray = append(tmpArray, fmt.Sprintf("`%s` = FALSE", + peerdbCols.SoftDeleteColName)) + } + ssep := strings.Join(tmpArray, ", ") updateStmt := fmt.Sprintf(`WHEN MATCHED AND (_peerdb_deduped._peerdb_record_type != 2) AND _peerdb_unchanged_toast_columns='%s' diff --git a/flow/connectors/bigquery/merge_stmt_generator_test.go b/flow/connectors/bigquery/merge_stmt_generator_test.go index 41e54114e6..47705167d6 100644 --- a/flow/connectors/bigquery/merge_stmt_generator_test.go +++ b/flow/connectors/bigquery/merge_stmt_generator_test.go @@ -4,6 +4,8 @@ import ( "reflect" "strings" "testing" + + "github.com/PeerDB-io/peer-flow/generated/protos" ) func TestGenerateUpdateStatement_WithUnchangedToastCols(t *testing.T) { @@ -16,21 +18,28 @@ func TestGenerateUpdateStatement_WithUnchangedToastCols(t *testing.T) { " AND _peerdb_unchanged_toast_columns='' " + "THEN UPDATE SET `col1` = _peerdb_deduped.col1," + " `col2` = _peerdb_deduped.col2," + - " `col3` = _peerdb_deduped.col3", + " `col3` = _peerdb_deduped.col3," + + "`synced_at`=CURRENT_TIMESTAMP," + "`deleted`=FALSE", "WHEN MATCHED AND (_peerdb_deduped._peerdb_record_type != 2)" + " AND _peerdb_unchanged_toast_columns='col2, col3' " + - "THEN UPDATE SET `col1` = _peerdb_deduped.col1", + "THEN UPDATE SET `col1` = _peerdb_deduped.col1," + + "`synced_at`=CURRENT_TIMESTAMP," + "`deleted`=FALSE", "WHEN MATCHED AND (_peerdb_deduped._peerdb_record_type != 2)" + " AND _peerdb_unchanged_toast_columns='col2'" + "THEN UPDATE SET `col1` = _peerdb_deduped.col1," + - " `col3` = _peerdb_deduped.col3", + " 
`col3` = _peerdb_deduped.col3," + + "`synced_at`=CURRENT_TIMESTAMP," + "`deleted`=FALSE", "WHEN MATCHED AND (_peerdb_deduped._peerdb_record_type != 2)" + " AND _peerdb_unchanged_toast_columns='col3'" + "THEN UPDATE SET `col1` = _peerdb_deduped.col1," + - " `col2` = _peerdb_deduped.col2", + " `col2` = _peerdb_deduped.col2," + "`synced_at`=CURRENT_TIMESTAMP," + "`deleted`=FALSE", } - result := m.generateUpdateStatements(allCols, unchangedToastCols) + result := m.generateUpdateStatements(allCols, unchangedToastCols, &protos.PeerDBColumns{ + SoftDelete: true, + SoftDeleteColName: "deleted", + SyncedAtColName: "synced_at", + }) for i := range expected { expected[i] = removeSpacesTabsNewlines(expected[i]) @@ -53,10 +62,17 @@ func TestGenerateUpdateStatement_NoUnchangedToastCols(t *testing.T) { "THEN UPDATE SET " + "`col1` = _peerdb_deduped.col1," + " `col2` = _peerdb_deduped.col2," + - " `col3` = _peerdb_deduped.col3", + " `col3` = _peerdb_deduped.col3," + + " `synced_at`=CURRENT_TIMESTAMP," + + "`deleted`=FALSE", } - result := m.generateUpdateStatements(allCols, unchangedToastCols) + result := m.generateUpdateStatements(allCols, unchangedToastCols, + &protos.PeerDBColumns{ + SoftDelete: true, + SoftDeleteColName: "deleted", + SyncedAtColName: "synced_at", + }) for i := range expected { expected[i] = removeSpacesTabsNewlines(expected[i]) diff --git a/flow/connectors/bigquery/qrep.go b/flow/connectors/bigquery/qrep.go index a353d432eb..bf1c603d43 100644 --- a/flow/connectors/bigquery/qrep.go +++ b/flow/connectors/bigquery/qrep.go @@ -46,7 +46,8 @@ func (c *BigQueryConnector) SyncQRepRecords( partition.PartitionId, destTable)) avroSync := &QRepAvroSyncMethod{connector: c, gcsBucket: config.StagingPath} - return avroSync.SyncQRepRecords(config.FlowJobName, destTable, partition, tblMetadata, stream) + return avroSync.SyncQRepRecords(config.FlowJobName, destTable, partition, + tblMetadata, stream, config.SyncedAtColName) } func (c *BigQueryConnector) replayTableSchemaDeltasQRep(config *protos.QRepConfig, partition *protos.QRepPartition, diff --git a/flow/connectors/bigquery/qrep_avro_sync.go b/flow/connectors/bigquery/qrep_avro_sync.go index 9bb01157fe..882f11a8c2 100644 --- a/flow/connectors/bigquery/qrep_avro_sync.go +++ b/flow/connectors/bigquery/qrep_avro_sync.go @@ -48,7 +48,7 @@ func (s *QRepAvroSyncMethod) SyncRecords( flowJobName, dstTableName, syncBatchID), ) // You will need to define your Avro schema as a string - avroSchema, err := DefineAvroSchema(dstTableName, dstTableMetadata) + avroSchema, err := DefineAvroSchema(dstTableName, dstTableMetadata, "") if err != nil { return 0, fmt.Errorf("failed to define Avro schema: %w", err) } @@ -107,6 +107,7 @@ func (s *QRepAvroSyncMethod) SyncQRepRecords( partition *protos.QRepPartition, dstTableMetadata *bigquery.TableMetadata, stream *model.QRecordStream, + syncedAtCol string, ) (int, error) { startTime := time.Now() flowLog := slog.Group("sync_metadata", @@ -115,7 +116,7 @@ func (s *QRepAvroSyncMethod) SyncQRepRecords( slog.String("destinationTable", dstTableName), ) // You will need to define your Avro schema as a string - avroSchema, err := DefineAvroSchema(dstTableName, dstTableMetadata) + avroSchema, err := DefineAvroSchema(dstTableName, dstTableMetadata, syncedAtCol) if err != nil { return 0, fmt.Errorf("failed to define Avro schema: %w", err) } @@ -137,9 +138,13 @@ func (s *QRepAvroSyncMethod) SyncQRepRecords( // Start a transaction stmts := []string{"BEGIN TRANSACTION;"} + selector := "*" + if syncedAtCol != "" { // PeerDB column + 
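// Sketch of the resulting statement, assuming hypothetical dataset `ds`, destination
// table `dst` and staging table `stg`: appending CURRENT_TIMESTAMP to the SELECT list
// fills the synced-at column that DefineAvroSchema skipped during the staging load:
//   INSERT INTO `ds.dst` SELECT *, CURRENT_TIMESTAMP FROM `ds.stg`;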
selector = "*, CURRENT_TIMESTAMP" + } // Insert the records from the staging table into the destination table - insertStmt := fmt.Sprintf("INSERT INTO `%s.%s` SELECT * FROM `%s.%s`;", - datasetID, dstTableName, datasetID, stagingTable) + insertStmt := fmt.Sprintf("INSERT INTO `%s.%s` SELECT %s FROM `%s.%s`;", + datasetID, dstTableName, selector, datasetID, stagingTable) stmts = append(stmts, insertStmt) @@ -181,11 +186,15 @@ type AvroSchema struct { func DefineAvroSchema(dstTableName string, dstTableMetadata *bigquery.TableMetadata, + syncedAtCol string, ) (*model.QRecordAvroSchemaDefinition, error) { avroFields := []AvroField{} nullableFields := make(map[string]struct{}) for _, bqField := range dstTableMetadata.Schema { + if bqField.Name == syncedAtCol { + continue + } avroType, err := GetAvroType(bqField) if err != nil { return nil, err diff --git a/flow/connectors/postgres/client.go b/flow/connectors/postgres/client.go index 77a5413de7..5e00ca8a7f 100644 --- a/flow/connectors/postgres/client.go +++ b/flow/connectors/postgres/client.go @@ -15,6 +15,7 @@ import ( "github.com/jackc/pglogrepl" "github.com/jackc/pgx/v5" "github.com/jackc/pgx/v5/pgtype" + "github.com/lib/pq/oid" "golang.org/x/exp/maps" ) @@ -58,7 +59,7 @@ const ( INSERT (%s) VALUES (%s) %s WHEN MATCHED AND src._peerdb_record_type=2 THEN - DELETE` + %s` fallbackUpsertStatementSQL = `WITH src_rank AS ( SELECT _peerdb_data,_peerdb_record_type,_peerdb_unchanged_toast_columns, RANK() OVER (PARTITION BY %s ORDER BY _peerdb_timestamp DESC) AS _peerdb_rank @@ -71,12 +72,21 @@ const ( RANK() OVER (PARTITION BY %s ORDER BY _peerdb_timestamp DESC) AS _peerdb_rank FROM %s.%s WHERE _peerdb_batch_id>$1 AND _peerdb_batch_id<=$2 AND _peerdb_destination_table_name=$3 ) - DELETE FROM %s USING src_rank WHERE %s AND src_rank._peerdb_rank=1 AND src_rank._peerdb_record_type=2` + %s src_rank WHERE %s AND src_rank._peerdb_rank=1 AND src_rank._peerdb_record_type=2` dropTableIfExistsSQL = "DROP TABLE IF EXISTS %s.%s" deleteJobMetadataSQL = "DELETE FROM %s.%s WHERE MIRROR_JOB_NAME=$1" ) +type ReplicaIdentityType rune + +const ( + ReplicaIdentityDefault ReplicaIdentityType = 'd' + ReplicaIdentityFull = 'f' + ReplicaIdentityIndex = 'i' + ReplicaIdentityNothing = 'n' +) + // getRelIDForTable returns the relation ID for a table. func (c *PostgresConnector) getRelIDForTable(schemaTable *utils.SchemaTable) (uint32, error) { var relID pgtype.Uint32 @@ -92,10 +102,10 @@ func (c *PostgresConnector) getRelIDForTable(schemaTable *utils.SchemaTable) (ui } // getReplicaIdentity returns the replica identity for a table. 
-func (c *PostgresConnector) isTableFullReplica(schemaTable *utils.SchemaTable) (bool, error) { +func (c *PostgresConnector) getReplicaIdentityType(schemaTable *utils.SchemaTable) (ReplicaIdentityType, error) { relID, relIDErr := c.getRelIDForTable(schemaTable) if relIDErr != nil { - return false, fmt.Errorf("failed to get relation id for table %s: %w", schemaTable, relIDErr) + return ReplicaIdentityDefault, fmt.Errorf("failed to get relation id for table %s: %w", schemaTable, relIDErr) } var replicaIdentity rune @@ -103,43 +113,76 @@ func (c *PostgresConnector) isTableFullReplica(schemaTable *utils.SchemaTable) ( `SELECT relreplident FROM pg_class WHERE oid = $1;`, relID).Scan(&replicaIdentity) if err != nil { - return false, fmt.Errorf("error getting replica identity for table %s: %w", schemaTable, err) + return ReplicaIdentityDefault, fmt.Errorf("error getting replica identity for table %s: %w", schemaTable, err) } - return string(replicaIdentity) == "f", nil + + return ReplicaIdentityType(replicaIdentity), nil } -// getPrimaryKeyColumns for table returns the primary key column for a given table -// errors if there is no primary key column or if there is more than one primary key column. -func (c *PostgresConnector) getPrimaryKeyColumns(schemaTable *utils.SchemaTable) ([]string, error) { +// getPrimaryKeyColumns returns the primary key columns for a given table. +// Errors if there is no primary key column or if there is more than one primary key column. +func (c *PostgresConnector) getPrimaryKeyColumns( + replicaIdentity ReplicaIdentityType, + schemaTable *utils.SchemaTable, +) ([]string, error) { relID, err := c.getRelIDForTable(schemaTable) if err != nil { return nil, fmt.Errorf("failed to get relation id for table %s: %w", schemaTable, err) } - // Get the primary key column name - var pkCol pgtype.Text - pkCols := make([]string, 0) + if replicaIdentity == ReplicaIdentityIndex { + return c.getReplicaIdentityIndexColumns(relID, schemaTable) + } + + // Find the primary key index OID + var pkIndexOID oid.Oid + err = c.pool.QueryRow(c.ctx, + `SELECT indexrelid FROM pg_index WHERE indrelid = $1 AND indisprimary`, + relID).Scan(&pkIndexOID) + if err != nil { + return nil, fmt.Errorf("error finding primary key index for table %s: %w", schemaTable, err) + } + + return c.getColumnNamesForIndex(pkIndexOID) +} + +// getReplicaIdentityIndexColumns returns the columns used in the replica identity index. +func (c *PostgresConnector) getReplicaIdentityIndexColumns(relID uint32, schemaTable *utils.SchemaTable) ([]string, error) { + var indexRelID oid.Oid + // Fetch the OID of the index used as the replica identity + err := c.pool.QueryRow(c.ctx, + `SELECT indexrelid FROM pg_index + WHERE indrelid = $1 AND indisreplident = true`, + relID).Scan(&indexRelID) + if err != nil { + return nil, fmt.Errorf("error finding replica identity index for table %s: %w", schemaTable, err) + } + + return c.getColumnNamesForIndex(indexRelID) +} + +// getColumnNamesForIndex returns the column names for a given index. 
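// The lookup joins pg_index with pg_attribute on the indexed relation; for example,
// with a hypothetical index OID of 16402 it boils down to:
//   SELECT a.attname FROM pg_index i
//     JOIN pg_attribute a ON a.attrelid = i.indrelid AND a.attnum = ANY(i.indkey)
//   WHERE i.indexrelid = 16402 ORDER BY a.attname ASC;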
+func (c *PostgresConnector) getColumnNamesForIndex(indexOID oid.Oid) ([]string, error) { + var col pgtype.Text + cols := make([]string, 0) rows, err := c.pool.Query(c.ctx, `SELECT a.attname FROM pg_index i JOIN pg_attribute a ON a.attrelid = i.indrelid AND a.attnum = ANY(i.indkey) - WHERE i.indrelid = $1 AND i.indisprimary ORDER BY a.attname ASC`, - relID) + WHERE i.indexrelid = $1 ORDER BY a.attname ASC`, + indexOID) if err != nil { - return nil, fmt.Errorf("error getting primary key column for table %s: %w", schemaTable, err) + return nil, fmt.Errorf("error getting columns for index %v: %w", indexOID, err) } defer rows.Close() - for { - if !rows.Next() { - break - } - err = rows.Scan(&pkCol) + + for rows.Next() { + err = rows.Scan(&col) if err != nil { - return nil, fmt.Errorf("error scanning primary key column for table %s: %w", schemaTable, err) + return nil, fmt.Errorf("error scanning column for index %v: %w", indexOID, err) } - pkCols = append(pkCols, pkCol.String) + cols = append(cols, col.String) } - - return pkCols, nil + return cols, nil } func (c *PostgresConnector) tableExists(schemaTable *utils.SchemaTable) (bool, error) { @@ -346,15 +389,28 @@ func getRawTableIdentifier(jobName string) string { return fmt.Sprintf("%s_%s", rawTablePrefix, strings.ToLower(jobName)) } -func generateCreateTableSQLForNormalizedTable(sourceTableIdentifier string, +func generateCreateTableSQLForNormalizedTable( + sourceTableIdentifier string, sourceTableSchema *protos.TableSchema, + softDeleteColName string, + syncedAtColName string, ) string { - createTableSQLArray := make([]string, 0, len(sourceTableSchema.Columns)) + createTableSQLArray := make([]string, 0, len(sourceTableSchema.Columns)+2) for columnName, genericColumnType := range sourceTableSchema.Columns { createTableSQLArray = append(createTableSQLArray, fmt.Sprintf("\"%s\" %s,", columnName, qValueKindToPostgresType(genericColumnType))) } + if softDeleteColName != "" { + createTableSQLArray = append(createTableSQLArray, + fmt.Sprintf(`"%s" BOOL DEFAULT FALSE,`, softDeleteColName)) + } + + if syncedAtColName != "" { + createTableSQLArray = append(createTableSQLArray, + fmt.Sprintf(`"%s" TIMESTAMP DEFAULT CURRENT_TIMESTAMP,`, syncedAtColName)) + } + // add composite primary key to the table if len(sourceTableSchema.PrimaryKeyColumns) > 0 { primaryKeyColsQuoted := make([]string, 0, len(sourceTableSchema.PrimaryKeyColumns)) @@ -523,17 +579,19 @@ func (c *PostgresConnector) getTableNametoUnchangedCols(flowJobName string, sync func (c *PostgresConnector) generateNormalizeStatements(destinationTableIdentifier string, unchangedToastColumns []string, rawTableIdentifier string, supportsMerge bool, + peerdbCols *protos.PeerDBColumns, ) []string { if supportsMerge { - return []string{c.generateMergeStatement(destinationTableIdentifier, unchangedToastColumns, rawTableIdentifier)} + return []string{c.generateMergeStatement(destinationTableIdentifier, unchangedToastColumns, + rawTableIdentifier, peerdbCols)} } c.logger.Warn("Postgres version is not high enough to support MERGE, falling back to UPSERT + DELETE") c.logger.Warn("TOAST columns will not be updated properly, use REPLICA IDENTITY FULL or upgrade Postgres") - return c.generateFallbackStatements(destinationTableIdentifier, rawTableIdentifier) + return c.generateFallbackStatements(destinationTableIdentifier, rawTableIdentifier, peerdbCols) } func (c *PostgresConnector) generateFallbackStatements(destinationTableIdentifier string, - rawTableIdentifier string, + rawTableIdentifier string, peerdbCols 
*protos.PeerDBColumns, ) []string { normalizedTableSchema := c.tableSchemaMapping[destinationTableIdentifier] columnNames := make([]string, 0, len(normalizedTableSchema.Columns)) @@ -569,20 +627,35 @@ func (c *PostgresConnector) generateFallbackStatements(destinationTableIdentifie parsedDstTable.String(), columnName, columnCast)) } deleteWhereClauseSQL := strings.TrimSuffix(strings.Join(deleteWhereClauseArray, ""), "AND ") - + deletePart := fmt.Sprintf( + "DELETE FROM %s USING", + parsedDstTable.String()) + + if peerdbCols.SoftDelete { + deletePart = fmt.Sprintf(`UPDATE %s SET "%s" = TRUE`, + parsedDstTable.String(), peerdbCols.SoftDeleteColName) + if peerdbCols.SyncedAtColName != "" { + deletePart = fmt.Sprintf(`%s, "%s" = CURRENT_TIMESTAMP`, + deletePart, peerdbCols.SyncedAtColName) + } + deletePart += " FROM" + } fallbackUpsertStatement := fmt.Sprintf(fallbackUpsertStatementSQL, strings.TrimSuffix(strings.Join(maps.Values(primaryKeyColumnCasts), ","), ","), c.metadataSchema, rawTableIdentifier, parsedDstTable.String(), insertColumnsSQL, flattenedCastsSQL, strings.Join(normalizedTableSchema.PrimaryKeyColumns, ","), updateColumnsSQL) fallbackDeleteStatement := fmt.Sprintf(fallbackDeleteStatementSQL, strings.Join(maps.Values(primaryKeyColumnCasts), ","), c.metadataSchema, - rawTableIdentifier, parsedDstTable.String(), deleteWhereClauseSQL) + rawTableIdentifier, deletePart, deleteWhereClauseSQL) return []string{fallbackUpsertStatement, fallbackDeleteStatement} } -func (c *PostgresConnector) generateMergeStatement(destinationTableIdentifier string, unchangedToastColumns []string, +func (c *PostgresConnector) generateMergeStatement( + destinationTableIdentifier string, + unchangedToastColumns []string, rawTableIdentifier string, + peerdbCols *protos.PeerDBColumns, ) string { normalizedTableSchema := c.tableSchemaMapping[destinationTableIdentifier] columnNames := maps.Keys(normalizedTableSchema.Columns) @@ -612,21 +685,60 @@ func (c *PostgresConnector) generateMergeStatement(destinationTableIdentifier st } } flattenedCastsSQL := strings.TrimSuffix(strings.Join(flattenedCastsSQLArray, ","), ",") - - insertColumnsSQL := strings.TrimSuffix(strings.Join(columnNames, ","), ",") insertValuesSQLArray := make([]string, 0, len(columnNames)) for _, columnName := range columnNames { insertValuesSQLArray = append(insertValuesSQLArray, fmt.Sprintf("src.%s", columnName)) } + + updateStatementsforToastCols := c.generateUpdateStatement(columnNames, unchangedToastColumns, peerdbCols) + // append synced_at column + columnNames = append(columnNames, fmt.Sprintf(`"%s"`, peerdbCols.SyncedAtColName)) + insertColumnsSQL := strings.Join(columnNames, ",") + // fill in synced_at column + insertValuesSQLArray = append(insertValuesSQLArray, "CURRENT_TIMESTAMP") insertValuesSQL := strings.TrimSuffix(strings.Join(insertValuesSQLArray, ","), ",") - updateStatements := c.generateUpdateStatement(columnNames, unchangedToastColumns) - return fmt.Sprintf(mergeStatementSQL, strings.Join(maps.Values(primaryKeyColumnCasts), ","), - c.metadataSchema, rawTableIdentifier, parsedDstTable.String(), flattenedCastsSQL, - strings.Join(primaryKeySelectSQLArray, " AND "), insertColumnsSQL, insertValuesSQL, updateStatements) + if peerdbCols.SoftDelete { + softDeleteInsertColumnsSQL := strings.TrimSuffix(strings.Join(append(columnNames, + fmt.Sprintf(`"%s"`, peerdbCols.SoftDeleteColName)), ","), ",") + softDeleteInsertValuesSQL := strings.Join(append(insertValuesSQLArray, "TRUE"), ",") + + updateStatementsforToastCols = 
append(updateStatementsforToastCols, + fmt.Sprintf("WHEN NOT MATCHED AND (src._peerdb_record_type = 2) THEN INSERT (%s) VALUES(%s)", + softDeleteInsertColumnsSQL, softDeleteInsertValuesSQL)) + } + updateStringToastCols := strings.Join(updateStatementsforToastCols, "\n") + + deletePart := "DELETE" + if peerdbCols.SoftDelete { + colName := peerdbCols.SoftDeleteColName + deletePart = fmt.Sprintf(`UPDATE SET "%s" = TRUE`, colName) + if peerdbCols.SyncedAtColName != "" { + deletePart = fmt.Sprintf(`%s, "%s" = CURRENT_TIMESTAMP`, + deletePart, peerdbCols.SyncedAtColName) + } + } + + mergeStmt := fmt.Sprintf( + mergeStatementSQL, + strings.Join(maps.Values(primaryKeyColumnCasts), ","), + c.metadataSchema, + rawTableIdentifier, + parsedDstTable.String(), + flattenedCastsSQL, + strings.Join(primaryKeySelectSQLArray, " AND "), + insertColumnsSQL, + insertValuesSQL, + updateStringToastCols, + deletePart, + ) + + return mergeStmt } -func (c *PostgresConnector) generateUpdateStatement(allCols []string, unchangedToastColsLists []string) string { +func (c *PostgresConnector) generateUpdateStatement(allCols []string, + unchangedToastColsLists []string, peerdbCols *protos.PeerDBColumns, +) []string { updateStmts := make([]string, 0, len(unchangedToastColsLists)) for _, cols := range unchangedToastColsLists { @@ -640,13 +752,24 @@ func (c *PostgresConnector) generateUpdateStatement(allCols []string, unchangedT for _, colName := range otherCols { tmpArray = append(tmpArray, fmt.Sprintf("%s=src.%s", colName, colName)) } + // set the synced at column to the current timestamp + if peerdbCols.SyncedAtColName != "" { + tmpArray = append(tmpArray, fmt.Sprintf(`"%s" = CURRENT_TIMESTAMP`, + peerdbCols.SyncedAtColName)) + } + // set soft-deleted to false, tackles insert after soft-delete + if peerdbCols.SoftDeleteColName != "" { + tmpArray = append(tmpArray, fmt.Sprintf(`"%s" = FALSE`, + peerdbCols.SoftDeleteColName)) + } + ssep := strings.Join(tmpArray, ",") updateStmt := fmt.Sprintf(`WHEN MATCHED AND src._peerdb_record_type=1 AND _peerdb_unchanged_toast_columns='%s' THEN UPDATE SET %s `, cols, ssep) updateStmts = append(updateStmts, updateStmt) } - return strings.Join(updateStmts, "\n") + return updateStmts } func (c *PostgresConnector) getCurrentLSN() (pglogrepl.LSN, error) { diff --git a/flow/connectors/postgres/postgres.go b/flow/connectors/postgres/postgres.go index 20dd2a5a71..cad914706f 100644 --- a/flow/connectors/postgres/postgres.go +++ b/flow/connectors/postgres/postgres.go @@ -442,8 +442,13 @@ func (c *PostgresConnector) NormalizeRecords(req *model.NormalizeRecordsRequest) mergeStatementsBatch := &pgx.Batch{} totalRowsAffected := 0 for destinationTableName, unchangedToastCols := range unchangedToastColsMap { + peerdbCols := protos.PeerDBColumns{ + SoftDeleteColName: req.SoftDeleteColName, + SyncedAtColName: req.SyncedAtColName, + SoftDelete: req.SoftDelete, + } normalizeStatements := c.generateNormalizeStatements(destinationTableName, unchangedToastCols, - rawTableIdentifier, supportsMerge) + rawTableIdentifier, supportsMerge, &peerdbCols) for _, normalizeStatement := range normalizeStatements { mergeStatementsBatch.Queue(normalizeStatement, normalizeBatchID, syncBatchID, destinationTableName).Exec( func(ct pgconn.CommandTag) error { @@ -553,12 +558,12 @@ func (c *PostgresConnector) getTableSchemaForTable( return nil, err } - isFullReplica, replErr := c.isTableFullReplica(schemaTable) + replicaIdentityType, replErr := c.getReplicaIdentityType(schemaTable) if replErr != nil { return nil, fmt.Errorf("error 
getting replica identity for table %s: %w", schemaTable, replErr) } - pKeyCols, err := c.getPrimaryKeyColumns(schemaTable) + pKeyCols, err := c.getPrimaryKeyColumns(replicaIdentityType, schemaTable) if err != nil { return nil, fmt.Errorf("error getting primary key column for table %s: %w", schemaTable, err) } @@ -576,7 +581,7 @@ func (c *PostgresConnector) getTableSchemaForTable( TableIdentifier: tableName, Columns: make(map[string]string), PrimaryKeyColumns: pKeyCols, - IsReplicaIdentityFull: isFullReplica, + IsReplicaIdentityFull: replicaIdentityType == ReplicaIdentityFull, } for _, fieldDescription := range rows.FieldDescriptions() { @@ -634,7 +639,7 @@ func (c *PostgresConnector) SetupNormalizedTables(req *protos.SetupNormalizedTab // convert the column names and types to Postgres types normalizedTableCreateSQL := generateCreateTableSQLForNormalizedTable( - parsedNormalizedTable.String(), tableSchema) + parsedNormalizedTable.String(), tableSchema, req.SoftDeleteColName, req.SyncedAtColName) _, err = createNormalizedTablesTx.Exec(c.ctx, normalizedTableCreateSQL) if err != nil { return nil, fmt.Errorf("error while creating normalized table: %w", err) @@ -726,18 +731,18 @@ func (c *PostgresConnector) EnsurePullability(req *protos.EnsurePullabilityBatch return nil, err } - isFullReplica, replErr := c.isTableFullReplica(schemaTable) + replicaIdentity, replErr := c.getReplicaIdentityType(schemaTable) if replErr != nil { return nil, fmt.Errorf("error getting replica identity for table %s: %w", schemaTable, replErr) } - pKeyCols, err := c.getPrimaryKeyColumns(schemaTable) + pKeyCols, err := c.getPrimaryKeyColumns(replicaIdentity, schemaTable) if err != nil { return nil, fmt.Errorf("error getting primary key column for table %s: %w", schemaTable, err) } // we only allow no primary key if the table has REPLICA IDENTITY FULL - if len(pKeyCols) == 0 && !isFullReplica { + if len(pKeyCols) == 0 && !(replicaIdentity == ReplicaIdentityFull) { return nil, fmt.Errorf("table %s has no primary keys and does not have REPLICA IDENTITY FULL", schemaTable) } diff --git a/flow/connectors/postgres/qrep.go b/flow/connectors/postgres/qrep.go index be8daa903d..ce114b702b 100644 --- a/flow/connectors/postgres/qrep.go +++ b/flow/connectors/postgres/qrep.go @@ -471,7 +471,8 @@ func (c *PostgresConnector) SyncQRepRecords( stagingTableSync := &QRepStagingTableSync{connector: c} return stagingTableSync.SyncQRepRecords( - config.FlowJobName, dstTable, partition, stream, config.WriteMode) + config.FlowJobName, dstTable, partition, stream, + config.WriteMode, config.SyncedAtColName) } // SetupQRepMetadataTables function for postgres connector diff --git a/flow/connectors/postgres/qrep_sync_method.go b/flow/connectors/postgres/qrep_sync_method.go index a54769e3d8..6725032411 100644 --- a/flow/connectors/postgres/qrep_sync_method.go +++ b/flow/connectors/postgres/qrep_sync_method.go @@ -35,6 +35,7 @@ func (s *QRepStagingTableSync) SyncQRepRecords( partition *protos.QRepPartition, stream *model.QRecordStream, writeMode *protos.QRepWriteMode, + syncedAtCol string, ) (int, error) { syncLog := slog.Group("sync-qrep-log", slog.String(string(shared.FlowNameKey), flowJobName), @@ -81,6 +82,19 @@ func (s *QRepStagingTableSync) SyncQRepRecords( if err != nil { return -1, fmt.Errorf("failed to copy records into destination table: %v", err) } + + if syncedAtCol != "" { + updateSyncedAtStmt := fmt.Sprintf( + `UPDATE %s SET "%s" = CURRENT_TIMESTAMP WHERE "%s" IS NULL;`, + pgx.Identifier{dstTableName.Schema, 
dstTableName.Table}.Sanitize(), + syncedAtCol, + syncedAtCol, + ) + _, err = tx.Exec(context.Background(), updateSyncedAtStmt) + if err != nil { + return -1, fmt.Errorf("failed to update synced_at column: %v", err) + } + } } else { // Step 2.1: Create a temp staging table stagingTableName := fmt.Sprintf("_peerdb_staging_%s", shared.RandomString(8)) @@ -128,16 +142,18 @@ func (s *QRepStagingTableSync) SyncQRepRecords( } selectStrArray = append(selectStrArray, fmt.Sprintf(`"%s"`, col)) } - + setClauseArray = append(setClauseArray, + fmt.Sprintf(`"%s" = CURRENT_TIMESTAMP`, syncedAtCol)) setClause := strings.Join(setClauseArray, ",") - selectStr := strings.Join(selectStrArray, ",") + selectSQL := strings.Join(selectStrArray, ",") // Step 2.3: Perform the upsert operation, ON CONFLICT UPDATE upsertStmt := fmt.Sprintf( - "INSERT INTO %s (%s) SELECT %s FROM %s ON CONFLICT (%s) DO UPDATE SET %s;", + `INSERT INTO %s (%s, "%s") SELECT %s, CURRENT_TIMESTAMP FROM %s ON CONFLICT (%s) DO UPDATE SET %s;`, dstTableIdentifier.Sanitize(), - selectStr, - selectStr, + selectSQL, + syncedAtCol, + selectSQL, stagingTableIdentifier.Sanitize(), strings.Join(writeMode.UpsertKeyColumns, ", "), setClause, diff --git a/flow/connectors/snowflake/qrep.go b/flow/connectors/snowflake/qrep.go index b099a54ff4..98d20b63ff 100644 --- a/flow/connectors/snowflake/qrep.go +++ b/flow/connectors/snowflake/qrep.go @@ -249,7 +249,7 @@ func (c *SnowflakeConnector) createExternalStage(stageName string, config *proto } func (c *SnowflakeConnector) ConsolidateQRepPartitions(config *protos.QRepConfig) error { - c.logger.Error("Consolidating partitions") + c.logger.Info("Consolidating partitions") destTable := config.DestinationTableIdentifier stageName := c.getStageNameForJob(config.FlowJobName) @@ -272,7 +272,7 @@ func (c *SnowflakeConnector) ConsolidateQRepPartitions(config *protos.QRepConfig // CleanupQRepFlow function for snowflake connector func (c *SnowflakeConnector) CleanupQRepFlow(config *protos.QRepConfig) error { - c.logger.Error("Cleaning up flow job") + c.logger.Info("Cleaning up flow job") return c.dropStage(config.StagingPath, config.FlowJobName) } diff --git a/flow/connectors/snowflake/qrep_avro_sync.go b/flow/connectors/snowflake/qrep_avro_sync.go index eb83b554b1..7184898ae3 100644 --- a/flow/connectors/snowflake/qrep_avro_sync.go +++ b/flow/connectors/snowflake/qrep_avro_sync.go @@ -300,6 +300,7 @@ func (s *SnowflakeAvroSyncMethod) putFileToStage(avroFile *avro.AvroFile, stage func (c *SnowflakeConnector) GetCopyTransformation( dstTableName string, + syncedAtCol string, ) (*CopyInfo, error) { colInfo, colsErr := c.getColsFromTable(dstTableName) if colsErr != nil { @@ -310,6 +311,10 @@ func (c *SnowflakeConnector) GetCopyTransformation( columnOrder := make([]string, 0, len(colInfo.ColumnMap)) for colName, colType := range colInfo.ColumnMap { columnOrder = append(columnOrder, fmt.Sprintf("\"%s\"", colName)) + if colName == syncedAtCol { + transformations = append(transformations, fmt.Sprintf("CURRENT_TIMESTAMP AS \"%s\"", colName)) + continue + } switch colType { case "GEOGRAPHY": transformations = append(transformations, @@ -354,7 +359,7 @@ func CopyStageToDestination( } } - copyTransformation, err := connector.GetCopyTransformation(dstTableName) + copyTransformation, err := connector.GetCopyTransformation(dstTableName, config.SyncedAtColName) if err != nil { return fmt.Errorf("failed to get copy transformation: %w", err) } diff --git a/flow/connectors/snowflake/snowflake.go b/flow/connectors/snowflake/snowflake.go 
index 90b29e4765..f92ed3e33e 100644 --- a/flow/connectors/snowflake/snowflake.go +++ b/flow/connectors/snowflake/snowflake.go @@ -751,7 +751,7 @@ func generateCreateTableSQLForNormalizedTable( softDeleteColName string, syncedAtColName string, ) string { - createTableSQLArray := make([]string, 0, len(sourceTableSchema.Columns)) + createTableSQLArray := make([]string, 0, len(sourceTableSchema.Columns)+2) for columnName, genericColumnType := range sourceTableSchema.Columns { columnNameUpper := strings.ToUpper(columnName) sfColType, err := qValueKindToSnowflakeType(qvalue.QValueKind(genericColumnType)) @@ -847,17 +847,21 @@ func (c *SnowflakeConnector) generateAndExecuteMergeStatement( for _, columnName := range columnNames { quotedUpperColNames = append(quotedUpperColNames, fmt.Sprintf(`"%s"`, strings.ToUpper(columnName))) } + // append synced_at column + quotedUpperColNames = append(quotedUpperColNames, + fmt.Sprintf(`"%s"`, strings.ToUpper(normalizeReq.SyncedAtColName)), + ) insertColumnsSQL := strings.TrimSuffix(strings.Join(quotedUpperColNames, ","), ",") insertValuesSQLArray := make([]string, 0, len(columnNames)) for _, columnName := range columnNames { quotedUpperColumnName := fmt.Sprintf(`"%s"`, strings.ToUpper(columnName)) - insertValuesSQLArray = append(insertValuesSQLArray, fmt.Sprintf("SOURCE.%s,", quotedUpperColumnName)) + insertValuesSQLArray = append(insertValuesSQLArray, fmt.Sprintf("SOURCE.%s", quotedUpperColumnName)) } - - insertValuesSQL := strings.TrimSuffix(strings.Join(insertValuesSQLArray, ""), ",") - + // fill in synced_at column + insertValuesSQLArray = append(insertValuesSQLArray, "CURRENT_TIMESTAMP") + insertValuesSQL := strings.Join(insertValuesSQLArray, ",") updateStatementsforToastCols := c.generateUpdateStatements(normalizeReq.SyncedAtColName, normalizeReq.SoftDeleteColName, normalizeReq.SoftDelete, columnNames, unchangedToastColumns) @@ -866,10 +870,9 @@ func (c *SnowflakeConnector) generateAndExecuteMergeStatement( // with soft-delete, we want the row to be in the destination with SOFT_DELETE true // the current merge statement doesn't do that, so we add another case to insert the DeleteRecord if normalizeReq.SoftDelete { - softDeleteInsertColumnsSQL := strings.TrimSuffix(strings.Join(append(quotedUpperColNames, - normalizeReq.SoftDeleteColName), ","), ",") - softDeleteInsertValuesSQL := strings.Join(append(insertValuesSQLArray, "TRUE"), "") - + softDeleteInsertColumnsSQL := strings.Join(append(quotedUpperColNames, + normalizeReq.SoftDeleteColName), ",") + softDeleteInsertValuesSQL := insertValuesSQL + ",TRUE" updateStatementsforToastCols = append(updateStatementsforToastCols, fmt.Sprintf("WHEN NOT MATCHED AND (SOURCE._PEERDB_RECORD_TYPE = 2) THEN INSERT (%s) VALUES(%s)", softDeleteInsertColumnsSQL, softDeleteInsertValuesSQL)) diff --git a/flow/e2e/bigquery/peer_flow_bq_test.go b/flow/e2e/bigquery/peer_flow_bq_test.go index de3ddae7e5..b28577f4d3 100644 --- a/flow/e2e/bigquery/peer_flow_bq_test.go +++ b/flow/e2e/bigquery/peer_flow_bq_test.go @@ -9,6 +9,7 @@ import ( "time" "github.com/PeerDB-io/peer-flow/e2e" + "github.com/PeerDB-io/peer-flow/model/qvalue" "github.com/PeerDB-io/peer-flow/shared" peerflow "github.com/PeerDB-io/peer-flow/workflows" "github.com/jackc/pgx/v5/pgxpool" @@ -51,6 +52,50 @@ func (s PeerFlowE2ETestSuiteBQ) attachSuffix(input string) string { return fmt.Sprintf("%s_%s", input, s.bqSuffix) } +func (s *PeerFlowE2ETestSuiteBQ) checkPeerdbColumns(dstQualified string, softDelete bool) error { + qualifiedTableName := fmt.Sprintf("`%s.%s`", 
s.bqHelper.Config.DatasetId, dstQualified) + selector := "`_PEERDB_SYNCED_AT`" + if softDelete { + selector += ", `_PEERDB_IS_DELETED`" + } + query := fmt.Sprintf("SELECT %s FROM %s", + selector, qualifiedTableName) + + recordBatch, err := s.bqHelper.ExecuteAndProcessQuery(query) + if err != nil { + return err + } + + recordCount := 0 + + for _, record := range recordBatch.Records { + for _, entry := range record.Entries { + if entry.Kind == qvalue.QValueKindBoolean { + isDeleteVal, ok := entry.Value.(bool) + if !(ok && isDeleteVal) { + return fmt.Errorf("peerdb column failed: _PEERDB_IS_DELETED is not true") + } + recordCount += 1 + } + + if entry.Kind == qvalue.QValueKindTimestamp { + _, ok := entry.Value.(time.Time) + if !ok { + return fmt.Errorf("peerdb column failed: _PEERDB_SYNCED_AT is not valid") + } + + recordCount += 1 + } + } + } + + if recordCount == 0 { + return fmt.Errorf("peerdb column check failed: no records found") + } + + return nil +} + // setupBigQuery sets up the bigquery connection. func setupBigQuery(t *testing.T) *BigQueryTestHelper { bqHelper, err := NewBigQueryTestHelper() @@ -1095,3 +1140,66 @@ func (s PeerFlowE2ETestSuiteBQ) Test_Composite_PKey_Toast_2_BQ() { env.AssertExpectations(s.t) } + +func (s PeerFlowE2ETestSuiteBQ) Test_Columns_BQ() { + env := e2e.NewTemporalTestWorkflowEnvironment() + e2e.RegisterWorkflowsAndActivities(env, s.t) + + srcTableName := s.attachSchemaSuffix("test_peerdb_cols") + dstTableName := "test_peerdb_cols_dst" + _, err := s.pool.Exec(context.Background(), fmt.Sprintf(` + CREATE TABLE IF NOT EXISTS %s ( + id SERIAL PRIMARY KEY, + key TEXT NOT NULL, + value TEXT NOT NULL + ); + `, srcTableName)) + require.NoError(s.t, err) + + connectionGen := e2e.FlowConnectionGenerationConfig{ + FlowJobName: s.attachSuffix("test_peerdb_cols_mirror"), + TableNameMapping: map[string]string{srcTableName: dstTableName}, + PostgresPort: e2e.PostgresPort, + Destination: s.bqHelper.Peer, + SoftDelete: true, + } + + flowConnConfig, err := connectionGen.GenerateFlowConnectionConfigs() + require.NoError(s.t, err) + + limits := peerflow.CDCFlowLimits{ + ExitAfterRecords: 2, + MaxBatchSize: 100, + } + + go func() { + e2e.SetupCDCFlowStatusQuery(env, connectionGen) + // insert 1 row into the source table + testKey := fmt.Sprintf("test_key_%d", 1) + testValue := fmt.Sprintf("test_value_%d", 1) + _, err = s.pool.Exec(context.Background(), fmt.Sprintf(` + INSERT INTO %s(key, value) VALUES ($1, $2) + `, srcTableName), testKey, testValue) + require.NoError(s.t, err) + + // delete that row + _, err = s.pool.Exec(context.Background(), fmt.Sprintf(` + DELETE FROM %s WHERE id=1 + `, srcTableName)) + require.NoError(s.t, err) + }() + + env.ExecuteWorkflow(peerflow.CDCFlowWorkflowWithConfig, flowConnConfig, &limits, nil) + + // Verify workflow completes without error + s.True(env.IsWorkflowCompleted()) + err = env.GetWorkflowError() + + // allow only continue as new error + require.Contains(s.t, err.Error(), "continue as new") + + err = s.checkPeerdbColumns(dstTableName, true) + require.NoError(s.t, err) + + env.AssertExpectations(s.t) +} diff --git a/flow/e2e/bigquery/qrep_flow_bq_test.go b/flow/e2e/bigquery/qrep_flow_bq_test.go index f520014b04..ca74a412c3 100644 --- a/flow/e2e/bigquery/qrep_flow_bq_test.go +++ b/flow/e2e/bigquery/qrep_flow_bq_test.go @@ -10,7 +10,7 @@ import ( ) func (s PeerFlowE2ETestSuiteBQ) setupSourceTable(tableName string, rowCount int) { - err := e2e.CreateSourceTableQRep(s.pool, s.bqSuffix, tableName) + err := e2e.CreateTableForQRep(s.pool, 
s.bqSuffix, tableName) require.NoError(s.t, err) err = e2e.PopulateSourceTable(s.pool, s.bqSuffix, tableName, rowCount) require.NoError(s.t, err) @@ -64,6 +64,8 @@ func (s PeerFlowE2ETestSuiteBQ) Test_Complete_QRep_Flow_Avro() { tblName, query, s.bqHelper.Peer, + "", + false, "") require.NoError(s.t, err) e2e.RunQrepFlowWorkflow(env, qrepConfig) @@ -78,3 +80,38 @@ func (s PeerFlowE2ETestSuiteBQ) Test_Complete_QRep_Flow_Avro() { env.AssertExpectations(s.t) } + +func (s PeerFlowE2ETestSuiteBQ) Test_PeerDB_Columns_QRep_BQ() { + env := e2e.NewTemporalTestWorkflowEnvironment() + e2e.RegisterWorkflowsAndActivities(env, s.t) + + numRows := 10 + + tblName := "test_columns_bq_qrep" + s.setupSourceTable(tblName, numRows) + + query := fmt.Sprintf("SELECT * FROM e2e_test_%s.%s WHERE updated_at BETWEEN {{.start}} AND {{.end}}", + s.bqSuffix, tblName) + + qrepConfig, err := e2e.CreateQRepWorkflowConfig("test_qrep_flow_avro", + fmt.Sprintf("e2e_test_%s.%s", s.bqSuffix, tblName), + tblName, + query, + s.bqHelper.Peer, + "", + true, + "_PEERDB_SYNCED_AT") + require.NoError(s.t, err) + e2e.RunQrepFlowWorkflow(env, qrepConfig) + + // Verify workflow completes without error + s.True(env.IsWorkflowCompleted()) + + err = env.GetWorkflowError() + require.NoError(s.t, err) + + err = s.checkPeerdbColumns(tblName, false) + require.NoError(s.t, err) + + env.AssertExpectations(s.t) +} diff --git a/flow/e2e/congen.go b/flow/e2e/congen.go index ac28879f45..e881dd5ead 100644 --- a/flow/e2e/congen.go +++ b/flow/e2e/congen.go @@ -171,6 +171,7 @@ type FlowConnectionGenerationConfig struct { PostgresPort int Destination *protos.Peer CdcStagingPath string + SoftDelete bool } // GenerateSnowflakePeer generates a snowflake peer config for testing. @@ -201,7 +202,10 @@ func (c *FlowConnectionGenerationConfig) GenerateFlowConnectionConfigs() (*proto ret.Source = GeneratePostgresPeer(c.PostgresPort) ret.Destination = c.Destination ret.CdcStagingPath = c.CdcStagingPath - ret.SoftDeleteColName = "_PEERDB_IS_DELETED" + ret.SoftDelete = c.SoftDelete + if ret.SoftDelete { + ret.SoftDeleteColName = "_PEERDB_IS_DELETED" + } ret.SyncedAtColName = "_PEERDB_SYNCED_AT" return ret, nil } diff --git a/flow/e2e/postgres/peer_flow_pg_test.go b/flow/e2e/postgres/peer_flow_pg_test.go index 2720891fb6..da050ccf64 100644 --- a/flow/e2e/postgres/peer_flow_pg_test.go +++ b/flow/e2e/postgres/peer_flow_pg_test.go @@ -8,6 +8,7 @@ import ( "github.com/PeerDB-io/peer-flow/generated/protos" "github.com/PeerDB-io/peer-flow/model/qvalue" peerflow "github.com/PeerDB-io/peer-flow/workflows" + "github.com/jackc/pgx/v5/pgtype" ) func (s *PeerFlowE2ETestSuitePG) attachSchemaSuffix(tableName string) string { @@ -18,6 +19,27 @@ func (s *PeerFlowE2ETestSuitePG) attachSuffix(input string) string { return fmt.Sprintf("%s_%s", input, postgresSuffix) } +func (s *PeerFlowE2ETestSuitePG) checkPeerdbColumns(dstSchemaQualified string, rowID int8) error { + query := fmt.Sprintf(`SELECT "_PEERDB_IS_DELETED","_PEERDB_SYNCED_AT" FROM %s WHERE id = %d`, + dstSchemaQualified, rowID) + var isDeleted pgtype.Bool + var syncedAt pgtype.Timestamp + err := s.pool.QueryRow(context.Background(), query).Scan(&isDeleted, &syncedAt) + if err != nil { + return fmt.Errorf("failed to query row: %w", err) + } + + if !isDeleted.Bool { + return fmt.Errorf("isDeleted is not true") + } + + if !syncedAt.Valid { + return fmt.Errorf("syncedAt is not valid") + } + + return nil +} + func (s *PeerFlowE2ETestSuitePG) Test_Simple_Flow_PG() { env := s.NewTestWorkflowEnvironment() 
e2e.RegisterWorkflowsAndActivities(env, s.T()) @@ -474,3 +496,67 @@ func (s *PeerFlowE2ETestSuitePG) Test_Composite_PKey_Toast_2_PG() { env.AssertExpectations(s.T()) } + +func (s *PeerFlowE2ETestSuitePG) Test_PeerDB_Columns() { + env := s.NewTestWorkflowEnvironment() + e2e.RegisterWorkflowsAndActivities(env, s.T()) + + srcTableName := s.attachSchemaSuffix("test_peerdb_cols") + dstTableName := s.attachSchemaSuffix("test_peerdb_cols_dst") + + _, err := s.pool.Exec(context.Background(), fmt.Sprintf(` + CREATE TABLE IF NOT EXISTS %s ( + id SERIAL PRIMARY KEY, + key TEXT NOT NULL, + value TEXT NOT NULL + ); + `, srcTableName)) + s.NoError(err) + + connectionGen := e2e.FlowConnectionGenerationConfig{ + FlowJobName: s.attachSuffix("test_peerdb_cols_mirror"), + TableNameMapping: map[string]string{srcTableName: dstTableName}, + PostgresPort: e2e.PostgresPort, + Destination: s.peer, + SoftDelete: true, + } + + flowConnConfig, err := connectionGen.GenerateFlowConnectionConfigs() + s.NoError(err) + + limits := peerflow.CDCFlowLimits{ + ExitAfterRecords: 2, + MaxBatchSize: 100, + } + + go func() { + e2e.SetupCDCFlowStatusQuery(env, connectionGen) + // insert 1 row into the source table + testKey := fmt.Sprintf("test_key_%d", 1) + testValue := fmt.Sprintf("test_value_%d", 1) + _, err = s.pool.Exec(context.Background(), fmt.Sprintf(` + INSERT INTO %s(key, value) VALUES ($1, $2) + `, srcTableName), testKey, testValue) + s.NoError(err) + + // delete that row + _, err = s.pool.Exec(context.Background(), fmt.Sprintf(` + DELETE FROM %s WHERE id=1 + `, srcTableName)) + s.NoError(err) + fmt.Println("Inserted and deleted a row for peerdb column check") + }() + + env.ExecuteWorkflow(peerflow.CDCFlowWorkflowWithConfig, flowConnConfig, &limits, nil) + + // Verify workflow completes without error + s.True(env.IsWorkflowCompleted()) + + err = env.GetWorkflowError() + // allow only continue as new error + s.Error(err) + s.Contains(err.Error(), "continue as new") + checkErr := s.checkPeerdbColumns(dstTableName, 1) + s.NoError(checkErr) + env.AssertExpectations(s.T()) +} diff --git a/flow/e2e/postgres/qrep_flow_pg_test.go b/flow/e2e/postgres/qrep_flow_pg_test.go index 192863e397..1c86c973b9 100644 --- a/flow/e2e/postgres/qrep_flow_pg_test.go +++ b/flow/e2e/postgres/qrep_flow_pg_test.go @@ -10,6 +10,7 @@ import ( connpostgres "github.com/PeerDB-io/peer-flow/connectors/postgres" "github.com/PeerDB-io/peer-flow/e2e" "github.com/PeerDB-io/peer-flow/generated/protos" + "github.com/jackc/pgx/v5/pgtype" "github.com/jackc/pgx/v5/pgxpool" "github.com/joho/godotenv" "github.com/stretchr/testify/suite" @@ -67,7 +68,7 @@ func (s *PeerFlowE2ETestSuitePG) TearDownSuite() { } func (s *PeerFlowE2ETestSuitePG) setupSourceTable(tableName string, rowCount int) { - err := e2e.CreateSourceTableQRep(s.pool, postgresSuffix, tableName) + err := e2e.CreateTableForQRep(s.pool, postgresSuffix, tableName) s.NoError(err) err = e2e.PopulateSourceTable(s.pool, postgresSuffix, tableName, rowCount) s.NoError(err) @@ -134,6 +135,27 @@ func (s *PeerFlowE2ETestSuitePG) compareQuery(srcSchemaQualified, dstSchemaQuali return nil } +func (s *PeerFlowE2ETestSuitePG) checkSyncedAt(dstSchemaQualified string) error { + query := fmt.Sprintf(`SELECT "_PEERDB_SYNCED_AT" FROM %s`, dstSchemaQualified) + + rows, _ := s.pool.Query(context.Background(), query) + + defer rows.Close() + for rows.Next() { + var syncedAt pgtype.Timestamp + err := rows.Scan(&syncedAt) + if err != nil { + return err + } + + if !syncedAt.Valid { + return fmt.Errorf("synced_at is not valid") + 
} + } + + return rows.Err() +} + func (s *PeerFlowE2ETestSuitePG) Test_Complete_QRep_Flow_Multi_Insert_PG() { env := s.NewTestWorkflowEnvironment() e2e.RegisterWorkflowsAndActivities(env, s.T()) @@ -146,8 +168,8 @@ func (s *PeerFlowE2ETestSuitePG) Test_Complete_QRep_Flow_Multi_Insert_PG() { //nolint:gosec dstTable := "test_qrep_flow_avro_pg_2" - // the name is misleading, but this is the destination table - err := e2e.CreateSourceTableQRep(s.pool, postgresSuffix, dstTable) + + err := e2e.CreateTableForQRep(s.pool, postgresSuffix, dstTable) s.NoError(err) srcSchemaQualified := fmt.Sprintf("%s_%s.%s", "e2e_test", postgresSuffix, srcTable) @@ -165,6 +187,8 @@ func (s *PeerFlowE2ETestSuitePG) Test_Complete_QRep_Flow_Multi_Insert_PG() { query, postgresPeer, "", + true, + "", ) s.NoError(err) @@ -183,3 +207,52 @@ func (s *PeerFlowE2ETestSuitePG) Test_Complete_QRep_Flow_Multi_Insert_PG() { env.AssertExpectations(s.T()) } + +func (s *PeerFlowE2ETestSuitePG) Test_Setup_Destination_And_PeerDB_Columns_QRep_PG() { + env := s.NewTestWorkflowEnvironment() + e2e.RegisterWorkflowsAndActivities(env, s.T()) + + numRows := 10 + + //nolint:gosec + srcTable := "test_qrep_columns_pg_1" + s.setupSourceTable(srcTable, numRows) + + //nolint:gosec + dstTable := "test_qrep_columns_pg_2" + + srcSchemaQualified := fmt.Sprintf("%s_%s.%s", "e2e_test", postgresSuffix, srcTable) + dstSchemaQualified := fmt.Sprintf("%s_%s.%s", "e2e_test", postgresSuffix, dstTable) + + query := fmt.Sprintf("SELECT * FROM e2e_test_%s.%s WHERE updated_at BETWEEN {{.start}} AND {{.end}}", + postgresSuffix, srcTable) + + postgresPeer := e2e.GeneratePostgresPeer(e2e.PostgresPort) + + qrepConfig, err := e2e.CreateQRepWorkflowConfig( + "test_qrep_columns_pg", + srcSchemaQualified, + dstSchemaQualified, + query, + postgresPeer, + "", + true, + "_PEERDB_SYNCED_AT", + ) + s.NoError(err) + + e2e.RunQrepFlowWorkflow(env, qrepConfig) + + // Verify workflow completes without error + s.True(env.IsWorkflowCompleted()) + + err = env.GetWorkflowError() + s.NoError(err) + + err = s.checkSyncedAt(dstSchemaQualified) + if err != nil { + s.FailNow(err.Error()) + } + + env.AssertExpectations(s.T()) +} diff --git a/flow/e2e/s3/qrep_flow_s3_test.go b/flow/e2e/s3/qrep_flow_s3_test.go index 62523e1adf..fda57ced09 100644 --- a/flow/e2e/s3/qrep_flow_s3_test.go +++ b/flow/e2e/s3/qrep_flow_s3_test.go @@ -30,7 +30,7 @@ func TestPeerFlowE2ETestSuiteS3(t *testing.T) { } func (s *PeerFlowE2ETestSuiteS3) setupSourceTable(tableName string, rowCount int) { - err := e2e.CreateSourceTableQRep(s.pool, s3Suffix, tableName) + err := e2e.CreateTableForQRep(s.pool, s3Suffix, tableName) s.NoError(err) err = e2e.PopulateSourceTable(s.pool, s3Suffix, tableName, rowCount) s.NoError(err) @@ -106,6 +106,8 @@ func (s *PeerFlowE2ETestSuiteS3) Test_Complete_QRep_Flow_S3() { query, s.s3Helper.GetPeer(), "stage", + false, + "", ) s.NoError(err) qrepConfig.StagingPath = s.s3Helper.s3Config.Url @@ -152,6 +154,8 @@ func (s *PeerFlowE2ETestSuiteS3) Test_Complete_QRep_Flow_S3_CTID() { query, s.s3Helper.GetPeer(), "stage", + false, + "", ) s.NoError(err) qrepConfig.StagingPath = s.s3Helper.s3Config.Url diff --git a/flow/e2e/snowflake/peer_flow_sf_test.go b/flow/e2e/snowflake/peer_flow_sf_test.go index 3e6f0c2bc0..8d521dbb72 100644 --- a/flow/e2e/snowflake/peer_flow_sf_test.go +++ b/flow/e2e/snowflake/peer_flow_sf_test.go @@ -198,6 +198,72 @@ func (s PeerFlowE2ETestSuiteSF) Test_Complete_Simple_Flow_SF() { env.AssertExpectations(s.t) } +func (s PeerFlowE2ETestSuiteSF) 
Test_Flow_ReplicaIdentity_Index_No_Pkey() { + env := e2e.NewTemporalTestWorkflowEnvironment() + e2e.RegisterWorkflowsAndActivities(env, s.t) + + srcTableName := s.attachSchemaSuffix("test_replica_identity_no_pkey") + dstTableName := fmt.Sprintf("%s.%s", s.sfHelper.testSchemaName, "test_replica_identity_no_pkey") + + // Create a table without a primary key and create a named unique index + _, err := s.pool.Exec(context.Background(), fmt.Sprintf(` + CREATE TABLE IF NOT EXISTS %s ( + id SERIAL, + key TEXT NOT NULL, + value TEXT NOT NULL + ); + CREATE UNIQUE INDEX unique_idx_on_id_key ON %s (id, key); + ALTER TABLE %s REPLICA IDENTITY USING INDEX unique_idx_on_id_key; + `, srcTableName, srcTableName, srcTableName)) + require.NoError(s.t, err) + + connectionGen := e2e.FlowConnectionGenerationConfig{ + FlowJobName: s.attachSuffix("test_simple_flow"), + TableNameMapping: map[string]string{srcTableName: dstTableName}, + PostgresPort: e2e.PostgresPort, + Destination: s.sfHelper.Peer, + } + + flowConnConfig, err := connectionGen.GenerateFlowConnectionConfigs() + require.NoError(s.t, err) + + limits := peerflow.CDCFlowLimits{ + ExitAfterRecords: 20, + MaxBatchSize: 100, + } + + // in a separate goroutine, wait for PeerFlowStatusQuery to finish setup + // and then insert 20 rows into the source table + go func() { + e2e.SetupCDCFlowStatusQuery(env, connectionGen) + // insert 20 rows into the source table + for i := 0; i < 20; i++ { + testKey := fmt.Sprintf("test_key_%d", i) + testValue := fmt.Sprintf("test_value_%d", i) + _, err = s.pool.Exec(context.Background(), fmt.Sprintf(` + INSERT INTO %s (id, key, value) VALUES ($1, $2, $3) + `, srcTableName), i, testKey, testValue) + require.NoError(s.t, err) + } + fmt.Println("Inserted 20 rows into the source table") + }() + + env.ExecuteWorkflow(peerflow.CDCFlowWorkflowWithConfig, flowConnConfig, &limits, nil) + + // Verify workflow completes without error + s.True(env.IsWorkflowCompleted()) + err = env.GetWorkflowError() + + // allow only continue as new error + require.Contains(s.t, err.Error(), "continue as new") + + count, err := s.sfHelper.CountRows("test_replica_identity_no_pkey") + require.NoError(s.t, err) + s.Equal(20, count) + + env.AssertExpectations(s.t) +} + func (s PeerFlowE2ETestSuiteSF) Test_Invalid_Geo_SF_Avro_CDC() { env := e2e.NewTemporalTestWorkflowEnvironment() e2e.RegisterWorkflowsAndActivities(env, s.t) @@ -1176,8 +1242,9 @@ func (s PeerFlowE2ETestSuiteSF) Test_Column_Exclusion() { Exclude: []string{"c2"}, }, }, - Source: e2e.GeneratePostgresPeer(e2e.PostgresPort), - CdcStagingPath: connectionGen.CdcStagingPath, + Source: e2e.GeneratePostgresPeer(e2e.PostgresPort), + CdcStagingPath: connectionGen.CdcStagingPath, + SyncedAtColName: "_PEERDB_SYNCED_AT", } limits := peerflow.CDCFlowLimits{ @@ -1221,7 +1288,7 @@ func (s PeerFlowE2ETestSuiteSF) Test_Column_Exclusion() { for _, field := range sfRows.Schema.Fields { require.NotEqual(s.t, field.Name, "c2") } - s.Equal(4, len(sfRows.Schema.Fields)) + s.Equal(5, len(sfRows.Schema.Fields)) s.Equal(10, len(sfRows.Records)) } @@ -1260,6 +1327,7 @@ func (s PeerFlowE2ETestSuiteSF) Test_Soft_Delete_Basic() { CdcStagingPath: connectionGen.CdcStagingPath, SoftDelete: true, SoftDeleteColName: "_PEERDB_IS_DELETED", + SyncedAtColName: "_PEERDB_SYNCED_AT", } limits := peerflow.CDCFlowLimits{ @@ -1346,6 +1414,7 @@ func (s PeerFlowE2ETestSuiteSF) Test_Soft_Delete_IUD_Same_Batch() { CdcStagingPath: connectionGen.CdcStagingPath, SoftDelete: true, SoftDeleteColName: "_PEERDB_IS_DELETED", + SyncedAtColName: 
"_PEERDB_SYNCED_AT", } limits := peerflow.CDCFlowLimits{ @@ -1428,6 +1497,7 @@ func (s PeerFlowE2ETestSuiteSF) Test_Soft_Delete_UD_Same_Batch() { CdcStagingPath: connectionGen.CdcStagingPath, SoftDelete: true, SoftDeleteColName: "_PEERDB_IS_DELETED", + SyncedAtColName: "_PEERDB_SYNCED_AT", } limits := peerflow.CDCFlowLimits{ @@ -1513,6 +1583,7 @@ func (s PeerFlowE2ETestSuiteSF) Test_Soft_Delete_Insert_After_Delete() { CdcStagingPath: connectionGen.CdcStagingPath, SoftDelete: true, SoftDeleteColName: "_PEERDB_IS_DELETED", + SyncedAtColName: "_PEERDB_SYNCED_AT", } limits := peerflow.CDCFlowLimits{ diff --git a/flow/e2e/snowflake/qrep_flow_sf_test.go b/flow/e2e/snowflake/qrep_flow_sf_test.go index 49ed3614b9..3ac7fee713 100644 --- a/flow/e2e/snowflake/qrep_flow_sf_test.go +++ b/flow/e2e/snowflake/qrep_flow_sf_test.go @@ -13,7 +13,7 @@ import ( ) func (s PeerFlowE2ETestSuiteSF) setupSourceTable(tableName string, numRows int) { - err := e2e.CreateSourceTableQRep(s.pool, s.pgSuffix, tableName) + err := e2e.CreateTableForQRep(s.pool, s.pgSuffix, tableName) require.NoError(s.t, err) err = e2e.PopulateSourceTable(s.pool, s.pgSuffix, tableName, numRows) require.NoError(s.t, err) @@ -77,6 +77,8 @@ func (s PeerFlowE2ETestSuiteSF) Test_Complete_QRep_Flow_Avro_SF() { query, s.sfHelper.Peer, "", + false, + "", ) require.NoError(s.t, err) @@ -116,6 +118,8 @@ func (s PeerFlowE2ETestSuiteSF) Test_Complete_QRep_Flow_Avro_SF_Upsert_Simple() query, s.sfHelper.Peer, "", + false, + "", ) qrepConfig.WriteMode = &protos.QRepWriteMode{ WriteType: protos.QRepWriteType_QREP_WRITE_MODE_UPSERT, @@ -159,6 +163,8 @@ func (s PeerFlowE2ETestSuiteSF) Test_Complete_QRep_Flow_Avro_SF_S3() { query, s.sfHelper.Peer, "", + false, + "", ) require.NoError(s.t, err) qrepConfig.StagingPath = fmt.Sprintf("s3://peerdb-test-bucket/avro/%s", uuid.New()) @@ -199,6 +205,8 @@ func (s PeerFlowE2ETestSuiteSF) Test_Complete_QRep_Flow_Avro_SF_Upsert_XMIN() { query, s.sfHelper.Peer, "", + false, + "", ) qrepConfig.WriteMode = &protos.QRepWriteMode{ WriteType: protos.QRepWriteType_QREP_WRITE_MODE_UPSERT, @@ -247,6 +255,8 @@ func (s PeerFlowE2ETestSuiteSF) Test_Complete_QRep_Flow_Avro_SF_S3_Integration() sfPeer, "", + false, + "", ) require.NoError(s.t, err) qrepConfig.StagingPath = fmt.Sprintf("s3://peerdb-test-bucket/avro/%s", uuid.New()) @@ -264,3 +274,48 @@ func (s PeerFlowE2ETestSuiteSF) Test_Complete_QRep_Flow_Avro_SF_S3_Integration() env.AssertExpectations(s.t) } + +func (s PeerFlowE2ETestSuiteSF) Test_PeerDB_Columns_QRep_SF() { + env := e2e.NewTemporalTestWorkflowEnvironment() + e2e.RegisterWorkflowsAndActivities(env, s.t) + + numRows := 10 + + tblName := "test_qrep_columns_sf" + s.setupSourceTable(tblName, numRows) + + dstSchemaQualified := fmt.Sprintf("%s.%s", s.sfHelper.testSchemaName, tblName) + + query := fmt.Sprintf("SELECT * FROM e2e_test_%s.%s WHERE updated_at BETWEEN {{.start}} AND {{.end}}", + s.pgSuffix, tblName) + + qrepConfig, err := e2e.CreateQRepWorkflowConfig( + "test_columns_qrep_sf", + fmt.Sprintf("e2e_test_%s.%s", s.pgSuffix, tblName), + dstSchemaQualified, + query, + s.sfHelper.Peer, + "", + true, + "_PEERDB_SYNCED_AT", + ) + qrepConfig.WriteMode = &protos.QRepWriteMode{ + WriteType: protos.QRepWriteType_QREP_WRITE_MODE_UPSERT, + UpsertKeyColumns: []string{"id"}, + } + require.NoError(s.t, err) + + e2e.RunQrepFlowWorkflow(env, qrepConfig) + + // Verify workflow completes without error + s.True(env.IsWorkflowCompleted()) + + err = env.GetWorkflowError() + require.NoError(s.t, err) + + err = 
s.sfHelper.checkSyncedAt(fmt.Sprintf(`SELECT "_PEERDB_SYNCED_AT" FROM %s.%s`, + s.sfHelper.testSchemaName, tblName)) + require.NoError(s.t, err) + + env.AssertExpectations(s.t) +} diff --git a/flow/e2e/snowflake/snowflake_helper.go b/flow/e2e/snowflake/snowflake_helper.go index 38fefeddc0..0401d34f58 100644 --- a/flow/e2e/snowflake/snowflake_helper.go +++ b/flow/e2e/snowflake/snowflake_helper.go @@ -6,6 +6,7 @@ import ( "fmt" "math/big" "os" + "time" connsnowflake "github.com/PeerDB-io/peer-flow/connectors/snowflake" "github.com/PeerDB-io/peer-flow/e2e" @@ -175,3 +176,25 @@ func (s *SnowflakeTestHelper) RunIntQuery(query string) (int, error) { return 0, fmt.Errorf("failed to execute query: %s, returned value of type %s", query, rec.Entries[0].Kind) } } + +// checks that every value in the _PEERDB_SYNCED_AT column returned by the query is a valid timestamp +func (s *SnowflakeTestHelper) checkSyncedAt(query string) error { + recordBatch, err := s.testClient.ExecuteAndProcessQuery(query) + if err != nil { + return err + } + + for _, record := range recordBatch.Records { + for _, entry := range record.Entries { + if entry.Kind != qvalue.QValueKindTimestamp { + return fmt.Errorf("synced_at column check failed: _PEERDB_SYNCED_AT is not a timestamp") + } + _, ok := entry.Value.(time.Time) + if !ok { + return fmt.Errorf("synced_at column check failed: _PEERDB_SYNCED_AT is not a valid timestamp") + } + } + } + + return nil +} diff --git a/flow/e2e/test_utils.go b/flow/e2e/test_utils.go index 7c12240580..0aa1c12242 100644 --- a/flow/e2e/test_utils.go +++ b/flow/e2e/test_utils.go @@ -123,7 +123,7 @@ func NormalizeFlowCountQuery(env *testsuite.TestWorkflowEnvironment, } } -func CreateSourceTableQRep(pool *pgxpool.Pool, suffix string, tableName string) error { +func CreateTableForQRep(pool *pgxpool.Pool, suffix string, tableName string) error { tblFields := []string{ "id UUID NOT NULL PRIMARY KEY", "card_id UUID", @@ -287,6 +287,8 @@ func CreateQRepWorkflowConfig( query string, dest *protos.Peer, stagingPath string, + setupDst bool, + syncedAtCol string, ) (*protos.QRepConfig, error) { connectionGen := QRepFlowConnectionGenerationConfig{ FlowJobName: flowJobName, @@ -304,6 +306,8 @@ func CreateQRepWorkflowConfig( return nil, err } qrepConfig.InitialCopyOnly = true + qrepConfig.SyncedAtColName = syncedAtCol + qrepConfig.SetupWatermarkTableOnDestination = setupDst return qrepConfig, nil } diff --git a/flow/generated/protos/flow.pb.go b/flow/generated/protos/flow.pb.go index a4a8ff581c..4be91a690f 100644 --- a/flow/generated/protos/flow.pb.go +++ b/flow/generated/protos/flow.pb.go @@ -2669,7 +2669,8 @@ type QRepConfig struct { SetupWatermarkTableOnDestination bool `protobuf:"varint,17,opt,name=setup_watermark_table_on_destination,json=setupWatermarkTableOnDestination,proto3" json:"setup_watermark_table_on_destination,omitempty"` // create new tables with "_peerdb_resync" suffix, perform initial load and then swap the new table with the old ones // to be used after the old mirror is dropped - DstTableFullResync bool `protobuf:"varint,18,opt,name=dst_table_full_resync,json=dstTableFullResync,proto3" json:"dst_table_full_resync,omitempty"` + DstTableFullResync bool `protobuf:"varint,18,opt,name=dst_table_full_resync,json=dstTableFullResync,proto3" json:"dst_table_full_resync,omitempty"` + SyncedAtColName string `protobuf:"bytes,19,opt,name=synced_at_col_name,json=syncedAtColName,proto3" json:"synced_at_col_name,omitempty"` } func (x *QRepConfig) Reset() { @@ -2830,6 +2831,13 @@ func (x *QRepConfig) GetDstTableFullResync() bool { return false } +func (x *QRepConfig) GetSyncedAtColName()
string { + if x != nil { + return x.SyncedAtColName + } + return "" +} + type QRepPartition struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3286,6 +3294,69 @@ func (x *QRepFlowState) GetDisableWaitForNewRows() bool { return false } +type PeerDBColumns struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SoftDeleteColName string `protobuf:"bytes,1,opt,name=soft_delete_col_name,json=softDeleteColName,proto3" json:"soft_delete_col_name,omitempty"` + SyncedAtColName string `protobuf:"bytes,2,opt,name=synced_at_col_name,json=syncedAtColName,proto3" json:"synced_at_col_name,omitempty"` + SoftDelete bool `protobuf:"varint,3,opt,name=soft_delete,json=softDelete,proto3" json:"soft_delete,omitempty"` +} + +func (x *PeerDBColumns) Reset() { + *x = PeerDBColumns{} + if protoimpl.UnsafeEnabled { + mi := &file_flow_proto_msgTypes[48] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PeerDBColumns) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PeerDBColumns) ProtoMessage() {} + +func (x *PeerDBColumns) ProtoReflect() protoreflect.Message { + mi := &file_flow_proto_msgTypes[48] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PeerDBColumns.ProtoReflect.Descriptor instead. +func (*PeerDBColumns) Descriptor() ([]byte, []int) { + return file_flow_proto_rawDescGZIP(), []int{48} +} + +func (x *PeerDBColumns) GetSoftDeleteColName() string { + if x != nil { + return x.SoftDeleteColName + } + return "" +} + +func (x *PeerDBColumns) GetSyncedAtColName() string { + if x != nil { + return x.SyncedAtColName + } + return "" +} + +func (x *PeerDBColumns) GetSoftDelete() bool { + if x != nil { + return x.SoftDelete + } + return false +} + var File_flow_proto protoreflect.FileDescriptor var file_flow_proto_rawDesc = []byte{ @@ -3837,7 +3908,7 @@ var file_flow_proto_rawDesc = []byte{ 0x69, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x75, 0x70, 0x73, 0x65, 0x72, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x75, 0x70, 0x73, 0x65, 0x72, 0x74, 0x4b, 0x65, 0x79, 0x43, 0x6f, - 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x22, 0x99, 0x07, 0x0a, 0x0a, 0x51, 0x52, 0x65, 0x70, 0x43, 0x6f, + 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x22, 0xc6, 0x07, 0x0a, 0x0a, 0x51, 0x52, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x22, 0x0a, 0x0d, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6a, 0x6f, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x66, 0x6c, 0x6f, 0x77, 0x4a, 0x6f, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x33, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, @@ -3895,94 +3966,106 @@ var file_flow_proto_rawDesc = []byte{ 0x0a, 0x15, 0x64, 0x73, 0x74, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x72, 0x65, 0x73, 0x79, 0x6e, 0x63, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x64, 0x73, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x79, 0x6e, - 0x63, 0x22, 0x97, 0x01, 0x0a, 0x0d, 0x51, 0x52, 0x65, 0x70, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x61, 0x72, 0x74, 0x69, - 0x74, 
0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x31, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x66, - 0x6c, 0x6f, 0x77, 0x2e, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, - 0x67, 0x65, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x66, 0x75, 0x6c, - 0x6c, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x66, 0x75, 0x6c, 0x6c, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6b, 0x0a, 0x12, 0x51, - 0x52, 0x65, 0x70, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x61, 0x74, 0x63, - 0x68, 0x12, 0x19, 0x0a, 0x08, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x07, 0x62, 0x61, 0x74, 0x63, 0x68, 0x49, 0x64, 0x12, 0x3a, 0x0a, 0x0a, - 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x51, - 0x52, 0x65, 0x70, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x70, 0x61, - 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x50, 0x0a, 0x12, 0x51, 0x52, 0x65, 0x70, - 0x50, 0x61, 0x72, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x3a, - 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x66, 0x6c, 0x6f, 0x77, - 0x2e, 0x51, 0x52, 0x65, 0x70, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, - 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x2c, 0x0a, 0x0d, 0x44, 0x72, - 0x6f, 0x70, 0x46, 0x6c, 0x6f, 0x77, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x66, - 0x6c, 0x6f, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x66, 0x6c, 0x6f, 0x77, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x54, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x74, - 0x61, 0x41, 0x64, 0x64, 0x65, 0x64, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, - 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, - 0x0b, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x22, 0xa2, - 0x01, 0x0a, 0x10, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x44, 0x65, - 0x6c, 0x74, 0x61, 0x12, 0x24, 0x0a, 0x0e, 0x73, 0x72, 0x63, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x72, 0x63, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x64, 0x73, 0x74, - 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0c, 0x64, 0x73, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, - 0x42, 0x0a, 0x0d, 0x61, 0x64, 0x64, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, - 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, - 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x41, 0x64, 0x64, 0x65, 0x64, 0x43, - 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 
0x52, 0x0c, 0x61, 0x64, 0x64, 0x65, 0x64, 0x43, 0x6f, 0x6c, 0x75, - 0x6d, 0x6e, 0x73, 0x22, 0xc8, 0x01, 0x0a, 0x1b, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x49, 0x6e, - 0x70, 0x75, 0x74, 0x12, 0x5a, 0x0a, 0x17, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x66, 0x6c, - 0x6f, 0x77, 0x2e, 0x46, 0x6c, 0x6f, 0x77, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x15, 0x66, 0x6c, 0x6f, 0x77, 0x43, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, - 0x4d, 0x0a, 0x13, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, - 0x64, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x70, - 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x52, 0x11, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x22, 0xe9, - 0x01, 0x0a, 0x0d, 0x51, 0x52, 0x65, 0x70, 0x46, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x12, 0x41, 0x0a, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, - 0x62, 0x5f, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x51, 0x52, 0x65, 0x70, 0x50, 0x61, 0x72, 0x74, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6c, 0x61, 0x73, 0x74, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x38, 0x0a, 0x18, 0x6e, 0x75, 0x6d, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x16, 0x6e, 0x75, 0x6d, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x12, 0x21, 0x0a, - 0x0c, 0x6e, 0x65, 0x65, 0x64, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x79, 0x6e, 0x63, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x0b, 0x6e, 0x65, 0x65, 0x64, 0x73, 0x52, 0x65, 0x73, 0x79, 0x6e, 0x63, - 0x12, 0x38, 0x0a, 0x19, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x77, 0x61, 0x69, 0x74, - 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x6e, 0x65, 0x77, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x15, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x61, 0x69, 0x74, - 0x46, 0x6f, 0x72, 0x4e, 0x65, 0x77, 0x52, 0x6f, 0x77, 0x73, 0x2a, 0x50, 0x0a, 0x0c, 0x51, 0x52, - 0x65, 0x70, 0x53, 0x79, 0x6e, 0x63, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x51, 0x52, - 0x45, 0x50, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x4d, 0x55, 0x4c, - 0x54, 0x49, 0x5f, 0x49, 0x4e, 0x53, 0x45, 0x52, 0x54, 0x10, 0x00, 0x12, 0x1f, 0x0a, 0x1b, 0x51, - 0x52, 0x45, 0x50, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x54, - 0x4f, 0x52, 0x41, 0x47, 0x45, 0x5f, 0x41, 0x56, 0x52, 0x4f, 0x10, 0x01, 0x2a, 0x66, 0x0a, 0x0d, - 0x51, 0x52, 0x65, 0x70, 0x57, 0x72, 0x69, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, - 0x16, 0x51, 0x52, 0x45, 0x50, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x4d, 0x4f, 0x44, 0x45, - 0x5f, 0x41, 0x50, 0x50, 0x45, 0x4e, 0x44, 0x10, 0x00, 
0x12, 0x1a, 0x0a, 0x16, 0x51, 0x52, 0x45, - 0x50, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x50, 0x53, - 0x45, 0x52, 0x54, 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x51, 0x52, 0x45, 0x50, 0x5f, 0x57, 0x52, - 0x49, 0x54, 0x45, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x4f, 0x56, 0x45, 0x52, 0x57, 0x52, 0x49, - 0x54, 0x45, 0x10, 0x02, 0x42, 0x76, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x2e, 0x70, 0x65, 0x65, 0x72, - 0x64, 0x62, 0x5f, 0x66, 0x6c, 0x6f, 0x77, 0x42, 0x09, 0x46, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x10, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0xa2, 0x02, 0x03, 0x50, 0x58, 0x58, 0xaa, 0x02, 0x0a, 0x50, - 0x65, 0x65, 0x72, 0x64, 0x62, 0x46, 0x6c, 0x6f, 0x77, 0xca, 0x02, 0x0a, 0x50, 0x65, 0x65, 0x72, - 0x64, 0x62, 0x46, 0x6c, 0x6f, 0x77, 0xe2, 0x02, 0x16, 0x50, 0x65, 0x65, 0x72, 0x64, 0x62, 0x46, - 0x6c, 0x6f, 0x77, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, - 0x02, 0x0a, 0x50, 0x65, 0x65, 0x72, 0x64, 0x62, 0x46, 0x6c, 0x6f, 0x77, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x63, 0x12, 0x2b, 0x0a, 0x12, 0x73, 0x79, 0x6e, 0x63, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x5f, 0x63, + 0x6f, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, + 0x79, 0x6e, 0x63, 0x65, 0x64, 0x41, 0x74, 0x43, 0x6f, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x97, + 0x01, 0x0a, 0x0d, 0x51, 0x52, 0x65, 0x70, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x49, 0x64, 0x12, 0x31, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x66, 0x6c, 0x6f, 0x77, + 0x2e, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, + 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x66, 0x75, 0x6c, 0x6c, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x50, + 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6b, 0x0a, 0x12, 0x51, 0x52, 0x65, 0x70, + 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x12, 0x19, + 0x0a, 0x08, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x07, 0x62, 0x61, 0x74, 0x63, 0x68, 0x49, 0x64, 0x12, 0x3a, 0x0a, 0x0a, 0x70, 0x61, 0x72, + 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x51, 0x52, 0x65, 0x70, + 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x74, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x50, 0x0a, 0x12, 0x51, 0x52, 0x65, 0x70, 0x50, 0x61, 0x72, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x3a, 0x0a, 0x0a, 0x70, + 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x51, 0x52, + 0x65, 0x70, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x70, 0x61, 0x72, + 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x2c, 
0x0a, 0x0d, 0x44, 0x72, 0x6f, 0x70, 0x46, + 0x6c, 0x6f, 0x77, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x6c, 0x6f, 0x77, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x6c, 0x6f, + 0x77, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x54, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x41, 0x64, + 0x64, 0x65, 0x64, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6f, 0x6c, + 0x75, 0x6d, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6f, + 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x22, 0xa2, 0x01, 0x0a, 0x10, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x44, 0x65, 0x6c, 0x74, 0x61, + 0x12, 0x24, 0x0a, 0x0e, 0x73, 0x72, 0x63, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x72, 0x63, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x64, 0x73, 0x74, 0x5f, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x64, 0x73, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x42, 0x0a, 0x0d, + 0x61, 0x64, 0x64, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x66, 0x6c, 0x6f, + 0x77, 0x2e, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x41, 0x64, 0x64, 0x65, 0x64, 0x43, 0x6f, 0x6c, 0x75, + 0x6d, 0x6e, 0x52, 0x0c, 0x61, 0x64, 0x64, 0x65, 0x64, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, + 0x22, 0xc8, 0x01, 0x0a, 0x1b, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x49, 0x6e, 0x70, 0x75, 0x74, + 0x12, 0x5a, 0x0a, 0x17, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x22, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x66, 0x6c, 0x6f, 0x77, 0x2e, + 0x46, 0x6c, 0x6f, 0x77, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x15, 0x66, 0x6c, 0x6f, 0x77, 0x43, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x4d, 0x0a, 0x13, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x64, 0x65, 0x6c, + 0x74, 0x61, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x70, 0x65, 0x65, 0x72, + 0x64, 0x62, 0x5f, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x52, 0x11, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x22, 0xe9, 0x01, 0x0a, 0x0d, + 0x51, 0x52, 0x65, 0x70, 0x46, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x41, 0x0a, + 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x66, + 0x6c, 0x6f, 0x77, 0x2e, 0x51, 0x52, 0x65, 0x70, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x0d, 0x6c, 0x61, 0x73, 0x74, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 
0x69, 0x6f, 0x6e, + 0x12, 0x38, 0x0a, 0x18, 0x6e, 0x75, 0x6d, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x5f, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x16, 0x6e, 0x75, 0x6d, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x65, + 0x65, 0x64, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x79, 0x6e, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0b, 0x6e, 0x65, 0x65, 0x64, 0x73, 0x52, 0x65, 0x73, 0x79, 0x6e, 0x63, 0x12, 0x38, 0x0a, + 0x19, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x66, 0x6f, + 0x72, 0x5f, 0x6e, 0x65, 0x77, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x15, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, + 0x4e, 0x65, 0x77, 0x52, 0x6f, 0x77, 0x73, 0x22, 0x8e, 0x01, 0x0a, 0x0d, 0x50, 0x65, 0x65, 0x72, + 0x44, 0x42, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x12, 0x2f, 0x0a, 0x14, 0x73, 0x6f, 0x66, + 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x12, 0x73, 0x79, + 0x6e, 0x63, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x5f, 0x63, 0x6f, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x79, 0x6e, 0x63, 0x65, 0x64, 0x41, 0x74, + 0x43, 0x6f, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x66, 0x74, 0x5f, + 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x73, 0x6f, + 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x2a, 0x50, 0x0a, 0x0c, 0x51, 0x52, 0x65, 0x70, + 0x53, 0x79, 0x6e, 0x63, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x51, 0x52, 0x45, 0x50, + 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x4d, 0x55, 0x4c, 0x54, 0x49, + 0x5f, 0x49, 0x4e, 0x53, 0x45, 0x52, 0x54, 0x10, 0x00, 0x12, 0x1f, 0x0a, 0x1b, 0x51, 0x52, 0x45, + 0x50, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x54, 0x4f, 0x52, + 0x41, 0x47, 0x45, 0x5f, 0x41, 0x56, 0x52, 0x4f, 0x10, 0x01, 0x2a, 0x66, 0x0a, 0x0d, 0x51, 0x52, + 0x65, 0x70, 0x57, 0x72, 0x69, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x51, + 0x52, 0x45, 0x50, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x41, + 0x50, 0x50, 0x45, 0x4e, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x51, 0x52, 0x45, 0x50, 0x5f, + 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x50, 0x53, 0x45, 0x52, + 0x54, 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x51, 0x52, 0x45, 0x50, 0x5f, 0x57, 0x52, 0x49, 0x54, + 0x45, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x4f, 0x56, 0x45, 0x52, 0x57, 0x52, 0x49, 0x54, 0x45, + 0x10, 0x02, 0x42, 0x76, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, + 0x5f, 0x66, 0x6c, 0x6f, 0x77, 0x42, 0x09, 0x46, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x10, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x73, 0xa2, 0x02, 0x03, 0x50, 0x58, 0x58, 0xaa, 0x02, 0x0a, 0x50, 0x65, 0x65, + 0x72, 0x64, 0x62, 0x46, 0x6c, 0x6f, 0x77, 0xca, 0x02, 0x0a, 0x50, 0x65, 0x65, 0x72, 0x64, 0x62, + 0x46, 0x6c, 0x6f, 0x77, 0xe2, 0x02, 0x16, 0x50, 0x65, 0x65, 0x72, 0x64, 0x62, 0x46, 0x6c, 0x6f, + 0x77, 
0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0a, + 0x50, 0x65, 0x65, 0x72, 0x64, 0x62, 0x46, 0x6c, 0x6f, 0x77, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -3998,7 +4081,7 @@ func file_flow_proto_rawDescGZIP() []byte { } var file_flow_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_flow_proto_msgTypes = make([]protoimpl.MessageInfo, 60) +var file_flow_proto_msgTypes = make([]protoimpl.MessageInfo, 61) var file_flow_proto_goTypes = []interface{}{ (QRepSyncMode)(0), // 0: peerdb_flow.QRepSyncMode (QRepWriteType)(0), // 1: peerdb_flow.QRepWriteType @@ -4050,74 +4133,75 @@ var file_flow_proto_goTypes = []interface{}{ (*TableSchemaDelta)(nil), // 47: peerdb_flow.TableSchemaDelta (*ReplayTableSchemaDeltaInput)(nil), // 48: peerdb_flow.ReplayTableSchemaDeltaInput (*QRepFlowState)(nil), // 49: peerdb_flow.QRepFlowState - nil, // 50: peerdb_flow.FlowConnectionConfigs.SrcTableIdNameMappingEntry - nil, // 51: peerdb_flow.FlowConnectionConfigs.TableNameSchemaMappingEntry - nil, // 52: peerdb_flow.CreateTablesFromExistingInput.NewToExistingTableMappingEntry - nil, // 53: peerdb_flow.SyncFlowOptions.RelationMessageMappingEntry - nil, // 54: peerdb_flow.StartFlowInput.RelationMessageMappingEntry - nil, // 55: peerdb_flow.EnsurePullabilityBatchOutput.TableIdentifierMappingEntry - nil, // 56: peerdb_flow.SetupReplicationInput.TableNameMappingEntry - nil, // 57: peerdb_flow.CreateRawTableInput.TableNameMappingEntry - nil, // 58: peerdb_flow.TableSchema.ColumnsEntry - nil, // 59: peerdb_flow.GetTableSchemaBatchOutput.TableNameSchemaMappingEntry - nil, // 60: peerdb_flow.SetupNormalizedTableBatchInput.TableNameSchemaMappingEntry - nil, // 61: peerdb_flow.SetupNormalizedTableBatchOutput.TableExistsMappingEntry - (*Peer)(nil), // 62: peerdb_peers.Peer - (*timestamppb.Timestamp)(nil), // 63: google.protobuf.Timestamp + (*PeerDBColumns)(nil), // 50: peerdb_flow.PeerDBColumns + nil, // 51: peerdb_flow.FlowConnectionConfigs.SrcTableIdNameMappingEntry + nil, // 52: peerdb_flow.FlowConnectionConfigs.TableNameSchemaMappingEntry + nil, // 53: peerdb_flow.CreateTablesFromExistingInput.NewToExistingTableMappingEntry + nil, // 54: peerdb_flow.SyncFlowOptions.RelationMessageMappingEntry + nil, // 55: peerdb_flow.StartFlowInput.RelationMessageMappingEntry + nil, // 56: peerdb_flow.EnsurePullabilityBatchOutput.TableIdentifierMappingEntry + nil, // 57: peerdb_flow.SetupReplicationInput.TableNameMappingEntry + nil, // 58: peerdb_flow.CreateRawTableInput.TableNameMappingEntry + nil, // 59: peerdb_flow.TableSchema.ColumnsEntry + nil, // 60: peerdb_flow.GetTableSchemaBatchOutput.TableNameSchemaMappingEntry + nil, // 61: peerdb_flow.SetupNormalizedTableBatchInput.TableNameSchemaMappingEntry + nil, // 62: peerdb_flow.SetupNormalizedTableBatchOutput.TableExistsMappingEntry + (*Peer)(nil), // 63: peerdb_peers.Peer + (*timestamppb.Timestamp)(nil), // 64: google.protobuf.Timestamp } var file_flow_proto_depIdxs = []int32{ 3, // 0: peerdb_flow.RelationMessage.columns:type_name -> peerdb_flow.RelationMessageColumn - 62, // 1: peerdb_flow.FlowConnectionConfigs.source:type_name -> peerdb_peers.Peer - 62, // 2: peerdb_flow.FlowConnectionConfigs.destination:type_name -> peerdb_peers.Peer + 63, // 1: peerdb_flow.FlowConnectionConfigs.source:type_name -> peerdb_peers.Peer + 63, // 2: peerdb_flow.FlowConnectionConfigs.destination:type_name -> peerdb_peers.Peer 28, // 3: peerdb_flow.FlowConnectionConfigs.table_schema:type_name -> peerdb_flow.TableSchema 5, // 4: 
peerdb_flow.FlowConnectionConfigs.table_mappings:type_name -> peerdb_flow.TableMapping - 50, // 5: peerdb_flow.FlowConnectionConfigs.src_table_id_name_mapping:type_name -> peerdb_flow.FlowConnectionConfigs.SrcTableIdNameMappingEntry - 51, // 6: peerdb_flow.FlowConnectionConfigs.table_name_schema_mapping:type_name -> peerdb_flow.FlowConnectionConfigs.TableNameSchemaMappingEntry - 62, // 7: peerdb_flow.FlowConnectionConfigs.metadata_peer:type_name -> peerdb_peers.Peer + 51, // 5: peerdb_flow.FlowConnectionConfigs.src_table_id_name_mapping:type_name -> peerdb_flow.FlowConnectionConfigs.SrcTableIdNameMappingEntry + 52, // 6: peerdb_flow.FlowConnectionConfigs.table_name_schema_mapping:type_name -> peerdb_flow.FlowConnectionConfigs.TableNameSchemaMappingEntry + 63, // 7: peerdb_flow.FlowConnectionConfigs.metadata_peer:type_name -> peerdb_peers.Peer 0, // 8: peerdb_flow.FlowConnectionConfigs.snapshot_sync_mode:type_name -> peerdb_flow.QRepSyncMode 0, // 9: peerdb_flow.FlowConnectionConfigs.cdc_sync_mode:type_name -> peerdb_flow.QRepSyncMode 28, // 10: peerdb_flow.RenameTableOption.table_schema:type_name -> peerdb_flow.TableSchema - 62, // 11: peerdb_flow.RenameTablesInput.peer:type_name -> peerdb_peers.Peer + 63, // 11: peerdb_flow.RenameTablesInput.peer:type_name -> peerdb_peers.Peer 7, // 12: peerdb_flow.RenameTablesInput.rename_table_options:type_name -> peerdb_flow.RenameTableOption - 62, // 13: peerdb_flow.CreateTablesFromExistingInput.peer:type_name -> peerdb_peers.Peer - 52, // 14: peerdb_flow.CreateTablesFromExistingInput.new_to_existing_table_mapping:type_name -> peerdb_flow.CreateTablesFromExistingInput.NewToExistingTableMappingEntry - 53, // 15: peerdb_flow.SyncFlowOptions.relation_message_mapping:type_name -> peerdb_flow.SyncFlowOptions.RelationMessageMappingEntry - 63, // 16: peerdb_flow.LastSyncState.last_synced_at:type_name -> google.protobuf.Timestamp + 63, // 13: peerdb_flow.CreateTablesFromExistingInput.peer:type_name -> peerdb_peers.Peer + 53, // 14: peerdb_flow.CreateTablesFromExistingInput.new_to_existing_table_mapping:type_name -> peerdb_flow.CreateTablesFromExistingInput.NewToExistingTableMappingEntry + 54, // 15: peerdb_flow.SyncFlowOptions.relation_message_mapping:type_name -> peerdb_flow.SyncFlowOptions.RelationMessageMappingEntry + 64, // 16: peerdb_flow.LastSyncState.last_synced_at:type_name -> google.protobuf.Timestamp 14, // 17: peerdb_flow.StartFlowInput.last_sync_state:type_name -> peerdb_flow.LastSyncState 6, // 18: peerdb_flow.StartFlowInput.flow_connection_configs:type_name -> peerdb_flow.FlowConnectionConfigs 12, // 19: peerdb_flow.StartFlowInput.sync_flow_options:type_name -> peerdb_flow.SyncFlowOptions - 54, // 20: peerdb_flow.StartFlowInput.relation_message_mapping:type_name -> peerdb_flow.StartFlowInput.RelationMessageMappingEntry + 55, // 20: peerdb_flow.StartFlowInput.relation_message_mapping:type_name -> peerdb_flow.StartFlowInput.RelationMessageMappingEntry 6, // 21: peerdb_flow.StartNormalizeInput.flow_connection_configs:type_name -> peerdb_flow.FlowConnectionConfigs - 62, // 22: peerdb_flow.GetLastSyncedIDInput.peer_connection_config:type_name -> peerdb_peers.Peer - 62, // 23: peerdb_flow.EnsurePullabilityInput.peer_connection_config:type_name -> peerdb_peers.Peer - 62, // 24: peerdb_flow.EnsurePullabilityBatchInput.peer_connection_config:type_name -> peerdb_peers.Peer + 63, // 22: peerdb_flow.GetLastSyncedIDInput.peer_connection_config:type_name -> peerdb_peers.Peer + 63, // 23: peerdb_flow.EnsurePullabilityInput.peer_connection_config:type_name -> 
peerdb_peers.Peer + 63, // 24: peerdb_flow.EnsurePullabilityBatchInput.peer_connection_config:type_name -> peerdb_peers.Peer 20, // 25: peerdb_flow.TableIdentifier.postgres_table_identifier:type_name -> peerdb_flow.PostgresTableIdentifier 21, // 26: peerdb_flow.EnsurePullabilityOutput.table_identifier:type_name -> peerdb_flow.TableIdentifier - 55, // 27: peerdb_flow.EnsurePullabilityBatchOutput.table_identifier_mapping:type_name -> peerdb_flow.EnsurePullabilityBatchOutput.TableIdentifierMappingEntry - 62, // 28: peerdb_flow.SetupReplicationInput.peer_connection_config:type_name -> peerdb_peers.Peer - 56, // 29: peerdb_flow.SetupReplicationInput.table_name_mapping:type_name -> peerdb_flow.SetupReplicationInput.TableNameMappingEntry - 62, // 30: peerdb_flow.SetupReplicationInput.destination_peer:type_name -> peerdb_peers.Peer - 62, // 31: peerdb_flow.CreateRawTableInput.peer_connection_config:type_name -> peerdb_peers.Peer - 57, // 32: peerdb_flow.CreateRawTableInput.table_name_mapping:type_name -> peerdb_flow.CreateRawTableInput.TableNameMappingEntry + 56, // 27: peerdb_flow.EnsurePullabilityBatchOutput.table_identifier_mapping:type_name -> peerdb_flow.EnsurePullabilityBatchOutput.TableIdentifierMappingEntry + 63, // 28: peerdb_flow.SetupReplicationInput.peer_connection_config:type_name -> peerdb_peers.Peer + 57, // 29: peerdb_flow.SetupReplicationInput.table_name_mapping:type_name -> peerdb_flow.SetupReplicationInput.TableNameMappingEntry + 63, // 30: peerdb_flow.SetupReplicationInput.destination_peer:type_name -> peerdb_peers.Peer + 63, // 31: peerdb_flow.CreateRawTableInput.peer_connection_config:type_name -> peerdb_peers.Peer + 58, // 32: peerdb_flow.CreateRawTableInput.table_name_mapping:type_name -> peerdb_flow.CreateRawTableInput.TableNameMappingEntry 0, // 33: peerdb_flow.CreateRawTableInput.cdc_sync_mode:type_name -> peerdb_flow.QRepSyncMode - 58, // 34: peerdb_flow.TableSchema.columns:type_name -> peerdb_flow.TableSchema.ColumnsEntry - 62, // 35: peerdb_flow.GetTableSchemaBatchInput.peer_connection_config:type_name -> peerdb_peers.Peer - 59, // 36: peerdb_flow.GetTableSchemaBatchOutput.table_name_schema_mapping:type_name -> peerdb_flow.GetTableSchemaBatchOutput.TableNameSchemaMappingEntry - 62, // 37: peerdb_flow.SetupNormalizedTableInput.peer_connection_config:type_name -> peerdb_peers.Peer + 59, // 34: peerdb_flow.TableSchema.columns:type_name -> peerdb_flow.TableSchema.ColumnsEntry + 63, // 35: peerdb_flow.GetTableSchemaBatchInput.peer_connection_config:type_name -> peerdb_peers.Peer + 60, // 36: peerdb_flow.GetTableSchemaBatchOutput.table_name_schema_mapping:type_name -> peerdb_flow.GetTableSchemaBatchOutput.TableNameSchemaMappingEntry + 63, // 37: peerdb_flow.SetupNormalizedTableInput.peer_connection_config:type_name -> peerdb_peers.Peer 28, // 38: peerdb_flow.SetupNormalizedTableInput.source_table_schema:type_name -> peerdb_flow.TableSchema - 62, // 39: peerdb_flow.SetupNormalizedTableBatchInput.peer_connection_config:type_name -> peerdb_peers.Peer - 60, // 40: peerdb_flow.SetupNormalizedTableBatchInput.table_name_schema_mapping:type_name -> peerdb_flow.SetupNormalizedTableBatchInput.TableNameSchemaMappingEntry - 61, // 41: peerdb_flow.SetupNormalizedTableBatchOutput.table_exists_mapping:type_name -> peerdb_flow.SetupNormalizedTableBatchOutput.TableExistsMappingEntry - 63, // 42: peerdb_flow.TimestampPartitionRange.start:type_name -> google.protobuf.Timestamp - 63, // 43: peerdb_flow.TimestampPartitionRange.end:type_name -> google.protobuf.Timestamp + 63, // 39: 
peerdb_flow.SetupNormalizedTableBatchInput.peer_connection_config:type_name -> peerdb_peers.Peer + 61, // 40: peerdb_flow.SetupNormalizedTableBatchInput.table_name_schema_mapping:type_name -> peerdb_flow.SetupNormalizedTableBatchInput.TableNameSchemaMappingEntry + 62, // 41: peerdb_flow.SetupNormalizedTableBatchOutput.table_exists_mapping:type_name -> peerdb_flow.SetupNormalizedTableBatchOutput.TableExistsMappingEntry + 64, // 42: peerdb_flow.TimestampPartitionRange.start:type_name -> google.protobuf.Timestamp + 64, // 43: peerdb_flow.TimestampPartitionRange.end:type_name -> google.protobuf.Timestamp 37, // 44: peerdb_flow.TIDPartitionRange.start:type_name -> peerdb_flow.TID 37, // 45: peerdb_flow.TIDPartitionRange.end:type_name -> peerdb_flow.TID 35, // 46: peerdb_flow.PartitionRange.int_range:type_name -> peerdb_flow.IntPartitionRange 36, // 47: peerdb_flow.PartitionRange.timestamp_range:type_name -> peerdb_flow.TimestampPartitionRange 38, // 48: peerdb_flow.PartitionRange.tid_range:type_name -> peerdb_flow.TIDPartitionRange 1, // 49: peerdb_flow.QRepWriteMode.write_type:type_name -> peerdb_flow.QRepWriteType - 62, // 50: peerdb_flow.QRepConfig.source_peer:type_name -> peerdb_peers.Peer - 62, // 51: peerdb_flow.QRepConfig.destination_peer:type_name -> peerdb_peers.Peer + 63, // 50: peerdb_flow.QRepConfig.source_peer:type_name -> peerdb_peers.Peer + 63, // 51: peerdb_flow.QRepConfig.destination_peer:type_name -> peerdb_peers.Peer 0, // 52: peerdb_flow.QRepConfig.sync_mode:type_name -> peerdb_flow.QRepSyncMode 40, // 53: peerdb_flow.QRepConfig.write_mode:type_name -> peerdb_flow.QRepWriteMode 39, // 54: peerdb_flow.QRepPartition.range:type_name -> peerdb_flow.PartitionRange @@ -4723,6 +4807,18 @@ func file_flow_proto_init() { return nil } } + file_flow_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PeerDBColumns); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } file_flow_proto_msgTypes[6].OneofWrappers = []interface{}{} file_flow_proto_msgTypes[19].OneofWrappers = []interface{}{ @@ -4739,7 +4835,7 @@ func file_flow_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_flow_proto_rawDesc, NumEnums: 2, - NumMessages: 60, + NumMessages: 61, NumExtensions: 0, NumServices: 0, }, diff --git a/flow/go.mod b/flow/go.mod index a3575510c5..356eedb562 100644 --- a/flow/go.mod +++ b/flow/go.mod @@ -9,7 +9,7 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs v1.0.2 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub v1.2.0 - github.com/aws/aws-sdk-go v1.49.4 + github.com/aws/aws-sdk-go v1.49.5 github.com/cenkalti/backoff/v4 v4.2.1 github.com/cockroachdb/pebble v0.0.0-20231210175914-b4d301aeb46a github.com/google/uuid v1.5.0 @@ -37,7 +37,7 @@ require ( golang.org/x/sync v0.5.0 google.golang.org/api v0.154.0 google.golang.org/genproto/googleapis/api v0.0.0-20231212172506-995d672761c0 - google.golang.org/grpc v1.60.0 + google.golang.org/grpc v1.60.1 google.golang.org/protobuf v1.31.0 ) @@ -89,7 +89,7 @@ require ( github.com/aws/aws-sdk-go-v2 v1.24.0 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.16.12 // indirect - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.7 // indirect + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.8 // indirect 
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.9 // indirect github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.9 // indirect github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.9 // indirect @@ -97,7 +97,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.9 // indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.9 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.9 // indirect - github.com/aws/aws-sdk-go-v2/service/s3 v1.47.5 // indirect + github.com/aws/aws-sdk-go-v2/service/s3 v1.47.6 // indirect github.com/aws/smithy-go v1.19.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/danieljoos/wincred v1.2.1 // indirect @@ -147,7 +147,7 @@ require ( github.com/xrash/smetrics v0.0.0-20231213231151-1d8dd44e695e // indirect github.com/zeebo/xxh3 v1.0.2 // indirect go.opencensus.io v0.24.0 // indirect - golang.org/x/crypto v0.16.0 + golang.org/x/crypto v0.17.0 golang.org/x/exp v0.0.0-20231214170342-aacd6d4b4611 golang.org/x/mod v0.14.0 // indirect golang.org/x/net v0.19.0 // indirect diff --git a/flow/go.sum b/flow/go.sum index cb1247fe0c..98802c7e49 100644 --- a/flow/go.sum +++ b/flow/go.sum @@ -62,8 +62,8 @@ github.com/apache/arrow/go/v12 v12.0.1 h1:JsR2+hzYYjgSUkBSaahpqCetqZMr76djX80fF/ github.com/apache/arrow/go/v12 v12.0.1/go.mod h1:weuTY7JvTG/HDPtMQxEUp7pU73vkLWMLpY67QwZ/WWw= github.com/apache/thrift v0.19.0 h1:sOqkWPzMj7w6XaYbJQG7m4sGqVolaW/0D28Ln7yPzMk= github.com/apache/thrift v0.19.0/go.mod h1:SUALL216IiaOw2Oy+5Vs9lboJ/t9g40C+G07Dc0QC1I= -github.com/aws/aws-sdk-go v1.49.4 h1:qiXsqEeLLhdLgUIyfr5ot+N/dGPWALmtM1SetRmbUlY= -github.com/aws/aws-sdk-go v1.49.4/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.49.5 h1:y2yfBlwjPDi3/sBVKeznYEdDy6wIhjA2L5NCBMLUIYA= +github.com/aws/aws-sdk-go v1.49.5/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/aws/aws-sdk-go-v2 v1.24.0 h1:890+mqQ+hTpNuw0gGP6/4akolQkSToDJgHfQE7AwGuk= github.com/aws/aws-sdk-go-v2 v1.24.0/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 h1:OCs21ST2LrepDfD3lwlQiOqIGp6JiEUqG84GzTDoyJs= @@ -74,8 +74,8 @@ github.com/aws/aws-sdk-go-v2/credentials v1.16.12 h1:v/WgB8NxprNvr5inKIiVVrXPuuT github.com/aws/aws-sdk-go-v2/credentials v1.16.12/go.mod h1:X21k0FjEJe+/pauud82HYiQbEr9jRKY3kXEIQ4hXeTQ= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10 h1:w98BT5w+ao1/r5sUuiH6JkVzjowOKeOJRHERyy1vh58= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10/go.mod h1:K2WGI7vUvkIv1HoNbfBA1bvIZ+9kL3YVmWxeKuLQsiw= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.7 h1:FnLf60PtjXp8ZOzQfhJVsqF0OtYKQZWQfqOLshh8YXg= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.7/go.mod h1:tDVvl8hyU6E9B8TrnNrZQEVkQlB8hjJwcgpPhgtlnNg= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.8 h1:7wCngExMTAW2Bjf0Y92uWap6ZUcenLLWI5T3VJiQneU= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.8/go.mod h1:XVrAWYYM4ZRwOCOuLoUiao5hbLqNutEdqwCR3ZvkXgc= github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.9 h1:v+HbZaCGmOwnTTVS86Fleq0vPzOd7tnJGbFhP0stNLs= github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.9/go.mod h1:Xjqy+Nyj7VDLBtCMkQYOw1QYfAEZCVLrfI0ezve8wd4= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.9 h1:N94sVhRACtXyVcjXxrwK1SKFIJrA9pOJ5yu2eSHnmls= @@ -92,8 +92,8 @@ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.9 h1:Nf2sHxjMJ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.9/go.mod 
h1:idky4TER38YIjr2cADF1/ugFMKvZV7p//pVeV5LZbF0= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.9 h1:iEAeF6YC3l4FzlJPP9H3Ko1TXpdjdqWffxXjp8SY6uk= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.9/go.mod h1:kjsXoK23q9Z/tLBrckZLLyvjhZoS+AGrzqzUfEClvMM= -github.com/aws/aws-sdk-go-v2/service/s3 v1.47.5 h1:Keso8lIOS+IzI2MkPZyK6G0LYcK3My2LQ+T5bxghEAY= -github.com/aws/aws-sdk-go-v2/service/s3 v1.47.5/go.mod h1:vADO6Jn+Rq4nDtfwNjhgR84qkZwiC6FqCaXdw/kYwjA= +github.com/aws/aws-sdk-go-v2/service/s3 v1.47.6 h1:bkmlzokzTJyrFNA0J+EPlsF8x4/wp+9D45HTHO/ZUiY= +github.com/aws/aws-sdk-go-v2/service/s3 v1.47.6/go.mod h1:vADO6Jn+Rq4nDtfwNjhgR84qkZwiC6FqCaXdw/kYwjA= github.com/aws/aws-sdk-go-v2/service/sso v1.18.5 h1:ldSFWz9tEHAwHNmjx2Cvy1MjP5/L9kNoR0skc6wyOOM= github.com/aws/aws-sdk-go-v2/service/sso v1.18.5/go.mod h1:CaFfXLYL376jgbP7VKC96uFcU8Rlavak0UlAwk1Dlhc= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.5 h1:2k9KmFawS63euAkY4/ixVNsYYwrwnd5fIvgEKkfZFNM= @@ -423,8 +423,8 @@ golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY= -golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20231214170342-aacd6d4b4611 h1:qCEDpW1G+vcj3Y7Fy52pEM1AWm3abj8WimGYejI3SC4= golang.org/x/exp v0.0.0-20231214170342-aacd6d4b4611/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= @@ -548,8 +548,8 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.60.0 h1:6FQAR0kM31P6MRdeluor2w2gPaS4SVNrD/DNTxrQ15k= -google.golang.org/grpc v1.60.0/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= +google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= +google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/flow/workflows/qrep_flow.go b/flow/workflows/qrep_flow.go index 2373427c8e..644f61a611 100644 --- a/flow/workflows/qrep_flow.go +++ b/flow/workflows/qrep_flow.go @@ -125,6 +125,7 @@ func (q *QRepFlowExecution) SetupWatermarkTableOnDestination(ctx workflow.Contex TableNameSchemaMapping: map[string]*protos.TableSchema{ q.config.DestinationTableIdentifier: tblSchemaOutput.TableNameSchemaMapping[q.config.WatermarkTable], }, + SyncedAtColName: 
q.config.SyncedAtColName, } future = workflow.ExecuteActivity(ctx, flowable.CreateNormalizedTable, setupConfig) diff --git a/flow/workflows/snapshot_flow.go b/flow/workflows/snapshot_flow.go index 527fde5720..bc8448b4b7 100644 --- a/flow/workflows/snapshot_flow.go +++ b/flow/workflows/snapshot_flow.go @@ -176,6 +176,7 @@ func (s *SnapshotFlowExecution) cloneTable( NumRowsPerPartition: numRowsPerPartition, MaxParallelWorkers: numWorkers, StagingPath: s.config.SnapshotStagingPath, + SyncedAtColName: s.config.SyncedAtColName, WriteMode: &protos.QRepWriteMode{ WriteType: protos.QRepWriteType_QREP_WRITE_MODE_APPEND, }, diff --git a/nexus/Cargo.lock b/nexus/Cargo.lock index 3c243f2958..652a9ebd11 100644 --- a/nexus/Cargo.lock +++ b/nexus/Cargo.lock @@ -1934,8 +1934,6 @@ dependencies = [ "peer-cursor", "pgerror", "pgwire", - "pkcs1", - "pkcs8", "pt", "reqwest", "rsa", @@ -2590,9 +2588,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.22" +version = "0.11.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "046cd98826c46c2ac8ddecae268eb5c2e58628688a5fc7a2643704a73faba95b" +checksum = "37b1ae8d9ac08420c66222fb9096fc5de435c3c48542bc5336c51892cffafb41" dependencies = [ "async-compression", "base64 0.21.5", @@ -3317,9 +3315,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4a34ab300f2dee6e562c10a046fc05e358b29f9bf92277f30c3c8d82275f6f5" +checksum = "f657ba42c3f86e7680e53c8cd3af8abbe56b5491790b46e22e19c0d57463583e" dependencies = [ "deranged", "itoa", @@ -3339,9 +3337,9 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ad70d68dba9e1f8aceda7aa6711965dfec1cac869f311a51bd08b3a2ccbce20" +checksum = "26197e33420244aeb70c3e8c78376ca46571bc4e701e4791c2cd9f57dcb3a43f" dependencies = [ "time-core", ] @@ -4161,9 +4159,9 @@ checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" [[package]] name = "winnow" -version = "0.5.28" +version = "0.5.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c830786f7720c2fd27a1a0e27a709dbd3c4d009b56d098fc742d4f4eab91fe2" +checksum = "9b5c3db89721d50d0e2a673f5043fc4722f76dcc352d7b1ab8b8288bed4ed2c5" dependencies = [ "memchr", ] diff --git a/nexus/peer-snowflake/Cargo.toml b/nexus/peer-snowflake/Cargo.toml index a878be492a..6e16ff63cf 100644 --- a/nexus/peer-snowflake/Cargo.toml +++ b/nexus/peer-snowflake/Cargo.toml @@ -20,9 +20,7 @@ dashmap = "5.0" pgwire = "0.17" sha2 = "0.10" pt = { path = "../pt" } -pkcs8 = { version = "0.10.2", features = ["std", "pem", "encryption"] } -pkcs1 = "0.7.5" -rsa = "0.9.2" +rsa = { version = "0.9.2", features = ["pem", "pkcs5"] } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" futures = "0.3" diff --git a/nexus/peer-snowflake/src/auth.rs b/nexus/peer-snowflake/src/auth.rs index 482272aecf..64bb0f0458 100644 --- a/nexus/peer-snowflake/src/auth.rs +++ b/nexus/peer-snowflake/src/auth.rs @@ -6,9 +6,9 @@ use std::{ use anyhow::Context; use base64::prelude::{Engine as _, BASE64_STANDARD}; use jsonwebtoken::{encode as jwt_encode, Algorithm, EncodingKey, Header}; -use pkcs1::EncodeRsaPrivateKey; -use pkcs8::{DecodePrivateKey, EncodePublicKey}; -use rsa::{RsaPrivateKey, RsaPublicKey}; +use rsa::RsaPrivateKey; +use 
rsa::pkcs1::EncodeRsaPrivateKey; +use rsa::pkcs8::{DecodePrivateKey, EncodePublicKey}; use secrecy::{Secret, SecretString}; use serde::Serialize; use sha2::{Digest, Sha256}; @@ -91,7 +91,7 @@ impl SnowflakeAuth { #[tracing::instrument(name = "peer_sflake::gen_public_key_fp", skip_all)] fn gen_public_key_fp(private_key: &RsaPrivateKey) -> anyhow::Result<String> { - let public_key = EncodePublicKey::to_public_key_der(&RsaPublicKey::from(private_key))?; + let public_key = private_key.to_public_key().to_public_key_der()?; let res = format!( "SHA256:{}", BASE64_STANDARD.encode(Sha256::new_with_prefix(public_key.as_bytes()).finalize()) @@ -102,7 +102,7 @@ impl SnowflakeAuth { #[tracing::instrument(name = "peer_sflake::auth_refresh_jwt", skip_all)] fn refresh_jwt(&mut self) -> anyhow::Result<()> { let private_key_jwt: EncodingKey = EncodingKey::from_rsa_der( - EncodeRsaPrivateKey::to_pkcs1_der(&self.private_key)?.as_bytes(), + self.private_key.to_pkcs1_der()?.as_bytes(), ); self.last_refreshed = SystemTime::now().duration_since(UNIX_EPOCH)?.as_secs(); info!( diff --git a/nexus/pt/src/peerdb_flow.rs b/nexus/pt/src/peerdb_flow.rs index 798e09c99b..50b1541e0d 100644 --- a/nexus/pt/src/peerdb_flow.rs +++ b/nexus/pt/src/peerdb_flow.rs @@ -472,6 +472,8 @@ pub struct QRepConfig { /// to be used after the old mirror is dropped #[prost(bool, tag="18")] pub dst_table_full_resync: bool, + #[prost(string, tag="19")] + pub synced_at_col_name: ::prost::alloc::string::String, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -541,6 +543,16 @@ pub struct QRepFlowState { #[prost(bool, tag="4")] pub disable_wait_for_new_rows: bool, } +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PeerDbColumns { + #[prost(string, tag="1")] + pub soft_delete_col_name: ::prost::alloc::string::String, + #[prost(string, tag="2")] + pub synced_at_col_name: ::prost::alloc::string::String, + #[prost(bool, tag="3")] + pub soft_delete: bool, +} /// protos for qrep #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] diff --git a/nexus/pt/src/peerdb_flow.serde.rs b/nexus/pt/src/peerdb_flow.serde.rs index 0436bf3345..1ebf981cd4 100644 --- a/nexus/pt/src/peerdb_flow.serde.rs +++ b/nexus/pt/src/peerdb_flow.serde.rs @@ -2476,6 +2476,138 @@ impl<'de> serde::Deserialize<'de> for PartitionRange { deserializer.deserialize_struct("peerdb_flow.PartitionRange", FIELDS, GeneratedVisitor) } } +impl serde::Serialize for PeerDbColumns { + #[allow(deprecated)] + fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if !self.soft_delete_col_name.is_empty() { + len += 1; + } + if !self.synced_at_col_name.is_empty() { + len += 1; + } + if self.soft_delete { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("peerdb_flow.PeerDBColumns", len)?; + if !self.soft_delete_col_name.is_empty() { + struct_ser.serialize_field("softDeleteColName", &self.soft_delete_col_name)?; + } + if !self.synced_at_col_name.is_empty() { + struct_ser.serialize_field("syncedAtColName", &self.synced_at_col_name)?; + } + if self.soft_delete { + struct_ser.serialize_field("softDelete", &self.soft_delete)?; + } + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for PeerDbColumns { + #[allow(deprecated)] + fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error> + where + D: serde::Deserializer<'de>, + { + const FIELDS:
&[&str] = &[ + "soft_delete_col_name", + "softDeleteColName", + "synced_at_col_name", + "syncedAtColName", + "soft_delete", + "softDelete", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + SoftDeleteColName, + SyncedAtColName, + SoftDelete, + __SkipField__, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize<D>(deserializer: D) -> std::result::Result<GeneratedField, D::Error> + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str<E>(self, value: &str) -> std::result::Result<GeneratedField, E> + where + E: serde::de::Error, + { + match value { + "softDeleteColName" | "soft_delete_col_name" => Ok(GeneratedField::SoftDeleteColName), + "syncedAtColName" | "synced_at_col_name" => Ok(GeneratedField::SyncedAtColName), + "softDelete" | "soft_delete" => Ok(GeneratedField::SoftDelete), + _ => Ok(GeneratedField::__SkipField__), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = PeerDbColumns; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct peerdb_flow.PeerDBColumns") + } + + fn visit_map<V>(self, mut map: V) -> std::result::Result<PeerDbColumns, V::Error> + where + V: serde::de::MapAccess<'de>, + { + let mut soft_delete_col_name__ = None; + let mut synced_at_col_name__ = None; + let mut soft_delete__ = None; + while let Some(k) = map.next_key()? { + match k { + GeneratedField::SoftDeleteColName => { + if soft_delete_col_name__.is_some() { + return Err(serde::de::Error::duplicate_field("softDeleteColName")); + } + soft_delete_col_name__ = Some(map.next_value()?); + } + GeneratedField::SyncedAtColName => { + if synced_at_col_name__.is_some() { + return Err(serde::de::Error::duplicate_field("syncedAtColName")); + } + synced_at_col_name__ = Some(map.next_value()?); + } + GeneratedField::SoftDelete => { + if soft_delete__.is_some() { + return Err(serde::de::Error::duplicate_field("softDelete")); + } + soft_delete__ = Some(map.next_value()?); + } + GeneratedField::__SkipField__ => { + let _ = map.next_value::<serde::de::IgnoredAny>()?; + } + } + } + Ok(PeerDbColumns { + soft_delete_col_name: soft_delete_col_name__.unwrap_or_default(), + synced_at_col_name: synced_at_col_name__.unwrap_or_default(), + soft_delete: soft_delete__.unwrap_or_default(), + }) + } + } + deserializer.deserialize_struct("peerdb_flow.PeerDBColumns", FIELDS, GeneratedVisitor) + } +} impl serde::Serialize for PostgresTableIdentifier { #[allow(deprecated)] fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> @@ -2636,6 +2768,9 @@ impl serde::Serialize for QRepConfig { if self.dst_table_full_resync { len += 1; } + if !self.synced_at_col_name.is_empty() { + len += 1; + } let mut struct_ser = serializer.serialize_struct("peerdb_flow.QRepConfig", len)?; if !self.flow_job_name.is_empty() { struct_ser.serialize_field("flowJobName", &self.flow_job_name)?; @@ -2693,6 +2828,9 @@ if self.dst_table_full_resync { struct_ser.serialize_field("dstTableFullResync", &self.dst_table_full_resync)?; } + if !self.synced_at_col_name.is_empty() { + struct_ser.serialize_field("syncedAtColName", &self.synced_at_col_name)?; + } struct_ser.end() } } @@ -2738,6 +2876,8 @@ impl<'de>
serde::Deserialize<'de> for QRepConfig { "setupWatermarkTableOnDestination", "dst_table_full_resync", "dstTableFullResync", + "synced_at_col_name", + "syncedAtColName", ]; #[allow(clippy::enum_variant_names)] @@ -2760,6 +2900,7 @@ impl<'de> serde::Deserialize<'de> for QRepConfig { NumRowsPerPartition, SetupWatermarkTableOnDestination, DstTableFullResync, + SyncedAtColName, __SkipField__, } impl<'de> serde::Deserialize<'de> for GeneratedField { @@ -2800,6 +2941,7 @@ impl<'de> serde::Deserialize<'de> for QRepConfig { "numRowsPerPartition" | "num_rows_per_partition" => Ok(GeneratedField::NumRowsPerPartition), "setupWatermarkTableOnDestination" | "setup_watermark_table_on_destination" => Ok(GeneratedField::SetupWatermarkTableOnDestination), "dstTableFullResync" | "dst_table_full_resync" => Ok(GeneratedField::DstTableFullResync), + "syncedAtColName" | "synced_at_col_name" => Ok(GeneratedField::SyncedAtColName), _ => Ok(GeneratedField::__SkipField__), } } @@ -2837,6 +2979,7 @@ impl<'de> serde::Deserialize<'de> for QRepConfig { let mut num_rows_per_partition__ = None; let mut setup_watermark_table_on_destination__ = None; let mut dst_table_full_resync__ = None; + let mut synced_at_col_name__ = None; while let Some(k) = map.next_key()? { match k { GeneratedField::FlowJobName => { @@ -2957,6 +3100,12 @@ impl<'de> serde::Deserialize<'de> for QRepConfig { } dst_table_full_resync__ = Some(map.next_value()?); } + GeneratedField::SyncedAtColName => { + if synced_at_col_name__.is_some() { + return Err(serde::de::Error::duplicate_field("syncedAtColName")); + } + synced_at_col_name__ = Some(map.next_value()?); + } GeneratedField::__SkipField__ => { let _ = map.next_value::()?; } @@ -2981,6 +3130,7 @@ impl<'de> serde::Deserialize<'de> for QRepConfig { num_rows_per_partition: num_rows_per_partition__.unwrap_or_default(), setup_watermark_table_on_destination: setup_watermark_table_on_destination__.unwrap_or_default(), dst_table_full_resync: dst_table_full_resync__.unwrap_or_default(), + synced_at_col_name: synced_at_col_name__.unwrap_or_default(), }) } } diff --git a/protos/flow.proto b/protos/flow.proto index 88d87b9835..57ceef506f 100644 --- a/protos/flow.proto +++ b/protos/flow.proto @@ -321,6 +321,8 @@ message QRepConfig { // create new tables with "_peerdb_resync" suffix, perform initial load and then swap the new table with the old ones // to be used after the old mirror is dropped bool dst_table_full_resync = 18; + + string synced_at_col_name = 19; } message QRepPartition { @@ -364,3 +366,9 @@ message QRepFlowState { bool needs_resync = 3; bool disable_wait_for_new_rows = 4; } + +message PeerDBColumns { + string soft_delete_col_name = 1; + string synced_at_col_name = 2; + bool soft_delete = 3; +} \ No newline at end of file diff --git a/ui/app/api/mirrors/route.ts b/ui/app/api/mirrors/route.ts index f9ca2f1911..65e5e3fe6c 100644 --- a/ui/app/api/mirrors/route.ts +++ b/ui/app/api/mirrors/route.ts @@ -3,15 +3,6 @@ import prisma from '@/app/utils/prisma'; export const dynamic = 'force-dynamic'; -const stringifyConfig = (flowArray: any[]) => { - flowArray.forEach((flow) => { - if (flow.config_proto) { - flow.config_proto = new TextDecoder().decode(flow.config_proto); - } - }); - return flowArray; -}; - export async function GET(request: Request) { const mirrors = await prisma.flows.findMany({ distinct: 'name', @@ -21,7 +12,8 @@ export async function GET(request: Request) { }, }); - const flows = mirrors?.map((mirror) => { + // using any as type because of the way prisma returns data + const flows = 
mirrors?.map((mirror: any) => { let newMirror: any = { ...mirror, sourcePeer: getTruePeer(mirror.sourcePeer), @@ -29,5 +21,5 @@ export async function GET(request: Request) { }; return newMirror; }); - return new Response(JSON.stringify(stringifyConfig(flows))); + return new Response(JSON.stringify(flows)); } diff --git a/ui/app/mirrors/create/cdc/cdc.tsx b/ui/app/mirrors/create/cdc/cdc.tsx index 65f2158ee6..63155acdf2 100644 --- a/ui/app/mirrors/create/cdc/cdc.tsx +++ b/ui/app/mirrors/create/cdc/cdc.tsx @@ -66,12 +66,6 @@ export default function CDCConfigForm({ if (mirrorConfig.source != undefined && mirrorConfig.destination != undefined) return ( <> - {normalSettings.map((setting, id) => { return ( paramDisplayCondition(setting) && ( @@ -112,6 +106,13 @@ export default function CDCConfigForm({ /> ); })} + + ); } diff --git a/ui/app/mirrors/create/cdc/columnbox.tsx b/ui/app/mirrors/create/cdc/columnbox.tsx new file mode 100644 index 0000000000..b68560419f --- /dev/null +++ b/ui/app/mirrors/create/cdc/columnbox.tsx @@ -0,0 +1,80 @@ +'use client'; +import { TableMapRow } from '@/app/dto/MirrorsDTO'; +import { Checkbox } from '@/lib/Checkbox'; +import { Label } from '@/lib/Label'; +import { RowWithCheckbox } from '@/lib/Layout'; +import { Dispatch, SetStateAction } from 'react'; + +interface ColumnProps { + columns: string[]; + tableRow: TableMapRow; + rows: TableMapRow[]; + setRows: Dispatch>; +} +export default function ColumnBox({ + columns, + tableRow, + rows, + setRows, +}: ColumnProps) { + const handleColumnExclusion = ( + source: string, + column: string, + include: boolean + ) => { + const currRows = [...rows]; + const rowOfSource = currRows.find((row) => row.source === source); + if (rowOfSource) { + if (include) { + const updatedExclude = rowOfSource.exclude.filter( + (col) => col !== column + ); + rowOfSource.exclude = updatedExclude; + } else { + rowOfSource.exclude.push(column); + } + } + setRows(currRows); + }; + + const columnExclusion = new Set(tableRow.exclude); + return columns.map((column) => { + const [columnName, columnType, isPkeyStr] = column.split(':'); + const isPkey = isPkeyStr === 'true'; + return ( + + {columnName} +
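The handleColumnExclusion callback in columnbox.tsx above keeps a per-table exclude list in step with the checkbox: checking a column removes it from exclude, unchecking appends it. A pure-function sketch of that update (the patch mutates the matching row inside a copied array; TableMapRow is reduced to the fields used here):

interface TableMapRow {
  source: string;
  exclude: string[];
}

// include => drop the column from exclude; exclude => append it. Returns a new rows array.
function toggleColumn(
  rows: TableMapRow[],
  source: string,
  column: string,
  include: boolean
): TableMapRow[] {
  return rows.map((row) =>
    row.source === source
      ? {
          ...row,
          exclude: include
            ? row.exclude.filter((col) => col !== column)
            : [...row.exclude, column],
        }
      : row
  );
}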
+ {columnType}
+ + } + action={ + + handleColumnExclusion(tableRow.source, columnName, state) + } + /> + } + /> + ); + }); +} diff --git a/ui/app/mirrors/create/cdc/schemabox.tsx b/ui/app/mirrors/create/cdc/schemabox.tsx index 4195fae83d..d9b15a703c 100644 --- a/ui/app/mirrors/create/cdc/schemabox.tsx +++ b/ui/app/mirrors/create/cdc/schemabox.tsx @@ -7,9 +7,16 @@ import { Label } from '@/lib/Label'; import { RowWithCheckbox } from '@/lib/Layout'; import { SearchField } from '@/lib/SearchField'; import { TextField } from '@/lib/TextField'; -import { Dispatch, SetStateAction, useCallback, useState } from 'react'; +import { + Dispatch, + SetStateAction, + useCallback, + useMemo, + useState, +} from 'react'; import { BarLoader } from 'react-spinners/'; import { fetchColumns, fetchTables } from '../handlers'; +import ColumnBox from './columnbox'; import { expandableStyle, schemaBoxStyle, tableBoxStyle } from './styles'; interface SchemaBoxProps { @@ -36,6 +43,20 @@ const SchemaBox = ({ const [columnsLoading, setColumnsLoading] = useState(false); const [expandedSchemas, setExpandedSchemas] = useState([]); const [tableQuery, setTableQuery] = useState(''); + const [schemaLoadedSet, setSchemaLoadedSet] = useState>( + new Set() + ); + + const [handlingAll, setHandlingAll] = useState(false); + + const searchedTables = useMemo(() => { + const tableQueryLower = tableQuery.toLowerCase(); + return rows.filter( + (row) => + row.schema === schema && + row.source.toLowerCase().includes(tableQueryLower) + ); + }, [schema, rows, tableQuery]); const schemaIsExpanded = useCallback( (schema: string) => { @@ -74,11 +95,13 @@ const SchemaBox = ({ const addTableColumns = (table: string) => { const schemaName = table.split('.')[0]; const tableName = table.split('.')[1]; + fetchColumns(sourcePeer, schemaName, tableName, setColumnsLoading).then( - (res) => + (res) => { setTableColumns((prev) => { return [...prev, { tableName: table, columns: res }]; - }) + }); + } ); }; @@ -93,47 +116,34 @@ const SchemaBox = ({ ?.columns; }; - const handleColumnExclusion = ( - source: string, - column: string, - include: boolean - ) => { - const currRows = [...rows]; - const rowOfSource = currRows.find((row) => row.source === source); - if (rowOfSource) { - if (include) { - const updatedExclude = rowOfSource.exclude.filter( - (col) => col !== column - ); - rowOfSource.exclude = updatedExclude; - } else { - rowOfSource.exclude.push(column); - } - } - setRows(currRows); - }; - const handleSelectAll = ( - e: React.MouseEvent + e: React.MouseEvent, + schemaName: string ) => { + setHandlingAll(true); const newRows = [...rows]; for (const row of newRows) { - row.selected = e.currentTarget.checked; - if (e.currentTarget.checked) addTableColumns(row.source); - else removeTableColumns(row.source); + if (row.schema === schemaName) { + row.selected = e.currentTarget.checked; + if (e.currentTarget.checked) addTableColumns(row.source); + else removeTableColumns(row.source); + } } setRows(newRows); + setHandlingAll(false); }; const handleSchemaClick = (schemaName: string) => { if (!schemaIsExpanded(schemaName)) { - setTablesLoading(true); setExpandedSchemas((curr) => [...curr, schemaName]); - fetchTables(sourcePeer, schemaName, peerType).then((tableRows) => { - const newRows = [...rows, ...tableRows]; - setRows(newRows); - setTablesLoading(false); - }); + if (!schemaLoadedSet.has(schemaName)) { + setTablesLoading(true); + setSchemaLoadedSet((loaded) => new Set(loaded).add(schemaName)); + fetchTables(sourcePeer, schemaName, peerType).then((tableRows) => { + 
setRows((value) => [...value, ...tableRows]); + setTablesLoading(false); + }); + } } else { setExpandedSchemas((curr) => curr.filter((expandedSchema) => expandedSchema != schemaName) @@ -158,7 +168,10 @@ const SchemaBox = ({
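The schemabox.tsx changes above make schema expansion idempotent (tables are fetched only the first time a schema is expanded, tracked in schemaLoadedSet), scope select-all to a single schema behind the handlingAll flag, and derive the visible rows from a memoized, case-insensitive search filter. A condensed sketch of the load-once and search pieces as a hook (fetchTables is injected here, whereas the component imports it from ../handlers):

import { useMemo, useState } from 'react';

type TableRow = { schema: string; source: string };

function useSchemaTables(
  schema: string,
  tableQuery: string,
  fetchTables: (schemaName: string) => Promise<TableRow[]>
) {
  const [rows, setRows] = useState<TableRow[]>([]);
  const [loaded, setLoaded] = useState<Set<string>>(new Set());

  // Fetch a schema's tables at most once, appending to the shared rows state.
  const expandSchema = (schemaName: string) => {
    if (loaded.has(schemaName)) return;
    setLoaded((prev) => new Set(prev).add(schemaName));
    fetchTables(schemaName).then((tableRows) =>
      setRows((prev) => [...prev, ...tableRows])
    );
  };

  // Only this schema's rows whose name matches the lower-cased query.
  const searchedTables = useMemo(() => {
    const q = tableQuery.toLowerCase();
    return rows.filter(
      (row) => row.schema === schema && row.source.toLowerCase().includes(q)
    );
  }, [rows, schema, tableQuery]);

  return { rows, expandSchema, searchedTables };
}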
- handleSelectAll(e)} /> + handleSelectAll(e, schema)} + /> @@ -173,139 +186,96 @@ const SchemaBox = ({ />
- {schemaIsExpanded(schema) && ( + {/* TABLE BOX */} + {handlingAll && } + {!handlingAll && schemaIsExpanded(schema) && (
- {rows.filter((row) => row.schema === schema).length ? ( - rows - .filter( - (row) => - row.schema === schema && - row.source.toLowerCase().includes(tableQuery.toLowerCase()) - ) - .map((row) => { - const columns = getTableColumns(row.source); - return ( -
+ {searchedTables.length ? ( + searchedTables.map((row) => { + const columns = getTableColumns(row.source); + return ( +
+
+ + {row.source} + + } + action={ + + handleTableSelect(state, row.source) + } + /> + } + /> +
- - {row.source} - - } - action={ - - handleTableSelect(state, row.source) - } - /> +

Target Table:

+ ) => + updateDestination(row.source, e.target.value) } /> +
+
-
+ + {columns ? ( + -
-
- {row.selected && ( -
+ ) : columnsLoading ? ( + + ) : ( - {columns ? ( - columns.map((column) => { - const columnName = column.split(':')[0]; - const columnType = column.split(':')[1]; - const isPkey = column.split(':')[2] === 'true'; - return ( - - {columnName} -

- {columnType} -

- - } - action={ - col == columnName - ) - } - onCheckedChange={(state: boolean) => - handleColumnExclusion( - row.source, - columnName, - state - ) - } - /> - } - /> - ); - }) - ) : columnsLoading ? ( - - ) : ( - - )} -
- )} -
- ); - }) + )} +
+ )} + + ); + }) ) : tablesLoading ? ( ) : ( diff --git a/ui/app/mirrors/create/cdc/tablemapping.tsx b/ui/app/mirrors/create/cdc/tablemapping.tsx index 85c889cb4b..2cccea321f 100644 --- a/ui/app/mirrors/create/cdc/tablemapping.tsx +++ b/ui/app/mirrors/create/cdc/tablemapping.tsx @@ -55,7 +55,7 @@ const TableMapping = ({ /> -
+
{allSchemas ? ( allSchemas ?.filter((schema) => { diff --git a/ui/app/mirrors/page.tsx b/ui/app/mirrors/page.tsx index 7cfe894294..c98b402857 100644 --- a/ui/app/mirrors/page.tsx +++ b/ui/app/mirrors/page.tsx @@ -31,7 +31,7 @@ export default function Mirrors() { let qrepFlows = flows?.filter((flow) => { if (flow.config_proto && flow.query_string) { - let config = QRepConfig.decode(flow.config_proto); + let config = QRepConfig.decode(flow.config_proto.data); const watermarkCol = config.watermarkColumn.toLowerCase(); return watermarkCol !== 'xmin' && watermarkCol !== 'ctid'; } @@ -40,7 +40,7 @@ export default function Mirrors() { let xminFlows = flows?.filter((flow) => { if (flow.config_proto && flow.query_string) { - let config = QRepConfig.decode(flow.config_proto); + let config = QRepConfig.decode(flow.config_proto.data); return config.watermarkColumn.toLowerCase() === 'xmin'; } return false; diff --git a/ui/grpc_generated/flow.ts b/ui/grpc_generated/flow.ts index 845b4d627f..094d97765a 100644 --- a/ui/grpc_generated/flow.ts +++ b/ui/grpc_generated/flow.ts @@ -430,6 +430,7 @@ export interface QRepConfig { * to be used after the old mirror is dropped */ dstTableFullResync: boolean; + syncedAtColName: string; } export interface QRepPartition { @@ -474,6 +475,12 @@ export interface QRepFlowState { disableWaitForNewRows: boolean; } +export interface PeerDBColumns { + softDeleteColName: string; + syncedAtColName: string; + softDelete: boolean; +} + function createBaseTableNameMapping(): TableNameMapping { return { sourceTableName: "", destinationTableName: "" }; } @@ -5301,6 +5308,7 @@ function createBaseQRepConfig(): QRepConfig { numRowsPerPartition: 0, setupWatermarkTableOnDestination: false, dstTableFullResync: false, + syncedAtColName: "", }; } @@ -5360,6 +5368,9 @@ export const QRepConfig = { if (message.dstTableFullResync === true) { writer.uint32(144).bool(message.dstTableFullResync); } + if (message.syncedAtColName !== "") { + writer.uint32(154).string(message.syncedAtColName); + } return writer; }, @@ -5496,6 +5507,13 @@ export const QRepConfig = { message.dstTableFullResync = reader.bool(); continue; + case 19: + if (tag !== 154) { + break; + } + + message.syncedAtColName = reader.string(); + continue; } if ((tag & 7) === 4 || tag === 0) { break; @@ -5529,6 +5547,7 @@ export const QRepConfig = { ? Boolean(object.setupWatermarkTableOnDestination) : false, dstTableFullResync: isSet(object.dstTableFullResync) ? Boolean(object.dstTableFullResync) : false, + syncedAtColName: isSet(object.syncedAtColName) ? String(object.syncedAtColName) : "", }; }, @@ -5588,6 +5607,9 @@ export const QRepConfig = { if (message.dstTableFullResync === true) { obj.dstTableFullResync = message.dstTableFullResync; } + if (message.syncedAtColName !== "") { + obj.syncedAtColName = message.syncedAtColName; + } return obj; }, @@ -5620,6 +5642,7 @@ export const QRepConfig = { message.numRowsPerPartition = object.numRowsPerPartition ?? 0; message.setupWatermarkTableOnDestination = object.setupWatermarkTableOnDestination ?? false; message.dstTableFullResync = object.dstTableFullResync ?? false; + message.syncedAtColName = object.syncedAtColName ?? 
""; return message; }, }; @@ -6257,6 +6280,95 @@ export const QRepFlowState = { }, }; +function createBasePeerDBColumns(): PeerDBColumns { + return { softDeleteColName: "", syncedAtColName: "", softDelete: false }; +} + +export const PeerDBColumns = { + encode(message: PeerDBColumns, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + if (message.softDeleteColName !== "") { + writer.uint32(10).string(message.softDeleteColName); + } + if (message.syncedAtColName !== "") { + writer.uint32(18).string(message.syncedAtColName); + } + if (message.softDelete === true) { + writer.uint32(24).bool(message.softDelete); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): PeerDBColumns { + const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBasePeerDBColumns(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.softDeleteColName = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.syncedAtColName = reader.string(); + continue; + case 3: + if (tag !== 24) { + break; + } + + message.softDelete = reader.bool(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): PeerDBColumns { + return { + softDeleteColName: isSet(object.softDeleteColName) ? String(object.softDeleteColName) : "", + syncedAtColName: isSet(object.syncedAtColName) ? String(object.syncedAtColName) : "", + softDelete: isSet(object.softDelete) ? Boolean(object.softDelete) : false, + }; + }, + + toJSON(message: PeerDBColumns): unknown { + const obj: any = {}; + if (message.softDeleteColName !== "") { + obj.softDeleteColName = message.softDeleteColName; + } + if (message.syncedAtColName !== "") { + obj.syncedAtColName = message.syncedAtColName; + } + if (message.softDelete === true) { + obj.softDelete = message.softDelete; + } + return obj; + }, + + create, I>>(base?: I): PeerDBColumns { + return PeerDBColumns.fromPartial(base ?? ({} as any)); + }, + fromPartial, I>>(object: I): PeerDBColumns { + const message = createBasePeerDBColumns(); + message.softDeleteColName = object.softDeleteColName ?? ""; + message.syncedAtColName = object.syncedAtColName ?? ""; + message.softDelete = object.softDelete ?? false; + return message; + }, +}; + declare const self: any | undefined; declare const window: any | undefined; declare const global: any | undefined;