From 62e2e5ce3cde5bfe192798e4e5a1e7b6bcd23cfb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philip=20Dub=C3=A9?= Date: Tue, 21 Nov 2023 22:42:48 +0000 Subject: [PATCH 1/8] peerdb-server.Dockerfile: rust 1.74 (#698) https://blog.rust-lang.org/2023/07/13/Rust-1.71.0.html https://blog.rust-lang.org/2023/08/24/Rust-1.72.0.html https://blog.rust-lang.org/2023/10/05/Rust-1.73.0.html https://blog.rust-lang.org/2023/11/16/Rust-1.74.0.html No vital changes here --- stacks/peerdb-server.Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stacks/peerdb-server.Dockerfile b/stacks/peerdb-server.Dockerfile index 934af979f1..b63006d1ca 100644 --- a/stacks/peerdb-server.Dockerfile +++ b/stacks/peerdb-server.Dockerfile @@ -1,6 +1,6 @@ # syntax=docker/dockerfile:1 -FROM lukemathwalker/cargo-chef:latest-rust-1.70-slim-bookworm as chef +FROM lukemathwalker/cargo-chef:latest-rust-1.74-slim-bookworm as chef WORKDIR /root FROM chef as planner From c2117e26a9b157422d79d27ecb3f81da1a671ea4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philip=20Dub=C3=A9?= Date: Wed, 22 Nov 2023 18:46:31 +0000 Subject: [PATCH 2/8] temporalio: update images (#701) Also clean up peerdb-ui.Dockerfile a bit --- dev-peerdb.sh | 2 +- docker-compose-dev.yml | 6 +++--- docker-compose.yml | 6 +++--- stacks/peerdb-ui.Dockerfile | 8 ++------ 4 files changed, 9 insertions(+), 13 deletions(-) diff --git a/dev-peerdb.sh b/dev-peerdb.sh index 09e93defe5..d8b0e15137 100755 --- a/dev-peerdb.sh +++ b/dev-peerdb.sh @@ -7,5 +7,5 @@ then exit 1 fi -docker compose -f docker-compose-dev.yml up --build\ +docker compose -f docker-compose-dev.yml up --build \ --no-attach temporal --no-attach pyroscope --no-attach temporal-ui diff --git a/docker-compose-dev.yml b/docker-compose-dev.yml index 3c955bef88..63cd979ed5 100644 --- a/docker-compose-dev.yml +++ b/docker-compose-dev.yml @@ -64,7 +64,7 @@ services: - POSTGRES_PWD=postgres - POSTGRES_SEEDS=catalog - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development-sql.yaml - image: temporalio/auto-setup:1.22.1 + image: temporalio/auto-setup:1.22 ports: - 7233:7233 volumes: @@ -85,7 +85,7 @@ services: environment: - TEMPORAL_ADDRESS=temporal:7233 - TEMPORAL_CLI_ADDRESS=temporal:7233 - image: temporalio/admin-tools:1.22.1 + image: temporalio/admin-tools:1.22 stdin_open: true tty: true healthcheck: @@ -102,7 +102,7 @@ services: - TEMPORAL_ADDRESS=temporal:7233 - TEMPORAL_CORS_ORIGINS=http://localhost:3000 - TEMPORAL_CSRF_COOKIE_INSECURE=true - image: temporalio/ui:2.17.2 + image: temporalio/ui:2.21.3 ports: - 8085:8080 diff --git a/docker-compose.yml b/docker-compose.yml index 0686913a8e..2a087ded38 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -57,7 +57,7 @@ services: - POSTGRES_PWD=postgres - POSTGRES_SEEDS=catalog - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development-sql.yaml - image: temporalio/auto-setup:1.22.1 + image: temporalio/auto-setup:1.22 ports: - 7233:7233 volumes: @@ -72,7 +72,7 @@ services: environment: - TEMPORAL_ADDRESS=temporal:7233 - TEMPORAL_CLI_ADDRESS=temporal:7233 - image: temporalio/admin-tools:1.22.1 + image: temporalio/admin-tools:1.22 stdin_open: true tty: true healthcheck: @@ -89,7 +89,7 @@ services: - TEMPORAL_ADDRESS=temporal:7233 - TEMPORAL_CORS_ORIGINS=http://localhost:3000 - TEMPORAL_CSRF_COOKIE_INSECURE=true - image: temporalio/ui:2.17.2 + image: temporalio/ui:2.21.3 ports: - 8085:8080 diff --git a/stacks/peerdb-ui.Dockerfile b/stacks/peerdb-ui.Dockerfile index e3aaf18a96..8630beb1dd 100644 --- a/stacks/peerdb-ui.Dockerfile +++ 
b/stacks/peerdb-ui.Dockerfile @@ -7,6 +7,7 @@ RUN apt-get update && \ apt-get install -y openssl && \ mkdir /app && \ chown -R node:node /app +ENV NEXT_TELEMETRY_DISABLED 1 USER node WORKDIR /app @@ -16,16 +17,11 @@ COPY --chown=node:node ui/package.json ui/package-lock.json . RUN npm ci COPY --chown=node:node ui/ . -# Prisma -RUN npx prisma generate - -ENV NEXT_TELEMETRY_DISABLED 1 -RUN npm run build +RUN npx prisma generate && npm run build # Builder stage FROM base AS runner ENV NODE_ENV production -ENV NEXT_TELEMETRY_DISABLED 1 COPY --from=builder /app/public ./public From 2de985b81c763feee3c518f061ec5061699eee6e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philip=20Dub=C3=A9?= Date: Wed, 22 Nov 2023 19:13:22 +0000 Subject: [PATCH 3/8] nexus cleanup (#702) 1. fix typo: QueryAssocation to QueryAssociation 2. remove unnecessary allocations for two-item keys_to_ignore 3. make NexusQueryParser get_peers_bridge/parse_simple_sql async; all callers are async 4. use with_capacity whenever straightforward to do so 5. change peer-snowflake SQLStatement parameters to struct since it serializes to a constant object --- nexus/analyzer/src/lib.rs | 15 ++++------- nexus/catalog/src/lib.rs | 2 +- nexus/parser/src/lib.rs | 20 ++++++-------- nexus/peer-bigquery/src/stream.rs | 2 +- nexus/peer-snowflake/src/lib.rs | 43 +++++++++++++------------------ nexus/server/src/cursor.rs | 4 +-- nexus/server/src/main.rs | 16 ++++++------ nexus/value/src/lib.rs | 4 +-- 8 files changed, 45 insertions(+), 61 deletions(-) diff --git a/nexus/analyzer/src/lib.rs b/nexus/analyzer/src/lib.rs index 0824abe2f2..2e64012a35 100644 --- a/nexus/analyzer/src/lib.rs +++ b/nexus/analyzer/src/lib.rs @@ -40,13 +40,13 @@ impl<'a> PeerExistanceAnalyzer<'a> { } #[derive(Debug, Clone)] -pub enum QueryAssocation { +pub enum QueryAssociation { Peer(Box), Catalog, } impl<'a> StatementAnalyzer for PeerExistanceAnalyzer<'a> { - type Output = QueryAssocation; + type Output = QueryAssociation; fn analyze(&self, statement: &Statement) -> anyhow::Result { let mut peers_touched: HashSet = HashSet::new(); @@ -78,9 +78,9 @@ impl<'a> StatementAnalyzer for PeerExistanceAnalyzer<'a> { anyhow::bail!("queries touching multiple peers are not supported") } else if let Some(peer_name) = peers_touched.iter().next() { let peer = self.peers.get(peer_name).unwrap(); - Ok(QueryAssocation::Peer(Box::new(peer.clone()))) + Ok(QueryAssociation::Peer(Box::new(peer.clone()))) } else { - Ok(QueryAssocation::Catalog) + Ok(QueryAssociation::Catalog) } } } @@ -785,14 +785,9 @@ fn parse_db_options( }) .unwrap_or_default(); - let keys_to_ignore: HashSet = vec!["metadata_db", "unnest_columns"] - .into_iter() - .map(|s| s.to_string()) - .collect(); - let mut eventhubs: HashMap = HashMap::new(); for (key, _) in opts { - if keys_to_ignore.contains(key) { + if matches!(key, "metadata_db" | "unnest_columns") { continue; } diff --git a/nexus/catalog/src/lib.rs b/nexus/catalog/src/lib.rs index 15fccf2233..c1851432e0 100644 --- a/nexus/catalog/src/lib.rs +++ b/nexus/catalog/src/lib.rs @@ -211,7 +211,7 @@ impl Catalog { let rows = self.pg.query(&stmt, &[]).await?; - let mut peers = HashMap::new(); + let mut peers = HashMap::with_capacity(rows.len()); for row in rows { let name: &str = row.get(1); diff --git a/nexus/parser/src/lib.rs b/nexus/parser/src/lib.rs index f5b2aac340..02c8cb5a27 100644 --- a/nexus/parser/src/lib.rs +++ b/nexus/parser/src/lib.rs @@ -2,7 +2,7 @@ use std::{collections::HashMap, sync::Arc}; use analyzer::{ CursorEvent, PeerCursorAnalyzer, PeerDDL, PeerDDLAnalyzer, 
PeerExistanceAnalyzer, - QueryAssocation, StatementAnalyzer, + QueryAssociation, StatementAnalyzer, }; use async_trait::async_trait; use catalog::Catalog; @@ -27,7 +27,7 @@ pub enum NexusStatement { }, PeerQuery { stmt: Statement, - assoc: QueryAssocation, + assoc: QueryAssociation, }, PeerCursor { stmt: Statement, @@ -96,13 +96,9 @@ impl NexusQueryParser { Self { catalog } } - pub fn get_peers_bridge(&self) -> PgWireResult> { - let peers = tokio::task::block_in_place(move || { - tokio::runtime::Handle::current().block_on(async move { - let catalog = self.catalog.lock().await; - catalog.get_peers().await - }) - }); + pub async fn get_peers_bridge(&self) -> PgWireResult> { + let catalog = self.catalog.lock().await; + let peers = catalog.get_peers().await; peers.map_err(|e| { PgWireError::UserError(Box::new(ErrorInfo::new( @@ -113,7 +109,7 @@ impl NexusQueryParser { }) } - pub fn parse_simple_sql(&self, sql: &str) -> PgWireResult { + pub async fn parse_simple_sql(&self, sql: &str) -> PgWireResult { let mut stmts = Parser::parse_sql(&DIALECT, sql).map_err(|e| PgWireError::ApiError(Box::new(e)))?; if stmts.len() > 1 { @@ -131,7 +127,7 @@ impl NexusQueryParser { }) } else { let stmt = stmts.remove(0); - let peers = self.get_peers_bridge()?; + let peers = self.get_peers_bridge().await?; let nexus_stmt = NexusStatement::new(peers, &stmt)?; Ok(NexusParsedStatement { statement: nexus_stmt, @@ -162,7 +158,7 @@ impl QueryParser for NexusQueryParser { }) } else { let stmt = stmts.remove(0); - let peers = self.get_peers_bridge()?; + let peers = self.get_peers_bridge().await?; let nexus_stmt = NexusStatement::new(peers, &stmt)?; Ok(NexusParsedStatement { statement: nexus_stmt, diff --git a/nexus/peer-bigquery/src/stream.rs b/nexus/peer-bigquery/src/stream.rs index d4acce7fd0..02851ae2cd 100644 --- a/nexus/peer-bigquery/src/stream.rs +++ b/nexus/peer-bigquery/src/stream.rs @@ -101,7 +101,7 @@ impl BqRecordStream { } pub fn convert_result_set_item(&self, result_set: &ResultSet) -> anyhow::Result { - let mut values = Vec::new(); + let mut values = Vec::with_capacity(self.schema.fields.len()); for field in &self.schema.fields { let field_type = &field.r#type; let field_name = &field.name; diff --git a/nexus/peer-snowflake/src/lib.rs b/nexus/peer-snowflake/src/lib.rs index 7bb5b18790..a4eeeacb91 100644 --- a/nexus/peer-snowflake/src/lib.rs +++ b/nexus/peer-snowflake/src/lib.rs @@ -7,7 +7,7 @@ use pgwire::error::{ErrorInfo, PgWireError, PgWireResult}; use sqlparser::dialect::GenericDialect; use sqlparser::parser; use std::cmp::min; -use std::{collections::HashMap, time::Duration}; +use std::time::Duration; use stream::SnowflakeDataType; use auth::SnowflakeAuth; @@ -36,6 +36,15 @@ const TIME_OUTPUT_FORMAT: &str = "HH:MI:SS.FF"; const TIMESTAMP_OUTPUT_FORMAT: &str = "YYYY-MM-DDTHH24:MI:SS.FF"; const TIMESTAMP_TZ_OUTPUT_FORMAT: &str = "YYYY-MM-DDTHH24:MI:SS.FFTZHTZM"; +#[derive(Debug, Serialize)] +struct SQLStatementParameters<'a> { + pub date_output_format: &'a str, + pub time_output_format: &'a str, + pub timestamp_ltz_output_format: &'a str, + pub timestamp_ntz_output_format: &'a str, + pub timestamp_tz_output_format: &'a str, +} + #[derive(Debug, Serialize)] struct SQLStatement<'a> { statement: &'a str, @@ -43,7 +52,7 @@ struct SQLStatement<'a> { database: &'a str, warehouse: &'a str, role: &'a str, - parameters: HashMap, + parameters: SQLStatementParameters<'a>, } #[allow(non_snake_case)] @@ -147,28 +156,6 @@ impl SnowflakeQueryExecutor { #[async_recursion] #[tracing::instrument(name = 
"peer_sflake::process_query", skip_all)] async fn process_query(&self, query_str: &str) -> anyhow::Result { - let mut parameters = HashMap::new(); - parameters.insert( - "date_output_format".to_string(), - DATE_OUTPUT_FORMAT.to_string(), - ); - parameters.insert( - "time_output_format".to_string(), - TIME_OUTPUT_FORMAT.to_string(), - ); - parameters.insert( - "timestamp_ltz_output_format".to_string(), - TIMESTAMP_TZ_OUTPUT_FORMAT.to_string(), - ); - parameters.insert( - "timestamp_ntz_output_format".to_string(), - TIMESTAMP_OUTPUT_FORMAT.to_string(), - ); - parameters.insert( - "timestamp_tz_output_format".to_string(), - TIMESTAMP_TZ_OUTPUT_FORMAT.to_string(), - ); - let mut auth = self.auth.clone(); let jwt = auth.get_jwt()?; let secret = jwt.expose_secret().clone(); @@ -186,7 +173,13 @@ impl SnowflakeQueryExecutor { database: &self.config.database, warehouse: &self.config.warehouse, role: &self.config.role, - parameters, + parameters: SQLStatementParameters { + date_output_format: DATE_OUTPUT_FORMAT, + time_output_format: TIME_OUTPUT_FORMAT, + timestamp_ltz_output_format: TIMESTAMP_TZ_OUTPUT_FORMAT, + timestamp_ntz_output_format: TIMESTAMP_OUTPUT_FORMAT, + timestamp_tz_output_format: TIMESTAMP_TZ_OUTPUT_FORMAT, + }, }) .send() .await diff --git a/nexus/server/src/cursor.rs b/nexus/server/src/cursor.rs index 36fee27c3c..58d0e6a0c0 100644 --- a/nexus/server/src/cursor.rs +++ b/nexus/server/src/cursor.rs @@ -20,8 +20,8 @@ impl PeerCursors { self.cursors.insert(name, peer); } - pub fn remove_cursor(&mut self, name: String) { - self.cursors.remove(&name); + pub fn remove_cursor(&mut self, name: &str) { + self.cursors.remove(name); } pub fn get_peer(&self, name: &str) -> Option<&Peer> { diff --git a/nexus/server/src/main.rs b/nexus/server/src/main.rs index 1e38b2e979..c218f9f0c7 100644 --- a/nexus/server/src/main.rs +++ b/nexus/server/src/main.rs @@ -4,7 +4,7 @@ use std::{ time::Duration, }; -use analyzer::{PeerDDL, QueryAssocation}; +use analyzer::{PeerDDL, QueryAssociation}; use async_trait::async_trait; use bytes::{BufMut, BytesMut}; use catalog::{Catalog, CatalogConfig, WorkflowDetails}; @@ -141,7 +141,7 @@ impl NexusBackend { } peer_cursor::CursorModification::Closed(cursors) => { for cursor_name in cursors { - peer_cursors.remove_cursor(cursor_name); + peer_cursors.remove_cursor(&cursor_name); } Ok(vec![Response::Execution(Tag::new_for_execution( "CLOSE CURSOR", @@ -826,7 +826,7 @@ impl NexusBackend { NexusStatement::PeerQuery { stmt, assoc } => { // get the query executor let executor = match assoc { - QueryAssocation::Peer(peer) => { + QueryAssociation::Peer(peer) => { tracing::info!("handling peer[{}] query: {}", peer.name, stmt); peer_holder = Some(peer.clone()); self.get_peer_executor(&peer).await.map_err(|err| { @@ -835,7 +835,7 @@ impl NexusBackend { })) })? 
} - QueryAssocation::Catalog => { + QueryAssociation::Catalog => { tracing::info!("handling catalog query: {}", stmt); let catalog = self.catalog.lock().await; catalog.get_executor() @@ -961,7 +961,7 @@ impl SimpleQueryHandler for NexusBackend { where C: ClientInfo + Unpin + Send + Sync, { - let parsed = self.query_parser.parse_simple_sql(sql)?; + let parsed = self.query_parser.parse_simple_sql(sql).await?; let nexus_stmt = parsed.statement; self.handle_query(nexus_stmt).await } @@ -1039,7 +1039,7 @@ impl ExtendedQueryHandler for NexusBackend { sql = sql.replace(&format!("${}", i + 1), ¶meter_to_string(portal, i)?); } - let parsed = self.query_parser.parse_simple_sql(&sql)?; + let parsed = self.query_parser.parse_simple_sql(&sql).await?; let nexus_stmt = parsed.statement; let result = self.handle_query(nexus_stmt).await?; if result.is_empty() { @@ -1077,7 +1077,7 @@ impl ExtendedQueryHandler for NexusBackend { NexusStatement::Empty => Ok(DescribeResponse::no_data()), NexusStatement::PeerQuery { stmt, assoc } => { let schema: Option = match assoc { - QueryAssocation::Peer(peer) => { + QueryAssociation::Peer(peer) => { // if the peer is of type bigquery, let us route the query to bq. match &peer.config { Some(Config::BigqueryConfig(_)) => { @@ -1124,7 +1124,7 @@ impl ExtendedQueryHandler for NexusBackend { } } } - QueryAssocation::Catalog => { + QueryAssociation::Catalog => { let catalog = self.catalog.lock().await; let executor = catalog.get_executor(); executor.describe(stmt).await? diff --git a/nexus/value/src/lib.rs b/nexus/value/src/lib.rs index f6dbe0687b..3e6e0a1cbd 100644 --- a/nexus/value/src/lib.rs +++ b/nexus/value/src/lib.rs @@ -203,7 +203,7 @@ impl Value { } } serde_json::Value::Object(map) => { - let mut hstore = HashMap::new(); + let mut hstore = HashMap::with_capacity(map.len()); for (key, value) in map { hstore.insert(key.clone(), value.to_string()); } @@ -253,7 +253,7 @@ impl Value { Value::Uuid(u) => serde_json::Value::String(u.to_string()), Value::Enum(s) => serde_json::Value::String(s.clone()), Value::Hstore(map) => { - let mut object = serde_json::Map::new(); + let mut object = serde_json::Map::with_capacity(map.len()); for (key, value) in map { object.insert(key.clone(), serde_json::Value::String(value.clone())); } From 7bf1f187e866609f4cb46b5b2a257cd7cee999a1 Mon Sep 17 00:00:00 2001 From: Amogh Bharadwaj Date: Thu, 23 Nov 2023 19:31:45 +0530 Subject: [PATCH 4/8] New Graph UI (#684) A proposal from my end for a more polished UI for our Sync History. 
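The new graph leans on Tremor's BarChart instead of the hand-rolled GraphBar divs. For a sense of the pattern, here is a minimal standalone sketch — the component name and sample data are illustrative only; just the data/index/categories props mirror the actual diff below:

```tsx
import { BarChart } from '@tremor/react';

// Each element of `data` becomes one bar. `index` picks the key used for
// the x-axis label, and every key listed in `categories` is plotted as a
// series, with Tremor rendering the hover tooltip for it automatically.
const sampleData = [
  { name: '10:00 AM', 'Rows synced at a point in time': 120 },
  { name: '10:15 AM', 'Rows synced at a point in time': 87 },
  { name: '10:30 AM', 'Rows synced at a point in time': 203 },
];

export default function SyncHistorySketch() {
  return (
    <BarChart
      data={sampleData}
      index='name'
      categories={['Rows synced at a point in time']}
    />
  );
}
```

Getting the tooltip from the charting library is what lets the custom GraphBar markup and its hand-rolled hover box be deleted outright.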
[screenshot: redesigned Sync History bar graph]

- Hovering on the bars reveals a proper tooltip box with rows synced and at what time

[screenshot: bar hover tooltip]
---
 .../[mirrorId]/aggregatedCountsByInterval.ts |  6 ++-
 ui/app/mirrors/edit/[mirrorId]/cdcGraph.tsx  | 47 ++++---------------
 2 files changed, 15 insertions(+), 38 deletions(-)

diff --git a/ui/app/mirrors/edit/[mirrorId]/aggregatedCountsByInterval.ts b/ui/app/mirrors/edit/[mirrorId]/aggregatedCountsByInterval.ts
index b2fdf7b1fb..b9b3216625 100644
--- a/ui/app/mirrors/edit/[mirrorId]/aggregatedCountsByInterval.ts
+++ b/ui/app/mirrors/edit/[mirrorId]/aggregatedCountsByInterval.ts
@@ -36,7 +36,7 @@ function aggregateCountsByInterval(
 
   // Iterate through the timestamps and populate the aggregatedCounts object
   for (let { timestamp, count } of timestamps) {
-    const date = roundUpToNearestNMinutes(timestamp, 15);
+    const date = roundUpToNearestNMinutes(timestamp, 1);
     const formattedTimestamp = moment(date).format(timeUnit);
 
     if (!aggregatedCounts[formattedTimestamp]) {
@@ -64,6 +64,10 @@ function aggregateCountsByInterval(
       currentTimestamp.setHours(currentTimestamp.getHours() - 1);
     } else if (interval === '15min') {
       currentTimestamp.setMinutes(currentTimestamp.getMinutes() - 15);
+    } else if (interval === '1min') {
+      currentTimestamp.setMinutes(currentTimestamp.getMinutes() - 1);
+    } else if (interval === '5min') {
+      currentTimestamp.setMinutes(currentTimestamp.getMinutes() - 5);
     } else if (interval === 'month') {
       currentTimestamp.setMonth(currentTimestamp.getMonth() - 1);
     } else if (interval === 'day') {
diff --git a/ui/app/mirrors/edit/[mirrorId]/cdcGraph.tsx b/ui/app/mirrors/edit/[mirrorId]/cdcGraph.tsx
index cdc6d91a37..951baeaa3c 100644
--- a/ui/app/mirrors/edit/[mirrorId]/cdcGraph.tsx
+++ b/ui/app/mirrors/edit/[mirrorId]/cdcGraph.tsx
@@ -1,5 +1,6 @@
 'use client';
 import { Label } from '@/lib/Label';
+import { BarChart } from '@tremor/react';
 import moment from 'moment';
 import { useEffect, useState } from 'react';
 import ReactSelect from 'react-select';
@@ -51,15 +52,15 @@ function CdcGraph({ syncs }: { syncs: SyncStatusRow[] }) {
-      <div className='flex space-x-2 justify-left ml-2'>
-        {counts.map((count, i) => (
-          <GraphBar
-            key={i}
-            label={formatGraphLabel(new Date(count[0]), aggregateType)}
-            count={count[1]}
-          />
-        ))}
-      </div>
+      <BarChart
+        className='mt-3'
+        data={counts.map((count) => ({
+          name: formatGraphLabel(new Date(count[0]), aggregateType),
+          'Rows synced at a point in time': count[1],
+        }))}
+        index='name'
+        categories={['Rows synced at a point in time']}
+      />
     </div>
   );
 }
@@ -80,32 +81,4 @@ function formatGraphLabel(date: Date, aggregateType: String): string {
   }
 }
 
-type GraphBarProps = {
-  count: number;
-  label: string;
-};
-
-function GraphBar({ label, count }: GraphBarProps) {
-  let color =
-    count && count > 0 ? 'bg-positive-fill-normal' : 'bg-base-border-subtle';
-  let classNames = `relative w-10 h-24 rounded ${color}`;
-  return (
-    <div className={'flex flex-col'}>
-      <div className={classNames}>
-        <div className='group-hover:opacity-100 transition-opacity absolute
-          z-10 text-center'>
-          <div>{label}</div>
-          <div>{numberWithCommas(count)}</div>
-        </div>
-      </div>
-    </div>
- ); -} - -function numberWithCommas(x: number): string { - return x.toString().replace(/\B(?=(\d{3})+(?!\d))/g, ','); -} - export default CdcGraph; From 08e72c1796e5994919ffd37d8285c7fe891a78d5 Mon Sep 17 00:00:00 2001 From: Kevin K Biju <52661649+heavycrystal@users.noreply.github.com> Date: Thu, 23 Nov 2023 20:34:45 +0530 Subject: [PATCH 5/8] test fixing with a side of BigQuery features (#673) - Tests have now been unparallelized, will parallelize later through a different mechanism. - BigQuery supports local staging, using it for all tests now for better cleanup. - Added option to compress via Snappy and Deflate along with some cleanup, doesn't work for BQ yet --- .github/workflows/flow.yml | 2 +- flow/connectors/bigquery/bigquery.go | 9 +- flow/connectors/bigquery/qrep_avro_sync.go | 162 ++++--- flow/connectors/s3/qrep.go | 3 +- .../snowflake/avro_file_writer_test.go | 66 ++- flow/connectors/snowflake/qrep_avro_sync.go | 8 +- flow/connectors/utils/avro/avro_writer.go | 63 +-- flow/e2e/bigquery/peer_flow_bq_test.go | 389 +++++------------ flow/e2e/bigquery/qrep_flow_bq_test.go | 46 +- flow/e2e/congen.go | 1 - flow/e2e/snowflake/peer_flow_sf_test.go | 411 +++++------------- flow/e2e/test_utils.go | 4 +- flow/generated/protos/flow.pb.go | 58 ++- flow/go.sum | 2 + flow/model/qrecord_batch.go | 17 +- flow/workflows/qrep_flow.go | 25 +- nexus/pt/src/peerdb_flow.rs | 6 +- nexus/pt/src/peerdb_flow.serde.rs | 18 + protos/flow.proto | 5 +- ui/grpc_generated/flow.ts | 22 +- 20 files changed, 520 insertions(+), 797 deletions(-) diff --git a/.github/workflows/flow.yml b/.github/workflows/flow.yml index e5df71cbf6..3085d14df4 100644 --- a/.github/workflows/flow.yml +++ b/.github/workflows/flow.yml @@ -120,4 +120,4 @@ jobs: PEERDB_CATALOG_USER: postgres PEERDB_CATALOG_PASSWORD: postgres PEERDB_CATALOG_DATABASE: postgres - PEERDB_CDC_IDLE_TIMEOUT_SECONDS: 3 + PEERDB_CDC_IDLE_TIMEOUT_SECONDS: 10 diff --git a/flow/connectors/bigquery/bigquery.go b/flow/connectors/bigquery/bigquery.go index 776b6cc828..c216ac5233 100644 --- a/flow/connectors/bigquery/bigquery.go +++ b/flow/connectors/bigquery/bigquery.go @@ -1242,20 +1242,13 @@ func (c *BigQueryConnector) grabJobsUpdateLock() (func() error, error) { // grab an advisory lock based on the mirror jobs table hash mjTbl := fmt.Sprintf("%s.%s", c.datasetID, MirrorJobsTable) - _, err = tx.Exec(c.ctx, "SELECT pg_advisory_lock(hashtext($1))", mjTbl) - + _, err = tx.Exec(c.ctx, "SELECT pg_advisory_xact_lock(hashtext($1))", mjTbl) if err != nil { err = tx.Rollback(c.ctx) return nil, fmt.Errorf("failed to grab lock on %s: %w", mjTbl, err) } return func() error { - // release the lock - _, err := tx.Exec(c.ctx, "SELECT pg_advisory_unlock(hashtext($1))", mjTbl) - if err != nil { - return fmt.Errorf("failed to release lock on %s: %w", mjTbl, err) - } - err = tx.Commit(c.ctx) if err != nil { return fmt.Errorf("failed to commit transaction: %w", err) diff --git a/flow/connectors/bigquery/qrep_avro_sync.go b/flow/connectors/bigquery/qrep_avro_sync.go index 8cb8af79ce..97c043ac04 100644 --- a/flow/connectors/bigquery/qrep_avro_sync.go +++ b/flow/connectors/bigquery/qrep_avro_sync.go @@ -1,19 +1,18 @@ package connbigquery import ( - "bytes" - "context" "encoding/json" "fmt" + "os" "strings" "time" "cloud.google.com/go/bigquery" "github.com/PeerDB-io/peer-flow/connectors/utils" + avro "github.com/PeerDB-io/peer-flow/connectors/utils/avro" "github.com/PeerDB-io/peer-flow/generated/protos" "github.com/PeerDB-io/peer-flow/model" "github.com/PeerDB-io/peer-flow/model/qvalue" - 
"github.com/linkedin/goavro/v2" log "github.com/sirupsen/logrus" "go.temporal.io/sdk/activity" ) @@ -44,13 +43,13 @@ func (s *QRepAvroSyncMethod) SyncRecords( flowJobName, dstTableName, syncBatchID), ) // You will need to define your Avro schema as a string - avroSchema, nullable, err := DefineAvroSchema(dstTableName, dstTableMetadata) + avroSchema, err := DefineAvroSchema(dstTableName, dstTableMetadata) if err != nil { return 0, fmt.Errorf("failed to define Avro schema: %w", err) } stagingTable := fmt.Sprintf("%s_%s_staging", dstTableName, fmt.Sprint(syncBatchID)) - numRecords, err := s.writeToStage(fmt.Sprint(syncBatchID), dstTableName, avroSchema, stagingTable, stream, nullable) + numRecords, err := s.writeToStage(fmt.Sprint(syncBatchID), dstTableName, avroSchema, stagingTable, stream) if err != nil { return -1, fmt.Errorf("failed to push to avro stage: %v", err) } @@ -106,7 +105,7 @@ func (s *QRepAvroSyncMethod) SyncQRepRecords( startTime := time.Now() // You will need to define your Avro schema as a string - avroSchema, nullable, err := DefineAvroSchema(dstTableName, dstTableMetadata) + avroSchema, err := DefineAvroSchema(dstTableName, dstTableMetadata) if err != nil { return 0, fmt.Errorf("failed to define Avro schema: %w", err) } @@ -114,10 +113,12 @@ func (s *QRepAvroSyncMethod) SyncQRepRecords( "flowName": flowJobName, }).Infof("Obtained Avro schema for destination table %s and partition ID %s", dstTableName, partition.PartitionId) - fmt.Printf("Avro schema: %s\n", avroSchema) + log.WithFields(log.Fields{ + "flowName": flowJobName, + }).Infof("Avro schema: %v\n", avroSchema) // create a staging table name with partitionID replace hyphens with underscores stagingTable := fmt.Sprintf("%s_%s_staging", dstTableName, strings.ReplaceAll(partition.PartitionId, "-", "_")) - numRecords, err := s.writeToStage(partition.PartitionId, flowJobName, avroSchema, stagingTable, stream, nullable) + numRecords, err := s.writeToStage(partition.PartitionId, flowJobName, avroSchema, stagingTable, stream) if err != nil { return -1, fmt.Errorf("failed to push to avro stage: %v", err) } @@ -182,14 +183,15 @@ type AvroSchema struct { Fields []AvroField `json:"fields"` } -func DefineAvroSchema(dstTableName string, dstTableMetadata *bigquery.TableMetadata) (string, map[string]bool, error) { +func DefineAvroSchema(dstTableName string, + dstTableMetadata *bigquery.TableMetadata) (*model.QRecordAvroSchemaDefinition, error) { avroFields := []AvroField{} nullableFields := map[string]bool{} for _, bqField := range dstTableMetadata.Schema { avroType, err := GetAvroType(bqField) if err != nil { - return "", nil, err + return nil, err } // If a field is nullable, its Avro type should be ["null", actualType] @@ -212,10 +214,13 @@ func DefineAvroSchema(dstTableName string, dstTableMetadata *bigquery.TableMetad avroSchemaJSON, err := json.Marshal(avroSchema) if err != nil { - return "", nil, fmt.Errorf("failed to marshal Avro schema to JSON: %v", err) + return nil, fmt.Errorf("failed to marshal Avro schema to JSON: %v", err) } - return string(avroSchemaJSON), nullableFields, nil + return &model.QRecordAvroSchemaDefinition{ + Schema: string(avroSchemaJSON), + NullableFields: nullableFields, + }, nil } func GetAvroType(bqField *bigquery.FieldSchema) (interface{}, error) { @@ -306,10 +311,9 @@ func GetAvroType(bqField *bigquery.FieldSchema) (interface{}, error) { func (s *QRepAvroSyncMethod) writeToStage( syncID string, objectFolder string, - avroSchema string, + avroSchema *model.QRecordAvroSchemaDefinition, stagingTable 
string, stream *model.QRecordStream, - nullable map[string]bool, ) (int, error) { shutdown := utils.HeartbeatRoutine(s.connector.ctx, time.Minute, func() string { @@ -320,95 +324,75 @@ func (s *QRepAvroSyncMethod) writeToStage( defer func() { shutdown <- true }() - ctx := context.Background() - bucket := s.connector.storageClient.Bucket(s.gcsBucket) - gcsObjectName := fmt.Sprintf("%s/%s.avro", objectFolder, syncID) - - obj := bucket.Object(gcsObjectName) - w := obj.NewWriter(ctx) - - // Create OCF Writer - var ocfFileContents bytes.Buffer - ocfWriter, err := goavro.NewOCFWriter(goavro.OCFConfig{ - W: &ocfFileContents, - Schema: avroSchema, - }) - if err != nil { - return 0, fmt.Errorf("failed to create OCF writer: %w", err) - } - schema, err := stream.Schema() - if err != nil { - log.WithFields(log.Fields{ - "partitonOrBatchID": syncID, - }).Errorf("failed to get schema from stream: %v", err) - return 0, fmt.Errorf("failed to get schema from stream: %w", err) - } + var avroFilePath string + numRecords, err := func() (int, error) { + ocfWriter := avro.NewPeerDBOCFWriter(s.connector.ctx, stream, avroSchema, + avro.CompressNone, qvalue.QDWHTypeBigQuery) + if s.gcsBucket != "" { + bucket := s.connector.storageClient.Bucket(s.gcsBucket) + avroFilePath = fmt.Sprintf("%s/%s.avro", objectFolder, syncID) + obj := bucket.Object(avroFilePath) + w := obj.NewWriter(s.connector.ctx) + + numRecords, err := ocfWriter.WriteOCF(w) + if err != nil { + return 0, fmt.Errorf("failed to write records to Avro file on GCS: %w", err) + } + return numRecords, err + } else { + tmpDir, err := os.MkdirTemp("", "peerdb-avro") + if err != nil { + return 0, fmt.Errorf("failed to create temp dir: %w", err) + } - activity.RecordHeartbeat(s.connector.ctx, fmt.Sprintf( - "Obtained staging bucket %s and schema of rows. 
Now writing records to OCF file.", - gcsObjectName), - ) - numRecords := 0 - // Write each QRecord to the OCF file - for qRecordOrErr := range stream.Records { - if numRecords > 0 && numRecords%10000 == 0 { - activity.RecordHeartbeat(s.connector.ctx, fmt.Sprintf( - "Written %d records to OCF file for staging bucket %s.", - numRecords, gcsObjectName), - ) - } - if qRecordOrErr.Err != nil { + avroFilePath = fmt.Sprintf("%s/%s.avro", tmpDir, syncID) log.WithFields(log.Fields{ "batchOrPartitionID": syncID, - }).Errorf("[bq_avro] failed to get record from stream: %v", qRecordOrErr.Err) - return 0, fmt.Errorf("[bq_avro] failed to get record from stream: %w", qRecordOrErr.Err) - } - - qRecord := qRecordOrErr.Record - avroConverter := model.NewQRecordAvroConverter( - qRecord, - qvalue.QDWHTypeBigQuery, - &nullable, - schema.GetColumnNames(), - ) - avroMap, err := avroConverter.Convert() - if err != nil { - return 0, fmt.Errorf("failed to convert QRecord to Avro compatible map: %w", err) - } - - err = ocfWriter.Append([]interface{}{avroMap}) - if err != nil { - return 0, fmt.Errorf("failed to write record to OCF file: %w", err) + }).Infof("writing records to local file %s", avroFilePath) + numRecords, err := ocfWriter.WriteRecordsToAvroFile(avroFilePath) + if err != nil { + return 0, fmt.Errorf("failed to write records to local Avro file: %w", err) + } + return numRecords, err } - numRecords++ - } - activity.RecordHeartbeat(s.connector.ctx, fmt.Sprintf( - "Writing OCF contents to BigQuery for partition/batch ID %s", - syncID), - ) - // Write OCF contents to GCS - if _, err = w.Write(ocfFileContents.Bytes()); err != nil { - return 0, fmt.Errorf("failed to write OCF file to GCS: %w", err) + }() + if err != nil { + return 0, err } - - if err := w.Close(); err != nil { - return 0, fmt.Errorf("failed to close GCS object writer: %w", err) + if numRecords == 0 { + return 0, nil } + log.WithFields(log.Fields{ + "batchOrPartitionID": syncID, + }).Infof("wrote %d records to file %s", numRecords, avroFilePath) - // write this file to bigquery - gcsRef := bigquery.NewGCSReference(fmt.Sprintf("gs://%s/%s", s.gcsBucket, gcsObjectName)) - gcsRef.SourceFormat = bigquery.Avro bqClient := s.connector.client datasetID := s.connector.datasetID - loader := bqClient.Dataset(datasetID).Table(stagingTable).LoaderFrom(gcsRef) + var avroRef bigquery.LoadSource + if s.gcsBucket != "" { + gcsRef := bigquery.NewGCSReference(fmt.Sprintf("gs://%s/%s", s.gcsBucket, avroFilePath)) + gcsRef.SourceFormat = bigquery.Avro + gcsRef.Compression = bigquery.Deflate + avroRef = gcsRef + } else { + fh, err := os.Open(avroFilePath) + if err != nil { + return 0, fmt.Errorf("failed to read local Avro file: %w", err) + } + localRef := bigquery.NewReaderSource(fh) + localRef.SourceFormat = bigquery.Avro + avroRef = localRef + } + + loader := bqClient.Dataset(datasetID).Table(stagingTable).LoaderFrom(avroRef) loader.UseAvroLogicalTypes = true - job, err := loader.Run(ctx) + job, err := loader.Run(s.connector.ctx) if err != nil { return 0, fmt.Errorf("failed to run BigQuery load job: %w", err) } - status, err := job.Wait(ctx) + status, err := job.Wait(s.connector.ctx) if err != nil { return 0, fmt.Errorf("failed to wait for BigQuery load job: %w", err) } @@ -417,6 +401,6 @@ func (s *QRepAvroSyncMethod) writeToStage( return 0, fmt.Errorf("failed to load Avro file into BigQuery table: %w", err) } log.Printf("Pushed into %s/%s", - gcsObjectName, syncID) + avroFilePath, syncID) return numRecords, nil } diff --git a/flow/connectors/s3/qrep.go 
b/flow/connectors/s3/qrep.go index b34f9a2cf3..1f1cf881da 100644 --- a/flow/connectors/s3/qrep.go +++ b/flow/connectors/s3/qrep.go @@ -7,6 +7,7 @@ import ( avro "github.com/PeerDB-io/peer-flow/connectors/utils/avro" "github.com/PeerDB-io/peer-flow/generated/protos" "github.com/PeerDB-io/peer-flow/model" + "github.com/PeerDB-io/peer-flow/model/qvalue" log "github.com/sirupsen/logrus" ) @@ -62,7 +63,7 @@ func (c *S3Connector) writeToAvroFile( } s3AvroFileKey := fmt.Sprintf("%s/%s/%s.avro", s3o.Prefix, jobName, partitionID) - writer := avro.NewPeerDBOCFWriter(c.ctx, stream, avroSchema) + writer := avro.NewPeerDBOCFWriter(c.ctx, stream, avroSchema, avro.CompressNone, qvalue.QDWHTypeSnowflake) numRecords, err := writer.WriteRecordsToS3(s3o.Bucket, s3AvroFileKey, c.creds) if err != nil { return 0, fmt.Errorf("failed to write records to S3: %w", err) diff --git a/flow/connectors/snowflake/avro_file_writer_test.go b/flow/connectors/snowflake/avro_file_writer_test.go index 77310c45db..76b70f478f 100644 --- a/flow/connectors/snowflake/avro_file_writer_test.go +++ b/flow/connectors/snowflake/avro_file_writer_test.go @@ -1,6 +1,7 @@ package connsnowflake import ( + "context" "fmt" "math/big" "os" @@ -142,7 +143,64 @@ func TestWriteRecordsToAvroFileHappyPath(t *testing.T) { fmt.Printf("[test] avroSchema: %v\n", avroSchema) // Call function - writer := avro.NewPeerDBOCFWriter(nil, records, avroSchema) + writer := avro.NewPeerDBOCFWriter(context.Background(), + records, avroSchema, avro.CompressNone, qvalue.QDWHTypeSnowflake) + _, err = writer.WriteRecordsToAvroFile(tmpfile.Name()) + require.NoError(t, err, "expected WriteRecordsToAvroFile to complete without errors") + + // Check file is not empty + info, err := tmpfile.Stat() + require.NoError(t, err) + require.NotZero(t, info.Size(), "expected file to not be empty") +} + +func TestWriteRecordsToZstdAvroFileHappyPath(t *testing.T) { + // Create temporary file + tmpfile, err := os.CreateTemp("", "example_*.avro.zst") + require.NoError(t, err) + + defer os.Remove(tmpfile.Name()) // clean up + defer tmpfile.Close() // close file after test ends + + // Define sample data + records, schema := generateRecords(t, true, 10, false) + + avroSchema, err := model.GetAvroSchemaDefinition("not_applicable", schema) + require.NoError(t, err) + + fmt.Printf("[test] avroSchema: %v\n", avroSchema) + + // Call function + writer := avro.NewPeerDBOCFWriter(context.Background(), + records, avroSchema, avro.CompressZstd, qvalue.QDWHTypeSnowflake) + _, err = writer.WriteRecordsToAvroFile(tmpfile.Name()) + require.NoError(t, err, "expected WriteRecordsToAvroFile to complete without errors") + + // Check file is not empty + info, err := tmpfile.Stat() + require.NoError(t, err) + require.NotZero(t, info.Size(), "expected file to not be empty") +} + +func TestWriteRecordsToDeflateAvroFileHappyPath(t *testing.T) { + // Create temporary file + tmpfile, err := os.CreateTemp("", "example_*.avro.zz") + require.NoError(t, err) + + defer os.Remove(tmpfile.Name()) // clean up + defer tmpfile.Close() // close file after test ends + + // Define sample data + records, schema := generateRecords(t, true, 10, false) + + avroSchema, err := model.GetAvroSchemaDefinition("not_applicable", schema) + require.NoError(t, err) + + fmt.Printf("[test] avroSchema: %v\n", avroSchema) + + // Call function + writer := avro.NewPeerDBOCFWriter(context.Background(), + records, avroSchema, avro.CompressDeflate, qvalue.QDWHTypeSnowflake) _, err = writer.WriteRecordsToAvroFile(tmpfile.Name()) require.NoError(t, 
err, "expected WriteRecordsToAvroFile to complete without errors") @@ -168,7 +226,8 @@ func TestWriteRecordsToAvroFileNonNull(t *testing.T) { fmt.Printf("[test] avroSchema: %v\n", avroSchema) // Call function - writer := avro.NewPeerDBOCFWriter(nil, records, avroSchema) + writer := avro.NewPeerDBOCFWriter(context.Background(), + records, avroSchema, avro.CompressNone, qvalue.QDWHTypeSnowflake) _, err = writer.WriteRecordsToAvroFile(tmpfile.Name()) require.NoError(t, err, "expected WriteRecordsToAvroFile to complete without errors") @@ -195,7 +254,8 @@ func TestWriteRecordsToAvroFileAllNulls(t *testing.T) { fmt.Printf("[test] avroSchema: %v\n", avroSchema) // Call function - writer := avro.NewPeerDBOCFWriter(nil, records, avroSchema) + writer := avro.NewPeerDBOCFWriter(context.Background(), + records, avroSchema, avro.CompressNone, qvalue.QDWHTypeSnowflake) _, err = writer.WriteRecordsToAvroFile(tmpfile.Name()) require.NoError(t, err, "expected WriteRecordsToAvroFile to complete without errors") diff --git a/flow/connectors/snowflake/qrep_avro_sync.go b/flow/connectors/snowflake/qrep_avro_sync.go index b41e97a192..7d540c9f2f 100644 --- a/flow/connectors/snowflake/qrep_avro_sync.go +++ b/flow/connectors/snowflake/qrep_avro_sync.go @@ -274,7 +274,8 @@ func (s *SnowflakeAvroSyncMethod) writeToAvroFile( ) (int, string, error) { var numRecords int if s.config.StagingPath == "" { - ocfWriter := avro.NewPeerDBOCFWriterWithCompression(s.connector.ctx, stream, avroSchema) + ocfWriter := avro.NewPeerDBOCFWriter(s.connector.ctx, stream, avroSchema, avro.CompressZstd, + qvalue.QDWHTypeSnowflake) tmpDir, err := os.MkdirTemp("", "peerdb-avro") if err != nil { return 0, "", fmt.Errorf("failed to create temp dir: %w", err) @@ -292,13 +293,14 @@ func (s *SnowflakeAvroSyncMethod) writeToAvroFile( return numRecords, localFilePath, nil } else if strings.HasPrefix(s.config.StagingPath, "s3://") { - ocfWriter := avro.NewPeerDBOCFWriter(s.connector.ctx, stream, avroSchema) + ocfWriter := avro.NewPeerDBOCFWriter(s.connector.ctx, stream, avroSchema, avro.CompressZstd, + qvalue.QDWHTypeSnowflake) s3o, err := utils.NewS3BucketAndPrefix(s.config.StagingPath) if err != nil { return 0, "", fmt.Errorf("failed to parse staging path: %w", err) } - s3AvroFileKey := fmt.Sprintf("%s/%s/%s.avro", s3o.Prefix, s.config.FlowJobName, partitionID) + s3AvroFileKey := fmt.Sprintf("%s/%s/%s.avro.zst", s3o.Prefix, s.config.FlowJobName, partitionID) log.WithFields(log.Fields{ "flowName": flowJobName, "partitionID": partitionID, diff --git a/flow/connectors/utils/avro/avro_writer.go b/flow/connectors/utils/avro/avro_writer.go index 0b4cf09d7e..36c8858aa4 100644 --- a/flow/connectors/utils/avro/avro_writer.go +++ b/flow/connectors/utils/avro/avro_writer.go @@ -13,56 +13,67 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3/s3manager" + "github.com/klauspost/compress/flate" + "github.com/klauspost/compress/snappy" "github.com/klauspost/compress/zstd" "github.com/linkedin/goavro/v2" log "github.com/sirupsen/logrus" uber_atomic "go.uber.org/atomic" ) +type AvroCompressionCodec int64 + +const ( + CompressNone AvroCompressionCodec = iota + CompressZstd + CompressDeflate + CompressSnappy +) + type PeerDBOCFWriter struct { - ctx context.Context - stream *model.QRecordStream - avroSchema *model.QRecordAvroSchemaDefinition - compress bool - writer io.WriteCloser + ctx context.Context + stream *model.QRecordStream + avroSchema *model.QRecordAvroSchemaDefinition + avroCompressionCodec AvroCompressionCodec + writer 
io.WriteCloser + targetDWH qvalue.QDWHType } func NewPeerDBOCFWriter( ctx context.Context, stream *model.QRecordStream, avroSchema *model.QRecordAvroSchemaDefinition, + avroCompressionCodec AvroCompressionCodec, + targetDWH qvalue.QDWHType, ) *PeerDBOCFWriter { return &PeerDBOCFWriter{ - ctx: ctx, - stream: stream, - avroSchema: avroSchema, - compress: false, - } -} - -func NewPeerDBOCFWriterWithCompression( - ctx context.Context, - stream *model.QRecordStream, - avroSchema *model.QRecordAvroSchemaDefinition, -) *PeerDBOCFWriter { - return &PeerDBOCFWriter{ - ctx: ctx, - stream: stream, - avroSchema: avroSchema, - compress: true, + ctx: ctx, + stream: stream, + avroSchema: avroSchema, + avroCompressionCodec: avroCompressionCodec, + targetDWH: targetDWH, } } func (p *PeerDBOCFWriter) initWriteCloser(w io.Writer) error { var err error - if p.compress { + switch p.avroCompressionCodec { + case CompressNone: + p.writer = &nopWriteCloser{w} + case CompressZstd: p.writer, err = zstd.NewWriter(w) if err != nil { return fmt.Errorf("error while initializing zstd encoding writer: %w", err) } - } else { - p.writer = &nopWriteCloser{w} + case CompressDeflate: + p.writer, err = flate.NewWriter(w, -1) + if err != nil { + return fmt.Errorf("error while initializing deflate encoding writer: %w", err) + } + case CompressSnappy: + p.writer = snappy.NewBufferedWriter(w) } + return nil } @@ -115,7 +126,7 @@ func (p *PeerDBOCFWriter) writeRecordsToOCFWriter(ocfWriter *goavro.OCFWriter) ( qRecord := qRecordOrErr.Record avroConverter := model.NewQRecordAvroConverter( qRecord, - qvalue.QDWHTypeSnowflake, + p.targetDWH, &p.avroSchema.NullableFields, colNames, ) diff --git a/flow/e2e/bigquery/peer_flow_bq_test.go b/flow/e2e/bigquery/peer_flow_bq_test.go index be7f45ef4a..90fbb552dc 100644 --- a/flow/e2e/bigquery/peer_flow_bq_test.go +++ b/flow/e2e/bigquery/peer_flow_bq_test.go @@ -5,6 +5,7 @@ import ( "fmt" "strings" "testing" + "time" "github.com/PeerDB-io/peer-flow/e2e" "github.com/PeerDB-io/peer-flow/generated/protos" @@ -28,43 +29,7 @@ type PeerFlowE2ETestSuiteBQ struct { } func TestPeerFlowE2ETestSuiteBQ(t *testing.T) { - s := &PeerFlowE2ETestSuiteBQ{} - s.SetT(t) - s.SetupSuite() - - tests := []struct { - name string - test func(t *testing.T) - }{ - {"Test_Invalid_Connection_Config", s.Test_Invalid_Connection_Config}, - {"Test_Complete_Flow_No_Data", s.Test_Complete_Flow_No_Data}, - {"Test_Char_ColType_Error", s.Test_Char_ColType_Error}, - {"Test_Complete_Simple_Flow_BQ", s.Test_Complete_Simple_Flow_BQ}, - {"Test_Toast_BQ", s.Test_Toast_BQ}, - {"Test_Toast_Nochanges_BQ", s.Test_Toast_Nochanges_BQ}, - {"Test_Toast_Advance_1_BQ", s.Test_Toast_Advance_1_BQ}, - {"Test_Toast_Advance_2_BQ", s.Test_Toast_Advance_2_BQ}, - {"Test_Toast_Advance_3_BQ", s.Test_Toast_Advance_3_BQ}, - {"Test_Types_BQ", s.Test_Types_BQ}, - {"Test_Types_Avro_BQ", s.Test_Types_Avro_BQ}, - {"Test_Simple_Flow_BQ_Avro_CDC", s.Test_Simple_Flow_BQ_Avro_CDC}, - {"Test_Multi_Table_BQ", s.Test_Multi_Table_BQ}, - {"Test_Simple_Schema_Changes_BQ", s.Test_Simple_Schema_Changes_BQ}, - {"Test_Composite_PKey_BQ", s.Test_Composite_PKey_BQ}, - {"Test_Composite_PKey_Toast_1_BQ", s.Test_Composite_PKey_Toast_1_BQ}, - {"Test_Composite_PKey_Toast_2_BQ", s.Test_Composite_PKey_Toast_2_BQ}, - } - - // Assert that there are no duplicate test names - testNames := make(map[string]bool) - for _, tt := range tests { - if testNames[tt.name] { - t.Fatalf("duplicate test name: %s", tt.name) - } - testNames[tt.name] = true - - t.Run(tt.name, tt.test) - } + suite.Run(t, 
new(PeerFlowE2ETestSuiteBQ)) } func (s *PeerFlowE2ETestSuiteBQ) attachSchemaSuffix(tableName string) string { @@ -113,7 +78,9 @@ func (s *PeerFlowE2ETestSuiteBQ) SetupSuite() { s.setupTemporalLogger() - s.bqSuffix = strings.ToLower(util.RandomString(8)) + suffix := util.RandomString(8) + tsSuffix := time.Now().Format("20060102150405") + s.bqSuffix = fmt.Sprintf("bq_%s_%s", strings.ToLower(suffix), tsSuffix) pool, err := e2e.SetupPostgres(s.bqSuffix) if err != nil { s.Fail("failed to setup postgres", err) @@ -139,8 +106,7 @@ func (s *PeerFlowE2ETestSuiteBQ) TearDownSuite() { } } -func (s *PeerFlowE2ETestSuiteBQ) Test_Invalid_Connection_Config(t *testing.T) { - t.Parallel() +func (s *PeerFlowE2ETestSuiteBQ) Test_Invalid_Connection_Config() { env := s.NewTestWorkflowEnvironment() e2e.RegisterWorkflowsAndActivities(env) @@ -163,8 +129,7 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Invalid_Connection_Config(t *testing.T) { env.AssertExpectations(s.T()) } -func (s *PeerFlowE2ETestSuiteBQ) Test_Complete_Flow_No_Data(t *testing.T) { - t.Parallel() +func (s *PeerFlowE2ETestSuiteBQ) Test_Complete_Flow_No_Data() { env := s.NewTestWorkflowEnvironment() e2e.RegisterWorkflowsAndActivities(env) @@ -178,17 +143,19 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Complete_Flow_No_Data(t *testing.T) { value VARCHAR(255) NOT NULL ); `, srcTableName)) - require.NoError(t, err) + s.NoError(err) connectionGen := e2e.FlowConnectionGenerationConfig{ FlowJobName: s.attachSuffix("test_complete_flow_no_data"), TableNameMapping: map[string]string{srcTableName: dstTableName}, PostgresPort: e2e.PostgresPort, Destination: s.bqHelper.Peer, + CDCSyncMode: protos.QRepSyncMode_QREP_SYNC_MODE_STORAGE_AVRO, + CdcStagingPath: "", } flowConnConfig, err := connectionGen.GenerateFlowConnectionConfigs() - require.NoError(t, err) + s.NoError(err) limits := peerflow.CDCFlowLimits{ TotalSyncFlows: 1, @@ -208,8 +175,7 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Complete_Flow_No_Data(t *testing.T) { env.AssertExpectations(s.T()) } -func (s *PeerFlowE2ETestSuiteBQ) Test_Char_ColType_Error(t *testing.T) { - t.Parallel() +func (s *PeerFlowE2ETestSuiteBQ) Test_Char_ColType_Error() { env := s.NewTestWorkflowEnvironment() e2e.RegisterWorkflowsAndActivities(env) @@ -223,17 +189,19 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Char_ColType_Error(t *testing.T) { value CHAR(255) NOT NULL ); `, srcTableName)) - require.NoError(t, err) + s.NoError(err) connectionGen := e2e.FlowConnectionGenerationConfig{ FlowJobName: s.attachSuffix("test_char_table"), TableNameMapping: map[string]string{srcTableName: dstTableName}, PostgresPort: e2e.PostgresPort, Destination: s.bqHelper.Peer, + CDCSyncMode: protos.QRepSyncMode_QREP_SYNC_MODE_STORAGE_AVRO, + CdcStagingPath: "", } flowConnConfig, err := connectionGen.GenerateFlowConnectionConfigs() - require.NoError(t, err) + s.NoError(err) limits := peerflow.CDCFlowLimits{ TotalSyncFlows: 1, @@ -256,8 +224,7 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Char_ColType_Error(t *testing.T) { // Test_Complete_Simple_Flow_BQ tests a complete flow with data in the source table. // The test inserts 10 rows into the source table and verifies that the data is // correctly synced to the destination table after sync flow completes. 
-func (s *PeerFlowE2ETestSuiteBQ) Test_Complete_Simple_Flow_BQ(t *testing.T) { - t.Parallel() +func (s *PeerFlowE2ETestSuiteBQ) Test_Complete_Simple_Flow_BQ() { env := s.NewTestWorkflowEnvironment() e2e.RegisterWorkflowsAndActivities(env) @@ -271,17 +238,19 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Complete_Simple_Flow_BQ(t *testing.T) { value TEXT NOT NULL ); `, srcTableName)) - require.NoError(t, err) + s.NoError(err) connectionGen := e2e.FlowConnectionGenerationConfig{ FlowJobName: s.attachSuffix("test_complete_simple_flow"), TableNameMapping: map[string]string{srcTableName: dstTableName}, PostgresPort: e2e.PostgresPort, Destination: s.bqHelper.Peer, + CDCSyncMode: protos.QRepSyncMode_QREP_SYNC_MODE_STORAGE_AVRO, + CdcStagingPath: "", } flowConnConfig, err := connectionGen.GenerateFlowConnectionConfigs() - require.NoError(t, err) + s.NoError(err) limits := peerflow.CDCFlowLimits{ TotalSyncFlows: 2, @@ -299,7 +268,7 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Complete_Simple_Flow_BQ(t *testing.T) { _, err = s.pool.Exec(context.Background(), fmt.Sprintf(` INSERT INTO %s(key, value) VALUES ($1, $2) `, srcTableName), testKey, testValue) - require.NoError(t, err) + s.NoError(err) } fmt.Println("Inserted 10 rows into the source table") }() @@ -315,7 +284,7 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Complete_Simple_Flow_BQ(t *testing.T) { s.Contains(err.Error(), "continue as new") count, err := s.bqHelper.countRows(dstTableName) - require.NoError(t, err) + s.NoError(err) s.Equal(10, count) // TODO: verify that the data is correctly synced to the destination table @@ -324,8 +293,7 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Complete_Simple_Flow_BQ(t *testing.T) { env.AssertExpectations(s.T()) } -func (s *PeerFlowE2ETestSuiteBQ) Test_Toast_BQ(t *testing.T) { - t.Parallel() +func (s *PeerFlowE2ETestSuiteBQ) Test_Toast_BQ() { env := s.NewTestWorkflowEnvironment() e2e.RegisterWorkflowsAndActivities(env) @@ -340,20 +308,22 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Toast_BQ(t *testing.T) { k int ); `, srcTableName)) - require.NoError(t, err) + s.NoError(err) connectionGen := e2e.FlowConnectionGenerationConfig{ FlowJobName: s.attachSuffix("test_toast_bq_1"), TableNameMapping: map[string]string{srcTableName: dstTableName}, PostgresPort: e2e.PostgresPort, Destination: s.bqHelper.Peer, + CDCSyncMode: protos.QRepSyncMode_QREP_SYNC_MODE_STORAGE_AVRO, + CdcStagingPath: "", } flowConnConfig, err := connectionGen.GenerateFlowConnectionConfigs() - require.NoError(t, err) + s.NoError(err) limits := peerflow.CDCFlowLimits{ - TotalSyncFlows: 1, + TotalSyncFlows: 2, MaxBatchSize: 100, } @@ -375,7 +345,7 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Toast_BQ(t *testing.T) { UPDATE %s SET t1='dummy' WHERE id=2; END; `, srcTableName, srcTableName, srcTableName)) - require.NoError(t, err) + s.NoError(err) fmt.Println("Executed a transaction touching toast columns") }() @@ -393,8 +363,7 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Toast_BQ(t *testing.T) { env.AssertExpectations(s.T()) } -func (s *PeerFlowE2ETestSuiteBQ) Test_Toast_Nochanges_BQ(t *testing.T) { - t.Parallel() +func (s *PeerFlowE2ETestSuiteBQ) Test_Toast_Nochanges_BQ() { env := s.NewTestWorkflowEnvironment() e2e.RegisterWorkflowsAndActivities(env) @@ -409,20 +378,22 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Toast_Nochanges_BQ(t *testing.T) { k int ); `, srcTableName)) - require.NoError(t, err) + s.NoError(err) connectionGen := e2e.FlowConnectionGenerationConfig{ FlowJobName: s.attachSuffix("test_toast_bq_2"), TableNameMapping: map[string]string{srcTableName: dstTableName}, 
PostgresPort: e2e.PostgresPort, Destination: s.bqHelper.Peer, + CDCSyncMode: protos.QRepSyncMode_QREP_SYNC_MODE_STORAGE_AVRO, + CdcStagingPath: "", } flowConnConfig, err := connectionGen.GenerateFlowConnectionConfigs() - require.NoError(t, err) + s.NoError(err) limits := peerflow.CDCFlowLimits{ - TotalSyncFlows: 1, + TotalSyncFlows: 2, MaxBatchSize: 100, } @@ -437,7 +408,7 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Toast_Nochanges_BQ(t *testing.T) { UPDATE %s SET t1='dummy' WHERE id=2; END; `, srcTableName, srcTableName)) - require.NoError(t, err) + s.NoError(err) fmt.Println("Executed a transaction touching toast columns") }() @@ -455,8 +426,7 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Toast_Nochanges_BQ(t *testing.T) { env.AssertExpectations(s.T()) } -func (s *PeerFlowE2ETestSuiteBQ) Test_Toast_Advance_1_BQ(t *testing.T) { - t.Parallel() +func (s *PeerFlowE2ETestSuiteBQ) Test_Toast_Advance_1_BQ() { env := s.NewTestWorkflowEnvironment() e2e.RegisterWorkflowsAndActivities(env) @@ -471,17 +441,19 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Toast_Advance_1_BQ(t *testing.T) { k int ); `, srcTableName)) - require.NoError(t, err) + s.NoError(err) connectionGen := e2e.FlowConnectionGenerationConfig{ FlowJobName: s.attachSuffix("test_toast_bq_3"), TableNameMapping: map[string]string{srcTableName: dstTableName}, PostgresPort: e2e.PostgresPort, Destination: s.bqHelper.Peer, + CDCSyncMode: protos.QRepSyncMode_QREP_SYNC_MODE_STORAGE_AVRO, + CdcStagingPath: "", } flowConnConfig, err := connectionGen.GenerateFlowConnectionConfigs() - require.NoError(t, err) + s.NoError(err) limits := peerflow.CDCFlowLimits{ TotalSyncFlows: 1, @@ -512,7 +484,7 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Toast_Advance_1_BQ(t *testing.T) { END; `, srcTableName, srcTableName, srcTableName, srcTableName, srcTableName, srcTableName, srcTableName, srcTableName, srcTableName, srcTableName, srcTableName, srcTableName)) - require.NoError(t, err) + s.NoError(err) fmt.Println("Executed a transaction touching toast columns") }() @@ -530,8 +502,7 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Toast_Advance_1_BQ(t *testing.T) { env.AssertExpectations(s.T()) } -func (s *PeerFlowE2ETestSuiteBQ) Test_Toast_Advance_2_BQ(t *testing.T) { - t.Parallel() +func (s *PeerFlowE2ETestSuiteBQ) Test_Toast_Advance_2_BQ() { env := s.NewTestWorkflowEnvironment() e2e.RegisterWorkflowsAndActivities(env) @@ -545,20 +516,22 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Toast_Advance_2_BQ(t *testing.T) { k int ); `, srcTableName)) - require.NoError(t, err) + s.NoError(err) connectionGen := e2e.FlowConnectionGenerationConfig{ FlowJobName: s.attachSuffix("test_toast_bq_4"), TableNameMapping: map[string]string{srcTableName: dstTableName}, PostgresPort: e2e.PostgresPort, Destination: s.bqHelper.Peer, + CDCSyncMode: protos.QRepSyncMode_QREP_SYNC_MODE_STORAGE_AVRO, + CdcStagingPath: "", } flowConnConfig, err := connectionGen.GenerateFlowConnectionConfigs() - require.NoError(t, err) + s.NoError(err) limits := peerflow.CDCFlowLimits{ - TotalSyncFlows: 1, + TotalSyncFlows: 2, MaxBatchSize: 100, } @@ -580,7 +553,7 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Toast_Advance_2_BQ(t *testing.T) { UPDATE %s SET k=4 WHERE id=1; END; `, srcTableName, srcTableName, srcTableName, srcTableName, srcTableName, srcTableName)) - require.NoError(t, err) + s.NoError(err) fmt.Println("Executed a transaction touching toast columns") }() @@ -598,8 +571,7 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Toast_Advance_2_BQ(t *testing.T) { env.AssertExpectations(s.T()) } -func (s *PeerFlowE2ETestSuiteBQ) 
Test_Toast_Advance_3_BQ(t *testing.T) { - t.Parallel() +func (s *PeerFlowE2ETestSuiteBQ) Test_Toast_Advance_3_BQ() { env := s.NewTestWorkflowEnvironment() e2e.RegisterWorkflowsAndActivities(env) @@ -614,20 +586,22 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Toast_Advance_3_BQ(t *testing.T) { k int ); `, srcTableName)) - require.NoError(t, err) + s.NoError(err) connectionGen := e2e.FlowConnectionGenerationConfig{ FlowJobName: s.attachSuffix("test_toast_bq_5"), TableNameMapping: map[string]string{srcTableName: dstTableName}, PostgresPort: e2e.PostgresPort, Destination: s.bqHelper.Peer, + CDCSyncMode: protos.QRepSyncMode_QREP_SYNC_MODE_STORAGE_AVRO, + CdcStagingPath: "", } flowConnConfig, err := connectionGen.GenerateFlowConnectionConfigs() - require.NoError(t, err) + s.NoError(err) limits := peerflow.CDCFlowLimits{ - TotalSyncFlows: 1, + TotalSyncFlows: 2, MaxBatchSize: 100, } @@ -648,7 +622,7 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Toast_Advance_3_BQ(t *testing.T) { UPDATE %s SET t2='dummy' WHERE id=1; END; `, srcTableName, srcTableName, srcTableName, srcTableName)) - require.NoError(t, err) + s.NoError(err) fmt.Println("Executed a transaction touching toast columns") }() @@ -666,8 +640,7 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Toast_Advance_3_BQ(t *testing.T) { env.AssertExpectations(s.T()) } -func (s *PeerFlowE2ETestSuiteBQ) Test_Types_BQ(t *testing.T) { - t.Parallel() +func (s *PeerFlowE2ETestSuiteBQ) Test_Types_BQ() { env := s.NewTestWorkflowEnvironment() e2e.RegisterWorkflowsAndActivities(env) @@ -682,21 +655,23 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Types_BQ(t *testing.T) { c33 TIMESTAMP,c34 TIMESTAMPTZ,c35 TIME, c36 TIMETZ,c37 TSQUERY,c38 TSVECTOR, c39 TXID_SNAPSHOT,c40 UUID,c41 XML, c42 INT[], c43 FLOAT[], c44 TEXT[]); `, srcTableName)) - require.NoError(t, err) + s.NoError(err) connectionGen := e2e.FlowConnectionGenerationConfig{ FlowJobName: s.attachSuffix("test_types_bq"), TableNameMapping: map[string]string{srcTableName: dstTableName}, PostgresPort: e2e.PostgresPort, Destination: s.bqHelper.Peer, + CDCSyncMode: protos.QRepSyncMode_QREP_SYNC_MODE_STORAGE_AVRO, + CdcStagingPath: "", } flowConnConfig, err := connectionGen.GenerateFlowConnectionConfigs() - require.NoError(t, err) + s.NoError(err) limits := peerflow.CDCFlowLimits{ - TotalSyncFlows: 1, + TotalSyncFlows: 2, MaxBatchSize: 100, } @@ -719,88 +694,7 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Types_BQ(t *testing.T) { ARRAY[0.0003, 8902.0092], ARRAY['hello','bye']; `, srcTableName)) - require.NoError(t, err) - }() - - env.ExecuteWorkflow(peerflow.CDCFlowWorkflowWithConfig, flowConnConfig, &limits, nil) - - // Verify workflow completes without error - s.True(env.IsWorkflowCompleted()) - err = env.GetWorkflowError() - - // allow only continue as new error - s.Error(err) - s.Contains(err.Error(), "continue as new") - - noNulls, err := s.bqHelper.CheckNull(dstTableName, []string{"c41", "c1", "c2", "c3", "c4", - "c6", "c39", "c40", "id", "c9", "c11", "c12", "c13", "c14", "c15", "c16", "c17", "c18", - "c21", "c22", "c23", "c24", "c28", "c29", "c30", "c31", "c33", "c34", "c35", "c36", - "c37", "c38", "c7", "c8", "c32", "c42", "c43", "c44"}) - if err != nil { - fmt.Println("error %w", err) - } - // Make sure that there are no nulls - s.True(noNulls) - - env.AssertExpectations(s.T()) -} - -func (s *PeerFlowE2ETestSuiteBQ) Test_Types_Avro_BQ(t *testing.T) { - t.Parallel() - env := s.NewTestWorkflowEnvironment() - e2e.RegisterWorkflowsAndActivities(env) - - srcTableName := s.attachSchemaSuffix("test_types_avro_bq") - dstTableName := 
"test_types_avro_bq" - - _, err := s.pool.Exec(context.Background(), fmt.Sprintf(` - CREATE TABLE IF NOT EXISTS %s (id serial PRIMARY KEY,c1 BIGINT,c2 BIT,c3 VARBIT,c4 BOOLEAN, - c6 BYTEA,c7 CHARACTER,c8 varchar,c9 CIDR,c11 DATE,c12 FLOAT,c13 DOUBLE PRECISION, - c14 INET,c15 INTEGER,c16 INTERVAL,c17 JSON,c18 JSONB,c21 MACADDR,c22 MONEY, - c23 NUMERIC,c24 OID,c28 REAL,c29 SMALLINT,c30 SMALLSERIAL,c31 SERIAL,c32 TEXT, - c33 TIMESTAMP,c34 TIMESTAMPTZ,c35 TIME, c36 TIMETZ,c37 TSQUERY,c38 TSVECTOR, - c39 TXID_SNAPSHOT,c40 UUID,c41 XML, c42 INT[], c43 FLOAT[], c44 TEXT[]); - `, srcTableName)) - require.NoError(t, err) - - connectionGen := e2e.FlowConnectionGenerationConfig{ - FlowJobName: s.attachSuffix("test_types_avro_bq"), - TableNameMapping: map[string]string{srcTableName: dstTableName}, - PostgresPort: e2e.PostgresPort, - Destination: s.bqHelper.Peer, - CDCSyncMode: protos.QRepSyncMode_QREP_SYNC_MODE_STORAGE_AVRO, - CdcStagingPath: "peerdb_staging", - } - - flowConnConfig, err := connectionGen.GenerateFlowConnectionConfigs() - require.NoError(t, err) - - limits := peerflow.CDCFlowLimits{ - - TotalSyncFlows: 1, - MaxBatchSize: 100, - } - - // in a separate goroutine, wait for PeerFlowStatusQuery to finish setup - // and execute a transaction touching toast columns - go func() { - e2e.SetupCDCFlowStatusQuery(env, connectionGen) - /* test inserting various types*/ - _, err = s.pool.Exec(context.Background(), fmt.Sprintf(` - INSERT INTO %s SELECT 2,2,b'1',b'101', - true,random_bytea(32),'s','test','1.1.10.2'::cidr, - CURRENT_DATE,1.23,1.234,'192.168.1.5'::inet,1, - '5 years 2 months 29 days 1 minute 2 seconds 200 milliseconds 20000 microseconds'::interval, - '{"sai":1}'::json,'{"sai":1}'::jsonb,'08:00:2b:01:02:03'::macaddr, - 1.2,1.23,4::oid,1.23,1,1,1,'test',now(),now(),now()::time,now()::timetz, - 'fat & rat'::tsquery,'a fat cat sat on a mat and ate a fat rat'::tsvector, - txid_current_snapshot(), - '66073c38-b8df-4bdb-bbca-1c97596b8940'::uuid,xmlcomment('hello'), - ARRAY[9301,239827], - ARRAY[0.0003, 1039.0034], - ARRAY['hello','bye']; - `, srcTableName)) - require.NoError(t, err) + s.NoError(err) }() env.ExecuteWorkflow(peerflow.CDCFlowWorkflowWithConfig, flowConnConfig, &limits, nil) @@ -826,74 +720,7 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Types_Avro_BQ(t *testing.T) { env.AssertExpectations(s.T()) } -func (s *PeerFlowE2ETestSuiteBQ) Test_Simple_Flow_BQ_Avro_CDC(t *testing.T) { - t.Parallel() - env := s.NewTestWorkflowEnvironment() - e2e.RegisterWorkflowsAndActivities(env) - - srcTableName := s.attachSchemaSuffix("test_simple_flow_bq_avro_cdc") - dstTableName := "test_simple_flow_bq_avro_cdc" - - _, err := s.pool.Exec(context.Background(), fmt.Sprintf(` - CREATE TABLE IF NOT EXISTS %s ( - id SERIAL PRIMARY KEY, - key TEXT NOT NULL, - value TEXT NOT NULL - ); - `, srcTableName)) - require.NoError(t, err) - connectionGen := e2e.FlowConnectionGenerationConfig{ - FlowJobName: s.attachSuffix("test_simple_flow_bq_avro_cdc"), - TableNameMapping: map[string]string{srcTableName: dstTableName}, - PostgresPort: e2e.PostgresPort, - Destination: s.bqHelper.Peer, - CDCSyncMode: protos.QRepSyncMode_QREP_SYNC_MODE_STORAGE_AVRO, - CdcStagingPath: "peerdb_staging", - } - - flowConnConfig, err := connectionGen.GenerateFlowConnectionConfigs() - require.NoError(t, err) - - limits := peerflow.CDCFlowLimits{ - TotalSyncFlows: 2, - MaxBatchSize: 100, - } - - go func() { - e2e.SetupCDCFlowStatusQuery(env, connectionGen) - for i := 0; i < 10; i++ { - testKey := fmt.Sprintf("test_key_%d", i) - testValue := 
fmt.Sprintf("test_value_%d", i) - _, err = s.pool.Exec(context.Background(), fmt.Sprintf(` - INSERT INTO %s (key, value) VALUES ($1, $2) - `, srcTableName), testKey, testValue) - require.NoError(t, err) - } - fmt.Println("Inserted 10 rows into the source table") - }() - - env.ExecuteWorkflow(peerflow.CDCFlowWorkflowWithConfig, flowConnConfig, &limits, nil) - - // Verify workflow completes without error - s.True(env.IsWorkflowCompleted()) - err = env.GetWorkflowError() - - // allow only continue as new error - s.Error(err) - s.Contains(err.Error(), "continue as new") - - count, err := s.bqHelper.countRows(dstTableName) - require.NoError(t, err) - s.Equal(10, count) - - // TODO: verify that the data is correctly synced to the destination table - // on the bigquery side - - env.AssertExpectations(s.T()) -} - -func (s *PeerFlowE2ETestSuiteBQ) Test_Multi_Table_BQ(t *testing.T) { - t.Parallel() +func (s *PeerFlowE2ETestSuiteBQ) Test_Multi_Table_BQ() { env := s.NewTestWorkflowEnvironment() e2e.RegisterWorkflowsAndActivities(env) @@ -906,20 +733,22 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Multi_Table_BQ(t *testing.T) { CREATE TABLE %s (id serial primary key, c1 int, c2 text); CREATE TABLE %s(id serial primary key, c1 int, c2 text); `, srcTable1Name, srcTable2Name)) - require.NoError(t, err) + s.NoError(err) connectionGen := e2e.FlowConnectionGenerationConfig{ FlowJobName: s.attachSuffix("test_multi_table_bq"), TableNameMapping: map[string]string{srcTable1Name: dstTable1Name, srcTable2Name: dstTable2Name}, PostgresPort: e2e.PostgresPort, Destination: s.bqHelper.Peer, + CDCSyncMode: protos.QRepSyncMode_QREP_SYNC_MODE_STORAGE_AVRO, + CdcStagingPath: "", } flowConnConfig, err := connectionGen.GenerateFlowConnectionConfigs() - require.NoError(t, err) + s.NoError(err) limits := peerflow.CDCFlowLimits{ - TotalSyncFlows: 1, + TotalSyncFlows: 2, MaxBatchSize: 100, } @@ -932,7 +761,7 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Multi_Table_BQ(t *testing.T) { INSERT INTO %s (c1,c2) VALUES (1,'dummy_1'); INSERT INTO %s (c1,c2) VALUES (-1,'dummy_-1'); `, srcTable1Name, srcTable2Name)) - require.NoError(t, err) + s.NoError(err) fmt.Println("Executed an insert on two tables") }() @@ -943,9 +772,9 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Multi_Table_BQ(t *testing.T) { err = env.GetWorkflowError() count1, err := s.bqHelper.countRows(dstTable1Name) - require.NoError(t, err) + s.NoError(err) count2, err := s.bqHelper.countRows(dstTable2Name) - require.NoError(t, err) + s.NoError(err) s.Equal(1, count1) s.Equal(1, count2) @@ -954,8 +783,7 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Multi_Table_BQ(t *testing.T) { } // TODO: not checking schema exactly, add later -func (s *PeerFlowE2ETestSuiteBQ) Test_Simple_Schema_Changes_BQ(t *testing.T) { - t.Parallel() +func (s *PeerFlowE2ETestSuiteBQ) Test_Simple_Schema_Changes_BQ() { env := s.NewTestWorkflowEnvironment() e2e.RegisterWorkflowsAndActivities(env) @@ -968,17 +796,19 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Simple_Schema_Changes_BQ(t *testing.T) { c1 BIGINT ); `, srcTableName)) - require.NoError(t, err) + s.NoError(err) connectionGen := e2e.FlowConnectionGenerationConfig{ FlowJobName: s.attachSuffix("test_simple_schema_changes"), TableNameMapping: map[string]string{srcTableName: dstTableName}, PostgresPort: e2e.PostgresPort, Destination: s.bqHelper.Peer, + CDCSyncMode: protos.QRepSyncMode_QREP_SYNC_MODE_STORAGE_AVRO, + CdcStagingPath: "", } flowConnConfig, err := connectionGen.GenerateFlowConnectionConfigs() - require.NoError(t, err) + s.NoError(err) limits := 
peerflow.CDCFlowLimits{ TotalSyncFlows: 10, @@ -992,7 +822,7 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Simple_Schema_Changes_BQ(t *testing.T) { e2e.SetupCDCFlowStatusQuery(env, connectionGen) _, err = s.pool.Exec(context.Background(), fmt.Sprintf(` INSERT INTO %s(c1) VALUES ($1)`, srcTableName), 1) - require.NoError(t, err) + s.NoError(err) fmt.Println("Inserted initial row in the source table") // verify we got our first row. @@ -1002,11 +832,11 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Simple_Schema_Changes_BQ(t *testing.T) { // alter source table, add column c2 and insert another row. _, err = s.pool.Exec(context.Background(), fmt.Sprintf(` ALTER TABLE %s ADD COLUMN c2 BIGINT`, srcTableName)) - require.NoError(t, err) + s.NoError(err) fmt.Println("Altered source table, added column c2") _, err = s.pool.Exec(context.Background(), fmt.Sprintf(` INSERT INTO %s(c1,c2) VALUES ($1,$2)`, srcTableName), 2, 2) - require.NoError(t, err) + s.NoError(err) fmt.Println("Inserted row with added c2 in the source table") // verify we got our two rows, if schema did not match up it will error. @@ -1016,11 +846,11 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Simple_Schema_Changes_BQ(t *testing.T) { // alter source table, add column c3, drop column c2 and insert another row. _, err = s.pool.Exec(context.Background(), fmt.Sprintf(` ALTER TABLE %s DROP COLUMN c2, ADD COLUMN c3 BIGINT`, srcTableName)) - require.NoError(t, err) + s.NoError(err) fmt.Println("Altered source table, dropped column c2 and added column c3") _, err = s.pool.Exec(context.Background(), fmt.Sprintf(` INSERT INTO %s(c1,c3) VALUES ($1,$2)`, srcTableName), 3, 3) - require.NoError(t, err) + s.NoError(err) fmt.Println("Inserted row with added c3 in the source table") // verify we got our two rows, if schema did not match up it will error. @@ -1030,11 +860,11 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Simple_Schema_Changes_BQ(t *testing.T) { // alter source table, drop column c3 and insert another row. _, err = s.pool.Exec(context.Background(), fmt.Sprintf(` ALTER TABLE %s DROP COLUMN c3`, srcTableName)) - require.NoError(t, err) + s.NoError(err) fmt.Println("Altered source table, dropped column c3") _, err = s.pool.Exec(context.Background(), fmt.Sprintf(` INSERT INTO %s(c1) VALUES ($1)`, srcTableName), 4) - require.NoError(t, err) + s.NoError(err) fmt.Println("Inserted row after dropping all columns in the source table") // verify we got our two rows, if schema did not match up it will error. 
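The BigQuery hunks above all make the same configuration change: CDCSyncMode moves to Avro-based staging and CdcStagingPath is left empty. Collected in one place, the recurring shape looks like this (a sketch assembled from the hunks above, not new code; the semantics of an empty CdcStagingPath are presumably analogous to the QRepConfig StagingPath comment updated later in this patch):

	connectionGen := e2e.FlowConnectionGenerationConfig{
		FlowJobName:      s.attachSuffix("test_multi_table_bq"),
		TableNameMapping: map[string]string{srcTable1Name: dstTable1Name, srcTable2Name: dstTable2Name},
		PostgresPort:     e2e.PostgresPort,
		Destination:      s.bqHelper.Peer,
		// Avro-based CDC sync; an empty staging path means no external
		// bucket is configured for these tests.
		CDCSyncMode:    protos.QRepSyncMode_QREP_SYNC_MODE_STORAGE_AVRO,
		CdcStagingPath: "",
	}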
@@ -1055,8 +885,7 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Simple_Schema_Changes_BQ(t *testing.T) { env.AssertExpectations(s.T()) } -func (s *PeerFlowE2ETestSuiteBQ) Test_Composite_PKey_BQ(t *testing.T) { - t.Parallel() +func (s *PeerFlowE2ETestSuiteBQ) Test_Composite_PKey_BQ() { env := s.NewTestWorkflowEnvironment() e2e.RegisterWorkflowsAndActivities(env) @@ -1072,17 +901,19 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Composite_PKey_BQ(t *testing.T) { PRIMARY KEY(id,t) ); `, srcTableName)) - require.NoError(t, err) + s.NoError(err) connectionGen := e2e.FlowConnectionGenerationConfig{ FlowJobName: s.attachSuffix("test_cpkey_flow"), TableNameMapping: map[string]string{srcTableName: dstTableName}, PostgresPort: e2e.PostgresPort, Destination: s.bqHelper.Peer, + CDCSyncMode: protos.QRepSyncMode_QREP_SYNC_MODE_STORAGE_AVRO, + CdcStagingPath: "", } flowConnConfig, err := connectionGen.GenerateFlowConnectionConfigs() - require.NoError(t, err) + s.NoError(err) limits := peerflow.CDCFlowLimits{ TotalSyncFlows: 2, @@ -1099,7 +930,7 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Composite_PKey_BQ(t *testing.T) { _, err = s.pool.Exec(context.Background(), fmt.Sprintf(` INSERT INTO %s(c2,t) VALUES ($1,$2) `, srcTableName), i, testValue) - require.NoError(t, err) + s.NoError(err) } fmt.Println("Inserted 10 rows into the source table") @@ -1109,9 +940,9 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Composite_PKey_BQ(t *testing.T) { _, err := s.pool.Exec(context.Background(), fmt.Sprintf(`UPDATE %s SET c1=c1+1 WHERE MOD(c2,2)=$1`, srcTableName), 1) - require.NoError(t, err) + s.NoError(err) _, err = s.pool.Exec(context.Background(), fmt.Sprintf(`DELETE FROM %s WHERE MOD(c2,2)=$1`, srcTableName), 0) - require.NoError(t, err) + s.NoError(err) }() env.ExecuteWorkflow(peerflow.CDCFlowWorkflowWithConfig, flowConnConfig, &limits, nil) @@ -1129,8 +960,7 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Composite_PKey_BQ(t *testing.T) { env.AssertExpectations(s.T()) } -func (s *PeerFlowE2ETestSuiteBQ) Test_Composite_PKey_Toast_1_BQ(t *testing.T) { - t.Parallel() +func (s *PeerFlowE2ETestSuiteBQ) Test_Composite_PKey_Toast_1_BQ() { env := s.NewTestWorkflowEnvironment() e2e.RegisterWorkflowsAndActivities(env) @@ -1147,17 +977,19 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Composite_PKey_Toast_1_BQ(t *testing.T) { PRIMARY KEY(id,t) ); `, srcTableName)) - require.NoError(t, err) + s.NoError(err) connectionGen := e2e.FlowConnectionGenerationConfig{ FlowJobName: s.attachSuffix("test_cpkey_toast1_flow"), TableNameMapping: map[string]string{srcTableName: dstTableName}, PostgresPort: e2e.PostgresPort, Destination: s.bqHelper.Peer, + CDCSyncMode: protos.QRepSyncMode_QREP_SYNC_MODE_STORAGE_AVRO, + CdcStagingPath: "", } flowConnConfig, err := connectionGen.GenerateFlowConnectionConfigs() - require.NoError(t, err) + s.NoError(err) limits := peerflow.CDCFlowLimits{ TotalSyncFlows: 2, @@ -1169,7 +1001,7 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Composite_PKey_Toast_1_BQ(t *testing.T) { go func() { e2e.SetupCDCFlowStatusQuery(env, connectionGen) rowsTx, err := s.pool.Begin(context.Background()) - require.NoError(t, err) + s.NoError(err) // insert 10 rows into the source table for i := 0; i < 10; i++ { @@ -1177,18 +1009,18 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Composite_PKey_Toast_1_BQ(t *testing.T) { _, err = rowsTx.Exec(context.Background(), fmt.Sprintf(` INSERT INTO %s(c2,t,t2) VALUES ($1,$2,random_string(9000)) `, srcTableName), i, testValue) - require.NoError(t, err) + s.NoError(err) } fmt.Println("Inserted 10 rows into the source table") _, err = 
rowsTx.Exec(context.Background(), fmt.Sprintf(`UPDATE %s SET c1=c1+1 WHERE MOD(c2,2)=$1`, srcTableName), 1) - require.NoError(t, err) + s.NoError(err) _, err = rowsTx.Exec(context.Background(), fmt.Sprintf(`DELETE FROM %s WHERE MOD(c2,2)=$1`, srcTableName), 0) - require.NoError(t, err) + s.NoError(err) err = rowsTx.Commit(context.Background()) - require.NoError(t, err) + s.NoError(err) }() env.ExecuteWorkflow(peerflow.CDCFlowWorkflowWithConfig, flowConnConfig, &limits, nil) @@ -1207,8 +1039,7 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Composite_PKey_Toast_1_BQ(t *testing.T) { env.AssertExpectations(s.T()) } -func (s *PeerFlowE2ETestSuiteBQ) Test_Composite_PKey_Toast_2_BQ(t *testing.T) { - t.Parallel() +func (s *PeerFlowE2ETestSuiteBQ) Test_Composite_PKey_Toast_2_BQ() { env := s.NewTestWorkflowEnvironment() e2e.RegisterWorkflowsAndActivities(env) @@ -1225,17 +1056,19 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Composite_PKey_Toast_2_BQ(t *testing.T) { PRIMARY KEY(id,t) ); `, srcTableName)) - require.NoError(t, err) + s.NoError(err) connectionGen := e2e.FlowConnectionGenerationConfig{ FlowJobName: s.attachSuffix("test_cpkey_toast2_flow"), TableNameMapping: map[string]string{srcTableName: dstTableName}, PostgresPort: e2e.PostgresPort, Destination: s.bqHelper.Peer, + CDCSyncMode: protos.QRepSyncMode_QREP_SYNC_MODE_STORAGE_AVRO, + CdcStagingPath: "", } flowConnConfig, err := connectionGen.GenerateFlowConnectionConfigs() - require.NoError(t, err) + s.NoError(err) limits := peerflow.CDCFlowLimits{ TotalSyncFlows: 2, @@ -1253,16 +1086,16 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Composite_PKey_Toast_2_BQ(t *testing.T) { _, err = s.pool.Exec(context.Background(), fmt.Sprintf(` INSERT INTO %s(c2,t,t2) VALUES ($1,$2,random_string(9000)) `, srcTableName), i, testValue) - require.NoError(t, err) + s.NoError(err) } fmt.Println("Inserted 10 rows into the source table") e2e.NormalizeFlowCountQuery(env, connectionGen, 2) _, err = s.pool.Exec(context.Background(), fmt.Sprintf(`UPDATE %s SET c1=c1+1 WHERE MOD(c2,2)=$1`, srcTableName), 1) - require.NoError(t, err) + s.NoError(err) _, err = s.pool.Exec(context.Background(), fmt.Sprintf(`DELETE FROM %s WHERE MOD(c2,2)=$1`, srcTableName), 0) - require.NoError(t, err) + s.NoError(err) }() env.ExecuteWorkflow(peerflow.CDCFlowWorkflowWithConfig, flowConnConfig, &limits, nil) diff --git a/flow/e2e/bigquery/qrep_flow_bq_test.go b/flow/e2e/bigquery/qrep_flow_bq_test.go index 8bd4b6135f..8a97c3b85b 100644 --- a/flow/e2e/bigquery/qrep_flow_bq_test.go +++ b/flow/e2e/bigquery/qrep_flow_bq_test.go @@ -39,9 +39,9 @@ func (s *PeerFlowE2ETestSuiteBQ) compareTableContentsBQ(tableName string, colsSt // read rows from destination table qualifiedTableName := fmt.Sprintf("`%s.%s`", s.bqHelper.Config.DatasetId, tableName) - bqRows, err := s.bqHelper.ExecuteAndProcessQuery( - fmt.Sprintf("SELECT %s FROM %s ORDER BY id", colsString, qualifiedTableName), - ) + bqSelQuery := fmt.Sprintf("SELECT %s FROM %s ORDER BY id", colsString, qualifiedTableName) + fmt.Printf("running query on bigquery: %s\n", bqSelQuery) + bqRows, err := s.bqHelper.ExecuteAndProcessQuery(bqSelQuery) s.NoError(err) s.True(pgRows.Equals(bqRows), "rows from source and destination tables are not equal") @@ -81,43 +81,3 @@ func (s *PeerFlowE2ETestSuiteBQ) Test_Complete_QRep_Flow_Avro() { env.AssertExpectations(s.T()) } - -// NOTE: Disabled due to large JSON tests being added: https://github.com/PeerDB-io/peerdb/issues/309 - -// Test_Complete_QRep_Flow tests a complete flow with data in the source table. 
-// The test inserts 10 rows into the source table and verifies that the data is -// // correctly synced to the destination table this runs a QRep Flow. -// func (s *E2EPeerFlowTestSuite) Test_Complete_QRep_Flow_Multi_Insert() { -// env := s.NewTestWorkflowEnvironment() -// registerWorkflowsAndActivities(env) - -// numRows := 10 - -// tblName := "test_qrep_flow_multi_insert" -// s.setupSourceTable(tblName, numRows) -// s.setupBQDestinationTable(tblName) - -// query := fmt.Sprintf("SELECT * FROM e2e_test.%s WHERE updated_at BETWEEN {{.start}} AND {{.end}}", tblName) - -// qrepConfig := s.createQRepWorkflowConfig("test_qrep_flow_mi", -// "e2e_test."+tblName, -// tblName, -// query, -// protos.QRepSyncMode_QREP_SYNC_MODE_MULTI_INSERT, -// s.bqHelper.Peer) -// runQrepFlowWorkflow(env, qrepConfig) - -// // Verify workflow completes without error -// s.True(env.IsWorkflowCompleted()) - -// // assert that error contains "invalid connection configs" -// err := env.GetWorkflowError() -// s.NoError(err) - -// count, err := s.bqHelper.CountRows(tblName) -// s.NoError(err) - -// s.Equal(numRows, count) - -// env.AssertExpectations(s.T()) -// } diff --git a/flow/e2e/congen.go b/flow/e2e/congen.go index 3a0f71b15a..72a756e531 100644 --- a/flow/e2e/congen.go +++ b/flow/e2e/congen.go @@ -95,7 +95,6 @@ func SetupPostgres(suffix string) (*pgxpool.Pool, error) { } _, err = pool.Exec(context.Background(), ` - SELECT pg_advisory_lock(hashtext('peerdb_pg_setup_lock')); CREATE OR REPLACE FUNCTION random_string( int ) RETURNS TEXT as $$ SELECT string_agg(substring('0123456789bcdfghjkmnpqrstvwxyz', round(random() * 30)::integer, 1), '') FROM generate_series(1, $1); diff --git a/flow/e2e/snowflake/peer_flow_sf_test.go b/flow/e2e/snowflake/peer_flow_sf_test.go index 95bb972a71..37848f0383 100644 --- a/flow/e2e/snowflake/peer_flow_sf_test.go +++ b/flow/e2e/snowflake/peer_flow_sf_test.go @@ -32,42 +32,7 @@ type PeerFlowE2ETestSuiteSF struct { } func TestPeerFlowE2ETestSuiteSF(t *testing.T) { - s := &PeerFlowE2ETestSuiteSF{} - s.SetT(t) - s.SetupSuite() - - tests := []struct { - name string - test func(t *testing.T) - }{ - {"Test_Complete_Simple_Flow_SF", s.Test_Complete_Simple_Flow_SF}, - {"Test_Complete_Simple_Flow_SF_Avro_CDC", s.Test_Complete_Simple_Flow_SF_Avro_CDC}, - {"Test_Invalid_Geo_SF_Avro_CDC", s.Test_Invalid_Geo_SF_Avro_CDC}, - {"Test_Toast_SF", s.Test_Toast_SF}, - {"Test_Toast_Nochanges_SF", s.Test_Toast_Nochanges_SF}, - {"Test_Toast_Advance_1_SF", s.Test_Toast_Advance_1_SF}, - {"Test_Toast_Advance_2_SF", s.Test_Toast_Advance_2_SF}, - {"Test_Toast_Advance_3_SF", s.Test_Toast_Advance_3_SF}, - {"Test_Types_SF", s.Test_Types_SF}, - {"Test_Types_SF_Avro_CDC", s.Test_Types_SF_Avro_CDC}, - {"Test_Multi_Table_SF", s.Test_Multi_Table_SF}, - {"Test_Simple_Schema_Changes_SF", s.Test_Simple_Schema_Changes_SF}, - {"Test_Composite_PKey_SF", s.Test_Composite_PKey_SF}, - {"Test_Composite_PKey_Toast_1_SF", s.Test_Composite_PKey_Toast_1_SF}, - {"Test_Composite_PKey_Toast_2_SF", s.Test_Composite_PKey_Toast_2_SF}, - {"Test_Column_Exclusion", s.Test_Column_Exclusion}, - } - - // assert that there are no duplicate test names - testNames := make(map[string]bool) - for _, tt := range tests { - if testNames[tt.name] { - t.Fatalf("duplicate test name: %s", tt.name) - } - testNames[tt.name] = true - - t.Run(tt.name, tt.test) - } + suite.Run(t, new(PeerFlowE2ETestSuiteSF)) } func (s *PeerFlowE2ETestSuiteSF) attachSchemaSuffix(tableName string) string { @@ -149,8 +114,7 @@ func (s *PeerFlowE2ETestSuiteSF) TearDownSuite() { 
 	require.NoError(s.T(), err)
 }
 
-func (s *PeerFlowE2ETestSuiteSF) Test_Complete_Simple_Flow_SF(t *testing.T) {
-	t.Parallel()
+func (s *PeerFlowE2ETestSuiteSF) Test_Complete_Simple_Flow_SF() {
 	env := s.NewTestWorkflowEnvironment()
 	e2e.RegisterWorkflowsAndActivities(env)
 
@@ -164,17 +128,18 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Complete_Simple_Flow_SF(t *testing.T) {
 		value TEXT NOT NULL
 	);
 	`, srcTableName))
-	require.NoError(t, err)
+	s.NoError(err)
 
 	connectionGen := e2e.FlowConnectionGenerationConfig{
 		FlowJobName:      s.attachSuffix("test_simple_flow"),
 		TableNameMapping: map[string]string{srcTableName: dstTableName},
 		PostgresPort:     e2e.PostgresPort,
 		Destination:      s.sfHelper.Peer,
+		CDCSyncMode:      protos.QRepSyncMode_QREP_SYNC_MODE_STORAGE_AVRO,
 	}
 
 	flowConnConfig, err := connectionGen.GenerateFlowConnectionConfigs()
-	require.NoError(t, err)
+	s.NoError(err)
 
 	limits := peerflow.CDCFlowLimits{
 		TotalSyncFlows: 2,
@@ -182,17 +147,17 @@
 	}
 
 	// in a separate goroutine, wait for PeerFlowStatusQuery to finish setup
-	// and then insert 10 rows into the source table
+	// and then insert 20 rows into the source table
 	go func() {
 		e2e.SetupCDCFlowStatusQuery(env, connectionGen)
-		// insert 10 rows into the source table
-		for i := 0; i < 10; i++ {
+		// insert 20 rows into the source table
+		for i := 0; i < 20; i++ {
 			testKey := fmt.Sprintf("test_key_%d", i)
 			testValue := fmt.Sprintf("test_value_%d", i)
 			_, err = s.pool.Exec(context.Background(), fmt.Sprintf(`
 			INSERT INTO %s (key, value) VALUES ($1, $2)
 		`, srcTableName), testKey, testValue)
-			require.NoError(t, err)
+			s.NoError(err)
 		}
 		fmt.Println("Inserted 10 rows into the source table")
 	}()
@@ -208,8 +173,8 @@
 	s.Contains(err.Error(), "continue as new")
 
 	count, err := s.sfHelper.CountRows("test_simple_flow_sf")
-	require.NoError(t, err)
-	s.Equal(10, count)
+	s.NoError(err)
+	s.Equal(20, count)
 
 	// check the number of rows where _PEERDB_SYNCED_AT is newer than 5 mins ago
 	// it should match the count.
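Stepping back from the individual hunks: dropping the (t *testing.T) parameter turns each test into a testify suite method, picked up automatically by the suite.Run call that replaced the hand-rolled test table earlier in this patch. A minimal sketch of that pattern, with illustrative names rather than the PeerDB types:

	package e2e_test

	import (
		"testing"

		"github.com/stretchr/testify/suite"
	)

	type exampleSuite struct {
		suite.Suite
		// shared fixtures (connection pools, helpers) live here
	}

	// Runs once before any test method; TearDownSuite mirrors it at the end.
	func (s *exampleSuite) SetupSuite() {}

	// Every method whose name starts with "Test" is discovered by suite.Run,
	// so no manual registration slice is needed.
	func (s *exampleSuite) Test_Something() {
		s.Equal(1, 1)
	}

	func TestExampleSuite(t *testing.T) {
		suite.Run(t, new(exampleSuite))
	}

Because SetupSuite and TearDownSuite run once around the whole suite, the fixture setup no longer needs to be invoked by hand before each test.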
@@ -217,87 +182,16 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Complete_Simple_Flow_SF(t *testing.T) { SELECT COUNT(*) FROM %s WHERE _PEERDB_SYNCED_AT > CURRENT_TIMESTAMP() - INTERVAL '30 MINUTE' `, dstTableName) numNewRows, err := s.sfHelper.RunIntQuery(newerSyncedAtQuery) - require.NoError(t, err) - s.Equal(10, numNewRows) + s.NoError(err) + s.Equal(20, numNewRows) // TODO: verify that the data is correctly synced to the destination table - // on the bigquery side + // on the Snowflake side env.AssertExpectations(s.T()) } -func (s *PeerFlowE2ETestSuiteSF) Test_Complete_Simple_Flow_SF_Avro_CDC(t *testing.T) { - t.Parallel() - env := s.NewTestWorkflowEnvironment() - e2e.RegisterWorkflowsAndActivities(env) - - tblConst := "test_simple_flow_sf_avro_cdc" - srcTableName := s.attachSchemaSuffix(tblConst) - dstTableName := fmt.Sprintf("%s.%s", s.sfHelper.testSchemaName, tblConst) - - _, err := s.pool.Exec(context.Background(), fmt.Sprintf(` - CREATE TABLE IF NOT EXISTS %s ( - id SERIAL PRIMARY KEY, - key TEXT NOT NULL, - value TEXT NOT NULL - ); - `, srcTableName)) - require.NoError(t, err) - - connectionGen := e2e.FlowConnectionGenerationConfig{ - FlowJobName: s.attachSuffix("test_simple_flow_avro"), - TableNameMapping: map[string]string{srcTableName: dstTableName}, - PostgresPort: e2e.PostgresPort, - Destination: s.sfHelper.Peer, - CDCSyncMode: protos.QRepSyncMode_QREP_SYNC_MODE_STORAGE_AVRO, - } - - flowConnConfig, err := connectionGen.GenerateFlowConnectionConfigs() - require.NoError(t, err) - - limits := peerflow.CDCFlowLimits{ - TotalSyncFlows: 2, - MaxBatchSize: 100, - } - - // in a separate goroutine, wait for PeerFlowStatusQuery to finish setup - // and then insert 10 rows into the source table - go func() { - e2e.SetupCDCFlowStatusQuery(env, connectionGen) - // insert 10 rows into the source table - for i := 0; i < 15; i++ { - testKey := fmt.Sprintf("test_key_%d", i) - testValue := fmt.Sprintf("test_value_%d", i) - _, err = s.pool.Exec(context.Background(), fmt.Sprintf(` - INSERT INTO %s (key, value) VALUES ($1, $2) - `, srcTableName), testKey, testValue) - require.NoError(t, err) - } - fmt.Println("Inserted 15 rows into the source table") - }() - - env.ExecuteWorkflow(peerflow.CDCFlowWorkflowWithConfig, flowConnConfig, &limits, nil) - - // Verify workflow completes without error - require.True(t, env.IsWorkflowCompleted()) - err = env.GetWorkflowError() - - // allow only continue as new error - s.Error(err) - s.Contains(err.Error(), "continue as new") - - count, err := s.sfHelper.CountRows(tblConst) - require.NoError(t, err) - s.Equal(15, count) - - // TODO: verify that the data is correctly synced to the destination table - // on the bigquery side - - env.AssertExpectations(s.T()) -} - -func (s *PeerFlowE2ETestSuiteSF) Test_Invalid_Geo_SF_Avro_CDC(t *testing.T) { - t.Parallel() +func (s *PeerFlowE2ETestSuiteSF) Test_Invalid_Geo_SF_Avro_CDC() { env := s.NewTestWorkflowEnvironment() e2e.RegisterWorkflowsAndActivities(env) @@ -311,7 +205,7 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Invalid_Geo_SF_Avro_CDC(t *testing.T) { poly GEOGRAPHY(POLYGON) NOT NULL ); `, srcTableName)) - require.NoError(t, err) + s.NoError(err) connectionGen := e2e.FlowConnectionGenerationConfig{ FlowJobName: s.attachSuffix("test_invalid_geo_sf_avro_cdc"), @@ -322,7 +216,7 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Invalid_Geo_SF_Avro_CDC(t *testing.T) { } flowConnConfig, err := connectionGen.GenerateFlowConnectionConfigs() - require.NoError(t, err) + s.NoError(err) limits := peerflow.CDCFlowLimits{ TotalSyncFlows: 2, @@ 
-343,7 +237,7 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Invalid_Geo_SF_Avro_CDC(t *testing.T) { "5fc64140f2567052abc2c9bf2df9c5925fc641409394e16573c2c9bf2df9c5925fc6414049eceda9afc1c9bfdd1cc1a05fc64140fe43faedebc0"+ "c9bf4694f6065fc64140fe43faedebc0c9bfffe7305f5ec641406693d6f2ddc0c9bf1a8361d35dc64140afdb8d2b1bc3c9bf", ) - require.NoError(t, err) + s.NoError(err) } fmt.Println("Inserted 4 invalid geography rows into the source table") for i := 4; i < 10; i++ { @@ -353,7 +247,7 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Invalid_Geo_SF_Avro_CDC(t *testing.T) { "010300000001000000050000000000000000000000000000000000000000000000"+ "00000000000000000000f03f000000000000f03f000000000000f03f0000000000"+ "00f03f000000000000000000000000000000000000000000000000") - require.NoError(t, err) + s.NoError(err) } fmt.Println("Inserted 6 valid geography rows and 10 total rows into source") }() @@ -371,11 +265,11 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Invalid_Geo_SF_Avro_CDC(t *testing.T) { // We inserted 4 invalid shapes in each. // They should have filtered out as null on destination lineCount, err := s.sfHelper.CountNonNullRows("test_invalid_geo_sf_avro_cdc", "line") - require.NoError(t, err) + s.NoError(err) s.Equal(6, lineCount) polyCount, err := s.sfHelper.CountNonNullRows("test_invalid_geo_sf_avro_cdc", "poly") - require.NoError(t, err) + s.NoError(err) s.Equal(6, polyCount) // TODO: verify that the data is correctly synced to the destination table @@ -384,8 +278,7 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Invalid_Geo_SF_Avro_CDC(t *testing.T) { env.AssertExpectations(s.T()) } -func (s *PeerFlowE2ETestSuiteSF) Test_Toast_SF(t *testing.T) { - t.Parallel() +func (s *PeerFlowE2ETestSuiteSF) Test_Toast_SF() { env := s.NewTestWorkflowEnvironment() e2e.RegisterWorkflowsAndActivities(env) @@ -400,20 +293,21 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Toast_SF(t *testing.T) { k int ); `, srcTableName)) - require.NoError(t, err) + s.NoError(err) connectionGen := e2e.FlowConnectionGenerationConfig{ FlowJobName: s.attachSuffix("test_toast_sf_1"), TableNameMapping: map[string]string{srcTableName: dstTableName}, PostgresPort: e2e.PostgresPort, Destination: s.sfHelper.Peer, + CDCSyncMode: protos.QRepSyncMode_QREP_SYNC_MODE_STORAGE_AVRO, } flowConnConfig, err := connectionGen.GenerateFlowConnectionConfigs() - require.NoError(t, err) + s.NoError(err) limits := peerflow.CDCFlowLimits{ - TotalSyncFlows: 1, + TotalSyncFlows: 2, MaxBatchSize: 100, } @@ -435,7 +329,7 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Toast_SF(t *testing.T) { UPDATE %s SET t1='dummy' WHERE id=2; END; `, srcTableName, srcTableName, srcTableName)) - require.NoError(t, err) + s.NoError(err) fmt.Println("Executed a transaction touching toast columns") }() @@ -453,8 +347,7 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Toast_SF(t *testing.T) { env.AssertExpectations(s.T()) } -func (s *PeerFlowE2ETestSuiteSF) Test_Toast_Nochanges_SF(t *testing.T) { - t.Parallel() +func (s *PeerFlowE2ETestSuiteSF) Test_Toast_Nochanges_SF() { env := s.NewTestWorkflowEnvironment() e2e.RegisterWorkflowsAndActivities(env) @@ -471,20 +364,21 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Toast_Nochanges_SF(t *testing.T) { ); `, srcTableName, srcTableName)) log.Infof("Creating table '%s', err: %v", srcTableName, err) - require.NoError(t, err) + s.NoError(err) connectionGen := e2e.FlowConnectionGenerationConfig{ FlowJobName: s.attachSuffix("test_toast_sf_2"), TableNameMapping: map[string]string{srcTableName: dstTableName}, PostgresPort: e2e.PostgresPort, Destination: s.sfHelper.Peer, + 
CDCSyncMode: protos.QRepSyncMode_QREP_SYNC_MODE_STORAGE_AVRO, } flowConnConfig, err := connectionGen.GenerateFlowConnectionConfigs() - require.NoError(t, err) + s.NoError(err) limits := peerflow.CDCFlowLimits{ - TotalSyncFlows: 1, + TotalSyncFlows: 2, MaxBatchSize: 100, } @@ -499,7 +393,7 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Toast_Nochanges_SF(t *testing.T) { UPDATE %s SET t1='dummy' WHERE id=2; END; `, srcTableName, srcTableName)) - require.NoError(t, err) + s.NoError(err) fmt.Println("Executed a transaction touching toast columns") }() @@ -517,8 +411,7 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Toast_Nochanges_SF(t *testing.T) { env.AssertExpectations(s.T()) } -func (s *PeerFlowE2ETestSuiteSF) Test_Toast_Advance_1_SF(t *testing.T) { - t.Parallel() +func (s *PeerFlowE2ETestSuiteSF) Test_Toast_Advance_1_SF() { env := s.NewTestWorkflowEnvironment() e2e.RegisterWorkflowsAndActivities(env) @@ -534,17 +427,18 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Toast_Advance_1_SF(t *testing.T) { k int ); `, srcTableName, srcTableName)) - require.NoError(t, err) + s.NoError(err) connectionGen := e2e.FlowConnectionGenerationConfig{ FlowJobName: s.attachSuffix("test_toast_sf_3"), TableNameMapping: map[string]string{srcTableName: dstTableName}, PostgresPort: e2e.PostgresPort, Destination: s.sfHelper.Peer, + CDCSyncMode: protos.QRepSyncMode_QREP_SYNC_MODE_STORAGE_AVRO, } flowConnConfig, err := connectionGen.GenerateFlowConnectionConfigs() - require.NoError(t, err) + s.NoError(err) limits := peerflow.CDCFlowLimits{ TotalSyncFlows: 2, @@ -575,7 +469,7 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Toast_Advance_1_SF(t *testing.T) { END; `, srcTableName, srcTableName, srcTableName, srcTableName, srcTableName, srcTableName, srcTableName, srcTableName, srcTableName, srcTableName, srcTableName, srcTableName)) - require.NoError(t, err) + s.NoError(err) fmt.Println("Executed a transaction touching toast columns") }() @@ -593,8 +487,7 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Toast_Advance_1_SF(t *testing.T) { env.AssertExpectations(s.T()) } -func (s *PeerFlowE2ETestSuiteSF) Test_Toast_Advance_2_SF(t *testing.T) { - t.Parallel() +func (s *PeerFlowE2ETestSuiteSF) Test_Toast_Advance_2_SF() { env := s.NewTestWorkflowEnvironment() e2e.RegisterWorkflowsAndActivities(env) @@ -609,20 +502,21 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Toast_Advance_2_SF(t *testing.T) { k int ); `, srcTableName, srcTableName)) - require.NoError(t, err) + s.NoError(err) connectionGen := e2e.FlowConnectionGenerationConfig{ FlowJobName: s.attachSuffix("test_toast_sf_4"), TableNameMapping: map[string]string{srcTableName: dstTableName}, PostgresPort: e2e.PostgresPort, Destination: s.sfHelper.Peer, + CDCSyncMode: protos.QRepSyncMode_QREP_SYNC_MODE_STORAGE_AVRO, } flowConnConfig, err := connectionGen.GenerateFlowConnectionConfigs() - require.NoError(t, err) + s.NoError(err) limits := peerflow.CDCFlowLimits{ - TotalSyncFlows: 1, + TotalSyncFlows: 2, MaxBatchSize: 100, } @@ -644,7 +538,7 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Toast_Advance_2_SF(t *testing.T) { UPDATE %s SET k=4 WHERE id=1; END; `, srcTableName, srcTableName, srcTableName, srcTableName, srcTableName, srcTableName)) - require.NoError(t, err) + s.NoError(err) fmt.Println("Executed a transaction touching toast columns") }() @@ -662,8 +556,7 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Toast_Advance_2_SF(t *testing.T) { env.AssertExpectations(s.T()) } -func (s *PeerFlowE2ETestSuiteSF) Test_Toast_Advance_3_SF(t *testing.T) { - t.Parallel() +func (s *PeerFlowE2ETestSuiteSF) Test_Toast_Advance_3_SF() { 
env := s.NewTestWorkflowEnvironment() e2e.RegisterWorkflowsAndActivities(env) @@ -679,20 +572,21 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Toast_Advance_3_SF(t *testing.T) { k int ); `, srcTableName, srcTableName)) - require.NoError(t, err) + s.NoError(err) connectionGen := e2e.FlowConnectionGenerationConfig{ FlowJobName: s.attachSuffix("test_toast_sf_5"), TableNameMapping: map[string]string{srcTableName: dstTableName}, PostgresPort: e2e.PostgresPort, Destination: s.sfHelper.Peer, + CDCSyncMode: protos.QRepSyncMode_QREP_SYNC_MODE_STORAGE_AVRO, } flowConnConfig, err := connectionGen.GenerateFlowConnectionConfigs() - require.NoError(t, err) + s.NoError(err) limits := peerflow.CDCFlowLimits{ - TotalSyncFlows: 1, + TotalSyncFlows: 2, MaxBatchSize: 100, } @@ -713,7 +607,7 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Toast_Advance_3_SF(t *testing.T) { UPDATE %s SET t2='dummy' WHERE id=1; END; `, srcTableName, srcTableName, srcTableName, srcTableName)) - require.NoError(t, err) + s.NoError(err) fmt.Println("Executed a transaction touching toast columns") }() @@ -731,8 +625,7 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Toast_Advance_3_SF(t *testing.T) { env.AssertExpectations(s.T()) } -func (s *PeerFlowE2ETestSuiteSF) Test_Types_SF(t *testing.T) { - t.Parallel() +func (s *PeerFlowE2ETestSuiteSF) Test_Types_SF() { env := s.NewTestWorkflowEnvironment() e2e.RegisterWorkflowsAndActivities(env) @@ -749,86 +642,7 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Types_SF(t *testing.T) { c39 TXID_SNAPSHOT,c40 UUID,c41 XML, c42 GEOMETRY(POINT), c43 GEOGRAPHY(POINT), c44 GEOGRAPHY(POLYGON), c45 GEOGRAPHY(LINESTRING), c46 GEOMETRY(LINESTRING), c47 GEOMETRY(POLYGON)); `, srcTableName, srcTableName)) - require.NoError(t, err) - - connectionGen := e2e.FlowConnectionGenerationConfig{ - FlowJobName: s.attachSuffix("test_types_sf"), - TableNameMapping: map[string]string{srcTableName: dstTableName}, - PostgresPort: e2e.PostgresPort, - Destination: s.sfHelper.Peer, - } - - flowConnConfig, err := connectionGen.GenerateFlowConnectionConfigs() - require.NoError(t, err) - - limits := peerflow.CDCFlowLimits{ - TotalSyncFlows: 1, - MaxBatchSize: 100, - } - - // in a separate goroutine, wait for PeerFlowStatusQuery to finish setup - // and execute a transaction touching toast columns - go func() { - e2e.SetupCDCFlowStatusQuery(env, connectionGen) - /* test inserting various types*/ - _, err = s.pool.Exec(context.Background(), fmt.Sprintf(` - INSERT INTO %s SELECT 2,2,b'1',b'101', - true,random_bytea(32),'s','test','1.1.10.2'::cidr, - CURRENT_DATE,1.23,1.234,'192.168.1.5'::inet,1, - '5 years 2 months 29 days 1 minute 2 seconds 200 milliseconds 20000 microseconds'::interval, - '{"sai":1}'::json,'{"sai":1}'::jsonb,'08:00:2b:01:02:03'::macaddr, - 1.2,1.23,4::oid,1.23,1,1,1,'test',now(),now(),now()::time,now()::timetz, - 'fat & rat'::tsquery,'a fat cat sat on a mat and ate a fat rat'::tsvector, - txid_current_snapshot(), - '66073c38-b8df-4bdb-bbca-1c97596b8940'::uuid,xmlcomment('hello'), - 'POINT(1 2)','POINT(40.7128 -74.0060)','POLYGON((0 0, 0 1, 1 1, 1 0, 0 0))', - 'LINESTRING(-74.0060 40.7128, -73.9352 40.7306, -73.9123 40.7831)','LINESTRING(0 0, 1 1, 2 2)', - 'POLYGON((-74.0060 40.7128, -73.9352 40.7306, -73.9123 40.7831, -74.0060 40.7128))'; - `, srcTableName)) - require.NoError(t, err) - }() - - env.ExecuteWorkflow(peerflow.CDCFlowWorkflowWithConfig, flowConnConfig, &limits, nil) - - // Verify workflow completes without error - s.True(env.IsWorkflowCompleted()) - err = env.GetWorkflowError() - - // allow only continue as new error - 
s.Error(err) - s.Contains(err.Error(), "continue as new") - - noNulls, err := s.sfHelper.CheckNull("test_types_sf", []string{"c41", "c1", "c2", "c3", "c4", - "c6", "c39", "c40", "id", "c9", "c11", "c12", "c13", "c14", "c15", "c16", "c17", "c18", - "c21", "c22", "c23", "c24", "c28", "c29", "c30", "c31", "c33", "c34", "c35", "c36", - "c37", "c38", "c7", "c8", "c32", "c42", "c43", "c44", "c45", "c46"}) - if err != nil { - fmt.Println("error %w", err) - } - // Make sure that there are no nulls - s.Equal(noNulls, true) - - env.AssertExpectations(s.T()) -} - -func (s *PeerFlowE2ETestSuiteSF) Test_Types_SF_Avro_CDC(t *testing.T) { - t.Parallel() - env := s.NewTestWorkflowEnvironment() - e2e.RegisterWorkflowsAndActivities(env) - - srcTableName := s.attachSchemaSuffix("test_types_sf_avro_cdc") - dstTableName := fmt.Sprintf("%s.%s", s.sfHelper.testSchemaName, "test_types_sf_avro_cdc") - - _, err := s.pool.Exec(context.Background(), fmt.Sprintf(` - CREATE TABLE IF NOT EXISTS %s (id serial PRIMARY KEY,c1 BIGINT,c2 BIT,c3 VARBIT,c4 BOOLEAN, - c6 BYTEA,c7 CHARACTER,c8 varchar,c9 CIDR,c11 DATE,c12 FLOAT,c13 DOUBLE PRECISION, - c14 INET,c15 INTEGER,c16 INTERVAL,c17 JSON,c18 JSONB,c21 MACADDR,c22 MONEY, - c23 NUMERIC,c24 OID,c28 REAL,c29 SMALLINT,c30 SMALLSERIAL,c31 SERIAL,c32 TEXT, - c33 TIMESTAMP,c34 TIMESTAMPTZ,c35 TIME, c36 TIMETZ,c37 TSQUERY,c38 TSVECTOR, - c39 TXID_SNAPSHOT,c40 UUID,c41 XML, c42 GEOMETRY(POINT), c43 GEOGRAPHY(POINT), - c44 GEOGRAPHY(POLYGON), c45 GEOGRAPHY(LINESTRING), c46 GEOMETRY(LINESTRING), c47 GEOMETRY(POLYGON)); - `, srcTableName)) - require.NoError(t, err) + s.NoError(err) connectionGen := e2e.FlowConnectionGenerationConfig{ FlowJobName: s.attachSuffix("test_types_sf"), @@ -839,10 +653,10 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Types_SF_Avro_CDC(t *testing.T) { } flowConnConfig, err := connectionGen.GenerateFlowConnectionConfigs() - require.NoError(t, err) + s.NoError(err) limits := peerflow.CDCFlowLimits{ - TotalSyncFlows: 1, + TotalSyncFlows: 2, MaxBatchSize: 100, } @@ -865,7 +679,7 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Types_SF_Avro_CDC(t *testing.T) { 'LINESTRING(-74.0060 40.7128, -73.9352 40.7306, -73.9123 40.7831)','LINESTRING(0 0, 1 1, 2 2)', 'POLYGON((-74.0060 40.7128, -73.9352 40.7306, -73.9123 40.7831, -74.0060 40.7128))'; `, srcTableName)) - require.NoError(t, err) + s.NoError(err) }() env.ExecuteWorkflow(peerflow.CDCFlowWorkflowWithConfig, flowConnConfig, &limits, nil) @@ -878,7 +692,7 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Types_SF_Avro_CDC(t *testing.T) { s.Error(err) s.Contains(err.Error(), "continue as new") - noNulls, err := s.sfHelper.CheckNull("test_types_sf_avro_cdc", []string{"c41", "c1", "c2", "c3", "c4", + noNulls, err := s.sfHelper.CheckNull("test_types_sf", []string{"c41", "c1", "c2", "c3", "c4", "c6", "c39", "c40", "id", "c9", "c11", "c12", "c13", "c14", "c15", "c16", "c17", "c18", "c21", "c22", "c23", "c24", "c28", "c29", "c30", "c31", "c33", "c34", "c35", "c36", "c37", "c38", "c7", "c8", "c32", "c42", "c43", "c44", "c45", "c46"}) @@ -891,8 +705,7 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Types_SF_Avro_CDC(t *testing.T) { env.AssertExpectations(s.T()) } -func (s *PeerFlowE2ETestSuiteSF) Test_Multi_Table_SF(t *testing.T) { - t.Parallel() +func (s *PeerFlowE2ETestSuiteSF) Test_Multi_Table_SF() { env := s.NewTestWorkflowEnvironment() e2e.RegisterWorkflowsAndActivities(env) @@ -905,20 +718,21 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Multi_Table_SF(t *testing.T) { CREATE TABLE IF NOT EXISTS %s (id serial primary key, c1 int, c2 text); CREATE TABLE IF 
NOT EXISTS %s (id serial primary key, c1 int, c2 text); `, srcTable1Name, srcTable2Name)) - require.NoError(t, err) + s.NoError(err) connectionGen := e2e.FlowConnectionGenerationConfig{ FlowJobName: s.attachSuffix("test_multi_table"), TableNameMapping: map[string]string{srcTable1Name: dstTable1Name, srcTable2Name: dstTable2Name}, PostgresPort: e2e.PostgresPort, Destination: s.sfHelper.Peer, + CDCSyncMode: protos.QRepSyncMode_QREP_SYNC_MODE_STORAGE_AVRO, } flowConnConfig, err := connectionGen.GenerateFlowConnectionConfigs() - require.NoError(t, err) + s.NoError(err) limits := peerflow.CDCFlowLimits{ - TotalSyncFlows: 1, + TotalSyncFlows: 2, MaxBatchSize: 100, } @@ -931,7 +745,7 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Multi_Table_SF(t *testing.T) { INSERT INTO %s (c1,c2) VALUES (1,'dummy_1'); INSERT INTO %s (c1,c2) VALUES (-1,'dummy_-1'); `, srcTable1Name, srcTable2Name)) - require.NoError(t, err) + s.NoError(err) }() env.ExecuteWorkflow(peerflow.CDCFlowWorkflowWithConfig, flowConnConfig, &limits, nil) @@ -941,9 +755,9 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Multi_Table_SF(t *testing.T) { err = env.GetWorkflowError() count1, err := s.sfHelper.CountRows("test1_sf") - require.NoError(t, err) + s.NoError(err) count2, err := s.sfHelper.CountRows("test2_sf") - require.NoError(t, err) + s.NoError(err) s.Equal(1, count1) s.Equal(1, count2) @@ -951,8 +765,7 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Multi_Table_SF(t *testing.T) { env.AssertExpectations(s.T()) } -func (s *PeerFlowE2ETestSuiteSF) Test_Simple_Schema_Changes_SF(t *testing.T) { - t.Parallel() +func (s *PeerFlowE2ETestSuiteSF) Test_Simple_Schema_Changes_SF() { env := s.NewTestWorkflowEnvironment() e2e.RegisterWorkflowsAndActivities(env) @@ -965,17 +778,18 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Simple_Schema_Changes_SF(t *testing.T) { c1 BIGINT ); `, srcTableName)) - require.NoError(t, err) + s.NoError(err) connectionGen := e2e.FlowConnectionGenerationConfig{ FlowJobName: s.attachSuffix("test_simple_schema_changes"), TableNameMapping: map[string]string{srcTableName: dstTableName}, PostgresPort: e2e.PostgresPort, Destination: s.sfHelper.Peer, + CDCSyncMode: protos.QRepSyncMode_QREP_SYNC_MODE_STORAGE_AVRO, } flowConnConfig, err := connectionGen.GenerateFlowConnectionConfigs() - require.NoError(t, err) + s.NoError(err) limits := peerflow.CDCFlowLimits{ TotalSyncFlows: 10, @@ -989,7 +803,7 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Simple_Schema_Changes_SF(t *testing.T) { e2e.SetupCDCFlowStatusQuery(env, connectionGen) _, err = s.pool.Exec(context.Background(), fmt.Sprintf(` INSERT INTO %s(c1) VALUES ($1)`, srcTableName), 1) - require.NoError(t, err) + s.NoError(err) fmt.Println("Inserted initial row in the source table") // verify we got our first row. @@ -1006,18 +820,18 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Simple_Schema_Changes_SF(t *testing.T) { output, err := s.connector.GetTableSchema(&protos.GetTableSchemaBatchInput{ TableIdentifiers: []string{dstTableName}, }) - require.NoError(t, err) + s.NoError(err) s.Equal(expectedTableSchema, output.TableNameSchemaMapping[dstTableName]) s.compareTableContentsSF("test_simple_schema_changes", "id,c1", false) // alter source table, add column c2 and insert another row. 
_, err = s.pool.Exec(context.Background(), fmt.Sprintf(` ALTER TABLE %s ADD COLUMN c2 BIGINT`, srcTableName)) - require.NoError(t, err) + s.NoError(err) fmt.Println("Altered source table, added column c2") _, err = s.pool.Exec(context.Background(), fmt.Sprintf(` INSERT INTO %s(c1,c2) VALUES ($1,$2)`, srcTableName), 2, 2) - require.NoError(t, err) + s.NoError(err) fmt.Println("Inserted row with added c2 in the source table") // verify we got our two rows, if schema did not match up it will error. @@ -1035,18 +849,18 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Simple_Schema_Changes_SF(t *testing.T) { output, err = s.connector.GetTableSchema(&protos.GetTableSchemaBatchInput{ TableIdentifiers: []string{dstTableName}, }) - require.NoError(t, err) + s.NoError(err) s.Equal(expectedTableSchema, output.TableNameSchemaMapping[dstTableName]) s.compareTableContentsSF("test_simple_schema_changes", "id,c1,c2", false) // alter source table, add column c3, drop column c2 and insert another row. _, err = s.pool.Exec(context.Background(), fmt.Sprintf(` ALTER TABLE %s DROP COLUMN c2, ADD COLUMN c3 BIGINT`, srcTableName)) - require.NoError(t, err) + s.NoError(err) fmt.Println("Altered source table, dropped column c2 and added column c3") _, err = s.pool.Exec(context.Background(), fmt.Sprintf(` INSERT INTO %s(c1,c3) VALUES ($1,$2)`, srcTableName), 3, 3) - require.NoError(t, err) + s.NoError(err) fmt.Println("Inserted row with added c3 in the source table") // verify we got our two rows, if schema did not match up it will error. @@ -1065,18 +879,18 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Simple_Schema_Changes_SF(t *testing.T) { output, err = s.connector.GetTableSchema(&protos.GetTableSchemaBatchInput{ TableIdentifiers: []string{dstTableName}, }) - require.NoError(t, err) + s.NoError(err) s.Equal(expectedTableSchema, output.TableNameSchemaMapping[dstTableName]) s.compareTableContentsSF("test_simple_schema_changes", "id,c1,c3", false) // alter source table, drop column c3 and insert another row. _, err = s.pool.Exec(context.Background(), fmt.Sprintf(` ALTER TABLE %s DROP COLUMN c3`, srcTableName)) - require.NoError(t, err) + s.NoError(err) fmt.Println("Altered source table, dropped column c3") _, err = s.pool.Exec(context.Background(), fmt.Sprintf(` INSERT INTO %s(c1) VALUES ($1)`, srcTableName), 4) - require.NoError(t, err) + s.NoError(err) fmt.Println("Inserted row after dropping all columns in the source table") // verify we got our two rows, if schema did not match up it will error. 
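One behavioral note on the require.NoError to s.NoError conversion running through these hunks: suite assertions are assert-style, so a failed check marks the test as failed but lets execution continue, whereas require.NoError stops the test immediately via FailNow (which is also unsafe to call from the helper goroutines these tests spawn). A small illustrative snippet, not PeerDB code, showing the two styles side by side:

	package e2e_test

	import (
		"errors"
		"testing"

		"github.com/stretchr/testify/suite"
	)

	type assertStyleSuite struct{ suite.Suite }

	func (s *assertStyleSuite) Test_AssertVsRequire() {
		err := errors.New("boom") // deliberately failing input
		s.NoError(err)            // assert-style: records the failure, continues
		s.Require().NoError(err)  // require-style: calls FailNow, stops this test here
	}

	func TestAssertStyleSuite(t *testing.T) {
		suite.Run(t, new(assertStyleSuite))
	}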
@@ -1095,7 +909,7 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Simple_Schema_Changes_SF(t *testing.T) { output, err = s.connector.GetTableSchema(&protos.GetTableSchemaBatchInput{ TableIdentifiers: []string{dstTableName}, }) - require.NoError(t, err) + s.NoError(err) s.Equal(expectedTableSchema, output.TableNameSchemaMapping[dstTableName]) s.compareTableContentsSF("test_simple_schema_changes", "id,c1", false) }() @@ -1113,8 +927,7 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Simple_Schema_Changes_SF(t *testing.T) { env.AssertExpectations(s.T()) } -func (s *PeerFlowE2ETestSuiteSF) Test_Composite_PKey_SF(t *testing.T) { - t.Parallel() +func (s *PeerFlowE2ETestSuiteSF) Test_Composite_PKey_SF() { env := s.NewTestWorkflowEnvironment() e2e.RegisterWorkflowsAndActivities(env) @@ -1130,17 +943,18 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Composite_PKey_SF(t *testing.T) { PRIMARY KEY(id,t) ); `, srcTableName)) - require.NoError(t, err) + s.NoError(err) connectionGen := e2e.FlowConnectionGenerationConfig{ FlowJobName: s.attachSuffix("test_cpkey_flow"), TableNameMapping: map[string]string{srcTableName: dstTableName}, PostgresPort: e2e.PostgresPort, Destination: s.sfHelper.Peer, + CDCSyncMode: protos.QRepSyncMode_QREP_SYNC_MODE_STORAGE_AVRO, } flowConnConfig, err := connectionGen.GenerateFlowConnectionConfigs() - require.NoError(t, err) + s.NoError(err) limits := peerflow.CDCFlowLimits{ TotalSyncFlows: 5, @@ -1157,7 +971,7 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Composite_PKey_SF(t *testing.T) { _, err = s.pool.Exec(context.Background(), fmt.Sprintf(` INSERT INTO %s(c2,t) VALUES ($1,$2) `, srcTableName), i, testValue) - require.NoError(t, err) + s.NoError(err) } fmt.Println("Inserted 10 rows into the source table") @@ -1167,9 +981,9 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Composite_PKey_SF(t *testing.T) { _, err := s.pool.Exec(context.Background(), fmt.Sprintf(`UPDATE %s SET c1=c1+1 WHERE MOD(c2,2)=$1`, srcTableName), 1) - require.NoError(t, err) + s.NoError(err) _, err = s.pool.Exec(context.Background(), fmt.Sprintf(`DELETE FROM %s WHERE MOD(c2,2)=$1`, srcTableName), 0) - require.NoError(t, err) + s.NoError(err) }() env.ExecuteWorkflow(peerflow.CDCFlowWorkflowWithConfig, flowConnConfig, &limits, nil) @@ -1188,8 +1002,7 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Composite_PKey_SF(t *testing.T) { env.AssertExpectations(s.T()) } -func (s *PeerFlowE2ETestSuiteSF) Test_Composite_PKey_Toast_1_SF(t *testing.T) { - t.Parallel() +func (s *PeerFlowE2ETestSuiteSF) Test_Composite_PKey_Toast_1_SF() { env := s.NewTestWorkflowEnvironment() e2e.RegisterWorkflowsAndActivities(env) @@ -1206,17 +1019,18 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Composite_PKey_Toast_1_SF(t *testing.T) { PRIMARY KEY(id,t) ); `, srcTableName)) - require.NoError(t, err) + s.NoError(err) connectionGen := e2e.FlowConnectionGenerationConfig{ FlowJobName: s.attachSuffix("test_cpkey_toast1_flow"), TableNameMapping: map[string]string{srcTableName: dstTableName}, PostgresPort: e2e.PostgresPort, Destination: s.sfHelper.Peer, + CDCSyncMode: protos.QRepSyncMode_QREP_SYNC_MODE_STORAGE_AVRO, } flowConnConfig, err := connectionGen.GenerateFlowConnectionConfigs() - require.NoError(t, err) + s.NoError(err) limits := peerflow.CDCFlowLimits{ TotalSyncFlows: 2, @@ -1228,7 +1042,7 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Composite_PKey_Toast_1_SF(t *testing.T) { go func() { e2e.SetupCDCFlowStatusQuery(env, connectionGen) rowsTx, err := s.pool.Begin(context.Background()) - require.NoError(t, err) + s.NoError(err) // insert 10 rows into the source table for i := 0; i < 
10; i++ { @@ -1236,18 +1050,18 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Composite_PKey_Toast_1_SF(t *testing.T) { _, err = rowsTx.Exec(context.Background(), fmt.Sprintf(` INSERT INTO %s(c2,t,t2) VALUES ($1,$2,random_string(9000)) `, srcTableName), i, testValue) - require.NoError(t, err) + s.NoError(err) } fmt.Println("Inserted 10 rows into the source table") _, err = rowsTx.Exec(context.Background(), fmt.Sprintf(`UPDATE %s SET c1=c1+1 WHERE MOD(c2,2)=$1`, srcTableName), 1) - require.NoError(t, err) + s.NoError(err) _, err = rowsTx.Exec(context.Background(), fmt.Sprintf(`DELETE FROM %s WHERE MOD(c2,2)=$1`, srcTableName), 0) - require.NoError(t, err) + s.NoError(err) err = rowsTx.Commit(context.Background()) - require.NoError(t, err) + s.NoError(err) }() env.ExecuteWorkflow(peerflow.CDCFlowWorkflowWithConfig, flowConnConfig, &limits, nil) @@ -1266,8 +1080,7 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Composite_PKey_Toast_1_SF(t *testing.T) { env.AssertExpectations(s.T()) } -func (s *PeerFlowE2ETestSuiteSF) Test_Composite_PKey_Toast_2_SF(t *testing.T) { - t.Parallel() +func (s *PeerFlowE2ETestSuiteSF) Test_Composite_PKey_Toast_2_SF() { env := s.NewTestWorkflowEnvironment() e2e.RegisterWorkflowsAndActivities(env) @@ -1284,17 +1097,18 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Composite_PKey_Toast_2_SF(t *testing.T) { PRIMARY KEY(id,t) ); `, srcTableName)) - require.NoError(t, err) + s.NoError(err) connectionGen := e2e.FlowConnectionGenerationConfig{ FlowJobName: s.attachSuffix("test_cpkey_toast2_flow"), TableNameMapping: map[string]string{srcTableName: dstTableName}, PostgresPort: e2e.PostgresPort, Destination: s.sfHelper.Peer, + CDCSyncMode: protos.QRepSyncMode_QREP_SYNC_MODE_STORAGE_AVRO, } flowConnConfig, err := connectionGen.GenerateFlowConnectionConfigs() - require.NoError(t, err) + s.NoError(err) limits := peerflow.CDCFlowLimits{ TotalSyncFlows: 4, @@ -1312,16 +1126,16 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Composite_PKey_Toast_2_SF(t *testing.T) { _, err = s.pool.Exec(context.Background(), fmt.Sprintf(` INSERT INTO %s(c2,t,t2) VALUES ($1,$2,random_string(9000)) `, srcTableName), i, testValue) - require.NoError(t, err) + s.NoError(err) } fmt.Println("Inserted 10 rows into the source table") e2e.NormalizeFlowCountQuery(env, connectionGen, 2) _, err = s.pool.Exec(context.Background(), fmt.Sprintf(`UPDATE %s SET c1=c1+1 WHERE MOD(c2,2)=$1`, srcTableName), 1) - require.NoError(t, err) + s.NoError(err) _, err = s.pool.Exec(context.Background(), fmt.Sprintf(`DELETE FROM %s WHERE MOD(c2,2)=$1`, srcTableName), 0) - require.NoError(t, err) + s.NoError(err) }() env.ExecuteWorkflow(peerflow.CDCFlowWorkflowWithConfig, flowConnConfig, &limits, nil) @@ -1340,8 +1154,7 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Composite_PKey_Toast_2_SF(t *testing.T) { env.AssertExpectations(s.T()) } -func (s *PeerFlowE2ETestSuiteSF) Test_Column_Exclusion(t *testing.T) { - t.Parallel() +func (s *PeerFlowE2ETestSuiteSF) Test_Column_Exclusion() { env := s.NewTestWorkflowEnvironment() e2e.RegisterWorkflowsAndActivities(env) @@ -1358,7 +1171,7 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Column_Exclusion(t *testing.T) { PRIMARY KEY(id,t) ); `, srcTableName)) - require.NoError(t, err) + s.NoError(err) connectionGen := e2e.FlowConnectionGenerationConfig{ FlowJobName: s.attachSuffix("test_exclude_flow"), @@ -1395,20 +1208,20 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Column_Exclusion(t *testing.T) { _, err = s.pool.Exec(context.Background(), fmt.Sprintf(` INSERT INTO %s(c2,t,t2) VALUES ($1,$2,random_string(100)) `, srcTableName), i, 
testValue) - require.NoError(t, err) + s.NoError(err) } fmt.Println("Inserted 10 rows into the source table") e2e.NormalizeFlowCountQuery(env, connectionGen, 2) _, err = s.pool.Exec(context.Background(), fmt.Sprintf(`UPDATE %s SET c1=c1+1 WHERE MOD(c2,2)=$1`, srcTableName), 1) - require.NoError(t, err) + s.NoError(err) _, err = s.pool.Exec(context.Background(), fmt.Sprintf(`DELETE FROM %s WHERE MOD(c2,2)=$1`, srcTableName), 0) - require.NoError(t, err) + s.NoError(err) }() env.ExecuteWorkflow(peerflow.CDCFlowWorkflowWithConfig, config, &limits, nil) - require.True(t, env.IsWorkflowCompleted()) + s.True(env.IsWorkflowCompleted()) err = env.GetWorkflowError() s.Error(err) s.Contains(err.Error(), "continue as new") @@ -1416,11 +1229,11 @@ func (s *PeerFlowE2ETestSuiteSF) Test_Column_Exclusion(t *testing.T) { query := fmt.Sprintf("SELECT * FROM %s.%s.test_exclude_sf ORDER BY id", s.sfHelper.testDatabaseName, s.sfHelper.testSchemaName) sfRows, err := s.sfHelper.ExecuteAndProcessQuery(query) - require.NoError(t, err) + s.NoError(err) for _, field := range sfRows.Schema.Fields { - require.NotEqual(t, field.Name, "c2") + s.NotEqual(field.Name, "c2") } - require.Equal(t, 4, len(sfRows.Schema.Fields)) - require.Equal(t, 10, len(sfRows.Records)) + s.Equal(4, len(sfRows.Schema.Fields)) + s.Equal(10, len(sfRows.Records)) } diff --git a/flow/e2e/test_utils.go b/flow/e2e/test_utils.go index f26afc2ee1..b57ad72214 100644 --- a/flow/e2e/test_utils.go +++ b/flow/e2e/test_utils.go @@ -71,7 +71,7 @@ func SetupCDCFlowStatusQuery(env *testsuite.TestWorkflowEnvironment, log.Errorln(err) } - if state.SetupComplete { + if state.SnapshotComplete { break } } else { @@ -293,7 +293,6 @@ func CreateQRepWorkflowConfig( if err != nil { return nil, err } - qrepConfig.InitialCopyOnly = true return qrepConfig, nil @@ -301,6 +300,7 @@ func CreateQRepWorkflowConfig( func RunQrepFlowWorkflow(env *testsuite.TestWorkflowEnvironment, config *protos.QRepConfig) { state := peerflow.NewQRepFlowState() + time.Sleep(5 * time.Second) env.ExecuteWorkflow(peerflow.QRepFlowWorkflow, config, state) } diff --git a/flow/generated/protos/flow.pb.go b/flow/generated/protos/flow.pb.go index df610088a1..bf29f33454 100644 --- a/flow/generated/protos/flow.pb.go +++ b/flow/generated/protos/flow.pb.go @@ -2659,8 +2659,8 @@ type QRepConfig struct { // This is only used when sync_mode is AVRO // this is the location where the avro files will be written // if this starts with gs:// then it will be written to GCS - // if this starts with s3:// then it will be written to S3 - // if nothing is specified then it will be written to local disk, only supported in Snowflake + // if this starts with s3:// then it will be written to S3, only supported in Snowflake + // if nothing is specified then it will be written to local disk // if using GCS or S3 make sure your instance has the correct permissions. 
StagingPath string `protobuf:"bytes,15,opt,name=staging_path,json=stagingPath,proto3" json:"staging_path,omitempty"` // This setting overrides batch_size_int and batch_duration_seconds @@ -3225,6 +3225,7 @@ type QRepFlowState struct { LastPartition *QRepPartition `protobuf:"bytes,1,opt,name=last_partition,json=lastPartition,proto3" json:"last_partition,omitempty"` NumPartitionsProcessed uint64 `protobuf:"varint,2,opt,name=num_partitions_processed,json=numPartitionsProcessed,proto3" json:"num_partitions_processed,omitempty"` NeedsResync bool `protobuf:"varint,3,opt,name=needs_resync,json=needsResync,proto3" json:"needs_resync,omitempty"` + DisableWaitForNewRows bool `protobuf:"varint,4,opt,name=disable_wait_for_new_rows,json=disableWaitForNewRows,proto3" json:"disable_wait_for_new_rows,omitempty"` } func (x *QRepFlowState) Reset() { @@ -3280,6 +3281,13 @@ func (x *QRepFlowState) GetNeedsResync() bool { return false } +func (x *QRepFlowState) GetDisableWaitForNewRows() bool { + if x != nil { + return x.DisableWaitForNewRows + } + return false +} + var File_flow_proto protoreflect.FileDescriptor var file_flow_proto_rawDesc = []byte{ @@ -3941,7 +3949,7 @@ var file_flow_proto_rawDesc = []byte{ 0x64, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 0x62, 0x5f, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x52, 0x11, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x22, 0xaf, + 0x6c, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x22, 0xe9, 0x01, 0x0a, 0x0d, 0x51, 0x52, 0x65, 0x70, 0x46, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x41, 0x0a, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, @@ -3953,26 +3961,30 @@ var file_flow_proto_rawDesc = []byte{ 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x65, 0x65, 0x64, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x79, 0x6e, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x6e, 0x65, 0x65, 0x64, 0x73, 0x52, 0x65, 0x73, 0x79, 0x6e, 0x63, - 0x2a, 0x50, 0x0a, 0x0c, 0x51, 0x52, 0x65, 0x70, 0x53, 0x79, 0x6e, 0x63, 0x4d, 0x6f, 0x64, 0x65, - 0x12, 0x1f, 0x0a, 0x1b, 0x51, 0x52, 0x45, 0x50, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x4d, 0x4f, - 0x44, 0x45, 0x5f, 0x4d, 0x55, 0x4c, 0x54, 0x49, 0x5f, 0x49, 0x4e, 0x53, 0x45, 0x52, 0x54, 0x10, - 0x00, 0x12, 0x1f, 0x0a, 0x1b, 0x51, 0x52, 0x45, 0x50, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x4d, - 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x54, 0x4f, 0x52, 0x41, 0x47, 0x45, 0x5f, 0x41, 0x56, 0x52, 0x4f, - 0x10, 0x01, 0x2a, 0x66, 0x0a, 0x0d, 0x51, 0x52, 0x65, 0x70, 0x57, 0x72, 0x69, 0x74, 0x65, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x51, 0x52, 0x45, 0x50, 0x5f, 0x57, 0x52, 0x49, 0x54, - 0x45, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x41, 0x50, 0x50, 0x45, 0x4e, 0x44, 0x10, 0x00, 0x12, - 0x1a, 0x0a, 0x16, 0x51, 0x52, 0x45, 0x50, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x4d, 0x4f, - 0x44, 0x45, 0x5f, 0x55, 0x50, 0x53, 0x45, 0x52, 0x54, 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x51, - 0x52, 0x45, 0x50, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x4f, - 0x56, 0x45, 0x52, 0x57, 0x52, 0x49, 0x54, 0x45, 0x10, 0x02, 0x42, 0x76, 0x0a, 0x0f, 0x63, 0x6f, - 0x6d, 0x2e, 0x70, 0x65, 0x65, 0x72, 0x64, 
0x62, 0x5f, 0x66, 0x6c, 0x6f, 0x77, 0x42, 0x09, 0x46, - 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x10, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x65, 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0xa2, 0x02, 0x03, 0x50, - 0x58, 0x58, 0xaa, 0x02, 0x0a, 0x50, 0x65, 0x65, 0x72, 0x64, 0x62, 0x46, 0x6c, 0x6f, 0x77, 0xca, - 0x02, 0x0a, 0x50, 0x65, 0x65, 0x72, 0x64, 0x62, 0x46, 0x6c, 0x6f, 0x77, 0xe2, 0x02, 0x16, 0x50, - 0x65, 0x65, 0x72, 0x64, 0x62, 0x46, 0x6c, 0x6f, 0x77, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0a, 0x50, 0x65, 0x65, 0x72, 0x64, 0x62, 0x46, 0x6c, - 0x6f, 0x77, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x12, 0x38, 0x0a, 0x19, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x77, 0x61, 0x69, 0x74, + 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x6e, 0x65, 0x77, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x15, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x61, 0x69, 0x74, + 0x46, 0x6f, 0x72, 0x4e, 0x65, 0x77, 0x52, 0x6f, 0x77, 0x73, 0x2a, 0x50, 0x0a, 0x0c, 0x51, 0x52, + 0x65, 0x70, 0x53, 0x79, 0x6e, 0x63, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x51, 0x52, + 0x45, 0x50, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x4d, 0x55, 0x4c, + 0x54, 0x49, 0x5f, 0x49, 0x4e, 0x53, 0x45, 0x52, 0x54, 0x10, 0x00, 0x12, 0x1f, 0x0a, 0x1b, 0x51, + 0x52, 0x45, 0x50, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x54, + 0x4f, 0x52, 0x41, 0x47, 0x45, 0x5f, 0x41, 0x56, 0x52, 0x4f, 0x10, 0x01, 0x2a, 0x66, 0x0a, 0x0d, + 0x51, 0x52, 0x65, 0x70, 0x57, 0x72, 0x69, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, + 0x16, 0x51, 0x52, 0x45, 0x50, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x4d, 0x4f, 0x44, 0x45, + 0x5f, 0x41, 0x50, 0x50, 0x45, 0x4e, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x51, 0x52, 0x45, + 0x50, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x50, 0x53, + 0x45, 0x52, 0x54, 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x51, 0x52, 0x45, 0x50, 0x5f, 0x57, 0x52, + 0x49, 0x54, 0x45, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x4f, 0x56, 0x45, 0x52, 0x57, 0x52, 0x49, + 0x54, 0x45, 0x10, 0x02, 0x42, 0x76, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x2e, 0x70, 0x65, 0x65, 0x72, + 0x64, 0x62, 0x5f, 0x66, 0x6c, 0x6f, 0x77, 0x42, 0x09, 0x46, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x10, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0xa2, 0x02, 0x03, 0x50, 0x58, 0x58, 0xaa, 0x02, 0x0a, 0x50, + 0x65, 0x65, 0x72, 0x64, 0x62, 0x46, 0x6c, 0x6f, 0x77, 0xca, 0x02, 0x0a, 0x50, 0x65, 0x65, 0x72, + 0x64, 0x62, 0x46, 0x6c, 0x6f, 0x77, 0xe2, 0x02, 0x16, 0x50, 0x65, 0x65, 0x72, 0x64, 0x62, 0x46, + 0x6c, 0x6f, 0x77, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, + 0x02, 0x0a, 0x50, 0x65, 0x65, 0x72, 0x64, 0x62, 0x46, 0x6c, 0x6f, 0x77, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/flow/go.sum b/flow/go.sum index 3122b24711..1e3fe7ff88 100644 --- a/flow/go.sum +++ b/flow/go.sum @@ -289,6 +289,8 @@ github.com/klauspost/asmfmt v1.3.2 h1:4Ri7ox3EwapiOjCki+hw14RyKk201CN4rzyCJRFLpK github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= github.com/klauspost/compress v1.17.3 h1:qkRjuerhUU1EmXLYGkSH6EZL+vPSxIrYjLNAK4slzwA= github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/klauspost/compress v1.17.3 
h1:qkRjuerhUU1EmXLYGkSH6EZL+vPSxIrYjLNAK4slzwA=
+github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc=
github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
diff --git a/flow/model/qrecord_batch.go b/flow/model/qrecord_batch.go
index 25f4f7b20c..27ebc4014e 100644
--- a/flow/model/qrecord_batch.go
+++ b/flow/model/qrecord_batch.go
@@ -21,31 +21,32 @@ type QRecordBatch struct {
// Equals checks if two QRecordBatches are identical.
func (q *QRecordBatch) Equals(other *QRecordBatch) bool {
	if other == nil {
+		fmt.Printf("other is nil\n")
		return q == nil
	}

	// First check simple attributes
	if q.NumRecords != other.NumRecords {
		// print num records
-		log.Infof("q.NumRecords: %d\n", q.NumRecords)
-		log.Infof("other.NumRecords: %d\n", other.NumRecords)
+		fmt.Printf("q.NumRecords: %d\n", q.NumRecords)
+		fmt.Printf("other.NumRecords: %d\n", other.NumRecords)
		return false
	}

	// Compare column names
	if !q.Schema.EqualNames(other.Schema) {
-		log.Infof("Column names are not equal")
-		log.Infof("Schema 1: %v", q.Schema.GetColumnNames())
-		log.Infof("Schema 2: %v", other.Schema.GetColumnNames())
+		fmt.Printf("Column names are not equal\n")
+		fmt.Printf("Schema 1: %v\n", q.Schema.GetColumnNames())
+		fmt.Printf("Schema 2: %v\n", other.Schema.GetColumnNames())
		return false
	}

	// Compare records
	for i, record := range q.Records {
		if !record.equals(other.Records[i]) {
-			log.Infof("Record %d is not equal", i)
-			log.Infof("Record 1: %v", record)
-			log.Infof("Record 2: %v", other.Records[i])
+			fmt.Printf("Record %d is not equal\n", i)
+			fmt.Printf("Record 1: %v\n", record)
+			fmt.Printf("Record 2: %v\n", other.Records[i])
			return false
		}
	}
diff --git a/flow/workflows/qrep_flow.go b/flow/workflows/qrep_flow.go
index f20d17951e..3b8e77a686 100644
--- a/flow/workflows/qrep_flow.go
+++ b/flow/workflows/qrep_flow.go
@@ -34,7 +34,7 @@ type QRepPartitionFlowExecution struct {
	runUUID string
}

-// returns a new empty PeerFlowState
+// returns a new empty QRepFlowState
func NewQRepFlowState() *protos.QRepFlowState {
	return &protos.QRepFlowState{
		LastPartition: &protos.QRepPartition{
@@ -46,6 +46,19 @@
	}
}

+// returns a QRepFlowState for testing, with the wait for new rows disabled
+func NewQRepFlowStateForTesting() *protos.QRepFlowState {
+	return &protos.QRepFlowState{
+		LastPartition: &protos.QRepPartition{
+			PartitionId: "not-applicable-partition",
+			Range:       nil,
+		},
+		NumPartitionsProcessed: 0,
+		NeedsResync:            true,
+		DisableWaitForNewRows:  true,
+	}
+}
+
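As an aside on how this new state is meant to behave at runtime (a sketch under assumed names, not code from this patch): QRepFlowWorkflow normally blocks in waitForNewRows between passes, and a state built by NewQRepFlowStateForTesting opts out of that wait so a test can drive exactly one pass deterministically. The qrepFlowState type below is a hypothetical stand-in for protos.QRepFlowState so the sketch is self-contained:

package main

import "fmt"

// qrepFlowState is a hypothetical stand-in for protos.QRepFlowState,
// reduced to the fields that drive the wait-for-new-rows decision.
type qrepFlowState struct {
	NeedsResync           bool
	DisableWaitForNewRows bool
}

// onePass mimics the tail of a QRep workflow pass: either block until
// the source has new rows, or continue immediately.
func onePass(state *qrepFlowState) {
	if state.DisableWaitForNewRows {
		fmt.Println("test path: skip waitForNewRows and continue as new immediately")
		return
	}
	fmt.Println("production path: block in waitForNewRows until new rows arrive")
}

func main() {
	onePass(&qrepFlowState{})                                               // production default: the flag is false
	onePass(&qrepFlowState{NeedsResync: true, DisableWaitForNewRows: true}) // as built for testing
}

// NewQRepFlowExecution creates a new instance of QRepFlowExecution.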
func NewQRepFlowExecution(ctx workflow.Context, config *protos.QRepConfig, runUUID string) *QRepFlowExecution { return &QRepFlowExecution{ @@ -440,10 +453,12 @@ func QRepFlowWorkflow( state.LastPartition = partitions.Partitions[len(partitions.Partitions)-1] } - // sleep for a while and continue the workflow - err = q.waitForNewRows(ctx, state.LastPartition) - if err != nil { - return err + if !state.DisableWaitForNewRows { + // sleep for a while and continue the workflow + err = q.waitForNewRows(ctx, state.LastPartition) + if err != nil { + return err + } } workflow.GetLogger(ctx).Info("Continuing as new workflow", diff --git a/nexus/pt/src/peerdb_flow.rs b/nexus/pt/src/peerdb_flow.rs index 67ba78e80e..dc308131e6 100644 --- a/nexus/pt/src/peerdb_flow.rs +++ b/nexus/pt/src/peerdb_flow.rs @@ -457,8 +457,8 @@ pub struct QRepConfig { /// This is only used when sync_mode is AVRO /// this is the location where the avro files will be written /// if this starts with gs:// then it will be written to GCS - /// if this starts with s3:// then it will be written to S3 - /// if nothing is specified then it will be written to local disk, only supported in Snowflake + /// if this starts with s3:// then it will be written to S3, only supported in Snowflake + /// if nothing is specified then it will be written to local disk /// if using GCS or S3 make sure your instance has the correct permissions. #[prost(string, tag="15")] pub staging_path: ::prost::alloc::string::String, @@ -540,6 +540,8 @@ pub struct QRepFlowState { pub num_partitions_processed: u64, #[prost(bool, tag="3")] pub needs_resync: bool, + #[prost(bool, tag="4")] + pub disable_wait_for_new_rows: bool, } /// protos for qrep #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] diff --git a/nexus/pt/src/peerdb_flow.serde.rs b/nexus/pt/src/peerdb_flow.serde.rs index ebcd1ffe57..0436bf3345 100644 --- a/nexus/pt/src/peerdb_flow.serde.rs +++ b/nexus/pt/src/peerdb_flow.serde.rs @@ -3004,6 +3004,9 @@ impl serde::Serialize for QRepFlowState { if self.needs_resync { len += 1; } + if self.disable_wait_for_new_rows { + len += 1; + } let mut struct_ser = serializer.serialize_struct("peerdb_flow.QRepFlowState", len)?; if let Some(v) = self.last_partition.as_ref() { struct_ser.serialize_field("lastPartition", v)?; @@ -3014,6 +3017,9 @@ impl serde::Serialize for QRepFlowState { if self.needs_resync { struct_ser.serialize_field("needsResync", &self.needs_resync)?; } + if self.disable_wait_for_new_rows { + struct_ser.serialize_field("disableWaitForNewRows", &self.disable_wait_for_new_rows)?; + } struct_ser.end() } } @@ -3030,6 +3036,8 @@ impl<'de> serde::Deserialize<'de> for QRepFlowState { "numPartitionsProcessed", "needs_resync", "needsResync", + "disable_wait_for_new_rows", + "disableWaitForNewRows", ]; #[allow(clippy::enum_variant_names)] @@ -3037,6 +3045,7 @@ impl<'de> serde::Deserialize<'de> for QRepFlowState { LastPartition, NumPartitionsProcessed, NeedsResync, + DisableWaitForNewRows, __SkipField__, } impl<'de> serde::Deserialize<'de> for GeneratedField { @@ -3062,6 +3071,7 @@ impl<'de> serde::Deserialize<'de> for QRepFlowState { "lastPartition" | "last_partition" => Ok(GeneratedField::LastPartition), "numPartitionsProcessed" | "num_partitions_processed" => Ok(GeneratedField::NumPartitionsProcessed), "needsResync" | "needs_resync" => Ok(GeneratedField::NeedsResync), + "disableWaitForNewRows" | "disable_wait_for_new_rows" => Ok(GeneratedField::DisableWaitForNewRows), _ => Ok(GeneratedField::__SkipField__), } } @@ 
-3084,6 +3094,7 @@ impl<'de> serde::Deserialize<'de> for QRepFlowState { let mut last_partition__ = None; let mut num_partitions_processed__ = None; let mut needs_resync__ = None; + let mut disable_wait_for_new_rows__ = None; while let Some(k) = map.next_key()? { match k { GeneratedField::LastPartition => { @@ -3106,6 +3117,12 @@ impl<'de> serde::Deserialize<'de> for QRepFlowState { } needs_resync__ = Some(map.next_value()?); } + GeneratedField::DisableWaitForNewRows => { + if disable_wait_for_new_rows__.is_some() { + return Err(serde::de::Error::duplicate_field("disableWaitForNewRows")); + } + disable_wait_for_new_rows__ = Some(map.next_value()?); + } GeneratedField::__SkipField__ => { let _ = map.next_value::()?; } @@ -3115,6 +3132,7 @@ impl<'de> serde::Deserialize<'de> for QRepFlowState { last_partition: last_partition__, num_partitions_processed: num_partitions_processed__.unwrap_or_default(), needs_resync: needs_resync__.unwrap_or_default(), + disable_wait_for_new_rows: disable_wait_for_new_rows__.unwrap_or_default(), }) } } diff --git a/protos/flow.proto b/protos/flow.proto index 6289f993f2..281f609993 100644 --- a/protos/flow.proto +++ b/protos/flow.proto @@ -307,8 +307,8 @@ message QRepConfig { // This is only used when sync_mode is AVRO // this is the location where the avro files will be written // if this starts with gs:// then it will be written to GCS - // if this starts with s3:// then it will be written to S3 - // if nothing is specified then it will be written to local disk, only supported in Snowflake + // if this starts with s3:// then it will be written to S3, only supported in Snowflake + // if nothing is specified then it will be written to local disk // if using GCS or S3 make sure your instance has the correct permissions. string staging_path = 15; @@ -364,4 +364,5 @@ message QRepFlowState { QRepPartition last_partition = 1; uint64 num_partitions_processed = 2; bool needs_resync = 3; + bool disable_wait_for_new_rows = 4; } diff --git a/ui/grpc_generated/flow.ts b/ui/grpc_generated/flow.ts index 95118814f5..3e8f36e97f 100644 --- a/ui/grpc_generated/flow.ts +++ b/ui/grpc_generated/flow.ts @@ -416,8 +416,8 @@ export interface QRepConfig { * This is only used when sync_mode is AVRO * this is the location where the avro files will be written * if this starts with gs:// then it will be written to GCS - * if this starts with s3:// then it will be written to S3 - * if nothing is specified then it will be written to local disk, only supported in Snowflake + * if this starts with s3:// then it will be written to S3, only supported in Snowflake + * if nothing is specified then it will be written to local disk * if using GCS or S3 make sure your instance has the correct permissions. 
*/ stagingPath: string; @@ -475,6 +475,7 @@ export interface QRepFlowState { lastPartition: QRepPartition | undefined; numPartitionsProcessed: number; needsResync: boolean; + disableWaitForNewRows: boolean; } function createBaseTableNameMapping(): TableNameMapping { @@ -6155,7 +6156,7 @@ export const ReplayTableSchemaDeltaInput = { }; function createBaseQRepFlowState(): QRepFlowState { - return { lastPartition: undefined, numPartitionsProcessed: 0, needsResync: false }; + return { lastPartition: undefined, numPartitionsProcessed: 0, needsResync: false, disableWaitForNewRows: false }; } export const QRepFlowState = { @@ -6169,6 +6170,9 @@ export const QRepFlowState = { if (message.needsResync === true) { writer.uint32(24).bool(message.needsResync); } + if (message.disableWaitForNewRows === true) { + writer.uint32(32).bool(message.disableWaitForNewRows); + } return writer; }, @@ -6200,6 +6204,13 @@ export const QRepFlowState = { message.needsResync = reader.bool(); continue; + case 4: + if (tag !== 32) { + break; + } + + message.disableWaitForNewRows = reader.bool(); + continue; } if ((tag & 7) === 4 || tag === 0) { break; @@ -6214,6 +6225,7 @@ export const QRepFlowState = { lastPartition: isSet(object.lastPartition) ? QRepPartition.fromJSON(object.lastPartition) : undefined, numPartitionsProcessed: isSet(object.numPartitionsProcessed) ? Number(object.numPartitionsProcessed) : 0, needsResync: isSet(object.needsResync) ? Boolean(object.needsResync) : false, + disableWaitForNewRows: isSet(object.disableWaitForNewRows) ? Boolean(object.disableWaitForNewRows) : false, }; }, @@ -6228,6 +6240,9 @@ export const QRepFlowState = { if (message.needsResync === true) { obj.needsResync = message.needsResync; } + if (message.disableWaitForNewRows === true) { + obj.disableWaitForNewRows = message.disableWaitForNewRows; + } return obj; }, @@ -6241,6 +6256,7 @@ export const QRepFlowState = { : undefined; message.numPartitionsProcessed = object.numPartitionsProcessed ?? 0; message.needsResync = object.needsResync ?? false; + message.disableWaitForNewRows = object.disableWaitForNewRows ?? 
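false;
    return message;
  },
};

A note on the constants in the generated encoder/decoder above (a sketch, not part of the patch): 24 and 32 are protobuf field keys, computed as (field_number << 3) | wire_type, and bool fields use the varint wire type 0. So needs_resync (field 3) keys to 24 and the new disable_wait_for_new_rows (field 4) keys to 32, which is exactly why the decoder rejects any tag other than 32 in case 4:

package main

import "fmt"

// Recomputes the protobuf field keys hard-coded by the generated code:
// key = (field_number << 3) | wire_type, with wire type 0 (varint) for bool.
func main() {
	const wireVarint = 0
	fields := []struct {
		name string
		num  uint32
	}{
		{"needs_resync", 3},              // -> 24, matches writer.uint32(24)
		{"disable_wait_for_new_rows", 4}, // -> 32, matches writer.uint32(32)
	}
	for _, f := range fields {
		fmt.Printf("%s: key %d\n", f.name, f.num<<3|wireVarint)
	}
}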
From b53231f96233b3e57dff2eeccaeca8aa13b73b7c Mon Sep 17 00:00:00 2001
From: Kaushik Iska
Date: Thu, 23 Nov 2023 10:56:14 -0500
Subject: [PATCH 6/8] Move nil check earlier (#659)

Follow-up to #658
---
flow/e2e/snowflake/peer_flow_sf_test.go | 8 ++++++++
flow/model/model.go | 25 +++++++++++++------------
2 files changed, 21 insertions(+), 12 deletions(-)

diff --git a/flow/e2e/snowflake/peer_flow_sf_test.go b/flow/e2e/snowflake/peer_flow_sf_test.go
index 37848f0383..3c62490271 100644
--- a/flow/e2e/snowflake/peer_flow_sf_test.go
+++ b/flow/e2e/snowflake/peer_flow_sf_test.go
@@ -63,6 +63,13 @@ func (s *PeerFlowE2ETestSuiteSF) setupTemporalLogger() {
	s.SetLogger(tlogger)
}

+type logWriterType struct{ t *testing.T }
+
+func (l logWriterType) Write(p []byte) (n int, err error) {
+	l.t.Log(string(p))
+	return len(p), nil
+}
+
func (s *PeerFlowE2ETestSuiteSF) SetupSuite() {
	err := godotenv.Load()
	if err != nil {
@@ -73,6 +80,7 @@ func (s *PeerFlowE2ETestSuiteSF) SetupSuite() {
	log.SetReportCaller(true)
	log.SetLevel(log.WarnLevel)
+	log.SetOutput(logWriterType{t: s.T()})

	s.setupTemporalLogger()

diff --git a/flow/model/model.go b/flow/model/model.go
index 0557af2774..b91dbaa901 100644
--- a/flow/model/model.go
+++ b/flow/model/model.go
@@ -135,22 +135,23 @@ func (r *RecordItems) toMap() (map[string]interface{}, error) {
	jsonStruct := make(map[string]interface{})
	for col, idx := range r.colToValIdx {
		v := r.values[idx]
+		if v.Value == nil {
+			jsonStruct[col] = nil
+			continue
+		}
+
		var err error
		switch v.Kind {
		case qvalue.QValueKindString, qvalue.QValueKindJSON:
-			if v.Value == nil {
-				jsonStruct[col] = nil
-			} else {
-				strVal, ok := v.Value.(string)
-				if !ok {
-					return nil, fmt.Errorf("expected string value for column %s for %T", col, v.Value)
-				}
+			strVal, ok := v.Value.(string)
+			if !ok {
+				return nil, fmt.Errorf("expected string value for column %s for %T", col, v.Value)
+			}

-				if len(strVal) > 15*1024*1024 {
-					jsonStruct[col] = ""
-				} else {
-					jsonStruct[col] = strVal
-				}
+			if len(strVal) > 15*1024*1024 {
+				jsonStruct[col] = ""
+			} else {
+				jsonStruct[col] = strVal
			}
		case qvalue.QValueKindTimestamp, qvalue.QValueKindTimestampTZ, qvalue.QValueKindDate,
			qvalue.QValueKindTime, qvalue.QValueKindTimeTZ:
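The model.go change above is the classic guard-clause refactor: hoisting the nil check out of the switch means every QValueKind now maps a nil value to JSON null, where previously only the string/JSON branch handled nil and the other branches could type-assert on a nil interface. A self-contained sketch of the pattern, with hypothetical reduced types rather than PeerDB's own:

package main

import "fmt"

// kind and value are hypothetical stand-ins for qvalue.QValueKind and
// qvalue.QValue, just enough to show the hoisted nil guard.
type kind int

const (
	kindString kind = iota
	kindTimestamp
)

type value struct {
	Kind  kind
	Value interface{}
}

func toMapEntry(v value) (interface{}, error) {
	// The guard runs before the switch, so every kind maps nil to JSON
	// null instead of reaching a type assertion on a nil interface.
	if v.Value == nil {
		return nil, nil
	}
	switch v.Kind {
	case kindString:
		s, ok := v.Value.(string)
		if !ok {
			return nil, fmt.Errorf("expected string, got %T", v.Value)
		}
		return s, nil
	default:
		return fmt.Sprint(v.Value), nil
	}
}

func main() {
	fmt.Println(toMapEntry(value{Kind: kindTimestamp, Value: nil})) // <nil> <nil>: no panic, no error
	fmt.Println(toMapEntry(value{Kind: kindString, Value: "ok"}))   // ok <nil>
}

From 7e84b315f307c6a1ecc4b74bccd7faa4adc5239a Mon Sep 17 00:00:00 2001
From: Kevin K Biju <52661649+heavycrystal@users.noreply.github.com>
Date: Thu, 23 Nov 2023 21:51:30 +0530
Subject: [PATCH 7/8] removed Grafana and Prometheus and related logging code (#703)

Closes #694
---
README.md | 10 -
docker-compose-dev.yml | 46 +-
docker-compose.yml | 45 +-
flow/activities/flowable.go | 5 -
flow/activities/snapshot_activity.go | 1 -
flow/cmd/main.go | 18 -
flow/cmd/worker.go | 42 --
flow/go.mod | 11 -
flow/go.sum | 133 ----
flow/shared/constants.go | 1 -
stacks/grafana.Dockerfile | 5 -
stacks/grafana/dashboard.yml | 11 -
stacks/grafana/flow_monitoring_dashboard.json | 536 ----------------
.../grafana/flow_monitoring_dashboard_v0.json | 585 ------------------
stacks/grafana/prometheus_datasource.yml | 7 -
stacks/prometheus.Dockerfile | 4 -
stacks/prometheus/prometheus.yml | 13 -
17 files changed, 2 insertions(+), 1471 deletions(-)
delete mode 100644 stacks/grafana.Dockerfile
delete mode 100644 stacks/grafana/dashboard.yml
delete mode 100644 stacks/grafana/flow_monitoring_dashboard.json
delete mode 100644 stacks/grafana/flow_monitoring_dashboard_v0.json
delete mode 100644 stacks/grafana/prometheus_datasource.yml
delete mode 100644 stacks/prometheus.Dockerfile
delete mode 100644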
stacks/prometheus/prometheus.yml diff --git a/README.md b/README.md index 4958ad6e95..c2275a8908 100644 --- a/README.md +++ b/README.md @@ -66,16 +66,6 @@ You can use Postgres’ eco-system to manage your ETL — We support multiple target connectors to move data from Postgres and a couple of source connectors to move data into Postgres. Check the status of connectors [here](https://docs.peerdb.io/sql/commands/supported-connectors) -#### Metrics for MIRROR - -Both types of MIRRORs export some crucial metrics with regards to the health of the MIRROR. By default, our development Docker stack does not capture or visualize these metrics. They are available in a Docker Compose profile called `metrics`, which can be enabled by: - -```bash -# add --profile metrics like this in front of any docker compose command being used. -docker compose --profile metrics up --build -``` - -This sets up both a Prometheus instance on port 9090 that scrapes the metrics from the flow workers, and also a Grafana instance on port 3000 that reads and visualizes the metrics from mirrors in a preconfigured dashboard. To view the dashboard, access the Grafana instance on `localhost:3000` with the user `admin` and the password `peerdb`. ## License diff --git a/docker-compose-dev.yml b/docker-compose-dev.yml index 63cd979ed5..f6324df139 100644 --- a/docker-compose-dev.yml +++ b/docker-compose-dev.yml @@ -27,8 +27,6 @@ x-flow-worker-env: &flow-worker-env AWS_ENDPOINT: ${AWS_ENDPOINT:-} # enables worker profiling using Grafana Pyroscope ENABLE_PROFILING: "true" - # enables exporting of mirror metrics to Prometheus for visualization using Grafana - ENABLE_METRICS: "true" PYROSCOPE_SERVER_ADDRESS: http://pyroscope:4040 services: @@ -141,10 +139,6 @@ services: target: flow-worker environment: <<: [*catalog-config, *flow-worker-env] - METRICS_SERVER: 0.0.0.0:6061 - ports: - - 6060:6060 - - 6061:6061 depends_on: temporal-admin-tools: condition: service_healthy @@ -157,13 +151,8 @@ services: target: flow-worker environment: <<: [*catalog-config, *flow-worker-env] - METRICS_SERVER: 0.0.0.0:6063 - ports: - - 6062:6062 - - 6063:6063 profiles: - multi - - multi-metrics depends_on: temporal-admin-tools: condition: service_healthy @@ -176,13 +165,8 @@ services: target: flow-worker environment: <<: [*catalog-config, *flow-worker-env] - METRICS_SERVER: 0.0.0.0:6065 - ports: - - 6064:6064 - - 6065:6065 profiles: - multi - - multi-metrics depends_on: temporal-admin-tools: condition: service_healthy @@ -205,40 +189,13 @@ services: catalog: condition: service_healthy - peerdb-prometheus: - container_name: peerdb-prometheus - build: - context: . - dockerfile: stacks/prometheus.Dockerfile - volumes: - - prometheusdata:/prometheus - ports: - - 9090:9090 - profiles: - - multi-metrics - - metrics - - peerdb-grafana: - container_name: peerdb-grafana - build: - context: . - dockerfile: stacks/grafana.Dockerfile - ports: - - 3000:3000 - environment: - GF_SECURITY_ADMIN_USER: admin - GF_SECURITY_ADMIN_PASSWORD: peerdb - profiles: - - multi-metrics - - metrics - peerdb-ui: container_name: peerdb-ui build: context: . 
dockerfile: stacks/peerdb-ui.Dockerfile ports: - - 3001:3000 + - 3000:3000 environment: <<: *catalog-config DATABASE_URL: postgres://postgres:postgres@catalog:5432/postgres @@ -246,4 +203,3 @@ services: volumes: pgdata: - prometheusdata: diff --git a/docker-compose.yml b/docker-compose.yml index 2a087ded38..3cb0135d8d 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -21,8 +21,6 @@ x-flow-worker-env: &flow-worker-env AWS_REGION: ${AWS_REGION:-} # For GCS, set this as: https://storage.googleapis.com AWS_ENDPOINT: ${AWS_ENDPOINT:-} - # enables exporting of mirror metrics to Prometheus for visualization using Grafana - ENABLE_METRICS: "true" services: catalog: @@ -119,10 +117,6 @@ services: image: ghcr.io/peerdb-io/flow-worker:latest-dev environment: <<: [*catalog-config, *flow-worker-env] - METRICS_SERVER: 0.0.0.0:6061 - ports: - - 6060:6060 - - 6061:6061 depends_on: temporal-admin-tools: condition: service_healthy @@ -132,13 +126,8 @@ services: image: ghcr.io/peerdb-io/flow-worker:latest-dev environment: <<: [*catalog-config, *flow-worker-env] - METRICS_SERVER: 0.0.0.0:6063 - ports: - - 6062:6062 - - 6063:6063 profiles: - multi - - multi-metrics depends_on: temporal-admin-tools: condition: service_healthy @@ -148,13 +137,8 @@ services: image: ghcr.io/peerdb-io/flow-worker:latest-dev environment: <<: [*catalog-config, *flow-worker-env] - METRICS_SERVER: 0.0.0.0:6065 - ports: - - 6064:6064 - - 6065:6065 profiles: - multi - - multi-metrics depends_on: temporal-admin-tools: condition: service_healthy @@ -175,38 +159,11 @@ services: catalog: condition: service_healthy - peerdb-prometheus: - container_name: peerdb-prometheus - build: - context: . - dockerfile: stacks/prometheus.Dockerfile - volumes: - - prometheusdata:/prometheus - ports: - - 9090:9090 - profiles: - - multi-metrics - - metrics - - peerdb-grafana: - container_name: peerdb-grafana - build: - context: . 
- dockerfile: stacks/grafana.Dockerfile - ports: - - 3000:3000 - environment: - GF_SECURITY_ADMIN_USER: admin - GF_SECURITY_ADMIN_PASSWORD: peerdb - profiles: - - multi-metrics - - metrics - peerdb-ui: container_name: peerdb-ui image: ghcr.io/peerdb-io/peerdb-ui:latest-dev ports: - - 3001:3000 + - 3000:3000 environment: <<: *catalog-config DATABASE_URL: postgres://postgres:postgres@catalog:5432/postgres diff --git a/flow/activities/flowable.go b/flow/activities/flowable.go index 956a60de15..167f15e3b5 100644 --- a/flow/activities/flowable.go +++ b/flow/activities/flowable.go @@ -35,7 +35,6 @@ type SlotSnapshotSignal struct { } type FlowableActivity struct { - EnableMetrics bool CatalogMirrorMonitor *monitoring.CatalogMirrorMonitor } @@ -163,7 +162,6 @@ func (a *FlowableActivity) StartFlow(ctx context.Context, activity.RecordHeartbeat(ctx, "starting flow...") conn := input.FlowConnectionConfigs - ctx = context.WithValue(ctx, shared.EnableMetricsKey, a.EnableMetrics) ctx = context.WithValue(ctx, shared.CDCMirrorMonitorKey, a.CatalogMirrorMonitor) dstConn, err := connectors.GetCDCSyncConnector(ctx, conn.Destination) @@ -338,7 +336,6 @@ func (a *FlowableActivity) StartNormalize( ) (*model.NormalizeResponse, error) { conn := input.FlowConnectionConfigs - ctx = context.WithValue(ctx, shared.EnableMetricsKey, a.EnableMetrics) dstConn, err := connectors.GetCDCNormalizeConnector(ctx, conn.Destination) if errors.Is(err, connectors.ErrUnsupportedFunctionality) { dstConn, err := connectors.GetCDCSyncConnector(ctx, conn.Destination) @@ -505,7 +502,6 @@ func (a *FlowableActivity) replicateQRepPartition(ctx context.Context, return fmt.Errorf("failed to update start time for partition: %w", err) } - ctx = context.WithValue(ctx, shared.EnableMetricsKey, a.EnableMetrics) srcConn, err := connectors.GetQRepPullConnector(ctx, config.SourcePeer) if err != nil { return fmt.Errorf("failed to get qrep source connector: %w", err) @@ -606,7 +602,6 @@ func (a *FlowableActivity) replicateQRepPartition(ctx context.Context, func (a *FlowableActivity) ConsolidateQRepPartitions(ctx context.Context, config *protos.QRepConfig, runUUID string) error { - ctx = context.WithValue(ctx, shared.EnableMetricsKey, a.EnableMetrics) dstConn, err := connectors.GetQRepConsolidateConnector(ctx, config.DestinationPeer) if errors.Is(err, connectors.ErrUnsupportedFunctionality) { return a.CatalogMirrorMonitor.UpdateEndTimeForQRepRun(ctx, runUUID) diff --git a/flow/activities/snapshot_activity.go b/flow/activities/snapshot_activity.go index f50c235bac..a490674273 100644 --- a/flow/activities/snapshot_activity.go +++ b/flow/activities/snapshot_activity.go @@ -11,7 +11,6 @@ import ( ) type SnapshotActivity struct { - EnableMetrics bool SnapshotConnections map[string]*SlotSnapshotSignal } diff --git a/flow/cmd/main.go b/flow/cmd/main.go index 74d2fd872e..000baf5653 100644 --- a/flow/cmd/main.go +++ b/flow/cmd/main.go @@ -49,13 +49,6 @@ func main() { EnvVars: []string{"ENABLE_PROFILING"}, } - metricsFlag := &cli.BoolFlag{ - Name: "enable-metrics", - Value: false, // Default is off - Usage: "Enable metrics collection for the application", - EnvVars: []string{"ENABLE_METRICS"}, - } - pyroscopeServerFlag := &cli.StringFlag{ Name: "pyroscope-server-address", Value: "http://pyroscope:4040", @@ -63,13 +56,6 @@ func main() { EnvVars: []string{"PYROSCOPE_SERVER_ADDRESS"}, } - metricsServerFlag := &cli.StringFlag{ - Name: "metrics-server", - Value: "localhost:6061", // Default is localhost:6061 - Usage: "HTTP server address for metrics collection", - 
EnvVars: []string{"METRICS_SERVER"}, - } - temporalNamespaceFlag := &cli.StringFlag{ Name: "temporal-namespace", Value: "default", @@ -87,9 +73,7 @@ func main() { return WorkerMain(&WorkerOptions{ TemporalHostPort: temporalHostPort, EnableProfiling: ctx.Bool("enable-profiling"), - EnableMetrics: ctx.Bool("enable-metrics"), PyroscopeServer: ctx.String("pyroscope-server-address"), - MetricsServer: ctx.String("metrics-server"), TemporalNamespace: ctx.String("temporal-namespace"), TemporalCert: ctx.String("temporal-cert"), TemporalKey: ctx.String("temporal-key"), @@ -98,9 +82,7 @@ func main() { Flags: []cli.Flag{ temporalHostPortFlag, profilingFlag, - metricsFlag, pyroscopeServerFlag, - metricsServerFlag, temporalNamespaceFlag, &temporalCertFlag, &temporalKeyFlag, diff --git a/flow/cmd/worker.go b/flow/cmd/worker.go index 55ca6c0efd..0a0901da99 100644 --- a/flow/cmd/worker.go +++ b/flow/cmd/worker.go @@ -7,30 +7,23 @@ import ( "os/signal" "runtime" "syscall" - "time" "github.com/PeerDB-io/peer-flow/activities" utils "github.com/PeerDB-io/peer-flow/connectors/utils/catalog" "github.com/PeerDB-io/peer-flow/connectors/utils/monitoring" "github.com/PeerDB-io/peer-flow/shared" peerflow "github.com/PeerDB-io/peer-flow/workflows" - "github.com/uber-go/tally/v4" - "github.com/uber-go/tally/v4/prometheus" "github.com/grafana/pyroscope-go" - prom "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" "go.temporal.io/sdk/client" - sdktally "go.temporal.io/sdk/contrib/tally" "go.temporal.io/sdk/worker" ) type WorkerOptions struct { TemporalHostPort string EnableProfiling bool - EnableMetrics bool PyroscopeServer string - MetricsServer string TemporalNamespace string TemporalCert string TemporalKey string @@ -111,15 +104,6 @@ func WorkerMain(opts *WorkerOptions) error { clientOptions.ConnectionOptions = connOptions } - if opts.EnableMetrics { - clientOptions.MetricsHandler = sdktally.NewMetricsHandler(newPrometheusScope( - prometheus.Configuration{ - ListenAddress: opts.MetricsServer, - TimerType: "histogram", - }, - )) - } - conn, err := utils.GetCatalogConnectionPoolFromEnv() if err != nil { return fmt.Errorf("unable to create catalog connection pool: %w", err) @@ -143,7 +127,6 @@ func WorkerMain(opts *WorkerOptions) error { w.RegisterWorkflow(peerflow.QRepPartitionWorkflow) w.RegisterWorkflow(peerflow.DropFlowWorkflow) w.RegisterActivity(&activities.FlowableActivity{ - EnableMetrics: opts.EnableMetrics, CatalogMirrorMonitor: catalogMirrorMonitor, }) @@ -154,28 +137,3 @@ func WorkerMain(opts *WorkerOptions) error { return nil } - -func newPrometheusScope(c prometheus.Configuration) tally.Scope { - reporter, err := c.NewReporter( - prometheus.ConfigurationOptions{ - Registry: prom.NewRegistry(), - OnError: func(err error) { - log.Println("error in prometheus reporter", err) - }, - }, - ) - if err != nil { - log.Fatalln("error creating prometheus reporter", err) - } - scopeOpts := tally.ScopeOptions{ - CachedReporter: reporter, - Separator: prometheus.DefaultSeparator, - SanitizeOptions: &sdktally.PrometheusSanitizeOptions, - Prefix: "flow_worker", - } - scope, _ := tally.NewRootScope(scopeOpts, time.Second) - scope = sdktally.NewPrometheusNamingScope(scope) - - log.Println("prometheus metrics scope created") - return scope -} diff --git a/flow/go.mod b/flow/go.mod index c621337411..40d5ee7fe0 100644 --- a/flow/go.mod +++ b/flow/go.mod @@ -24,17 +24,14 @@ require ( github.com/linkedin/goavro/v2 v2.12.0 github.com/microsoft/go-mssqldb v1.6.0 github.com/orcaman/concurrent-map/v2 
v2.0.1 - github.com/prometheus/client_golang v1.17.0 github.com/sirupsen/logrus v1.9.3 github.com/snowflakedb/gosnowflake v1.6.25 github.com/stretchr/testify v1.8.4 github.com/twpayne/go-geos v0.14.0 - github.com/uber-go/tally/v4 v4.1.10 github.com/urfave/cli/v2 v2.25.7 github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a go.temporal.io/api v1.25.0 go.temporal.io/sdk v1.25.1 - go.temporal.io/sdk/contrib/tally v0.2.0 go.uber.org/atomic v1.11.0 go.uber.org/automaxprocs v1.5.3 golang.org/x/sync v0.5.0 @@ -72,8 +69,6 @@ require ( github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.2 // indirect github.com/aws/aws-sdk-go-v2/service/s3 v1.42.1 // indirect github.com/aws/smithy-go v1.16.0 // indirect - github.com/beorn7/perks v1.0.1 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/danieljoos/wincred v1.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect @@ -110,22 +105,16 @@ require ( github.com/klauspost/asmfmt v1.3.2 // indirect github.com/klauspost/cpuid/v2 v2.2.6 // indirect github.com/kylelemons/godebug v1.1.0 // indirect - github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 // indirect github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 // indirect github.com/mtibben/percent v0.2.1 // indirect github.com/pborman/uuid v1.2.1 // indirect github.com/pierrec/lz4/v4 v4.1.18 // indirect github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect - github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_model v0.5.0 // indirect - github.com/prometheus/common v0.45.0 // indirect - github.com/prometheus/procfs v0.12.0 // indirect github.com/robfig/cron v1.2.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/stretchr/objx v0.5.1 // indirect - github.com/twmb/murmur3 v1.1.8 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect github.com/zeebo/xxh3 v1.0.2 // indirect go.opencensus.io v0.24.0 // indirect diff --git a/flow/go.sum b/flow/go.sum index 1e3fe7ff88..77d0b13010 100644 --- a/flow/go.sum +++ b/flow/go.sum @@ -49,16 +49,10 @@ github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0/go.mod h1:wP83 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c h1:RGWPOewvKIROun94nF7v2cua9qP+thov/7M50KEoeSU= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/alecthomas/assert/v2 v2.3.0 h1:mAsH2wmvjsuvyBvAmCtm7zFsBlb8mIHx5ySLVdDZXL0= github.com/alecthomas/assert/v2 v2.3.0/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhktn7S0bBDLxvQ= github.com/alecthomas/repr v0.2.0 h1:HAzS41CIzNW5syS8Mf9UwXhNH1J9aix/BvDRf1Ml2Yk= github.com/alecthomas/repr v0.2.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units 
v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/andybalholm/brotli v1.0.6 h1:Yf9fFpf49Zrxb9NlQaluyE92/+X7UVHlhMNJN2sxfOI= github.com/andybalholm/brotli v1.0.6/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= @@ -107,23 +101,11 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.25.1/go.mod h1:VAiJiNaoP1L89STFlEMgm github.com/aws/smithy-go v1.16.0 h1:gJZEH/Fqh+RsvlJ1Zt4tVAtV6bKkp3cC+R6FCZMNzik= github.com/aws/smithy-go v1.16.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/cactus/go-statsd-client/statsd v0.0.0-20200423205355-cb0885a1018c/go.mod h1:l/bIBLeOl9eX+wxJAzxS4TveKRtAqlyDpHjhkfO0MEI= -github.com/cactus/go-statsd-client/v5 v5.0.0/go.mod h1:COEvJ1E+/E2L4q6QE5CkjWPi4eeDw9maJBMIuMPBZbY= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -139,8 +121,6 @@ github.com/dvsekhvalnov/jose2go v1.5.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod 
h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA= @@ -151,11 +131,7 @@ github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHqu github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0= github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= @@ -167,11 +143,8 @@ github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6 github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0= github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= github.com/gogo/status v1.1.1 h1:DuHXlSFHNKqTQ+/ACf5Vs6r4X/dH2EgIzR9Vr+H65kg= github.com/gogo/status v1.1.1/go.mod h1:jpG3dM5QPcqu19Hg8lkUhBFBa3TcLs1DG7+2Jqci7oU= github.com/golang-jwt/jwt/v5 v5.1.0 h1:UGKbA/IPjtS6zLcdB7i5TyACMgSbOTiR8qzXgw8HWQU= @@ -188,7 +161,6 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= @@ -197,7 +169,6 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= 
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= @@ -214,19 +185,15 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= @@ -237,7 +204,6 @@ github.com/grafana/pyroscope-go v1.0.4 h1:oyQX0BOkL+iARXzHuCdIF5TQ7/sRSel1YFViMH github.com/grafana/pyroscope-go v1.0.4/go.mod h1:0d7ftwSMBV/Awm7CCiYmHQEG8Y44Ma3YSjt+nWcWztY= github.com/grafana/pyroscope-go/godeltaprof v0.1.5 h1:gkFVqihFRL1Nro2FCC0u6mW47jclef96Zu8I/ykq+4E= github.com/grafana/pyroscope-go/godeltaprof v0.1.5/go.mod h1:1HSPtjU8vLG0jE9JrTdzjgFqdJ/VgN7fvxBNq3luJko= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= @@ -268,7 +234,6 @@ github.com/jackc/pgx/v5 v5.5.0/go.mod h1:Ig06C2Vu0t5qXC60W8sqIthScaEnFvojjj9dSlj github.com/jackc/puddle/v2 v2.0.0/go.mod h1:itE7ZJY8xnoo0JqJEpSMprN0f+NQkMCuEV/N9j8h0oc= github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk= github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= 
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= @@ -277,12 +242,6 @@ github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g= github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ= github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/asmfmt v1.3.2 h1:4Ri7ox3EwapiOjCki+hw14RyKk201CN4rzyCJRFLpK4= @@ -294,8 +253,6 @@ github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6K github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= @@ -314,23 +271,14 @@ github.com/linkedin/goavro/v2 v2.12.0 h1:rIQQSj8jdAUlKQh6DttK8wCRv4t4QO09g1C4aBW github.com/linkedin/goavro/v2 v2.12.0/go.mod h1:KXx+erlq+RPlGSPmLF7xGo6SAbh8sCQ53x064+ioxhk= github.com/mattn/go-sqlite3 v1.14.6 h1:dNPt6NO46WmLVt2DLNpwczCmdV5boIZ6g/tlDrlRUbg= github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= -github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= github.com/microsoft/go-mssqldb v1.6.0 h1:mM3gYdVwEPFrlg/Dvr2DNVEgYFG7L42l+dGc67NNNpc= github.com/microsoft/go-mssqldb v1.6.0/go.mod h1:00mDtPbeQCRGC1HwOOR5K/gr30P1NcEG0vx6Kbv2aJU= github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 h1:AMFGa4R4MiIpspGNG7Z948v4n35fFGB3RR3G/ry4FWs= github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 h1:+n/aFZefKZp7spd8DFdX7uMikMLXX4oubIzJF4kv/wI= github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= -github.com/modern-go/concurrent 
v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs= github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/orcaman/concurrent-map/v2 v2.0.1 h1:jOJ5Pg2w1oeB6PeDurIYf6k9PQ+aTITr/6lP/L/zp6c= @@ -341,37 +289,12 @@ github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= -github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= -github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= -github.com/prometheus/common v0.4.1/go.mod 
h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= -github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= -github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ= github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= @@ -380,17 +303,13 @@ github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjR github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/snowflakedb/gosnowflake v1.6.25 h1:o5zUmxTOo0Eo9AdkEj8blCeiMuILrQJ+rjUMAeZhcRE= github.com/snowflakedb/gosnowflake v1.6.25/go.mod h1:KfO4F7bk+aXPUIvBqYxvPhxLlu2/w4TtSC8Rw/yr5Mg= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.3.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.1 h1:4VhoImhV/Bm0ToFkXFi8hXNXwpDRZ/ynw3amt82mzq0= @@ -407,14 +326,8 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/twmb/murmur3 v1.1.5/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ= -github.com/twmb/murmur3 v1.1.8 h1:8Yt9taO/WN3l08xErzjeschgZU2QSrwm1kclYq+0aRg= -github.com/twmb/murmur3 v1.1.8/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ= github.com/twpayne/go-geos v0.14.0 
h1:yFPDj6EZq2rL74eOSn40tZA8zOnv6sRFFdtDWR7KpQ4= github.com/twpayne/go-geos v0.14.0/go.mod h1:K6llLdkCFVIrUgGFs5V/DRS1jpwAXq8xxm2uwtlFT40= -github.com/uber-go/tally/v4 v4.1.1/go.mod h1:aXeSTDMl4tNosyf6rdU8jlgScHyjEGGtfJ/uwCIf/vM= -github.com/uber-go/tally/v4 v4.1.10 h1:2GSX7Tmq26wjAvOtQEc5EvRROIkX2OX4vpROt6mlRLM= -github.com/uber-go/tally/v4 v4.1.10/go.mod h1:pPR56rjthjtLB8xQlEx2I1VwAwRGCh/i4xMUcmG+6z4= github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs= github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= @@ -431,30 +344,18 @@ github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.temporal.io/api v1.5.0/go.mod h1:BqKxEJJYdxb5dqf0ODfzfMxh8UEQ5L3zKS51FiIYYkA= go.temporal.io/api v1.25.0 h1:V6lIYuQlfmM1dc2vn6mIG5F2cY3EQ+xEjfTZ801Vpx8= go.temporal.io/api v1.25.0/go.mod h1:LTJM9iMOIuiE5hRtym4Ne6I4rKlDGioUiscdD9D6N2Y= -go.temporal.io/sdk v1.12.0/go.mod h1:lSp3lH1lI0TyOsus0arnO3FYvjVXBZGi/G7DjnAnm6o= go.temporal.io/sdk v1.25.1 h1:jC9l9vHHz5OJ7PR6OjrpYSN4+uEG0bLe5rdF9nlMSGk= go.temporal.io/sdk v1.25.1/go.mod h1:X7iFKZpsj90BfszfpFCzLX8lwEJXbnRrl351/HyEgmU= -go.temporal.io/sdk/contrib/tally v0.2.0 h1:XnTJIQcjOv+WuCJ1u8Ve2nq+s2H4i/fys34MnWDRrOo= -go.temporal.io/sdk/contrib/tally v0.2.0/go.mod h1:1kpSuCms/tHeJQDPuuKkaBsMqfHnIIRnCtUYlPNXxuE= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= -go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -470,8 +371,6 @@ golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTk golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod 
h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -480,62 +379,47 @@ golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210913180222-943fd674d43e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg= golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.14.0 h1:P0Vrf/2538nmC0H+pEQ3MNFRRnVR7RlqyVw+bvm26z0= golang.org/x/oauth2 v0.14.0/go.mod 
h1:lAtNWgaWfL4cm7j2OV8TxGi9Qb7ECORx8DktCY74OwM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -548,15 +432,12 @@ golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuX golang.org/x/term v0.14.0 h1:LGK9IlZ8T9jvdy6cTdfKUCltatMFOehAQo9SRC46UQ8= golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.4.0 h1:Z81tqI5ddIoXDPvVQ7/7CC9TnLM7ubaFG2qXYd5BbYY= golang.org/x/time v0.4.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -566,11 +447,9 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.15.0 h1:zdAyfUGbYmuVokhzVmghFl2ZJh5QhcfebBgmVPFYA+8= golang.org/x/tools v0.15.0/go.mod h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk= @@ -594,7 +473,6 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f h1:Vn+VyHU5guc9KjB5KrjI2q0wCOWEOIh0OEsleqakHJg= google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f/go.mod h1:nWSwAFPb+qfNJXsoeO3Io7zf4tMSfN8EA8RlDA04GhY= google.golang.org/genproto/googleapis/api 
v0.0.0-20231120223509-83a465c0220f h1:2yNACc1O40tTnrsbk9Cv6oxiW8pxI/pXj0wRtdlYmgY= @@ -609,8 +487,6 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -624,26 +500,17 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/validator.v2 v2.0.0-20200605151824-2b28d334fa05/go.mod h1:o4V0GXN9/CAmCsvJ0oXYZvrZOe7syiDZSN1GWGZTGzc= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/flow/shared/constants.go b/flow/shared/constants.go index afeb8f7fc5..9df826c32d 100644 --- a/flow/shared/constants.go +++ 
b/flow/shared/constants.go @@ -14,7 +14,6 @@ const ( ShutdownSignal PauseSignal - EnableMetricsKey ContextKey = "enableMetrics" CDCMirrorMonitorKey ContextKey = "cdcMirrorMonitor" ) diff --git a/stacks/grafana.Dockerfile b/stacks/grafana.Dockerfile deleted file mode 100644 index eca7c9464f..0000000000 --- a/stacks/grafana.Dockerfile +++ /dev/null @@ -1,5 +0,0 @@ -# syntax=docker/dockerfile:1.2 - -FROM grafana/grafana:latest -COPY stacks/grafana/flow_monitoring_dashboard.json stacks/grafana/dashboard.yml /etc/grafana/provisioning/dashboards/ -COPY stacks/grafana/prometheus_datasource.yml /etc/grafana/provisioning/datasources/ diff --git a/stacks/grafana/dashboard.yml b/stacks/grafana/dashboard.yml deleted file mode 100644 index 2e2dee6c1a..0000000000 --- a/stacks/grafana/dashboard.yml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: 1 - -providers: -- name: 'Prometheus' - orgId: 1 - folder: '' - type: file - disableDeletion: false - editable: true - options: - path: /etc/grafana/provisioning/dashboards \ No newline at end of file diff --git a/stacks/grafana/flow_monitoring_dashboard.json b/stacks/grafana/flow_monitoring_dashboard.json deleted file mode 100644 index 0c1477ff82..0000000000 --- a/stacks/grafana/flow_monitoring_dashboard.json +++ /dev/null @@ -1,536 +0,0 @@ -{ - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": { - "type": "grafana", - "uid": "-- Grafana --" - }, - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "fiscalYearStartMonth": 0, - "graphTooltip": 0, - "links": [], - "liveNow": false, - "panels": [ - { - "datasource": { - "type": "prometheus", - "uid": "peerdb_prometheus" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 10, - "w": 11, - "x": 0, - "y": 0 - }, - "id": 4, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "peerdb_prometheus" - }, - "editorMode": "code", - "expr": "flow_worker_${job_type}_${job_name}_records_synced_per_second", - "instant": false, - "legendFormat": "{{__name__}}", - "range": true, - "refId": "A" - } - ], - "title": "records synced / second", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "P8C2E8E0157474F52" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - 
"gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [ - { - "__systemRef": "hideSeriesFrom", - "matcher": { - "id": "byNames", - "options": { - "mode": "exclude", - "names": [ - "flow_worker_cdcflow_schemaa_records_throughput" - ], - "prefix": "All except:", - "readOnly": true - } - }, - "properties": [ - { - "id": "custom.hideFrom", - "value": { - "legend": false, - "tooltip": false, - "viz": true - } - } - ] - } - ] - }, - "gridPos": { - "h": 10, - "w": 13, - "x": 11, - "y": 0 - }, - "id": 2, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "peerdb_prometheus" - }, - "editorMode": "code", - "expr": "flow_worker_${job_type}_${job_name}_records_throughput", - "instant": false, - "legendFormat": "{{__name__}}", - "range": true, - "refId": "A" - } - ], - "title": "overall mirror throughput", - "transformations": [], - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "peerdb_prometheus" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 10, - "w": 11, - "x": 0, - "y": 10 - }, - "id": 3, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "peerdb_prometheus" - }, - "editorMode": "code", - "expr": "flow_worker_${job_type}_${job_name}_records_normalized_per_second", - "instant": false, - "legendFormat": "{{__name__}}", - "range": true, - "refId": "A" - } - ], - "title": "records normalized / second", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "peerdb_prometheus" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false 
- }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 10, - "w": 13, - "x": 11, - "y": 10 - }, - "id": 1, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "peerdb_prometheus" - }, - "editorMode": "code", - "expr": "flow_worker_${job_type}_${job_name}_insert_records_pulled", - "instant": false, - "legendFormat": "{{__name__}}", - "range": true, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "peerdb_prometheus" - }, - "editorMode": "code", - "expr": "flow_worker_${job_type}_${job_name}_update_records_pulled", - "hide": false, - "instant": false, - "legendFormat": "{{__name__}}", - "range": true, - "refId": "B" - }, - { - "datasource": { - "type": "prometheus", - "uid": "peerdb_prometheus" - }, - "editorMode": "code", - "expr": "flow_worker_${job_type}_${job_name}_delete_records_pulled", - "hide": false, - "instant": false, - "legendFormat": "{{__name__}}", - "range": true, - "refId": "C" - }, - { - "datasource": { - "type": "prometheus", - "uid": "peerdb_prometheus" - }, - "editorMode": "code", - "expr": "flow_worker_${job_type}_${job_name}_total_records_pulled", - "hide": false, - "instant": false, - "legendFormat": "{{__name__}}", - "range": true, - "refId": "D" - } - ], - "title": "records pulled / second with types", - "type": "timeseries" - } - ], - "refresh": "10s", - "schemaVersion": 38, - "style": "dark", - "tags": [], - "templating": { - "list": [ - { - "current": { - "selected": false, - "text": "schemaa", - "value": "schemaa" - }, - "datasource": { - "type": "prometheus", - "uid": "peerdb_prometheus" - }, - "definition": "metrics(flow_worker_${job_type}_.*_total_records_pulled)", - "hide": 0, - "includeAll": false, - "multi": false, - "name": "job_name", - "options": [], - "query": { - "query": "metrics(flow_worker_${job_type}_.*_total_records_pulled)", - "refId": "PrometheusVariableQueryEditor-VariableQuery" - }, - "refresh": 1, - "regex": "flow_worker_${job_type}_(?.*)_total_records_pulled", - "skipUrlSync": false, - "sort": 0, - "type": "query" - }, - { - "current": { - "selected": false, - "text": "cdcflow", - "value": "cdcflow" - }, - "datasource": { - "type": "prometheus", - "uid": "peerdb_prometheus" - }, - "definition": "metrics(flow_worker_.*_total_records_pulled)", - "hide": 0, - "includeAll": false, - "multi": false, - "name": "job_type", - "options": [], - "query": { - "query": "metrics(flow_worker_.*_total_records_pulled)", - "refId": "PrometheusVariableQueryEditor-VariableQuery" - }, - "refresh": 1, - "regex": "flow_worker_(?.*flow)_.*", - "skipUrlSync": false, - "sort": 0, - "type": "query" - } - ] - }, - "time": { - "from": "now-6h", - "to": "now" - }, - "timepicker": {}, - "timezone": "", - "title": "PeerDB mirror monitoring dashboard", - "uid": "cac849d7-5353-4bd2-8f4f-925ad428cf1d", - "version": 1, - "weekStart": "" - } \ No newline at end of file diff --git 
a/stacks/grafana/flow_monitoring_dashboard_v0.json b/stacks/grafana/flow_monitoring_dashboard_v0.json deleted file mode 100644 index 722f757fcf..0000000000 --- a/stacks/grafana/flow_monitoring_dashboard_v0.json +++ /dev/null @@ -1,585 +0,0 @@ -{ - "__inputs": [ - { - "name": "peerdb_prometheus", - "description": "", - "type": "datasource", - "pluginId": "prometheus", - "pluginName": "Prometheus" - } - ], - "__elements": {}, - "__requires": [ - { - "type": "grafana", - "id": "grafana", - "name": "Grafana", - "version": "10.0.2" - }, - { - "type": "datasource", - "id": "prometheus", - "name": "Prometheus", - "version": "1.0.0" - }, - { - "type": "panel", - "id": "timeseries", - "name": "Time series", - "version": "" - } - ], - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": { - "type": "grafana", - "uid": "-- Grafana --" - }, - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "fiscalYearStartMonth": 0, - "graphTooltip": 0, - "id": null, - "links": [], - "liveNow": false, - "panels": [ - { - "datasource": { - "type": "prometheus", - "uid": "peerdb_prometheus" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 10, - "w": 11, - "x": 0, - "y": 0 - }, - "id": 4, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "peerdb_prometheus" - }, - "editorMode": "code", - "expr": "flow_worker_${job_type}_${job_name}_records_synced_per_second", - "instant": false, - "legendFormat": "{{__name__}}", - "range": true, - "refId": "A" - } - ], - "title": "records synced / second", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "peerdb_prometheus" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [ - { - "__systemRef": 
"hideSeriesFrom", - "matcher": { - "id": "byNames", - "options": { - "mode": "exclude", - "names": [ - "difference_in_record_counts" - ], - "prefix": "All except:", - "readOnly": true - } - }, - "properties": [ - { - "id": "custom.hideFrom", - "value": { - "legend": false, - "tooltip": false, - "viz": true - } - } - ] - } - ] - }, - "gridPos": { - "h": 10, - "w": 13, - "x": 11, - "y": 0 - }, - "id": 2, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "peerdb_prometheus" - }, - "editorMode": "code", - "expr": "flow_worker_${job_type}_${job_name}_records_at_source", - "instant": false, - "legendFormat": "{{__name__}}", - "range": true, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "peerdb_prometheus" - }, - "editorMode": "code", - "expr": "flow_worker_${job_type}_${job_name}_records_at_target", - "hide": false, - "instant": false, - "legendFormat": "{{__name__}}", - "range": true, - "refId": "B" - } - ], - "title": "difference in records between source and target", - "transformations": [ - { - "id": "calculateField", - "options": { - "alias": "difference_in_record_counts", - "binary": { - "left": "flow_worker_${job_type}_${job_name}_records_at_source", - "operator": "-", - "reducer": "sum", - "right": "flow_worker_${job_type}_${job_name}_records_at_target" - }, - "mode": "binary", - "reduce": { - "reducer": "sum" - } - } - } - ], - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "peerdb_prometheus" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 10, - "w": 11, - "x": 0, - "y": 10 - }, - "id": 3, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "peerdb_prometheus" - }, - "editorMode": "code", - "expr": "flow_worker_${job_type}_${job_name}_records_normalized_per_second", - "instant": false, - "legendFormat": "{{__name__}}", - "range": true, - "refId": "A" - } - ], - "title": "records normalized / second", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "peerdb_prometheus" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - 
"viz": false - }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 10, - "w": 13, - "x": 11, - "y": 10 - }, - "id": 1, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "peerdb_prometheus" - }, - "editorMode": "code", - "expr": "flow_worker_${job_type}_${job_name}_insert_records_pulled", - "instant": false, - "legendFormat": "{{__name__}}", - "range": true, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "peerdb_prometheus" - }, - "editorMode": "code", - "expr": "flow_worker_${job_type}_${job_name}_update_records_pulled", - "hide": false, - "instant": false, - "legendFormat": "{{__name__}}", - "range": true, - "refId": "B" - }, - { - "datasource": { - "type": "prometheus", - "uid": "peerdb_prometheus" - }, - "editorMode": "code", - "expr": "flow_worker_${job_type}_${job_name}_delete_records_pulled", - "hide": false, - "instant": false, - "legendFormat": "{{__name__}}", - "range": true, - "refId": "C" - }, - { - "datasource": { - "type": "prometheus", - "uid": "peerdb_prometheus" - }, - "editorMode": "code", - "expr": "flow_worker_${job_type}_${job_name}_total_records_pulled", - "hide": false, - "instant": false, - "legendFormat": "{{__name__}}", - "range": true, - "refId": "D" - } - ], - "title": "records pulled / second with types", - "type": "timeseries" - } - ], - "refresh": "10s", - "schemaVersion": 38, - "style": "dark", - "tags": [], - "templating": { - "list": [ - { - "current": {}, - "datasource": { - "type": "prometheus", - "uid": "peerdb_prometheus" - }, - "definition": "metrics(flow_worker_${job_type}_.*_total_records_pulled)", - "hide": 0, - "includeAll": false, - "multi": false, - "name": "job_name", - "options": [], - "query": { - "query": "metrics(flow_worker_${job_type}_.*_total_records_pulled)", - "refId": "PrometheusVariableQueryEditor-VariableQuery" - }, - "refresh": 1, - "regex": "flow_worker_${job_type}_(?.*)_total_records_pulled", - "skipUrlSync": false, - "sort": 0, - "type": "query" - }, - { - "current": {}, - "datasource": { - "type": "prometheus", - "uid": "peerdb_prometheus" - }, - "definition": "metrics(flow_worker_.*_total_records_pulled)", - "hide": 0, - "includeAll": false, - "multi": false, - "name": "job_type", - "options": [], - "query": { - "query": "metrics(flow_worker_.*_total_records_pulled)", - "refId": "PrometheusVariableQueryEditor-VariableQuery" - }, - "refresh": 1, - "regex": "flow_worker_(?.*flow)_.*", - "skipUrlSync": false, - "sort": 0, - "type": "query" - } - ] - }, - "time": { - "from": "now-6h", - "to": "now" - }, - "timepicker": {}, - "timezone": "", - "title": "PeerDB mirror monitoring dashboard", - "uid": "cac849d7-5353-4bd2-8f4f-925ad428cf1d", - "version": 11, - "weekStart": "" -} \ No newline at end of file diff --git a/stacks/grafana/prometheus_datasource.yml b/stacks/grafana/prometheus_datasource.yml deleted file mode 100644 index ad40b1ef58..0000000000 --- 
a/stacks/grafana/prometheus_datasource.yml +++ /dev/null @@ -1,7 +0,0 @@ -datasources: - - name: peerdb_prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://host.docker.internal:9090 - uid: peerdb_prometheus \ No newline at end of file diff --git a/stacks/prometheus.Dockerfile b/stacks/prometheus.Dockerfile deleted file mode 100644 index ea2d8f458e..0000000000 --- a/stacks/prometheus.Dockerfile +++ /dev/null @@ -1,4 +0,0 @@ -# syntax=docker/dockerfile:1.2 - -FROM prom/prometheus:latest -COPY stacks/prometheus/prometheus.yml /etc/prometheus \ No newline at end of file diff --git a/stacks/prometheus/prometheus.yml b/stacks/prometheus/prometheus.yml deleted file mode 100644 index 343456da15..0000000000 --- a/stacks/prometheus/prometheus.yml +++ /dev/null @@ -1,13 +0,0 @@ -global: - scrape_interval: 15s - -scrape_configs: -- job_name: peerdb_flow_workers - static_configs: - - targets: ['host.docker.internal:6061', 'host.docker.internal:6063', 'host.docker.internal:6065'] - metric_relabel_configs: - - regex: "instance" - action: labeldrop - - regex: "job" - action: labeldrop - From 4b1447b213c2ca9168cebf7242fa852aac18d107 Mon Sep 17 00:00:00 2001 From: Amogh Bharadwaj Date: Thu, 23 Nov 2023 21:52:37 +0530 Subject: [PATCH 8/8] Hierarchical UI For CDC Table Picker And Refactoring (#700) - Redesigns the table picking section of Create CDC Mirror to have a hierarchical view of Schema -> Table -> Columns - Wires up column exclusion, tested from PG -> SF - Refactors code and organises files into CDC and QRep folders --- flow/cmd/peer_data.go | 42 ++- ui/app/dto/MirrorsDTO.ts | 1 + ui/app/mirrors/create/{ => cdc}/cdc.tsx | 14 +- ui/app/mirrors/create/cdc/schemabox.tsx | 325 +++++++++++++++++++ ui/app/mirrors/create/cdc/styles.ts | 36 +++ ui/app/mirrors/create/cdc/tablemapping.tsx | 86 +++++ ui/app/mirrors/create/columns.tsx | 132 -------- ui/app/mirrors/create/handlers.ts | 104 +++++- ui/app/mirrors/create/mirrorcards.tsx | 90 ++++++ ui/app/mirrors/create/page.tsx | 305 ++++-------------- ui/app/mirrors/create/{ => qrep}/qrep.tsx | 10 +- ui/app/mirrors/create/{ => qrep}/query.tsx | 0 ui/app/mirrors/create/schema.ts | 4 +- ui/app/mirrors/create/tablemapping.tsx | 353 --------------------- ui/app/mirrors/types.ts | 1 - 15 files changed, 728 insertions(+), 775 deletions(-) rename ui/app/mirrors/create/{ => cdc}/cdc.tsx (94%) create mode 100644 ui/app/mirrors/create/cdc/schemabox.tsx create mode 100644 ui/app/mirrors/create/cdc/styles.ts create mode 100644 ui/app/mirrors/create/cdc/tablemapping.tsx delete mode 100644 ui/app/mirrors/create/columns.tsx create mode 100644 ui/app/mirrors/create/mirrorcards.tsx rename ui/app/mirrors/create/{ => qrep}/qrep.tsx (97%) rename ui/app/mirrors/create/{ => qrep}/query.tsx (100%) delete mode 100644 ui/app/mirrors/create/tablemapping.tsx diff --git a/flow/cmd/peer_data.go b/flow/cmd/peer_data.go index 1896f6143c..14c8ba43cc 100644 --- a/flow/cmd/peer_data.go +++ b/flow/cmd/peer_data.go @@ -135,9 +135,40 @@ func (h *FlowRequestHandler) GetColumns( } defer peerPool.Close() - rows, err := peerPool.Query(ctx, "SELECT column_name, data_type"+ " FROM information_schema.columns"+ " WHERE table_schema = $1 AND table_name = $2;", req.SchemaName, req.TableName) + rows, err := peerPool.Query(ctx, ` + SELECT + cols.column_name, + cols.data_type, + CASE + WHEN constraint_type = 'PRIMARY KEY' THEN true + ELSE false + END AS is_primary_key + FROM + information_schema.columns cols + LEFT JOIN + ( +
SELECT + kcu.column_name, + tc.constraint_type + FROM + information_schema.key_column_usage kcu + JOIN + information_schema.table_constraints tc + ON + kcu.constraint_name = tc.constraint_name + AND kcu.constraint_schema = tc.constraint_schema + AND kcu.constraint_name = tc.constraint_name + WHERE + tc.constraint_type = 'PRIMARY KEY' + AND kcu.table_schema = $1 + AND kcu.table_name = $2 + ) AS pk + ON + cols.column_name = pk.column_name + WHERE + cols.table_schema = $3 + AND cols.table_name = $4; + `, req.SchemaName, req.TableName, req.SchemaName, req.TableName) if err != nil { return &protos.TableColumnsResponse{Columns: nil}, err } @@ -147,11 +178,12 @@ func (h *FlowRequestHandler) GetColumns( for rows.Next() { var columnName string var datatype string - err := rows.Scan(&columnName, &datatype) + var isPkey bool + err := rows.Scan(&columnName, &datatype, &isPkey) if err != nil { return &protos.TableColumnsResponse{Columns: nil}, err } - column := fmt.Sprintf("%s:%s", columnName, datatype) + column := fmt.Sprintf("%s:%s:%v", columnName, datatype, isPkey) columns = append(columns, column) } return &protos.TableColumnsResponse{Columns: columns}, nil diff --git a/ui/app/dto/MirrorsDTO.ts b/ui/app/dto/MirrorsDTO.ts index 33fa094c59..f63a93a15f 100644 --- a/ui/app/dto/MirrorsDTO.ts +++ b/ui/app/dto/MirrorsDTO.ts @@ -14,6 +14,7 @@ export type CDCConfig = FlowConnectionConfigs; export type MirrorConfig = CDCConfig | QRepConfig; export type MirrorSetter = Dispatch>; export type TableMapRow = { + schema: string; source: string; destination: string; partitionKey: string; diff --git a/ui/app/mirrors/create/cdc.tsx b/ui/app/mirrors/create/cdc/cdc.tsx similarity index 94% rename from ui/app/mirrors/create/cdc.tsx rename to ui/app/mirrors/create/cdc/cdc.tsx index ce7b6b931a..58ae9afd5a 100644 --- a/ui/app/mirrors/create/cdc.tsx +++ b/ui/app/mirrors/create/cdc/cdc.tsx @@ -8,9 +8,9 @@ import { Switch } from '@/lib/Switch'; import { TextField } from '@/lib/TextField'; import { Dispatch, SetStateAction } from 'react'; import ReactSelect from 'react-select'; -import { InfoPopover } from '../../../components/InfoPopover'; -import { CDCConfig, MirrorSetter, TableMapRow } from '../../dto/MirrorsDTO'; -import { MirrorSetting } from './helpers/common'; +import { InfoPopover } from '../../../../components/InfoPopover'; +import { CDCConfig, MirrorSetter, TableMapRow } from '../../../dto/MirrorsDTO'; +import { MirrorSetting } from '../helpers/common'; import TableMapping from './tablemapping'; interface MirrorConfigProps { @@ -19,8 +19,6 @@ interface MirrorConfigProps { setter: MirrorSetter; rows: TableMapRow[]; setRows: Dispatch>; - schema: string; - setSchema: Dispatch>; } const SyncModeOptions = ['AVRO', 'Copy with Binary'].map((value) => ({ @@ -46,8 +44,6 @@ export default function CDCConfigForm({ setter, rows, setRows, - schema, - setSchema, }: MirrorConfigProps) { const setToDefault = (setting: MirrorSetting) => { const destinationPeerType = mirrorConfig.destination?.type; @@ -81,15 +77,13 @@ export default function CDCConfigForm({ return true; }; - if (mirrorConfig.source != undefined) + if (mirrorConfig.source != undefined && mirrorConfig.destination != undefined) return ( <> {settings.map((setting, id) => { diff --git a/ui/app/mirrors/create/cdc/schemabox.tsx b/ui/app/mirrors/create/cdc/schemabox.tsx new file mode 100644 index 0000000000..5a40092ca2 --- /dev/null +++ b/ui/app/mirrors/create/cdc/schemabox.tsx @@ -0,0 +1,325 @@ +'use client'; +import { TableMapRow } from '@/app/dto/MirrorsDTO'; +import { 
DBType } from '@/grpc_generated/peers'; +import { Checkbox } from '@/lib/Checkbox'; +import { Icon } from '@/lib/Icon'; +import { Label } from '@/lib/Label'; +import { RowWithCheckbox } from '@/lib/Layout'; +import { SearchField } from '@/lib/SearchField'; +import { TextField } from '@/lib/TextField'; +import { Dispatch, SetStateAction, useCallback, useState } from 'react'; +import { BarLoader } from 'react-spinners/'; +import { fetchColumns, fetchTables } from '../handlers'; +import { expandableStyle, schemaBoxStyle, tableBoxStyle } from './styles'; + +interface SchemaBoxProps { + sourcePeer: string; + schema: string; + rows: TableMapRow[]; + setRows: Dispatch>; + tableColumns: { tableName: string; columns: string[] }[]; + setTableColumns: Dispatch< + SetStateAction<{ tableName: string; columns: string[] }[]> + >; + peerType?: DBType; +} +const SchemaBox = ({ + sourcePeer, + peerType, + schema, + rows, + setRows, + tableColumns, + setTableColumns, +}: SchemaBoxProps) => { + const [tablesLoading, setTablesLoading] = useState(false); + const [columnsLoading, setColumnsLoading] = useState(false); + const [expandedSchemas, setExpandedSchemas] = useState([]); + const [tableQuery, setTableQuery] = useState(''); + + const schemaIsExpanded = useCallback( + (schema: string) => { + return !!expandedSchemas.find((schemaName) => schemaName === schema); + }, + [expandedSchemas] + ); + + const handleAddRow = (source: string) => { + const newRows = [...rows]; + const index = newRows.findIndex((row) => row.source === source); + if (index >= 0) newRows[index] = { ...newRows[index], selected: true }; + setRows(newRows); + addTableColumns(source); + }; + + const handleRemoveRow = (source: string) => { + const newRows = [...rows]; + const index = newRows.findIndex((row) => row.source === source); + if (index >= 0) newRows[index] = { ...newRows[index], selected: false }; + setRows(newRows); + removeTableColumns(source); + }; + + const handleTableSelect = (on: boolean, source: string) => { + on ? 
handleAddRow(source) : handleRemoveRow(source); + }; + + const updateDestination = (source: string, dest: string) => { + const newRows = [...rows]; + const index = newRows.findIndex((row) => row.source === source); + newRows[index] = { ...newRows[index], destination: dest }; + setRows(newRows); + }; + + const addTableColumns = (table: string) => { + const schemaName = table.split('.')[0]; + const tableName = table.split('.')[1]; + fetchColumns(sourcePeer, schemaName, tableName, setColumnsLoading).then( + (res) => + setTableColumns((prev) => { + return [...prev, { tableName: table, columns: res }]; + }) + ); + }; + + const removeTableColumns = (table: string) => { + setTableColumns((prev) => { + return prev.filter((column) => column.tableName !== table); + }); + }; + + const getTableColumns = (tableName: string) => { + return tableColumns?.find((column) => column.tableName === tableName) + ?.columns; + }; + + const handleColumnExclusion = ( + source: string, + column: string, + include: boolean + ) => { + const currRows = [...rows]; + const rowOfSource = currRows.find((row) => row.source === source); + if (rowOfSource) { + if (include) { + const updatedExclude = rowOfSource.exclude.filter( + (col) => col !== column + ); + rowOfSource.exclude = updatedExclude; + } else { + rowOfSource.exclude.push(column); + } + } + setRows(currRows); + }; + + const handleSelectAll = ( + e: React.MouseEvent + ) => { + const newRows = [...rows]; + for (const row of newRows) { + row.selected = e.currentTarget.checked; + if (e.currentTarget.checked) addTableColumns(row.source); + else removeTableColumns(row.source); + } + setRows(newRows); + }; + + const handleSchemaClick = (schemaName: string) => { + if (!schemaIsExpanded(schemaName)) { + setTablesLoading(true); + setExpandedSchemas((curr) => [...curr, schemaName]); + fetchTables(sourcePeer, schemaName, peerType).then((tableRows) => { + const newRows = [...rows, ...tableRows]; + setRows(newRows); + setTablesLoading(false); + }); + } else { + setExpandedSchemas((curr) => + curr.filter((expandedSchema) => expandedSchema != schemaName) + ); + } + }; + + return ( +
+
+
+
handleSchemaClick(schema)} + > + +

{schema}

+
+
+
+ handleSelectAll(e)} /> + +
+ ) => + setTableQuery(e.target.value) + } + /> +
+
+ {schemaIsExpanded(schema) && ( +
+ {rows.filter((row) => row.schema === schema).length ? ( + rows + .filter( + (row) => + row.schema === schema && + row.source.toLowerCase().includes(tableQuery.toLowerCase()) + ) + .map((row, index) => { + const columns = getTableColumns(row.source); + return ( +
+
+ + {row.source} + + } + action={ + + handleTableSelect(state, row.source) + } + /> + } + /> + +
+

Target Table:

+ + ) => updateDestination(row.source, e.target.value)} + /> +
+
+ {row.selected && ( +
+ + {columns ? ( + columns.map((column, index) => { + const columnName = column.split(':')[0]; + const columnType = column.split(':')[1]; + const isPkey = column.split(':')[2] === 'true'; + return ( + + {columnName}{' '} +

+ {columnType} +

+ + } + action={ + col == columnName + ) + } + onCheckedChange={(state: boolean) => + handleColumnExclusion( + row.source, + columnName, + state + ) + } + /> + } + /> + ); + }) + ) : columnsLoading ? ( + + ) : ( + + )} +
+ )} +
+ ); + }) + ) : tablesLoading ? ( + + ) : ( + + )} +
+ )} +
+
+ ); +}; + +export default SchemaBox; diff --git a/ui/app/mirrors/create/cdc/styles.ts b/ui/app/mirrors/create/cdc/styles.ts new file mode 100644 index 0000000000..b60ee4035e --- /dev/null +++ b/ui/app/mirrors/create/cdc/styles.ts @@ -0,0 +1,36 @@ +import { CSSProperties } from 'styled-components'; + +export const expandableStyle = { + fontSize: 14, + display: 'flex', + alignItems: 'center', + justifyContent: 'space-between', + color: 'rgba(0,0,0,0.7)', + cursor: 'pointer', +}; + +export const schemaBoxStyle: CSSProperties = { + width: '100%', + marginTop: '0.5rem', + padding: '0.5rem', + display: 'flex', + flexDirection: 'column', + border: '1px solid #e9ecf2', + borderRadius: '0.8rem', +}; + +export const tableBoxStyle: CSSProperties = { + border: '1px solid #e9ecf2', + borderRadius: '0.5rem', + marginBottom: '0.5rem', + width: '90%', + padding: '0.5rem', +}; + +export const loaderContainer: CSSProperties = { + display: 'flex', + flexDirection: 'column', + alignItems: 'center', + justifyContent: 'center', + height: '100%', +}; diff --git a/ui/app/mirrors/create/cdc/tablemapping.tsx b/ui/app/mirrors/create/cdc/tablemapping.tsx new file mode 100644 index 0000000000..ee4d86d588 --- /dev/null +++ b/ui/app/mirrors/create/cdc/tablemapping.tsx @@ -0,0 +1,86 @@ +'use client'; +import { DBType } from '@/grpc_generated/peers'; +import { Label } from '@/lib/Label'; +import { SearchField } from '@/lib/SearchField'; +import { Dispatch, SetStateAction, useEffect, useState } from 'react'; +import { BarLoader } from 'react-spinners/'; +import { TableMapRow } from '../../../dto/MirrorsDTO'; +import { fetchSchemas } from '../handlers'; +import SchemaBox from './schemabox'; +import { loaderContainer } from './styles'; + +interface TableMappingProps { + sourcePeerName: string; + rows: TableMapRow[]; + setRows: Dispatch>; + peerType?: DBType; +} + +const TableMapping = ({ + sourcePeerName, + rows, + setRows, + peerType, +}: TableMappingProps) => { + const [allSchemas, setAllSchemas] = useState(); + const [schemaQuery, setSchemaQuery] = useState(''); + const [tableColumns, setTableColumns] = useState< + { tableName: string; columns: string[] }[] + >([]); + useEffect(() => { + fetchSchemas(sourcePeerName).then((res) => setAllSchemas(res)); + }, [sourcePeerName]); + + return ( +
+ +
+
+ ) => + setSchemaQuery(e.target.value) + } + /> +
+
+
+ {allSchemas ? ( + allSchemas + ?.filter((schema) => { + return schema.toLowerCase().includes(schemaQuery.toLowerCase()); + }) + .map((schema, index) => ( + + )) + ) : ( +
+ +
+ )} +
+
+ ); +}; + +export default TableMapping; diff --git a/ui/app/mirrors/create/columns.tsx b/ui/app/mirrors/create/columns.tsx deleted file mode 100644 index 20dca1a5ec..0000000000 --- a/ui/app/mirrors/create/columns.tsx +++ /dev/null @@ -1,132 +0,0 @@ -'use client'; -import { Button } from '@/lib/Button'; -import { Dispatch, SetStateAction, useState } from 'react'; -import { PulseLoader } from 'react-spinners'; -import { fetchColumns } from './handlers'; - -interface ColumnsDisplayProps { - setColumns: Dispatch< - SetStateAction< - { - tableName: string; - columns: string[]; - }[] - > - >; - columns?: { - tableName: string; - columns: string[]; - }[]; - peerName: string; - schemaName: string; - tableName: string; -} - -const ColumnsDisplay = (props: ColumnsDisplayProps) => { - const [loading, setLoading] = useState(false); - const addTableColumns = (table: string) => { - // add table to columns - fetchColumns( - props.peerName, - props.schemaName, - props.tableName, - setLoading - ).then((res) => - props.setColumns((prev) => { - return [...prev, { tableName: table, columns: res }]; - }) - ); - }; - - const removeTableColumns = (table: string) => { - // remove table from columns - props.setColumns((prev) => { - return prev.filter((column) => column.tableName !== table); - }); - }; - - const getTableColumns = (tableName: string) => { - // get table columns - return props.columns?.find((column) => column.tableName === tableName) - ?.columns; - }; - return ( -
- - -
- {getTableColumns(props.tableName)?.map((column, id) => { - const columnName = column.split(':')[0]; - const columnType = column.split(':')[1]; - return ( -
-
- {columnName} -
-
- {columnType} -
-
- ); - })} -
-
- ); -}; - -export default ColumnsDisplay; diff --git a/ui/app/mirrors/create/handlers.ts b/ui/app/mirrors/create/handlers.ts index 8e43035e25..10ccc4e0cc 100644 --- a/ui/app/mirrors/create/handlers.ts +++ b/ui/app/mirrors/create/handlers.ts @@ -4,7 +4,13 @@ import { USchemasResponse, UTablesResponse, } from '@/app/dto/PeersDTO'; -import { QRepConfig, QRepWriteType } from '@/grpc_generated/flow'; +import { + FlowConnectionConfigs, + QRepConfig, + QRepSyncMode, + QRepWriteType, +} from '@/grpc_generated/flow'; +import { DBType, Peer, dBTypeToJSON } from '@/grpc_generated/peers'; import { Dispatch, SetStateAction } from 'react'; import { CDCConfig, TableMapRow } from '../../dto/MirrorsDTO'; import { @@ -14,8 +20,59 @@ import { tableMappingSchema, } from './schema'; +export const handlePeer = ( + peer: Peer | null, + peerEnd: 'src' | 'dst', + setConfig: (value: SetStateAction) => void +) => { + if (!peer) return; + if (peerEnd === 'dst') { + if (peer.type === DBType.POSTGRES) { + setConfig((curr) => { + return { + ...curr, + cdcSyncMode: QRepSyncMode.QREP_SYNC_MODE_MULTI_INSERT, + snapshotSyncMode: QRepSyncMode.QREP_SYNC_MODE_MULTI_INSERT, + syncMode: QRepSyncMode.QREP_SYNC_MODE_MULTI_INSERT, + }; + }); + } else if ( + peer.type === DBType.SNOWFLAKE || + peer.type === DBType.BIGQUERY + ) { + setConfig((curr) => { + return { + ...curr, + cdcSyncMode: QRepSyncMode.QREP_SYNC_MODE_STORAGE_AVRO, + snapshotSyncMode: QRepSyncMode.QREP_SYNC_MODE_STORAGE_AVRO, + syncMode: QRepSyncMode.QREP_SYNC_MODE_STORAGE_AVRO, + }; + }); + } + setConfig((curr) => ({ + ...curr, + destination: peer, + destinationPeer: peer, + })); + } else { + setConfig((curr) => ({ + ...curr, + source: peer, + sourcePeer: peer, + })); + } +}; + const validateCDCFields = ( - tableMapping: TableMapRow[], + tableMapping: ( + | { + sourceTableIdentifier: string; + destinationTableIdentifier: string; + partitionKey: string; + exclude: string[]; + } + | undefined + )[], setMsg: Dispatch>, config: CDCConfig ): boolean => { @@ -66,7 +123,7 @@ interface TableMapping { const reformattedTableMapping = (tableMapping: TableMapRow[]) => { const mapping = tableMapping .map((row) => { - if (row.selected === true) { + if (row?.selected === true) { return { sourceTableIdentifier: row.source, destinationTableIdentifier: row.destination, @@ -98,10 +155,10 @@ export const handleCreateCDC = async ( setMsg({ ok: false, msg: flowNameErr }); return; } - - const isValid = validateCDCFields(rows, setMsg, config); - if (!isValid) return; const tableNameMapping = reformattedTableMapping(rows); + const isValid = validateCDCFields(tableNameMapping, setMsg, config); + if (!isValid) return; + config['tableMappings'] = tableNameMapping as TableMapping[]; config['flowJobName'] = flowJobName; setLoading(true); @@ -183,28 +240,22 @@ export const handleCreateQRep = async ( setLoading(false); }; -export const fetchSchemas = async ( - peerName: string, - setLoading: Dispatch> -) => { - setLoading(true); +export const fetchSchemas = async (peerName: string) => { const schemasRes: USchemasResponse = await fetch('/api/peers/schemas', { method: 'POST', body: JSON.stringify({ peerName, }), }).then((res) => res.json()); - setLoading(false); return schemasRes.schemas; }; export const fetchTables = async ( peerName: string, schemaName: string, - setLoading: Dispatch> + peerType?: DBType ) => { if (schemaName.length === 0) return []; - setLoading(true); const tablesRes: UTablesResponse = await fetch('/api/peers/tables', { method: 'POST', body: JSON.stringify({ @@ -212,8 +263,29 @@ 
export const fetchTables = async ( schemaName, }), }).then((res) => res.json()); - setLoading(false); - return tablesRes.tables; + + let tables = []; + const tableNames = tablesRes.tables; + if (tableNames) { + for (const tableName of tableNames) { + // setting defaults: + // for bigquery, tables are not schema-qualified + const dstName = + peerType != undefined && dBTypeToJSON(peerType) == 'BIGQUERY' + ? tableName + : `${schemaName}.${tableName}`; + + tables.push({ + schema: schemaName, + source: `${schemaName}.${tableName}`, + destination: dstName, + partitionKey: '', + exclude: [], + selected: false, + }); + } + } + return tables; }; export const fetchColumns = async ( diff --git a/ui/app/mirrors/create/mirrorcards.tsx b/ui/app/mirrors/create/mirrorcards.tsx new file mode 100644 index 0000000000..fd07aa94eb --- /dev/null +++ b/ui/app/mirrors/create/mirrorcards.tsx @@ -0,0 +1,90 @@ +'use client'; +import { Label } from '@/lib/Label'; +import { RowWithRadiobutton } from '@/lib/Layout'; +import { RadioButton, RadioButtonGroup } from '@/lib/RadioButtonGroup'; +import Link from 'next/link'; +import { SetStateAction } from 'react'; + +const MirrorCards = ({ + setMirrorType, +}: { + setMirrorType: (value: SetStateAction) => void; +}) => { + const cards = [ + { + title: 'CDC', + description: + 'Change-data Capture or CDC refers to replication of changes on the source table to the target table with initial load. This is recommended.', + link: 'https://docs.peerdb.io/usecases/Real-time%20CDC/overview', + }, + { + title: 'Query Replication', + description: + 'Query Replication allows you to specify a set of rows to be synced via a SELECT query.', + link: 'https://docs.peerdb.io/usecases/Streaming%20Query%20Replication/overview', + }, + { + title: 'XMIN', + description: + 'XMIN mode uses the xmin system column of PostgreSQL as a watermark column for replication.', + link: 'https://docs.peerdb.io/sql/commands/create-mirror#xmin-query-replication', + }, + ]; + return ( + setMirrorType(value)}> +
+ {cards.map((card, index) => { + return ( +
+
+ +
{card.title}
+ + } + action={} + /> + +
+ +
+ ); + })} +
+
+ ); +}; + +export default MirrorCards; diff --git a/ui/app/mirrors/create/page.tsx b/ui/app/mirrors/create/page.tsx index b443cdd760..ba9c45359c 100644 --- a/ui/app/mirrors/create/page.tsx +++ b/ui/app/mirrors/create/page.tsx @@ -1,18 +1,13 @@ 'use client'; import { DBTypeToImageMapping } from '@/components/PeerComponent'; import { RequiredIndicator } from '@/components/RequiredIndicator'; -import { QRepConfig, QRepSyncMode } from '@/grpc_generated/flow'; +import { QRepConfig } from '@/grpc_generated/flow'; import { DBType, Peer } from '@/grpc_generated/peers'; import { Button } from '@/lib/Button'; import { ButtonGroup } from '@/lib/ButtonGroup'; import { Label } from '@/lib/Label'; -import { - RowWithRadiobutton, - RowWithSelect, - RowWithTextField, -} from '@/lib/Layout'; +import { RowWithSelect, RowWithTextField } from '@/lib/Layout'; import { Panel } from '@/lib/Panel'; -import { RadioButton, RadioButtonGroup } from '@/lib/RadioButtonGroup'; import { TextField } from '@/lib/TextField'; import { Divider } from '@tremor/react'; import Image from 'next/image'; @@ -22,13 +17,14 @@ import { useEffect, useState } from 'react'; import ReactSelect from 'react-select'; import { InfoPopover } from '../../../components/InfoPopover'; import { CDCConfig, TableMapRow } from '../../dto/MirrorsDTO'; -import CDCConfigForm from './cdc'; -import { handleCreateCDC, handleCreateQRep } from './handlers'; +import CDCConfigForm from './cdc/cdc'; +import { handleCreateCDC, handleCreateQRep, handlePeer } from './handlers'; import { cdcSettings } from './helpers/cdc'; import { blankCDCSetting } from './helpers/common'; import { qrepSettings } from './helpers/qrep'; -import QRepConfigForm from './qrep'; -import QRepQuery from './query'; +import MirrorCards from './mirrorcards'; +import QRepConfigForm from './qrep/qrep'; +import QRepQuery from './qrep/query'; function getPeerValue(peer: Peer) { return peer.name; @@ -63,7 +59,6 @@ export default function CreateMirrors() { const [config, setConfig] = useState(blankCDCSetting); const [peers, setPeers] = useState([]); const [rows, setRows] = useState([]); - const [sourceSchema, setSourceSchema] = useState('public'); const [qrepQuery, setQrepQuery] = useState(`-- Here's a sample template: SELECT * FROM @@ -93,45 +88,6 @@ export default function CreateMirrors() { router.push('/mirrors'); }; - const handlePeer = (peer: Peer | null, peerEnd: 'src' | 'dst') => { - if (!peer) return; - if (peerEnd === 'dst') { - if (peer.type === DBType.POSTGRES) { - setConfig((curr) => { - return { - ...curr, - cdcSyncMode: QRepSyncMode.QREP_SYNC_MODE_MULTI_INSERT, - snapshotSyncMode: QRepSyncMode.QREP_SYNC_MODE_MULTI_INSERT, - syncMode: QRepSyncMode.QREP_SYNC_MODE_MULTI_INSERT, - }; - }); - } else if ( - peer.type === DBType.SNOWFLAKE || - peer.type === DBType.BIGQUERY - ) { - setConfig((curr) => { - return { - ...curr, - cdcSyncMode: QRepSyncMode.QREP_SYNC_MODE_STORAGE_AVRO, - snapshotSyncMode: QRepSyncMode.QREP_SYNC_MODE_STORAGE_AVRO, - syncMode: QRepSyncMode.QREP_SYNC_MODE_STORAGE_AVRO, - }; - }); - } - setConfig((curr) => ({ - ...curr, - destination: peer, - destinationPeer: peer, - })); - } else { - setConfig((curr) => ({ - ...curr, - source: peer, - sourcePeer: peer, - })); - } - }; - return (
diff --git a/ui/app/mirrors/create/page.tsx b/ui/app/mirrors/create/page.tsx
index b443cdd760..ba9c45359c 100644
--- a/ui/app/mirrors/create/page.tsx
+++ b/ui/app/mirrors/create/page.tsx
@@ -1,18 +1,13 @@
 'use client';
 import { DBTypeToImageMapping } from '@/components/PeerComponent';
 import { RequiredIndicator } from '@/components/RequiredIndicator';
-import { QRepConfig, QRepSyncMode } from '@/grpc_generated/flow';
+import { QRepConfig } from '@/grpc_generated/flow';
 import { DBType, Peer } from '@/grpc_generated/peers';
 import { Button } from '@/lib/Button';
 import { ButtonGroup } from '@/lib/ButtonGroup';
 import { Label } from '@/lib/Label';
-import {
-  RowWithRadiobutton,
-  RowWithSelect,
-  RowWithTextField,
-} from '@/lib/Layout';
+import { RowWithSelect, RowWithTextField } from '@/lib/Layout';
 import { Panel } from '@/lib/Panel';
-import { RadioButton, RadioButtonGroup } from '@/lib/RadioButtonGroup';
 import { TextField } from '@/lib/TextField';
 import { Divider } from '@tremor/react';
 import Image from 'next/image';
@@ -22,13 +17,14 @@
 import { useEffect, useState } from 'react';
 import ReactSelect from 'react-select';
 import { InfoPopover } from '../../../components/InfoPopover';
 import { CDCConfig, TableMapRow } from '../../dto/MirrorsDTO';
-import CDCConfigForm from './cdc';
-import { handleCreateCDC, handleCreateQRep } from './handlers';
+import CDCConfigForm from './cdc/cdc';
+import { handleCreateCDC, handleCreateQRep, handlePeer } from './handlers';
 import { cdcSettings } from './helpers/cdc';
 import { blankCDCSetting } from './helpers/common';
 import { qrepSettings } from './helpers/qrep';
-import QRepConfigForm from './qrep';
-import QRepQuery from './query';
+import MirrorCards from './mirrorcards';
+import QRepConfigForm from './qrep/qrep';
+import QRepQuery from './qrep/query';

 function getPeerValue(peer: Peer) {
   return peer.name;
 }
@@ -63,7 +59,6 @@ export default function CreateMirrors() {
   const [config, setConfig] = useState<CDCConfig | QRepConfig>(blankCDCSetting);
   const [peers, setPeers] = useState<Peer[]>([]);
   const [rows, setRows] = useState<TableMapRow[]>([]);
-  const [sourceSchema, setSourceSchema] = useState('public');
   const [qrepQuery, setQrepQuery] = useState(`-- Here's a sample template:
 SELECT * FROM <table_name>
@@ -93,45 +88,6 @@ export default function CreateMirrors() {
     router.push('/mirrors');
   };

-  const handlePeer = (peer: Peer | null, peerEnd: 'src' | 'dst') => {
-    if (!peer) return;
-    if (peerEnd === 'dst') {
-      if (peer.type === DBType.POSTGRES) {
-        setConfig((curr) => {
-          return {
-            ...curr,
-            cdcSyncMode: QRepSyncMode.QREP_SYNC_MODE_MULTI_INSERT,
-            snapshotSyncMode: QRepSyncMode.QREP_SYNC_MODE_MULTI_INSERT,
-            syncMode: QRepSyncMode.QREP_SYNC_MODE_MULTI_INSERT,
-          };
-        });
-      } else if (
-        peer.type === DBType.SNOWFLAKE ||
-        peer.type === DBType.BIGQUERY
-      ) {
-        setConfig((curr) => {
-          return {
-            ...curr,
-            cdcSyncMode: QRepSyncMode.QREP_SYNC_MODE_STORAGE_AVRO,
-            snapshotSyncMode: QRepSyncMode.QREP_SYNC_MODE_STORAGE_AVRO,
-            syncMode: QRepSyncMode.QREP_SYNC_MODE_STORAGE_AVRO,
-          };
-        });
-      }
-      setConfig((curr) => ({
-        ...curr,
-        destination: peer,
-        destinationPeer: peer,
-      }));
-    } else {
-      setConfig((curr) => ({
-        ...curr,
-        source: peer,
-        sourcePeer: peer,
-      }));
-    }
-  };
-
   return (
@@ -150,135 +106,7 @@ export default function CreateMirrors() {
           >
             Mirror type
           </Label>
-          <RadioButtonGroup
-            onValueChange={(value: string) => setMirrorType(value)}
-          >
-            <div style={{ display: 'flex', alignItems: 'start' }}>
-              <div style={{ padding: '0.5rem', marginRight: '1rem' }}>
-                <RowWithRadiobutton
-                  label={
-                    <Label>
-                      <div style={{ fontWeight: 'bold' }}>CDC</div>
-                    </Label>
-                  }
-                  action={<RadioButton value='CDC' />}
-                />
-                <Label>
-                  <div style={{ fontSize: 14 }}>
-                    Change-data Capture or CDC refers to replication of
-                    changes on the source table to the target table with
-                    initial load. This is recommended.
-                  </div>
-                </Label>
-              </div>
-              <div style={{ padding: '0.5rem', marginRight: '1rem' }}>
-                <RowWithRadiobutton
-                  label={
-                    <Label>
-                      <div style={{ fontWeight: 'bold' }}>
-                        Query Replication
-                      </div>
-                    </Label>
-                  }
-                  action={<RadioButton value='Query Replication' />}
-                />
-                <Label>
-                  <div style={{ fontSize: 14 }}>
-                    Query Replication allows you to specify a set of rows to
-                    be synced via a SELECT query.
-                  </div>
-                </Label>
-              </div>
-              <div style={{ padding: '0.5rem' }}>
-                <RowWithRadiobutton
-                  label={
-                    <Label>
-                      <div style={{ fontWeight: 'bold' }}>XMIN</div>
-                    </Label>
-                  }
-                  action={<RadioButton value='XMIN' />}
-                />
-                <Label>
-                  <div style={{ fontSize: 14 }}>
-                    XMIN mode uses the xmin system column of PostgreSQL as a
-                    watermark column for replication.
-                  </div>
-                </Label>
-              </div>
-            </div>
-          </RadioButtonGroup>
+          <MirrorCards setMirrorType={setMirrorType} />
           <RowWithTextField
             label={<Label>Mirror Name</Label>}
             action={
               <TextField
                 variant='simple'
                 value={mirrorName}
                 onChange={(e: React.ChangeEvent<HTMLInputElement>) =>
                   setMirrorName(e.target.value)
                 }
               />
             }
           />
+          {['src', 'dst'].map((peerEnd, index) => {
+            return (
+              <RowWithSelect
+                key={index}
+                label={
+                  <Label>
+                    {peerEnd === 'src' ? 'Source Peer' : 'Destination Peer'}
+                    {RequiredIndicator(true)}
+                  </Label>
+                }
+                action={
+                  <ReactSelect
+                    onChange={(val) =>
+                      handlePeer(val, peerEnd as 'src' | 'dst', setConfig)
+                    }
+                    options={
+                      (peerEnd === 'src'
+                        ? peers.filter((peer) => peer.type == DBType.POSTGRES)
+                        : peers) ?? []
+                    }
+                    getOptionValue={getPeerValue}
+                    formatOptionLabel={getPeerLabel}
+                  />
+                }
+              />
+            );
+          })}
-          <RowWithSelect
-            label={
-              <Label>
-                Source Peer
-                {RequiredIndicator(true)}
-              </Label>
-            }
-            action={
-              <ReactSelect
-                onChange={(val) => handlePeer(val, 'src')}
-                options={
-                  peers.filter((peer) => peer.type == DBType.POSTGRES) ?? []
-                }
-                getOptionValue={getPeerValue}
-                formatOptionLabel={getPeerLabel}
-              />
-            }
-          />
-          <RowWithSelect
-            label={
-              <Label>
-                Destination Peer
-                {RequiredIndicator(true)}
-              </Label>
-            }
-            action={
-              <ReactSelect
-                onChange={(val) => handlePeer(val, 'dst')}
-                options={peers ?? []}
-                getOptionValue={getPeerValue}
-                formatOptionLabel={getPeerLabel}
-              />
-            }
-          />

         {mirrorType === 'Query Replication' && (
           <QRepQuery query={qrepQuery} setter={setQrepQuery} />
         )}
@@ -394,8 +199,6 @@ export default function CreateMirrors() {
             <CDCConfigForm
               settings={cdcSettings}
               mirrorConfig={config as CDCConfig}
               setter={setConfig}
               rows={rows}
               setRows={setRows}
-              setSchema={setSourceSchema}
-              schema={sourceSchema}
             />
           ) : (
diff --git a/ui/app/mirrors/create/tablemapping.tsx b/ui/app/mirrors/create/tablemapping.tsx
deleted file mode 100644
--- a/ui/app/mirrors/create/tablemapping.tsx
+++ /dev/null
-interface TableMappingProps {
-  sourcePeerName: string;
-  rows: TableMapRow[];
-  setRows: Dispatch<SetStateAction<TableMapRow[]>>;
-  schema: string;
-  setSchema: Dispatch<SetStateAction<string>>;
-  peerType?: DBType;
-}
-
-const TableMapping = ({
-  sourcePeerName,
-  rows,
-  setRows,
-  schema,
-  setSchema,
-  peerType,
-}: TableMappingProps) => {
-  const [allSchemas, setAllSchemas] = useState<string[]>();
-  const [tableColumns, setTableColumns] = useState<
-    { tableName: string; columns: string[] }[]
-  >([]);
-  const [loading, setLoading] = useState(false);
-
-  const handleAddRow = (source: string) => {
-    const newRows = [...rows];
-    const index = newRows.findIndex((row) => row.source === source);
-    if (index >= 0) newRows[index] = { ...newRows[index], selected: true };
-    setRows(newRows);
-  };
-
-  const handleRemoveRow = (source: string) => {
-    const newRows = [...rows];
-    const index = newRows.findIndex((row) => row.source === source);
-    if (index >= 0) newRows[index] = { ...newRows[index], selected: false };
-    setRows(newRows);
-  };
-
-  const handleSelectAll = (
-    e: React.MouseEvent<HTMLInputElement>
-  ) => {
-    const newRows = [...rows];
-    for (const row of newRows) {
-      row.selected = e.currentTarget.checked;
-    }
-    setRows(newRows);
-  };
-
-  const handleSwitch = (on: boolean, source: string) => {
-    if (on) {
-      handleAddRow(source);
-    } else {
-      handleRemoveRow(source);
-    }
-  };
-
-  const updateDestination = (source: string, dest: string) => {
-    // find the row with source and update the destination
-    const newRows = [...rows];
-    const index = newRows.findIndex((row) => row.source === source);
-    newRows[index] = { ...newRows[index], destination: dest };
-    setRows(newRows);
-  };
-
-  const updatePartitionKey = (source: string, pkey: string) => {
-    const newRows = [...rows];
-    const index = newRows.findIndex((row) => row.source === source);
-    newRows[index] = { ...newRows[index], partitionKey: pkey };
-    setRows(newRows);
-  };
-
-  const getTablesOfSchema = useCallback(
-    (schemaName: string) => {
-      fetchTables(sourcePeerName, schemaName, setLoading).then((tableNames) => {
-        if (tableNames) {
-          const newRows = [];
-          for (const tableName of tableNames) {
-            const dstName =
-              peerType != undefined && dBTypeToJSON(peerType) == 'BIGQUERY'
-                ? tableName
-                : `${schemaName}.${tableName}`;
-            newRows.push({
-              source: `${schemaName}.${tableName}`,
-              destination: dstName,
-              partitionKey: '',
-              exclude: [],
-              selected: false,
-            });
-          }
-          setRows(newRows);
-        }
-      });
-    },
-    [sourcePeerName, setRows, peerType]
-  );
-
-  const [searchQuery, setSearchQuery] = useState('');
-
-  useEffect(() => {
-    if (peerType != undefined && dBTypeToJSON(peerType) == 'BIGQUERY') {
-      setRows((rows) => {
-        const newRows = [...rows];
-        newRows.forEach((_, i) => {
-          const row = newRows[i];
-          newRows[i] = {
-            ...row,
-            destination: row.destination?.split('.')[1],
-          };
-        });
-        return newRows;
-      });
-    } else {
-      setRows((rows) => {
-        const newRows = [...rows];
-        newRows.forEach((_, i) => {
-          const row = newRows[i];
-          newRows[i] = {
-            ...row,
-            destination: `${schema}.${
-              row.destination?.split('.')[1] || row.destination
-            }`,
-          };
-        });
-        return newRows;
-      });
-    }
-  }, [peerType, setRows, schema]);
-
-  useEffect(() => {
-    fetchSchemas(sourcePeerName, setLoading).then((res) => setAllSchemas(res));
-    setSchema('public');
-    getTablesOfSchema('public');
-  }, [sourcePeerName, setSchema, getTablesOfSchema]);
-
-  return (
-    <div style={{ marginTop: '1rem' }}>
-      <RowWithSelect
-        label={<Label>Source Schema</Label>}
-        action={
-          <ReactSelect
-            onChange={(val) => {
-              if (val) {
-                setSchema(val.value || '');
-                getTablesOfSchema(val.value || '');
-              }
-            }}
-            defaultInputValue={schema.length > 0 ? schema : 'Loading...'}
-            isLoading={loading}
-            options={allSchemas?.map((schemaName) => {
-              return { value: schemaName, label: schemaName };
-            })}
-          />
-        }
-      />
-      <div style={{ display: 'flex', alignItems: 'center' }}>
-        <div>
-          <input type='checkbox' onClick={(e) => handleSelectAll(e)} />
-          <Label>Select All</Label>
-        </div>
-        <div>
-          <SearchField
-            placeholder='Search for tables'
-            onChange={(e: React.ChangeEvent<HTMLInputElement>) =>
-              setSearchQuery(e.target.value)
-            }
-          />
-        </div>
-      </div>
-      <div style={{ maxHeight: '40vh', overflow: 'auto' }}>
-        {rows ? (
-          rows
-            ?.filter((row) => {
-              return row.source
-                .toLowerCase()
-                .includes(searchQuery.toLowerCase());
-            })
-            .map((row, index) => (
-              <div key={index}>
-                <div style={{ display: 'flex', alignItems: 'center' }}>
-                  <Switch
-                    checked={row.selected}
-                    onCheckedChange={(state: boolean) =>
-                      handleSwitch(state, row.source)
-                    }
-                  />
-                  <Label>{row.source}</Label>
-                </div>
-                {row.selected && (
-                  <div style={{ padding: '0.5rem' }}>
-                    <RowWithTextField
-                      label={
-                        <Label>
-                          Destination Table Name
-                          {RequiredIndicator(true)}
-                        </Label>
-                      }
-                      action={
-                        <TextField
-                          variant='simple'
-                          defaultValue={row.destination}
-                          onChange={(e: React.ChangeEvent<HTMLInputElement>) =>
-                            updateDestination(row.source, e.target.value)
-                          }
-                        />
-                      }
-                    />
-                    <RowWithTextField
-                      label={<Label>Partition Key</Label>}
-                      action={
-                        <TextField
-                          variant='simple'
-                          defaultValue={row.partitionKey}
-                          onChange={(e: React.ChangeEvent<HTMLInputElement>) =>
-                            updatePartitionKey(row.source, e.target.value)
-                          }
-                        />
-                      }
-                    />
-                    <Label>
-                      This is used only if you enable initial load, and
-                      specifies its watermark.
-                    </Label>
-                  </div>
-                )}
-              </div>
-            ))
-        ) : (
-          <div style={{ display: 'flex', justifyContent: 'center' }}>
-            <Label>Loading tables...</Label>
-          </div>
-        )}
-      </div>
-    </div>
-  );
-};
-
-export default TableMapping;
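A note on the deleted file above: handleAddRow, handleRemoveRow, updateDestination, and updatePartitionKey all follow the same copy-then-patch update on the rows array. A generic sketch of that pattern; the patchRow helper name is ours, not part of the patch:

    function patchRow<T extends { source: string }>(
      rows: T[],
      source: string,
      patch: Partial<T>
    ): T[] {
      const next = [...rows];
      const index = next.findIndex((row) => row.source === source);
      // leave the array untouched if no row matches the source
      if (index >= 0) next[index] = { ...next[index], ...patch };
      return next;
    }

    // e.g. setRows(patchRow(rows, 'public.users', { selected: true }));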
diff --git a/ui/app/mirrors/types.ts b/ui/app/mirrors/types.ts
index 16054b2faf..3fa9f24164 100644
--- a/ui/app/mirrors/types.ts
+++ b/ui/app/mirrors/types.ts
@@ -4,4 +4,3 @@
 import { Dispatch, SetStateAction } from 'react';
 export type CDCConfig = FlowConnectionConfigs;
 export type MirrorConfig = CDCConfig | QRepConfig;
 export type MirrorSetter = Dispatch<SetStateAction<CDCConfig | QRepConfig>>;
-export type TableMapRow = { source: string; destination: string };
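A closing note: the two-field TableMapRow removed here is superseded by the richer row shape that page.tsx now imports from '../../dto/MirrorsDTO'. That declaration is not part of this patch; inferring it from the objects fetchTables builds above, it is presumably along these lines:

    // Sketch of the assumed replacement type, not the verbatim declaration.
    export type TableMapRow = {
      schema: string;
      source: string;
      destination: string;
      partitionKey: string;
      exclude: string[];
      selected: boolean;
    };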