From 2271a1b30168a862bf93abd0c25ae3ff3288480b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Philip=20Dub=C3=A9?=
Date: Mon, 4 Mar 2024 19:26:43 +0000
Subject: [PATCH 01/13] Remove SyncOptionsSignal (#1429)

This did not get wired up in sync_flow.go, but rather than implement it, remove it.
We're moving towards requiring a pause before all option updates anyway.
---
 flow/model/signals.go      | 4 ----
 flow/workflows/cdc_flow.go | 8 --------
 2 files changed, 12 deletions(-)

diff --git a/flow/model/signals.go b/flow/model/signals.go
index 45ec805339..5cdd9f2264 100644
--- a/flow/model/signals.go
+++ b/flow/model/signals.go
@@ -138,10 +138,6 @@ var SyncResultSignal = TypedSignal[*SyncResponse]{
 	Name: "sync-result",
 }
 
-var SyncOptionsSignal = TypedSignal[*protos.SyncFlowOptions]{
-	Name: "sync-options",
-}
-
 var NormalizeSignal = TypedSignal[NormalizePayload]{
 	Name: "normalize",
 }
diff --git a/flow/workflows/cdc_flow.go b/flow/workflows/cdc_flow.go
index 419c2df3db..419b11b7e8 100644
--- a/flow/workflows/cdc_flow.go
+++ b/flow/workflows/cdc_flow.go
@@ -164,10 +164,6 @@ func (w *CDCFlowWorkflowExecution) processCDCFlowConfigUpdate(ctx workflow.Conte
 		state.SyncFlowOptions.TableMappings = append(state.SyncFlowOptions.TableMappings, flowConfigUpdate.AdditionalTables...)
 
-		if w.syncFlowFuture != nil {
-			_ = model.SyncOptionsSignal.SignalChildWorkflow(ctx, w.syncFlowFuture, state.SyncFlowOptions).Get(ctx, nil)
-		}
-
 		// finished processing, wipe it
 		state.FlowConfigUpdate = nil
 	}
@@ -191,10 +187,6 @@ func (w *CDCFlowWorkflowExecution) addCdcPropertiesSignalListener(
 		// do this irrespective of additional tables being present, for auto unpausing
 		state.FlowConfigUpdate = cdcConfigUpdate
 
-		if w.syncFlowFuture != nil {
-			_ = model.SyncOptionsSignal.SignalChildWorkflow(ctx, w.syncFlowFuture, state.SyncFlowOptions).Get(ctx, nil)
-		}
-
 		w.logger.Info("CDC Signal received. Parameters on signal reception:",
 			slog.Int("BatchSize", int(state.SyncFlowOptions.BatchSize)),
 			slog.Int("IdleTimeout", int(state.SyncFlowOptions.IdleTimeoutSeconds)),

From 1432b4d52c64fc9611d3a4aa87701de5be55530b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Philip=20Dub=C3=A9?=
Date: Mon, 4 Mar 2024 19:36:50 +0000
Subject: [PATCH 02/13] Record slot size: select distinct (#1428)

It turns out we create a flows record per source/destination table pair,
so deduplicate on name when recording slot sizes.
---
 flow/activities/flowable.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/flow/activities/flowable.go b/flow/activities/flowable.go
index 1d6433c8f4..3900d07385 100644
--- a/flow/activities/flowable.go
+++ b/flow/activities/flowable.go
@@ -852,7 +852,7 @@ func (a *FlowableActivity) SendWALHeartbeat(ctx context.Context) error {
 }
 
 func (a *FlowableActivity) RecordSlotSizes(ctx context.Context) error {
-	rows, err := a.CatalogPool.Query(ctx, "SELECT flows.name, flows.config_proto FROM flows WHERE query_string IS NULL")
+	rows, err := a.CatalogPool.Query(ctx, "SELECT DISTINCT ON (name) name, config_proto FROM flows WHERE query_string IS NULL")
 	if err != nil {
 		return err
 	}

From 81c9320f077199611338bde5ceaf882ef0e0821b Mon Sep 17 00:00:00 2001
From: Amogh Bharadwaj
Date: Tue, 5 Mar 2024 01:33:32 +0530
Subject: [PATCH 03/13] UI: Fix slot page screen (#1430)

The slot and activity pages became centered via #1418.
This PR restores them to their original state.
It also portrays the slot graph in GB rather than MB.
---
 ui/app/peers/[peerName]/lagGraph.tsx | 5 +++--
 ui/app/peers/[peerName]/page.tsx     | 2 +-
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/ui/app/peers/[peerName]/lagGraph.tsx b/ui/app/peers/[peerName]/lagGraph.tsx
index 82f0ed9aae..b495aaf6f3 100644
--- a/ui/app/peers/[peerName]/lagGraph.tsx
+++ b/ui/app/peers/[peerName]/lagGraph.tsx
@@ -40,7 +40,7 @@ function LagGraph({ slotNames }: { slotNames: string[] }) {
     ]);
     return lagDataDot.map((data) => ({
       time: formatGraphLabel(new Date(data[0]!), timeSince),
-      'Lag in MB': data[1],
+      'Lag in GB': parseInt(data[1] || '0', 10) / 1000,
     }));
   }, [lagPoints, timeSince]);
@@ -108,8 +108,9 @@ function LagGraph({ slotNames }: { slotNames: string[] }) {
   );
diff --git a/ui/app/peers/[peerName]/page.tsx b/ui/app/peers/[peerName]/page.tsx
index fadb1731d4..5d5742c8cd 100644
--- a/ui/app/peers/[peerName]/page.tsx
+++ b/ui/app/peers/[peerName]/page.tsx
@@ -60,9 +60,9 @@ const PeerData = async ({ params: { peerName } }: DataConfigProps) => {
     <div
From ba6feaa72af6b2f34a299d4cbe2feaecbe9a553e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philip=20Dub=C3=A9?= Date: Mon, 4 Mar 2024 21:31:13 +0000 Subject: [PATCH 04/13] Update ui/nexus dependencies (#1431) flow still waiting on temporal sdk 1.26 --- nexus/Cargo.lock | 219 +++++------ nexus/Cargo.toml | 1 + nexus/catalog/Cargo.toml | 2 +- nexus/peer-bigquery/Cargo.toml | 2 +- nexus/peer-connections/Cargo.toml | 2 +- nexus/peer-postgres/Cargo.toml | 2 +- nexus/peer-snowflake/Cargo.toml | 30 +- nexus/value/Cargo.toml | 6 +- ui/package-lock.json | 592 ++++++++++++++++-------------- ui/package.json | 16 +- 10 files changed, 457 insertions(+), 415 deletions(-) diff --git a/nexus/Cargo.lock b/nexus/Cargo.lock index 112d881155..aadb641caf 100644 --- a/nexus/Cargo.lock +++ b/nexus/Cargo.lock @@ -79,9 +79,9 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.12" +version = "0.6.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96b09b5178381e0874812a9b157f7fe84982617e48f71f4e3235482775e5b540" +checksum = "d96bd03f33fe50a863e394ee9718a706f988b9079b20c3784fb726e7678b62fb" dependencies = [ "anstyle", "anstyle-parse", @@ -170,7 +170,7 @@ checksum = "5fd55a5ba1179988837d24ab4c7cc8ed6efdeff578ede0416b4225a5fca35bd0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -192,7 +192,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -203,7 +203,7 @@ checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -356,7 +356,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", "syn_derive", ] @@ -429,9 +429,9 @@ dependencies = [ [[package]] name = "cargo_toml" -version = "0.19.1" +version = "0.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dc9f7a067415ab5058020f04c60ec7b557084dbec0e021217bbabc7a8d38d14" +checksum = "a98356df42a2eb1bd8f1793ae4ee4de48e384dd974ce5eac8eee802edb7492be" dependencies = [ "serde", "toml", @@ -469,9 +469,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.88" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02f341c093d19155a6e41631ce5971aac4e9a868262212153124c15fa22d1cdc" +checksum = "a0ba8f7aaa012f30d5b2861462f6708eccd49c3c39863fe083a308035f63d723" [[package]] name = "cfg-if" @@ -497,7 +497,7 @@ dependencies = [ "num-traits", "serde", "wasm-bindgen", - "windows-targets 0.52.3", + "windows-targets 0.52.4", ] [[package]] @@ -541,7 +541,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -635,9 +635,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.11" +version = "0.5.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "176dc175b78f56c0f321911d9c8eb2b77a78a4860b9c19db83835fea1a46649b" +checksum = "ab3db02a9c5b5121e1e42fbdb1aeb65f5e02624cc58c43f2884c6ccac0b82f95" dependencies = [ "crossbeam-utils", ] @@ -752,7 +752,7 @@ checksum = "d150dea618e920167e5973d70ae6ece4385b7164e0d799fe7c122dd0a5d912ad" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -775,9 +775,9 @@ checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" [[package]] name = "dyn-clone" -version = "1.0.16" +version = 
"1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "545b22097d44f8a9581187cdf93de7a71e4722bf51200cfaba810865b49a495d" +checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125" [[package]] name = "either" @@ -968,7 +968,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -1080,7 +1080,7 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap 2.2.3", + "indexmap 2.2.5", "slab", "tokio", "tokio-util", @@ -1123,9 +1123,9 @@ checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "hermit-abi" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "379dada1584ad501b383485dd706b8afb7a70fcbc7f4da7d780638a5a6124a60" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" [[package]] name = "hex" @@ -1153,9 +1153,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" dependencies = [ "bytes", "fnv", @@ -1288,9 +1288,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.2.3" +version = "2.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "233cf39063f058ea2caae4091bf4a3ef70a653afbc026f5c4a4135d114e3c177" +checksum = "7b0b929d511467233429c45a44ac1dcaa21ba0f5ba11e4879e6ed28ddb4f9df4" dependencies = [ "equivalent", "hashbrown 0.14.3", @@ -1338,9 +1338,9 @@ checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" [[package]] name = "js-sys" -version = "0.3.68" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "406cda4b368d531c842222cf9d2600a9a4acce8d29423695379c6868a143a9ee" +checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" dependencies = [ "wasm-bindgen", ] @@ -1399,9 +1399,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.20" +version = "0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" +checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" [[package]] name = "lzma-sys" @@ -1474,9 +1474,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", "wasi", @@ -1885,7 +1885,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" dependencies = [ "fixedbitset", - "indexmap 2.2.3", + "indexmap 2.2.5", ] [[package]] @@ -1950,7 +1950,7 @@ checksum = "266c042b60c9c76b8d53061e52b2e0d1116abc57cefc8c5cd671619a56ac3690" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -2100,7 +2100,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a41cf62165e97c7f814d2221421dbb9afcbcdb0a88068e5ea206e19951c2cbb5" dependencies = [ "proc-macro2", - "syn 2.0.50", + 
"syn 2.0.52", ] [[package]] @@ -2171,7 +2171,7 @@ dependencies = [ "prost", "prost-types", "regex", - "syn 2.0.50", + "syn 2.0.52", "tempfile", "which", ] @@ -2186,7 +2186,7 @@ dependencies = [ "itertools 0.11.0", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -2287,9 +2287,9 @@ dependencies = [ [[package]] name = "rayon" -version = "1.8.1" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa7237101a77a10773db45d62004a272517633fbcc3df19d96455ede1122e051" +checksum = "e4963ed1bc86e4f3ee217022bd855b297cef07fb9eac5dfa1f788b220b49b3bd" dependencies = [ "either", "rayon-core", @@ -2355,7 +2355,7 @@ dependencies = [ "quote", "refinery-core", "regex", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -2366,7 +2366,7 @@ checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.5", + "regex-automata 0.4.6", "regex-syntax 0.8.2", ] @@ -2381,9 +2381,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bb987efffd3c6d0d8f5f89510bb458559eab11e4f869acb20bf845e016259cd" +checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" dependencies = [ "aho-corasick", "memchr", @@ -2604,9 +2604,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.3.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "048a63e5b3ac996d78d402940b5fa47973d2d080c6c6fffa1d0f19c4445310b7" +checksum = "5ede67b28608b4c60685c7d54122d4400d90f62b40caee7700e700380a390fa8" [[package]] name = "rustls-webpki" @@ -2759,7 +2759,7 @@ checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -2945,7 +2945,7 @@ source = "git+https://github.com/peerdb-io/sqlparser-rs.git#9fbfb423db7fc0949dea dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -2984,9 +2984,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.50" +version = "2.0.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74f1bdc9872430ce9b75da68329d1c1746faf50ffac5f19e02b71e37ff881ffb" +checksum = "b699d15b36d1f02c3e7c69f8ffef53de37aefae075d8488d4ba1a7788d574a07" dependencies = [ "proc-macro2", "quote", @@ -3002,7 +3002,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -3050,9 +3050,9 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.10.0" +version = "3.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a365e8cd18e44762ef95d87f284f4b5cd04107fec2ff3052bd6a3e6069669e67" +checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" dependencies = [ "cfg-if", "fastrand", @@ -3077,7 +3077,7 @@ checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -3176,7 +3176,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -3293,7 +3293,7 @@ version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" 
dependencies = [ - "indexmap 2.2.3", + "indexmap 2.2.5", "toml_datetime", "winnow 0.5.40", ] @@ -3304,11 +3304,11 @@ version = "0.22.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c1b5fd4128cc8d3e0cb74d4ed9a9cc7c7284becd4df68f5f940e1ad123606f6" dependencies = [ - "indexmap 2.2.3", + "indexmap 2.2.5", "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.2", + "winnow 0.6.5", ] [[package]] @@ -3454,7 +3454,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -3632,9 +3632,9 @@ checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "walkdir" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" dependencies = [ "same-file", "winapi-util", @@ -3655,11 +3655,17 @@ version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +[[package]] +name = "wasite" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" + [[package]] name = "wasm-bindgen" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1e124130aee3fb58c5bdd6b639a0509486b0338acaaae0c84a5124b0f588b7f" +checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -3667,24 +3673,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9e7e1900c352b609c8488ad12639a311045f40a35491fb69ba8c12f758af70b" +checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.41" +version = "0.4.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877b9c3f61ceea0e56331985743b13f3d25c406a7098d45180fb5f09bc19ed97" +checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" dependencies = [ "cfg-if", "js-sys", @@ -3694,9 +3700,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b30af9e2d358182b5c7449424f017eba305ed32a7010509ede96cdc4696c46ed" +checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3704,28 +3710,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66" +checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.91" 
+version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f186bd2dcf04330886ce82d6f33dd75a7bfcf69ecf5763b89fcde53b6ac9838" +checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" [[package]] name = "web-sys" -version = "0.3.68" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96565907687f7aceb35bc5fc03770a8a0471d82e479f25832f54a0e3f4b28446" +checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" dependencies = [ "js-sys", "wasm-bindgen", @@ -3760,11 +3766,12 @@ dependencies = [ [[package]] name = "whoami" -version = "1.4.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22fc3756b8a9133049b26c7f61ab35416c130e8c09b660f5b3958b446f52cc50" +checksum = "0fec781d48b41f8163426ed18e8fc2864c12937df9ce54c88ede7bd47270893e" dependencies = [ - "wasm-bindgen", + "redox_syscall", + "wasite", "web-sys", ] @@ -3805,7 +3812,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.52.3", + "windows-targets 0.52.4", ] [[package]] @@ -3823,7 +3830,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.3", + "windows-targets 0.52.4", ] [[package]] @@ -3843,17 +3850,17 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.3" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d380ba1dc7187569a8a9e91ed34b8ccfc33123bbacb8c0aed2d1ad7f3ef2dc5f" +checksum = "7dd37b7e5ab9018759f893a1952c9420d060016fc19a472b4bb20d1bdd694d1b" dependencies = [ - "windows_aarch64_gnullvm 0.52.3", - "windows_aarch64_msvc 0.52.3", - "windows_i686_gnu 0.52.3", - "windows_i686_msvc 0.52.3", - "windows_x86_64_gnu 0.52.3", - "windows_x86_64_gnullvm 0.52.3", - "windows_x86_64_msvc 0.52.3", + "windows_aarch64_gnullvm 0.52.4", + "windows_aarch64_msvc 0.52.4", + "windows_i686_gnu 0.52.4", + "windows_i686_msvc 0.52.4", + "windows_x86_64_gnu 0.52.4", + "windows_x86_64_gnullvm 0.52.4", + "windows_x86_64_msvc 0.52.4", ] [[package]] @@ -3864,9 +3871,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.3" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68e5dcfb9413f53afd9c8f86e56a7b4d86d9a2fa26090ea2dc9e40fba56c6ec6" +checksum = "bcf46cf4c365c6f2d1cc93ce535f2c8b244591df96ceee75d8e83deb70a9cac9" [[package]] name = "windows_aarch64_msvc" @@ -3876,9 +3883,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.3" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8dab469ebbc45798319e69eebf92308e541ce46760b49b18c6b3fe5e8965b30f" +checksum = "da9f259dd3bcf6990b55bffd094c4f7235817ba4ceebde8e6d11cd0c5633b675" [[package]] name = "windows_i686_gnu" @@ -3888,9 +3895,9 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.3" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2a4e9b6a7cac734a8b4138a4e1044eac3404d8326b6c0f939276560687a033fb" +checksum = "b474d8268f99e0995f25b9f095bc7434632601028cf86590aea5c8a5cb7801d3" [[package]] name = "windows_i686_msvc" @@ -3900,9 +3907,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.3" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28b0ec9c422ca95ff34a78755cfa6ad4a51371da2a5ace67500cf7ca5f232c58" +checksum = "1515e9a29e5bed743cb4415a9ecf5dfca648ce85ee42e15873c3cd8610ff8e02" [[package]] name = "windows_x86_64_gnu" @@ -3912,9 +3919,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.3" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "704131571ba93e89d7cd43482277d6632589b18ecf4468f591fbae0a8b101614" +checksum = "5eee091590e89cc02ad514ffe3ead9eb6b660aedca2183455434b93546371a03" [[package]] name = "windows_x86_64_gnullvm" @@ -3924,9 +3931,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.3" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42079295511643151e98d61c38c0acc444e52dd42ab456f7ccfd5152e8ecf21c" +checksum = "77ca79f2451b49fa9e2af39f0747fe999fcda4f5e241b2898624dca97a1f2177" [[package]] name = "windows_x86_64_msvc" @@ -3936,9 +3943,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.3" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0770833d60a970638e989b3fa9fd2bb1aaadcf88963d1659fd7d9990196ed2d6" +checksum = "32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8" [[package]] name = "winnow" @@ -3951,9 +3958,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.6.2" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a4191c47f15cc3ec71fcb4913cb83d58def65dd3787610213c649283b5ce178" +checksum = "dffa400e67ed5a4dd237983829e66475f0a4a26938c4b04c21baede6262215b8" dependencies = [ "memchr", ] @@ -4049,7 +4056,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] diff --git a/nexus/Cargo.toml b/nexus/Cargo.toml index 3b2f1daacf..2f031ff918 100644 --- a/nexus/Cargo.toml +++ b/nexus/Cargo.toml @@ -18,5 +18,6 @@ members = [ resolver = "2" [workspace.dependencies] +chrono = { version = "0.4", default-features = false, features = ["serde", "std"] } sqlparser = { git = "https://github.com/peerdb-io/sqlparser-rs.git" } pgwire = "0.19" diff --git a/nexus/catalog/Cargo.toml b/nexus/catalog/Cargo.toml index 7884e5df06..0f9372b19c 100644 --- a/nexus/catalog/Cargo.toml +++ b/nexus/catalog/Cargo.toml @@ -8,7 +8,7 @@ edition = "2021" [dependencies] anyhow = "1" async-trait = "0.1" -chrono = { version = "0.4.22", default-features = false } +chrono.workspace = true prost = "0.12" peer-cursor = { path = "../peer-cursor" } peer-postgres = { path = "../peer-postgres" } diff --git a/nexus/peer-bigquery/Cargo.toml b/nexus/peer-bigquery/Cargo.toml index 617b5f8acd..9bc6ac49a3 100644 --- a/nexus/peer-bigquery/Cargo.toml +++ b/nexus/peer-bigquery/Cargo.toml @@ -8,7 +8,7 @@ edition = "2021" [dependencies] anyhow = "1.0" 
async-trait = "0.1" -chrono = { version = "0.4", features = ["serde"] } +chrono.workspace = true dashmap = "5.0" futures = { version = "0.3.28", features = ["executor"] } peer-cursor = { path = "../peer-cursor" } diff --git a/nexus/peer-connections/Cargo.toml b/nexus/peer-connections/Cargo.toml index f74b1bf9b4..2fa5bc4ca6 100644 --- a/nexus/peer-connections/Cargo.toml +++ b/nexus/peer-connections/Cargo.toml @@ -7,7 +7,7 @@ edition = "2021" [dependencies] anyhow = "1.0" -chrono = { version = "0.4" } +chrono.workspace = true deadpool-postgres = { version = "0.12", features = ["rt_tokio_1"] } tokio = { version = "1", features = ["full"] } tokio-postgres = { version = "0.7.6", features = [ diff --git a/nexus/peer-postgres/Cargo.toml b/nexus/peer-postgres/Cargo.toml index e72e059f25..d9439e5e01 100644 --- a/nexus/peer-postgres/Cargo.toml +++ b/nexus/peer-postgres/Cargo.toml @@ -10,7 +10,7 @@ anyhow = "1.0" async-trait = "0.1" rust_decimal = { version = "1.30.0", features = [ "tokio-pg" ] } bytes = "1.0" -chrono = { version = "0.4", features = ["serde"] } +chrono.workspace = true futures = "0.3" peer-cursor = { path = "../peer-cursor" } peer-connections = { path = "../peer-connections" } diff --git a/nexus/peer-snowflake/Cargo.toml b/nexus/peer-snowflake/Cargo.toml index 97d4a6d50d..caf818f81f 100644 --- a/nexus/peer-snowflake/Cargo.toml +++ b/nexus/peer-snowflake/Cargo.toml @@ -6,27 +6,27 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -catalog = { path = "../catalog" } -peer-cursor = { path = "../peer-cursor" } -sqlparser.workspace = true -value = { path = "../value" } -tracing = "0.1" -secrecy = { version = "0.8.0" } +anyhow = "1.0" +async-recursion = "1.0.0" async-trait = "0.1.57" -jsonwebtoken = { version = "9.0", features = ["use_pem"] } base64 = "0.21" +catalog = { path = "../catalog" } +chrono.workspace = true dashmap = "5.0" +futures = "0.3" +hex = "0.4" +jsonwebtoken = { version = "9.0", features = ["use_pem"] } +peer-cursor = { path = "../peer-cursor" } pgwire.workspace = true -sha2 = "0.10" pt = { path = "../pt" } +reqwest = { version = "0.11", default-features = false, features = ["json", "gzip", "rustls-tls"] } rsa = { version = "0.9.2", features = ["pem", "pkcs5"] } +secrecy = { version = "0.8.0" } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" -futures = "0.3" -ureq = { version = "2", features = ["json", "charset"] } -reqwest = { version = "0.11", default-features = false, features = ["json", "gzip", "rustls-tls"] } -anyhow = "1.0" +sha2 = "0.10" +sqlparser.workspace = true tokio = { version = "1.21", features = ["full"] } -hex = "0.4" -chrono = { version = "0.4.22", default-features = false } -async-recursion = "1.0.0" +tracing = "0.1" +ureq = { version = "2", features = ["json", "charset"] } +value = { path = "../value" } diff --git a/nexus/value/Cargo.toml b/nexus/value/Cargo.toml index c3a8078e14..11fe5d9e32 100644 --- a/nexus/value/Cargo.toml +++ b/nexus/value/Cargo.toml @@ -7,14 +7,14 @@ edition = "2021" [dependencies] base64 = "0.21" -rust_decimal = { version = "1.30.0", features = [ "tokio-pg" ] } bytes = "1.1" +chrono.workspace = true +rust_decimal = { version = "1.30.0", features = [ "tokio-pg" ] } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" -postgres-inet = "0.19.0" -chrono = { version = "0.4", features = ["serde"] } hex = "0.4" pgwire.workspace = true postgres = { version = "0.19", features = ["with-chrono-0_4"] } +postgres-inet = 
"0.19.0" postgres-types = { version = "0.2.5", features = ["array-impls"] } uuid = { version = "1.0", features = ["serde", "v4"] } diff --git a/ui/package-lock.json b/ui/package-lock.json index c2a9f74420..e0141120f0 100644 --- a/ui/package-lock.json +++ b/ui/package-lock.json @@ -26,17 +26,17 @@ "@radix-ui/react-toggle": "^1.0.3", "@radix-ui/react-toggle-group": "^1.0.4", "@radix-ui/react-tooltip": "^1.0.7", - "@tremor/react": "^3.14.0", - "@types/node": "^20.11.20", - "@types/react": "^18.2.58", + "@tremor/react": "^3.14.1", + "@types/node": "^20.11.24", + "@types/react": "^18.2.62", "@types/react-dom": "^18.2.19", "classnames": "^2.5.1", "long": "^5.2.3", - "lucide-react": "^0.340.0", - "material-symbols": "^0.15.0", + "lucide-react": "^0.344.0", + "material-symbols": "^0.16.0", "moment": "^2.30.1", "moment-timezone": "^0.5.45", - "next": "^14.1.0", + "next": "^14.1.1", "next-auth": "^4.24.6", "prop-types": "^15.8.1", "protobufjs": "^7.2.6", @@ -59,10 +59,10 @@ "@storybook/nextjs": "^7.6.17", "@storybook/react": "^7.3.0", "@storybook/testing-library": "^0.2.2", - "autoprefixer": "^10.4.17", + "autoprefixer": "^10.4.18", "copy-webpack-plugin": "^12.0.2", "eslint": "^8.57.0", - "eslint-config-next": "^14.1.0", + "eslint-config-next": "^14.1.1", "eslint-config-prettier": "^9.1.0", "eslint-plugin-storybook": "^0.8.0", "gh-pages": "^6.1.1", @@ -100,13 +100,13 @@ } }, "node_modules/@ampproject/remapping": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.2.1.tgz", - "integrity": "sha512-lFMjJTrFL3j7L9yBxwYfCq2k6qqwHyzuUl/XBnif78PWTJYyL/dfowQHWE3sp6U6ZzqWiiIZnpTMO96zhkjwtg==", + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.3.0.tgz", + "integrity": "sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==", "dev": true, "dependencies": { - "@jridgewell/gen-mapping": "^0.3.0", - "@jridgewell/trace-mapping": "^0.3.9" + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" }, "engines": { "node": ">=6.0.0" @@ -210,9 +210,9 @@ } }, "node_modules/@babel/core": { - "version": "7.23.9", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.23.9.tgz", - "integrity": "sha512-5q0175NOjddqpvvzU+kDiSOAk4PfdO6FvwCWoQ6RO7rTzEe8vlo+4HVfcnAREhD4npMs0e9uZypjTwzZPCf/cw==", + "version": "7.24.0", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.24.0.tgz", + "integrity": "sha512-fQfkg0Gjkza3nf0c7/w6Xf34BW4YvzNfACRLmmb7XRLa6XHdR+K9AlJlxneFfWYf6uhOzuzZVTjF/8KfndZANw==", "dev": true, "dependencies": { "@ampproject/remapping": "^2.2.0", @@ -220,11 +220,11 @@ "@babel/generator": "^7.23.6", "@babel/helper-compilation-targets": "^7.23.6", "@babel/helper-module-transforms": "^7.23.3", - "@babel/helpers": "^7.23.9", - "@babel/parser": "^7.23.9", - "@babel/template": "^7.23.9", - "@babel/traverse": "^7.23.9", - "@babel/types": "^7.23.9", + "@babel/helpers": "^7.24.0", + "@babel/parser": "^7.24.0", + "@babel/template": "^7.24.0", + "@babel/traverse": "^7.24.0", + "@babel/types": "^7.24.0", "convert-source-map": "^2.0.0", "debug": "^4.1.0", "gensync": "^1.0.0-beta.2", @@ -295,9 +295,9 @@ } }, "node_modules/@babel/helper-create-class-features-plugin": { - "version": "7.23.10", - "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.23.10.tgz", - "integrity": "sha512-2XpP2XhkXzgxecPNEEK8Vz8Asj9aRxt08oKOqtiZoqV2UGZ5T+EkyP9sXQ9nwMxBIG34a7jmasVqoMop7VdPUw==", + "version": 
"7.24.0", + "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.24.0.tgz", + "integrity": "sha512-QAH+vfvts51BCsNZ2PhY6HAggnlS6omLLFTsIpeqZk/MmJ6cW7tgz5yRv0fMJThcr6FmbMrENh1RgrWPTYA76g==", "dev": true, "dependencies": { "@babel/helper-annotate-as-pure": "^7.22.5", @@ -439,9 +439,9 @@ } }, "node_modules/@babel/helper-plugin-utils": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.22.5.tgz", - "integrity": "sha512-uLls06UVKgFG9QD4OeFYLEGteMIAa5kpTPcFL28yuCIIzsf6ZyKZMllKVOCZFhiZ5ptnwX4mtKdWCBE/uT4amg==", + "version": "7.24.0", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.24.0.tgz", + "integrity": "sha512-9cUznXMG0+FxRuJfvL82QlTqIzhVW9sL0KjMPHhAOOvpQGL8QtdxnBKILjBqxlHyliz0yCa1G903ZXI/FuHy2w==", "dev": true, "engines": { "node": ">=6.9.0" @@ -557,14 +557,14 @@ } }, "node_modules/@babel/helpers": { - "version": "7.23.9", - "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.23.9.tgz", - "integrity": "sha512-87ICKgU5t5SzOT7sBMfCOZQ2rHjRU+Pcb9BoILMYz600W6DkVRLFBPwQ18gwUVvggqXivaUakpnxWQGbpywbBQ==", + "version": "7.24.0", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.24.0.tgz", + "integrity": "sha512-ulDZdc0Aj5uLc5nETsa7EPx2L7rM0YJM8r7ck7U73AXi7qOV44IHHRAYZHY6iU1rr3C5N4NtTmMRUJP6kwCWeA==", "dev": true, "dependencies": { - "@babel/template": "^7.23.9", - "@babel/traverse": "^7.23.9", - "@babel/types": "^7.23.9" + "@babel/template": "^7.24.0", + "@babel/traverse": "^7.24.0", + "@babel/types": "^7.24.0" }, "engines": { "node": ">=6.9.0" @@ -648,9 +648,9 @@ } }, "node_modules/@babel/parser": { - "version": "7.23.9", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.9.tgz", - "integrity": "sha512-9tcKgqKbs3xGJ+NtKF2ndOBBLVwPjl1SHxPQkd36r3Dlirw3xWUeGaTbqr7uGZcTaxkVNwc+03SVP7aCdWrTlA==", + "version": "7.24.0", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.24.0.tgz", + "integrity": "sha512-QuP/FxEAzMSjXygs8v4N9dvdXzEHN4W1oF3PxuWAtPo08UdM17u89RDMgjLn/mlc56iM0HlLmVkO/wgR+rDgHg==", "dev": true, "bin": { "parser": "bin/babel-parser.js" @@ -1498,14 +1498,14 @@ } }, "node_modules/@babel/plugin-transform-object-rest-spread": { - "version": "7.23.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-rest-spread/-/plugin-transform-object-rest-spread-7.23.4.tgz", - "integrity": "sha512-9x9K1YyeQVw0iOXJlIzwm8ltobIIv7j2iLyP2jIhEbqPRQ7ScNgwQufU2I0Gq11VjyG4gI4yMXt2VFags+1N3g==", + "version": "7.24.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-rest-spread/-/plugin-transform-object-rest-spread-7.24.0.tgz", + "integrity": "sha512-y/yKMm7buHpFFXfxVFS4Vk1ToRJDilIa6fKRioB9Vjichv58TDGXTvqV0dN7plobAmTW5eSEGXDngE+Mm+uO+w==", "dev": true, "dependencies": { - "@babel/compat-data": "^7.23.3", - "@babel/helper-compilation-targets": "^7.22.15", - "@babel/helper-plugin-utils": "^7.22.5", + "@babel/compat-data": "^7.23.5", + "@babel/helper-compilation-targets": "^7.23.6", + "@babel/helper-plugin-utils": "^7.24.0", "@babel/plugin-syntax-object-rest-spread": "^7.8.3", "@babel/plugin-transform-parameters": "^7.23.3" }, @@ -1726,13 +1726,13 @@ } }, "node_modules/@babel/plugin-transform-runtime": { - "version": "7.23.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.23.9.tgz", - "integrity": 
"sha512-A7clW3a0aSjm3ONU9o2HAILSegJCYlEZmOhmBRReVtIpY/Z/p7yIZ+wR41Z+UipwdGuqwtID/V/dOdZXjwi9gQ==", + "version": "7.24.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.24.0.tgz", + "integrity": "sha512-zc0GA5IitLKJrSfXlXmp8KDqLrnGECK7YRfQBmEKg1NmBOQ7e+KuclBEKJgzifQeUYLdNiAw4B4bjyvzWVLiSA==", "dev": true, "dependencies": { "@babel/helper-module-imports": "^7.22.15", - "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-plugin-utils": "^7.24.0", "babel-plugin-polyfill-corejs2": "^0.4.8", "babel-plugin-polyfill-corejs3": "^0.9.0", "babel-plugin-polyfill-regenerator": "^0.5.5", @@ -1903,14 +1903,14 @@ } }, "node_modules/@babel/preset-env": { - "version": "7.23.9", - "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.23.9.tgz", - "integrity": "sha512-3kBGTNBBk9DQiPoXYS0g0BYlwTQYUTifqgKTjxUwEUkduRT2QOa0FPGBJ+NROQhGyYO5BuTJwGvBnqKDykac6A==", + "version": "7.24.0", + "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.24.0.tgz", + "integrity": "sha512-ZxPEzV9IgvGn73iK0E6VB9/95Nd7aMFpbE0l8KQFDG70cOV9IxRP7Y2FUPmlK0v6ImlLqYX50iuZ3ZTVhOF2lA==", "dev": true, "dependencies": { "@babel/compat-data": "^7.23.5", "@babel/helper-compilation-targets": "^7.23.6", - "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-plugin-utils": "^7.24.0", "@babel/helper-validator-option": "^7.23.5", "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": "^7.23.3", "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.23.3", @@ -1963,7 +1963,7 @@ "@babel/plugin-transform-new-target": "^7.23.3", "@babel/plugin-transform-nullish-coalescing-operator": "^7.23.4", "@babel/plugin-transform-numeric-separator": "^7.23.4", - "@babel/plugin-transform-object-rest-spread": "^7.23.4", + "@babel/plugin-transform-object-rest-spread": "^7.24.0", "@babel/plugin-transform-object-super": "^7.23.3", "@babel/plugin-transform-optional-catch-binding": "^7.23.4", "@babel/plugin-transform-optional-chaining": "^7.23.4", @@ -1997,13 +1997,13 @@ } }, "node_modules/@babel/preset-flow": { - "version": "7.23.3", - "resolved": "https://registry.npmjs.org/@babel/preset-flow/-/preset-flow-7.23.3.tgz", - "integrity": "sha512-7yn6hl8RIv+KNk6iIrGZ+D06VhVY35wLVf23Cz/mMu1zOr7u4MMP4j0nZ9tLf8+4ZFpnib8cFYgB/oYg9hfswA==", + "version": "7.24.0", + "resolved": "https://registry.npmjs.org/@babel/preset-flow/-/preset-flow-7.24.0.tgz", + "integrity": "sha512-cum/nSi82cDaSJ21I4PgLTVlj0OXovFk6GRguJYe/IKg6y6JHLTbJhybtX4k35WT9wdeJfEVjycTixMhBHd0Dg==", "dev": true, "dependencies": { - "@babel/helper-plugin-utils": "^7.22.5", - "@babel/helper-validator-option": "^7.22.15", + "@babel/helper-plugin-utils": "^7.24.0", + "@babel/helper-validator-option": "^7.23.5", "@babel/plugin-transform-flow-strip-types": "^7.23.3" }, "engines": { @@ -2210,9 +2210,9 @@ "dev": true }, "node_modules/@babel/runtime": { - "version": "7.23.9", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.23.9.tgz", - "integrity": "sha512-0CX6F+BI2s9dkUqr08KFrAIZgNFj75rdBU/DjCyYLIaV/quFjkk6T+EJ2LkZHyZTbEV4L5p97mNkUsHl2wLFAw==", + "version": "7.24.0", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.24.0.tgz", + "integrity": "sha512-Chk32uHMg6TnQdvw2e9IlqPpFX/6NLuK0Ys2PqLb7/gL5uFn9mXvK715FGLlOLQrcO4qIkNHkvPGktzzXexsFw==", "dependencies": { "regenerator-runtime": "^0.14.0" }, @@ -2221,23 +2221,23 @@ } }, "node_modules/@babel/template": { - "version": "7.23.9", - "resolved": 
"https://registry.npmjs.org/@babel/template/-/template-7.23.9.tgz", - "integrity": "sha512-+xrD2BWLpvHKNmX2QbpdpsBaWnRxahMwJjO+KZk2JOElj5nSmKezyS1B4u+QbHMTX69t4ukm6hh9lsYQ7GHCKA==", + "version": "7.24.0", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.24.0.tgz", + "integrity": "sha512-Bkf2q8lMB0AFpX0NFEqSbx1OkTHf0f+0j82mkw+ZpzBnkk7e9Ql0891vlfgi+kHwOk8tQjiQHpqh4LaSa0fKEA==", "dev": true, "dependencies": { "@babel/code-frame": "^7.23.5", - "@babel/parser": "^7.23.9", - "@babel/types": "^7.23.9" + "@babel/parser": "^7.24.0", + "@babel/types": "^7.24.0" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/traverse": { - "version": "7.23.9", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.23.9.tgz", - "integrity": "sha512-I/4UJ9vs90OkBtY6iiiTORVMyIhJ4kAVmsKo9KFc8UOxMeUfi2hvtIBsET5u9GizXE6/GFSuKCTNfgCswuEjRg==", + "version": "7.24.0", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.24.0.tgz", + "integrity": "sha512-HfuJlI8qq3dEDmNU5ChzzpZRWq+oxCZQyMzIMEqLho+AQnhMnKQUzH6ydo3RBl/YjPCuk68Y6s0Gx0AeyULiWw==", "dev": true, "dependencies": { "@babel/code-frame": "^7.23.5", @@ -2246,8 +2246,8 @@ "@babel/helper-function-name": "^7.23.0", "@babel/helper-hoist-variables": "^7.22.5", "@babel/helper-split-export-declaration": "^7.22.6", - "@babel/parser": "^7.23.9", - "@babel/types": "^7.23.9", + "@babel/parser": "^7.24.0", + "@babel/types": "^7.24.0", "debug": "^4.3.1", "globals": "^11.1.0" }, @@ -2256,9 +2256,9 @@ } }, "node_modules/@babel/types": { - "version": "7.23.9", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.9.tgz", - "integrity": "sha512-dQjSq/7HaSjRM43FFGnv5keM2HsxpmyV1PfaSVm0nzzjwwTmjOe6J4bC8e3+pTEIgHaHj+1ZlLThRJ2auc/w1Q==", + "version": "7.24.0", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.24.0.tgz", + "integrity": "sha512-+j7a5c253RfKh8iABBhywc8NSfP5LURe7Uh4qpsh6jc+aLJguvmIUBdjSdEMQv2bENrCR5MfRdjGo7vzS/ob7w==", "dependencies": { "@babel/helper-string-parser": "^7.23.4", "@babel/helper-validator-identifier": "^7.22.20", @@ -2355,9 +2355,9 @@ "integrity": "sha512-W2P2c/VRW1/1tLox0mVUalvnWXxavmv/Oum2aPsRcoDJuob75FC3Y8FbpfLwUegRcxINtGUMPq0tFCvYNTBXNA==" }, "node_modules/@emotion/react": { - "version": "11.11.3", - "resolved": "https://registry.npmjs.org/@emotion/react/-/react-11.11.3.tgz", - "integrity": "sha512-Cnn0kuq4DoONOMcnoVsTOR8E+AdnKFf//6kUWc4LCdnxj31pZWn7rIULd6Y7/Js1PiPHzn7SKCM9vB/jBni8eA==", + "version": "11.11.4", + "resolved": "https://registry.npmjs.org/@emotion/react/-/react-11.11.4.tgz", + "integrity": "sha512-t8AjMlF0gHpvvxk5mAtCqR4vmxiGHCeJBaQO6gncUSdklELOgtwjerNY2yuJNfwnc6vi16U/+uMF+afIawJ9iw==", "dependencies": { "@babel/runtime": "^7.18.3", "@emotion/babel-plugin": "^11.11.0", @@ -3215,13 +3215,13 @@ } }, "node_modules/@jridgewell/gen-mapping": { - "version": "0.3.4", - "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.4.tgz", - "integrity": "sha512-Oud2QPM5dHviZNn4y/WhhYKSXksv+1xLEIsNrAbGcFzUN3ubqWRFT5gwPchNc5NuzILOU4tPBDTZ4VwhL8Y7cw==", + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.5.tgz", + "integrity": "sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg==", "dependencies": { - "@jridgewell/set-array": "^1.0.1", + "@jridgewell/set-array": "^1.2.1", "@jridgewell/sourcemap-codec": "^1.4.10", - "@jridgewell/trace-mapping": "^0.3.9" + "@jridgewell/trace-mapping": "^0.3.24" }, "engines": { "node": ">=6.0.0" @@ -3236,9 +3236,9 @@ 
} }, "node_modules/@jridgewell/set-array": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.1.2.tgz", - "integrity": "sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz", + "integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==", "engines": { "node": ">=6.0.0" } @@ -3259,9 +3259,9 @@ "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==" }, "node_modules/@jridgewell/trace-mapping": { - "version": "0.3.23", - "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.23.tgz", - "integrity": "sha512-9/4foRoUKp8s96tSkh8DlAAc5A0Ty8vLXld+l9gjKKY6ckwI8G15f0hskGmuLZu78ZlGa1vtsfOa+lnB4vG6Jg==", + "version": "0.3.25", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz", + "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==", "dependencies": { "@jridgewell/resolve-uri": "^3.1.0", "@jridgewell/sourcemap-codec": "^1.4.14" @@ -3374,23 +3374,23 @@ } }, "node_modules/@next/env": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/@next/env/-/env-14.1.0.tgz", - "integrity": "sha512-Py8zIo+02ht82brwwhTg36iogzFqGLPXlRGKQw5s+qP/kMNc4MAyDeEwBKDijk6zTIbegEgu8Qy7C1LboslQAw==" + "version": "14.1.1", + "resolved": "https://registry.npmjs.org/@next/env/-/env-14.1.1.tgz", + "integrity": "sha512-7CnQyD5G8shHxQIIg3c7/pSeYFeMhsNbpU/bmvH7ZnDql7mNRgg8O2JZrhrc/soFnfBnKP4/xXNiiSIPn2w8gA==" }, "node_modules/@next/eslint-plugin-next": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/@next/eslint-plugin-next/-/eslint-plugin-next-14.1.0.tgz", - "integrity": "sha512-x4FavbNEeXx/baD/zC/SdrvkjSby8nBn8KcCREqk6UuwvwoAPZmaV8TFCAuo/cpovBRTIY67mHhe86MQQm/68Q==", + "version": "14.1.1", + "resolved": "https://registry.npmjs.org/@next/eslint-plugin-next/-/eslint-plugin-next-14.1.1.tgz", + "integrity": "sha512-NP1WoGFnFLpqqCWgGFjnn/sTwUExdPyjeFKRdQP1X/bL/tjAQ/TXDmYqw6vzGaP5NaZ2u6xzg+N/0nd7fOPOGQ==", "dev": true, "dependencies": { "glob": "10.3.10" } }, "node_modules/@next/swc-darwin-arm64": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.1.0.tgz", - "integrity": "sha512-nUDn7TOGcIeyQni6lZHfzNoo9S0euXnu0jhsbMOmMJUBfgsnESdjN97kM7cBqQxZa8L/bM9om/S5/1dzCrW6wQ==", + "version": "14.1.1", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.1.1.tgz", + "integrity": "sha512-yDjSFKQKTIjyT7cFv+DqQfW5jsD+tVxXTckSe1KIouKk75t1qZmj/mV3wzdmFb0XHVGtyRjDMulfVG8uCKemOQ==", "cpu": [ "arm64" ], @@ -3403,9 +3403,9 @@ } }, "node_modules/@next/swc-darwin-x64": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.1.0.tgz", - "integrity": "sha512-1jgudN5haWxiAl3O1ljUS2GfupPmcftu2RYJqZiMJmmbBT5M1XDffjUtRUzP4W3cBHsrvkfOFdQ71hAreNQP6g==", + "version": "14.1.1", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.1.1.tgz", + "integrity": "sha512-KCQmBL0CmFmN8D64FHIZVD9I4ugQsDBBEJKiblXGgwn7wBCSe8N4Dx47sdzl4JAg39IkSN5NNrr8AniXLMb3aw==", "cpu": [ "x64" ], @@ -3418,9 +3418,9 @@ } }, "node_modules/@next/swc-linux-arm64-gnu": { - "version": "14.1.0", - "resolved": 
"https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.1.0.tgz", - "integrity": "sha512-RHo7Tcj+jllXUbK7xk2NyIDod3YcCPDZxj1WLIYxd709BQ7WuRYl3OWUNG+WUfqeQBds6kvZYlc42NJJTNi4tQ==", + "version": "14.1.1", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.1.1.tgz", + "integrity": "sha512-YDQfbWyW0JMKhJf/T4eyFr4b3tceTorQ5w2n7I0mNVTFOvu6CGEzfwT3RSAQGTi/FFMTFcuspPec/7dFHuP7Eg==", "cpu": [ "arm64" ], @@ -3433,9 +3433,9 @@ } }, "node_modules/@next/swc-linux-arm64-musl": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.1.0.tgz", - "integrity": "sha512-v6kP8sHYxjO8RwHmWMJSq7VZP2nYCkRVQ0qolh2l6xroe9QjbgV8siTbduED4u0hlk0+tjS6/Tuy4n5XCp+l6g==", + "version": "14.1.1", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.1.1.tgz", + "integrity": "sha512-fiuN/OG6sNGRN/bRFxRvV5LyzLB8gaL8cbDH5o3mEiVwfcMzyE5T//ilMmaTrnA8HLMS6hoz4cHOu6Qcp9vxgQ==", "cpu": [ "arm64" ], @@ -3448,9 +3448,9 @@ } }, "node_modules/@next/swc-linux-x64-gnu": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.1.0.tgz", - "integrity": "sha512-zJ2pnoFYB1F4vmEVlb/eSe+VH679zT1VdXlZKX+pE66grOgjmKJHKacf82g/sWE4MQ4Rk2FMBCRnX+l6/TVYzQ==", + "version": "14.1.1", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.1.1.tgz", + "integrity": "sha512-rv6AAdEXoezjbdfp3ouMuVqeLjE1Bin0AuE6qxE6V9g3Giz5/R3xpocHoAi7CufRR+lnkuUjRBn05SYJ83oKNQ==", "cpu": [ "x64" ], @@ -3463,9 +3463,9 @@ } }, "node_modules/@next/swc-linux-x64-musl": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.1.0.tgz", - "integrity": "sha512-rbaIYFt2X9YZBSbH/CwGAjbBG2/MrACCVu2X0+kSykHzHnYH5FjHxwXLkcoJ10cX0aWCEynpu+rP76x0914atg==", + "version": "14.1.1", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.1.1.tgz", + "integrity": "sha512-YAZLGsaNeChSrpz/G7MxO3TIBLaMN8QWMr3X8bt6rCvKovwU7GqQlDu99WdvF33kI8ZahvcdbFsy4jAFzFX7og==", "cpu": [ "x64" ], @@ -3478,9 +3478,9 @@ } }, "node_modules/@next/swc-win32-arm64-msvc": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.1.0.tgz", - "integrity": "sha512-o1N5TsYc8f/HpGt39OUQpQ9AKIGApd3QLueu7hXk//2xq5Z9OxmV6sQfNp8C7qYmiOlHYODOGqNNa0e9jvchGQ==", + "version": "14.1.1", + "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.1.1.tgz", + "integrity": "sha512-1L4mUYPBMvVDMZg1inUYyPvFSduot0g73hgfD9CODgbr4xiTYe0VOMTZzaRqYJYBA9mana0x4eaAaypmWo1r5A==", "cpu": [ "arm64" ], @@ -3493,9 +3493,9 @@ } }, "node_modules/@next/swc-win32-ia32-msvc": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.1.0.tgz", - "integrity": "sha512-XXIuB1DBRCFwNO6EEzCTMHT5pauwaSj4SWs7CYnME57eaReAKBXCnkUE80p/pAZcewm7hs+vGvNqDPacEXHVkw==", + "version": "14.1.1", + "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.1.1.tgz", + "integrity": "sha512-jvIE9tsuj9vpbbXlR5YxrghRfMuG0Qm/nZ/1KDHc+y6FpnZ/apsgh+G6t15vefU0zp3WSpTMIdXRUsNl/7RSuw==", "cpu": [ "ia32" ], @@ -3508,9 +3508,9 @@ } }, "node_modules/@next/swc-win32-x64-msvc": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.1.0.tgz", - "integrity": 
"sha512-9WEbVRRAqJ3YFVqEZIxUqkiO8l1nool1LmNxygr5HWF8AcSYsEpneUDhmjUVJEzO2A04+oPtZdombzzPPkTtgg==", + "version": "14.1.1", + "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.1.1.tgz", + "integrity": "sha512-S6K6EHDU5+1KrBDLko7/c1MNy/Ya73pIAmvKeFwsF4RmBFJSO7/7YeD4FnZ4iBdzE69PpQ4sOMU9ORKeNuxe8A==", "cpu": [ "x64" ], @@ -5413,9 +5413,9 @@ } }, "node_modules/@storybook/builder-webpack5/node_modules/@types/node": { - "version": "18.19.18", - "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.18.tgz", - "integrity": "sha512-80CP7B8y4PzZF0GWx15/gVWRrB5y/bIjNI84NK3cmQJu0WZwvmj2WMA5LcofQFVfLqqCSp545+U2LsrVzX36Zg==", + "version": "18.19.21", + "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.21.tgz", + "integrity": "sha512-2Q2NeB6BmiTFQi4DHBzncSoq/cJMLDdhPaAoJFnFCyD9a8VPZRf7a1GAwp1Edb7ROaZc5Jz/tnZyL6EsWMRaqw==", "dev": true, "dependencies": { "undici-types": "~5.26.4" @@ -5755,9 +5755,9 @@ } }, "node_modules/@storybook/core-common/node_modules/@types/node": { - "version": "18.19.18", - "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.18.tgz", - "integrity": "sha512-80CP7B8y4PzZF0GWx15/gVWRrB5y/bIjNI84NK3cmQJu0WZwvmj2WMA5LcofQFVfLqqCSp545+U2LsrVzX36Zg==", + "version": "18.19.21", + "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.21.tgz", + "integrity": "sha512-2Q2NeB6BmiTFQi4DHBzncSoq/cJMLDdhPaAoJFnFCyD9a8VPZRf7a1GAwp1Edb7ROaZc5Jz/tnZyL6EsWMRaqw==", "dev": true, "dependencies": { "undici-types": "~5.26.4" @@ -5830,9 +5830,9 @@ } }, "node_modules/@storybook/core-server/node_modules/@types/node": { - "version": "18.19.18", - "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.18.tgz", - "integrity": "sha512-80CP7B8y4PzZF0GWx15/gVWRrB5y/bIjNI84NK3cmQJu0WZwvmj2WMA5LcofQFVfLqqCSp545+U2LsrVzX36Zg==", + "version": "18.19.21", + "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.21.tgz", + "integrity": "sha512-2Q2NeB6BmiTFQi4DHBzncSoq/cJMLDdhPaAoJFnFCyD9a8VPZRf7a1GAwp1Edb7ROaZc5Jz/tnZyL6EsWMRaqw==", "dev": true, "dependencies": { "undici-types": "~5.26.4" @@ -5909,9 +5909,9 @@ } }, "node_modules/@storybook/core-webpack/node_modules/@types/node": { - "version": "18.19.18", - "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.18.tgz", - "integrity": "sha512-80CP7B8y4PzZF0GWx15/gVWRrB5y/bIjNI84NK3cmQJu0WZwvmj2WMA5LcofQFVfLqqCSp545+U2LsrVzX36Zg==", + "version": "18.19.21", + "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.21.tgz", + "integrity": "sha512-2Q2NeB6BmiTFQi4DHBzncSoq/cJMLDdhPaAoJFnFCyD9a8VPZRf7a1GAwp1Edb7ROaZc5Jz/tnZyL6EsWMRaqw==", "dev": true, "dependencies": { "undici-types": "~5.26.4" @@ -6109,9 +6109,9 @@ } }, "node_modules/@storybook/nextjs/node_modules/@types/node": { - "version": "18.19.18", - "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.18.tgz", - "integrity": "sha512-80CP7B8y4PzZF0GWx15/gVWRrB5y/bIjNI84NK3cmQJu0WZwvmj2WMA5LcofQFVfLqqCSp545+U2LsrVzX36Zg==", + "version": "18.19.21", + "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.21.tgz", + "integrity": "sha512-2Q2NeB6BmiTFQi4DHBzncSoq/cJMLDdhPaAoJFnFCyD9a8VPZRf7a1GAwp1Edb7ROaZc5Jz/tnZyL6EsWMRaqw==", "dev": true, "dependencies": { "undici-types": "~5.26.4" @@ -6254,9 +6254,9 @@ } }, "node_modules/@storybook/preset-react-webpack/node_modules/@types/node": { - "version": "18.19.18", - "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.18.tgz", - "integrity": 
"sha512-80CP7B8y4PzZF0GWx15/gVWRrB5y/bIjNI84NK3cmQJu0WZwvmj2WMA5LcofQFVfLqqCSp545+U2LsrVzX36Zg==", + "version": "18.19.21", + "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.21.tgz", + "integrity": "sha512-2Q2NeB6BmiTFQi4DHBzncSoq/cJMLDdhPaAoJFnFCyD9a8VPZRf7a1GAwp1Edb7ROaZc5Jz/tnZyL6EsWMRaqw==", "dev": true, "dependencies": { "undici-types": "~5.26.4" @@ -6411,9 +6411,9 @@ } }, "node_modules/@storybook/react/node_modules/@types/node": { - "version": "18.19.18", - "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.18.tgz", - "integrity": "sha512-80CP7B8y4PzZF0GWx15/gVWRrB5y/bIjNI84NK3cmQJu0WZwvmj2WMA5LcofQFVfLqqCSp545+U2LsrVzX36Zg==", + "version": "18.19.21", + "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.21.tgz", + "integrity": "sha512-2Q2NeB6BmiTFQi4DHBzncSoq/cJMLDdhPaAoJFnFCyD9a8VPZRf7a1GAwp1Edb7ROaZc5Jz/tnZyL6EsWMRaqw==", "dev": true, "dependencies": { "undici-types": "~5.26.4" @@ -6720,11 +6720,11 @@ "dev": true }, "node_modules/@tanstack/react-virtual": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/@tanstack/react-virtual/-/react-virtual-3.1.2.tgz", - "integrity": "sha512-qibmxtctgOZo2I+3Rw5GR9kXgaa15U5r3/idDY1ItUKW15UK7GhCfyIfE6qYuJ1fxQF6dJDsD8SbpPyuJgpxuA==", + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/@tanstack/react-virtual/-/react-virtual-3.1.3.tgz", + "integrity": "sha512-YCzcbF/Ws/uZ0q3Z6fagH+JVhx4JLvbSflgldMgLsuvB8aXjZLLb3HvrEVxY480F9wFlBiXlvQxOyXb5ENPrNA==", "dependencies": { - "@tanstack/virtual-core": "3.1.2" + "@tanstack/virtual-core": "3.1.3" }, "funding": { "type": "github", @@ -6736,9 +6736,9 @@ } }, "node_modules/@tanstack/virtual-core": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/@tanstack/virtual-core/-/virtual-core-3.1.2.tgz", - "integrity": "sha512-DATZJs8iejkIUqXZe6ruDAnjFo78BKnIIgqQZrc7CmEFqfLEN/TPD91n4hRfo6hpRB6xC00bwKxv7vdjFNEmOg==", + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/@tanstack/virtual-core/-/virtual-core-3.1.3.tgz", + "integrity": "sha512-Y5B4EYyv1j9V8LzeAoOVeTg0LI7Fo5InYKgAjkY1Pu9GjtUwX/EKxNcU7ng3sKr99WEf+bPTcktAeybyMOYo+g==", "funding": { "type": "github", "url": "https://github.com/sponsors/tannerlinsley" @@ -6777,9 +6777,9 @@ } }, "node_modules/@tremor/react": { - "version": "3.14.0", - "resolved": "https://registry.npmjs.org/@tremor/react/-/react-3.14.0.tgz", - "integrity": "sha512-bDIaId3js6S0LMhSypLN31l98t13XwPmhF6B1NIZUId/zZwnuE25z95VbKUL8NzHuCETIXSAp+Mm+OyA9EeAFw==", + "version": "3.14.1", + "resolved": "https://registry.npmjs.org/@tremor/react/-/react-3.14.1.tgz", + "integrity": "sha512-0LMxFIeBXsAaPnR6mXRK4fbZaTNLFfVngFpoOt+6Tf797k/c6yUkB48/QPB5vO02qzkV74D91hng9r6HwfDW5g==", "dependencies": { "@floating-ui/react": "^0.19.2", "@headlessui/react": "^1.7.18", @@ -6955,9 +6955,9 @@ "dev": true }, "node_modules/@types/eslint": { - "version": "8.56.3", - "resolved": "https://registry.npmjs.org/@types/eslint/-/eslint-8.56.3.tgz", - "integrity": "sha512-PvSf1wfv2wJpVIFUMSb+i4PvqNYkB9Rkp9ZDO3oaWzq4SKhsQk4mrMBr3ZH06I0hKrVGLBacmgl8JM4WVjb9dg==", + "version": "8.56.5", + "resolved": "https://registry.npmjs.org/@types/eslint/-/eslint-8.56.5.tgz", + "integrity": "sha512-u5/YPJHo1tvkSF2CE0USEkxon82Z5DBy2xR+qfyYNszpX9qcs4sT6uq2kBbj4BXY1+DBGDPnrhMZV3pKWGNukw==", "dev": true, "dependencies": { "@types/estree": "*", @@ -7092,9 +7092,9 @@ "dev": true }, "node_modules/@types/node": { - "version": "20.11.20", - "resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.20.tgz", - "integrity": 
"sha512-7/rR21OS+fq8IyHTgtLkDK949uzsa6n8BkziAKtPVpugIkO6D+/ooXMvzXxDnZrmtXVfjb1bKQafYpb8s89LOg==", + "version": "20.11.24", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.24.tgz", + "integrity": "sha512-Kza43ewS3xoLgCEpQrsT+xRo/EJej1y0kVYGiLFE1NEODXGzTfwiC6tXTLMQskn1X4/Rjlh0MQUvx9W+L9long==", "dependencies": { "undici-types": "~5.26.4" } @@ -7132,9 +7132,9 @@ "integrity": "sha512-ga8y9v9uyeiLdpKddhxYQkxNDrfvuPrlFb0N1qnZZByvcElJaXthF1UhvCh9TLWJBEHeNtdnbysW7Y6Uq8CVng==" }, "node_modules/@types/qs": { - "version": "6.9.11", - "resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.9.11.tgz", - "integrity": "sha512-oGk0gmhnEJK4Yyk+oI7EfXsLayXatCWPHary1MtcmbAifkobT9cM9yutG/hZKIseOU0MqbIwQ/u2nn/Gb+ltuQ==", + "version": "6.9.12", + "resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.9.12.tgz", + "integrity": "sha512-bZcOkJ6uWrL0Qb2NAWKa7TBU+mJHPzhx9jjLL1KHF+XpzEcR7EXHvjbHlGtR/IsP1vyPrehuS6XqkmaePy//mg==", "dev": true }, "node_modules/@types/range-parser": { @@ -7144,9 +7144,9 @@ "dev": true }, "node_modules/@types/react": { - "version": "18.2.58", - "resolved": "https://registry.npmjs.org/@types/react/-/react-18.2.58.tgz", - "integrity": "sha512-TaGvMNhxvG2Q0K0aYxiKfNDS5m5ZsoIBBbtfUorxdH4NGSXIlYvZxLJI+9Dd3KjeB3780bciLyAb7ylO8pLhPw==", + "version": "18.2.62", + "resolved": "https://registry.npmjs.org/@types/react/-/react-18.2.62.tgz", + "integrity": "sha512-l3f57BbaEKP0xcFzf+5qRG8/PXykZiuVM6eEoPtqBPCp6dxO3HhDkLIgIyXPhPKNAeXn3KO2pEaNgzaEo/asaw==", "dependencies": { "@types/prop-types": "*", "@types/scheduler": "*", @@ -8157,6 +8157,25 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/array.prototype.findlast": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/array.prototype.findlast/-/array.prototype.findlast-1.2.4.tgz", + "integrity": "sha512-BMtLxpV+8BD+6ZPFIWmnUBpQoy+A+ujcg4rhp2iwCRJYA7PEh2MS4NL3lz8EiDlLrJPp2hg9qWihr5pd//jcGw==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.5", + "define-properties": "^1.2.1", + "es-abstract": "^1.22.3", + "es-errors": "^1.3.0", + "es-shim-unscopables": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/array.prototype.findlastindex": { "version": "1.2.4", "resolved": "https://registry.npmjs.org/array.prototype.findlastindex/-/array.prototype.findlastindex-1.2.4.tgz", @@ -8212,6 +8231,18 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/array.prototype.toreversed": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/array.prototype.toreversed/-/array.prototype.toreversed-1.1.2.tgz", + "integrity": "sha512-wwDCoT4Ck4Cz7sLtgUmzR5UV3YF5mFHUlbChCzZBQZ+0m2cl/DH3tKgvphv1nKgFsJ48oCSg6p91q2Vm0I/ZMA==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "es-shim-unscopables": "^1.0.0" + } + }, "node_modules/array.prototype.tosorted": { "version": "1.1.3", "resolved": "https://registry.npmjs.org/array.prototype.tosorted/-/array.prototype.tosorted-1.1.3.tgz", @@ -8324,9 +8355,9 @@ "dev": true }, "node_modules/autoprefixer": { - "version": "10.4.17", - "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.17.tgz", - "integrity": "sha512-/cpVNRLSfhOtcGflT13P2794gVSgmPgTR+erw5ifnMLZb0UnSlkK4tquLmkd3BhA+nLo5tX8Cu0upUsGKvKbmg==", + "version": "10.4.18", + "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.18.tgz", + "integrity": 
"sha512-1DKbDfsr6KUElM6wg+0zRNkB/Q7WcKYAaK+pzXn+Xqmszm/5Xa9coeNdtP88Vi+dPzZnMjhge8GIV49ZQkDa+g==", "dev": true, "funding": [ { @@ -8343,8 +8374,8 @@ } ], "dependencies": { - "browserslist": "^4.22.2", - "caniuse-lite": "^1.0.30001578", + "browserslist": "^4.23.0", + "caniuse-lite": "^1.0.30001591", "fraction.js": "^4.3.7", "normalize-range": "^0.1.2", "picocolors": "^1.0.0", @@ -8672,16 +8703,16 @@ "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" }, "node_modules/bare-events": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/bare-events/-/bare-events-2.2.0.tgz", - "integrity": "sha512-Yyyqff4PIFfSuthCZqLlPISTWHmnQxoPuAvkmgzsJEmG3CesdIv6Xweayl0JkCZJSB2yYIdJyEz97tpxNhgjbg==", + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/bare-events/-/bare-events-2.2.1.tgz", + "integrity": "sha512-9GYPpsPFvrWBkelIhOhTWtkeZxVxZOdb3VnFTCzlOo3OjvmTvzLoZFUT8kNFACx0vJej6QPney1Cf9BvzCNE/A==", "dev": true, "optional": true }, "node_modules/bare-fs": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/bare-fs/-/bare-fs-2.2.0.tgz", - "integrity": "sha512-+VhW202E9eTVGkX7p+TNXtZC4RTzj9JfJW7PtfIbZ7mIQ/QT9uOafQTx7lx2n9ERmWsXvLHF4hStAFn4gl2mQw==", + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/bare-fs/-/bare-fs-2.2.1.tgz", + "integrity": "sha512-+CjmZANQDFZWy4PGbVdmALIwmt33aJg8qTkVjClU6X4WmZkTPBDxRHiBn7fpqEWEfF3AC2io++erpViAIQbSjg==", "dev": true, "optional": true, "dependencies": { @@ -8822,13 +8853,13 @@ "dev": true }, "node_modules/body-parser": { - "version": "1.20.1", - "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.1.tgz", - "integrity": "sha512-jWi7abTbYwajOytWCQc37VulmWiRae5RyTpaCyDcS5/lMdtwSz5lOpDE67srw/HYe35f1z3fDQw+3txg7gNtWw==", + "version": "1.20.2", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.2.tgz", + "integrity": "sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA==", "dev": true, "dependencies": { "bytes": "3.1.2", - "content-type": "~1.0.4", + "content-type": "~1.0.5", "debug": "2.6.9", "depd": "2.0.0", "destroy": "1.2.0", @@ -8836,7 +8867,7 @@ "iconv-lite": "0.4.24", "on-finished": "2.4.1", "qs": "6.11.0", - "raw-body": "2.5.1", + "raw-body": "2.5.2", "type-is": "~1.6.18", "unpipe": "1.0.0" }, @@ -9191,9 +9222,9 @@ } }, "node_modules/caniuse-lite": { - "version": "1.0.30001589", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001589.tgz", - "integrity": "sha512-vNQWS6kI+q6sBlHbh71IIeC+sRwK2N3EDySc/updIGhIee2x5z00J4c1242/5/d6EpEMdOnk/m+6tuk4/tcsqg==", + "version": "1.0.30001593", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001593.tgz", + "integrity": "sha512-UWM1zlo3cZfkpBysd7AS+z+v007q9G1+fLTUU42rQnY6t2axoogPW/xol6T7juU5EUoOhML4WgBIdG+9yYqAjQ==", "funding": [ { "type": "opencollective", @@ -10816,9 +10847,9 @@ } }, "node_modules/electron-to-chromium": { - "version": "1.4.681", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.681.tgz", - "integrity": "sha512-1PpuqJUFWoXZ1E54m8bsLPVYwIVCRzvaL+n5cjigGga4z854abDnFRc+cTa2th4S79kyGqya/1xoR7h+Y5G5lg==", + "version": "1.4.691", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.691.tgz", + "integrity": "sha512-vJ+/LmKja/St8Ofq4JGMFVZuwG7ECU6akjNSn2/g6nv8xbIBOWGlEs+WA8/3XaWkU0Nlyu0iFGgOxC4mpgFjgA==", "dev": true }, "node_modules/elliptic": { @@ -10892,9 +10923,9 @@ } }, "node_modules/enhanced-resolve": { 
- "version": "5.15.0", - "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.15.0.tgz", - "integrity": "sha512-LXYT42KJ7lpIKECr2mAXIaMldcNCh/7E0KBKOu4KSfkHmP+mZmSs+8V5gBAqisWBy0OO4W5Oyys0GO1Y8KtdKg==", + "version": "5.15.1", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.15.1.tgz", + "integrity": "sha512-3d3JRbwsCLJsYgvb6NuWEG44jjPSOMuS73L/6+7BZuoKm3W+qXnSoIYVHi8dG7Qcg4inAY4jbzkZ7MnskePeDg==", "dev": true, "dependencies": { "graceful-fs": "^4.2.4", @@ -10956,18 +10987,18 @@ } }, "node_modules/es-abstract": { - "version": "1.22.4", - "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.22.4.tgz", - "integrity": "sha512-vZYJlk2u6qHYxBOTjAeg7qUxHdNfih64Uu2J8QqWgXZ2cri0ZpJAkzDUK/q593+mvKwlxyaxr6F1Q+3LKoQRgg==", + "version": "1.22.5", + "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.22.5.tgz", + "integrity": "sha512-oW69R+4q2wG+Hc3KZePPZxOiisRIqfKBVo/HLx94QcJeWGU/8sZhCvc829rd1kS366vlJbzBfXf9yWwf0+Ko7w==", "dev": true, "dependencies": { "array-buffer-byte-length": "^1.0.1", "arraybuffer.prototype.slice": "^1.0.3", - "available-typed-arrays": "^1.0.6", + "available-typed-arrays": "^1.0.7", "call-bind": "^1.0.7", "es-define-property": "^1.0.0", "es-errors": "^1.3.0", - "es-set-tostringtag": "^2.0.2", + "es-set-tostringtag": "^2.0.3", "es-to-primitive": "^1.2.1", "function.prototype.name": "^1.1.6", "get-intrinsic": "^1.2.4", @@ -10975,15 +11006,15 @@ "globalthis": "^1.0.3", "gopd": "^1.0.1", "has-property-descriptors": "^1.0.2", - "has-proto": "^1.0.1", + "has-proto": "^1.0.3", "has-symbols": "^1.0.3", "hasown": "^2.0.1", "internal-slot": "^1.0.7", "is-array-buffer": "^3.0.4", "is-callable": "^1.2.7", - "is-negative-zero": "^2.0.2", + "is-negative-zero": "^2.0.3", "is-regex": "^1.1.4", - "is-shared-array-buffer": "^1.0.2", + "is-shared-array-buffer": "^1.0.3", "is-string": "^1.0.7", "is-typed-array": "^1.1.13", "is-weakref": "^1.0.2", @@ -10996,10 +11027,10 @@ "string.prototype.trim": "^1.2.8", "string.prototype.trimend": "^1.0.7", "string.prototype.trimstart": "^1.0.7", - "typed-array-buffer": "^1.0.1", - "typed-array-byte-length": "^1.0.0", - "typed-array-byte-offset": "^1.0.0", - "typed-array-length": "^1.0.4", + "typed-array-buffer": "^1.0.2", + "typed-array-byte-length": "^1.0.1", + "typed-array-byte-offset": "^1.0.2", + "typed-array-length": "^1.0.5", "unbox-primitive": "^1.0.2", "which-typed-array": "^1.1.14" }, @@ -11296,12 +11327,12 @@ } }, "node_modules/eslint-config-next": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/eslint-config-next/-/eslint-config-next-14.1.0.tgz", - "integrity": "sha512-SBX2ed7DoRFXC6CQSLc/SbLY9Ut6HxNB2wPTcoIWjUMd7aF7O/SIE7111L8FdZ9TXsNV4pulUDnfthpyPtbFUg==", + "version": "14.1.1", + "resolved": "https://registry.npmjs.org/eslint-config-next/-/eslint-config-next-14.1.1.tgz", + "integrity": "sha512-OLyw2oHzwE0M0EODGYMbjksDQKSshQWBzYY+Nkoxoe3+Q5G0lpb9EkekyDk7Foz9BMfotbYShJrgYoBEAVqU4Q==", "dev": true, "dependencies": { - "@next/eslint-plugin-next": "14.1.0", + "@next/eslint-plugin-next": "14.1.1", "@rushstack/eslint-patch": "^1.3.3", "@typescript-eslint/parser": "^5.4.2 || ^6.0.0", "eslint-import-resolver-node": "^0.3.6", @@ -11379,9 +11410,9 @@ } }, "node_modules/eslint-module-utils": { - "version": "2.8.0", - "resolved": "https://registry.npmjs.org/eslint-module-utils/-/eslint-module-utils-2.8.0.tgz", - "integrity": "sha512-aWajIYfsqCKRDgUfjEXNN/JlrzauMuSEy5sbd7WXbtW3EH6A6MpwEh42c7qD+MqQo9QMJ6fWLAeIJynx0g6OAw==", + "version": "2.8.1", + "resolved": 
"https://registry.npmjs.org/eslint-module-utils/-/eslint-module-utils-2.8.1.tgz", + "integrity": "sha512-rXDXR3h7cs7dy9RNpUlQf80nX31XWJEyGq1tRMo+6GsO5VmTe4UTwtmonAD4ZkAsrfMVDA2wlGJ3790Ys+D49Q==", "dev": true, "dependencies": { "debug": "^3.2.7" @@ -11520,27 +11551,29 @@ } }, "node_modules/eslint-plugin-react": { - "version": "7.33.2", - "resolved": "https://registry.npmjs.org/eslint-plugin-react/-/eslint-plugin-react-7.33.2.tgz", - "integrity": "sha512-73QQMKALArI8/7xGLNI/3LylrEYrlKZSb5C9+q3OtOewTnMQi5cT+aE9E41sLCmli3I9PGGmD1yiZydyo4FEPw==", + "version": "7.34.0", + "resolved": "https://registry.npmjs.org/eslint-plugin-react/-/eslint-plugin-react-7.34.0.tgz", + "integrity": "sha512-MeVXdReleBTdkz/bvcQMSnCXGi+c9kvy51IpinjnJgutl3YTHWsDdke7Z1ufZpGfDG8xduBDKyjtB9JH1eBKIQ==", "dev": true, "dependencies": { - "array-includes": "^3.1.6", - "array.prototype.flatmap": "^1.3.1", - "array.prototype.tosorted": "^1.1.1", + "array-includes": "^3.1.7", + "array.prototype.findlast": "^1.2.4", + "array.prototype.flatmap": "^1.3.2", + "array.prototype.toreversed": "^1.1.2", + "array.prototype.tosorted": "^1.1.3", "doctrine": "^2.1.0", - "es-iterator-helpers": "^1.0.12", + "es-iterator-helpers": "^1.0.17", "estraverse": "^5.3.0", "jsx-ast-utils": "^2.4.1 || ^3.0.0", "minimatch": "^3.1.2", - "object.entries": "^1.1.6", - "object.fromentries": "^2.0.6", - "object.hasown": "^1.1.2", - "object.values": "^1.1.6", + "object.entries": "^1.1.7", + "object.fromentries": "^2.0.7", + "object.hasown": "^1.1.3", + "object.values": "^1.1.7", "prop-types": "^15.8.1", - "resolve": "^2.0.0-next.4", + "resolve": "^2.0.0-next.5", "semver": "^6.3.1", - "string.prototype.matchall": "^4.0.8" + "string.prototype.matchall": "^4.0.10" }, "engines": { "node": ">=4" @@ -11855,14 +11888,14 @@ } }, "node_modules/express": { - "version": "4.18.2", - "resolved": "https://registry.npmjs.org/express/-/express-4.18.2.tgz", - "integrity": "sha512-5/PsL6iGPdfQ/lKM1UuielYgv3BUoJfz1aUwU9vHZ+J7gyvwdQXFEBIEIaxeGf0GIcreATNyBExtalisDbuMqQ==", + "version": "4.18.3", + "resolved": "https://registry.npmjs.org/express/-/express-4.18.3.tgz", + "integrity": "sha512-6VyCijWQ+9O7WuVMTRBTl+cjNNIzD5cY5mQ1WM8r/LEkI2u8EYpOotESNwzNlyCn3g+dmjKYI6BmNneSr/FSRw==", "dev": true, "dependencies": { "accepts": "~1.3.8", "array-flatten": "1.1.1", - "body-parser": "1.20.1", + "body-parser": "1.20.2", "content-disposition": "0.5.4", "content-type": "~1.0.4", "cookie": "0.5.0", @@ -13187,9 +13220,9 @@ "dev": true }, "node_modules/html-entities": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/html-entities/-/html-entities-2.4.0.tgz", - "integrity": "sha512-igBTJcNNNhvZFRtm8uA6xMY6xYleeDwn3PeBCkDz7tHttv4F2hsDI2aPgNERWzvRcNYHNT3ymRaQzllmXj4YsQ==", + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/html-entities/-/html-entities-2.5.2.tgz", + "integrity": "sha512-K//PSRMQk4FZ78Kyau+mZurHn3FH0Vwr+H36eE0rPbeYkRRi9YxceYPhuN60UwWorxyKHhqoAJl2OFKa4BVtaA==", "dev": true, "funding": [ { @@ -14678,9 +14711,9 @@ } }, "node_modules/lucide-react": { - "version": "0.340.0", - "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.340.0.tgz", - "integrity": "sha512-mWzYhbyy2d+qKuKHh+GWElPwa+kIquTnKbmSLGWOuZy+bjfZCkYD8DQWVFlqI4mQwc4HNxcqcOvtQ7ZS2PwURg==", + "version": "0.344.0", + "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.344.0.tgz", + "integrity": "sha512-6YyBnn91GB45VuVT96bYCOKElbJzUHqp65vX8cDcu55MQL9T969v4dhGClpljamuI/+KMO9P6w9Acq1CVQGvIQ==", "peerDependencies": { "react": "^16.5.1 || ^17.0.0 || ^18.0.0" } @@ 
-14695,9 +14728,9 @@ } }, "node_modules/magic-string": { - "version": "0.30.7", - "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.7.tgz", - "integrity": "sha512-8vBuFF/I/+OSLRmdf2wwFCJCz+nSn0m6DPvGH1fS/KiQoSaR+sETbov0eIk9KhEKy8CYqIkIAnbohxT/4H0kuA==", + "version": "0.30.8", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.8.tgz", + "integrity": "sha512-ISQTe55T2ao7XtlAStud6qwYPZjE4GK1S/BeVPus4jrq6JuOnQ00YKQC581RWhR122W7msZV263KzVeLoqidyQ==", "dev": true, "dependencies": { "@jridgewell/sourcemap-codec": "^1.4.15" @@ -14749,9 +14782,9 @@ } }, "node_modules/material-symbols": { - "version": "0.15.0", - "resolved": "https://registry.npmjs.org/material-symbols/-/material-symbols-0.15.0.tgz", - "integrity": "sha512-216LRlmN8fZb0CoIOaQBmRZ55BptWcd7z//0v7dXQA6aogsvI9Qp1nMQ5jZ44dbgBXntUQzWdB5Q2D+6bJXioA==" + "version": "0.16.0", + "resolved": "https://registry.npmjs.org/material-symbols/-/material-symbols-0.16.0.tgz", + "integrity": "sha512-ZgX0TEQQnaAPvy9jSg6WEoBfkkL95URJ5dGHsqU9cIdXVB3K6/T+IH8l7E1ZAl4HFmaZ0hMPBBQz8DoBkoaygg==" }, "node_modules/md5.js": { "version": "1.3.5", @@ -15151,11 +15184,11 @@ "dev": true }, "node_modules/next": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/next/-/next-14.1.0.tgz", - "integrity": "sha512-wlzrsbfeSU48YQBjZhDzOwhWhGsy+uQycR8bHAOt1LY1bn3zZEcDyHQOEoN3aWzQ8LHCAJ1nqrWCc9XF2+O45Q==", + "version": "14.1.1", + "resolved": "https://registry.npmjs.org/next/-/next-14.1.1.tgz", + "integrity": "sha512-McrGJqlGSHeaz2yTRPkEucxQKe5Zq7uPwyeHNmJaZNY4wx9E9QdxmTp310agFRoMuIYgQrCrT3petg13fSVOww==", "dependencies": { - "@next/env": "14.1.0", + "@next/env": "14.1.1", "@swc/helpers": "0.5.2", "busboy": "1.6.0", "caniuse-lite": "^1.0.30001579", @@ -15170,15 +15203,15 @@ "node": ">=18.17.0" }, "optionalDependencies": { - "@next/swc-darwin-arm64": "14.1.0", - "@next/swc-darwin-x64": "14.1.0", - "@next/swc-linux-arm64-gnu": "14.1.0", - "@next/swc-linux-arm64-musl": "14.1.0", - "@next/swc-linux-x64-gnu": "14.1.0", - "@next/swc-linux-x64-musl": "14.1.0", - "@next/swc-win32-arm64-msvc": "14.1.0", - "@next/swc-win32-ia32-msvc": "14.1.0", - "@next/swc-win32-x64-msvc": "14.1.0" + "@next/swc-darwin-arm64": "14.1.1", + "@next/swc-darwin-x64": "14.1.1", + "@next/swc-linux-arm64-gnu": "14.1.1", + "@next/swc-linux-arm64-musl": "14.1.1", + "@next/swc-linux-x64-gnu": "14.1.1", + "@next/swc-linux-x64-musl": "14.1.1", + "@next/swc-win32-arm64-msvc": "14.1.1", + "@next/swc-win32-ia32-msvc": "14.1.1", + "@next/swc-win32-x64-msvc": "14.1.1" }, "peerDependencies": { "@opentelemetry/api": "^1.1.0", @@ -15476,15 +15509,16 @@ } }, "node_modules/nypm": { - "version": "0.3.6", - "resolved": "https://registry.npmjs.org/nypm/-/nypm-0.3.6.tgz", - "integrity": "sha512-2CATJh3pd6CyNfU5VZM7qSwFu0ieyabkEdnogE30Obn1czrmOYiZ8DOZLe1yBdLKWoyD3Mcy2maUs+0MR3yVjQ==", + "version": "0.3.8", + "resolved": "https://registry.npmjs.org/nypm/-/nypm-0.3.8.tgz", + "integrity": "sha512-IGWlC6So2xv6V4cIDmoV0SwwWx7zLG086gyqkyumteH2fIgCAM4nDVFB2iDRszDvmdSVW9xb1N+2KjQ6C7d4og==", "dev": true, "dependencies": { - "citty": "^0.1.5", + "citty": "^0.1.6", + "consola": "^3.2.3", "execa": "^8.0.1", "pathe": "^1.1.2", - "ufo": "^1.3.2" + "ufo": "^1.4.0" }, "bin": { "nypm": "dist/cli.mjs" @@ -15646,13 +15680,13 @@ } }, "node_modules/object-is": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/object-is/-/object-is-1.1.5.tgz", - "integrity": "sha512-3cyDsyHgtmi7I7DfSSI2LDp6SK2lwvtbg0p0R1e0RvTqF5ceGx+K2dfSjm1bKDMVCFEDAQvy+o8c6a7VujOddw==", + "version": 
"1.1.6", + "resolved": "https://registry.npmjs.org/object-is/-/object-is-1.1.6.tgz", + "integrity": "sha512-F8cZ+KfGlSGi09lJT7/Nd6KJZ9ygtvYC0/UYYLI9nmQKLMnydpB9yvbv9K1uSkEu7FU9vYPmVwLg328tX+ot3Q==", "dev": true, "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3" + "call-bind": "^1.0.7", + "define-properties": "^1.2.1" }, "engines": { "node": ">= 0.4" @@ -16606,9 +16640,9 @@ "integrity": "sha512-WuxUnVtlWL1OfZFQFuqvnvs6MiAGk9UNsBostyBOB0Is9wb5uRESevA6rnl/rkksXaGX3GzZhPup5d6Vp1nFew==" }, "node_modules/prebuild-install": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/prebuild-install/-/prebuild-install-7.1.1.tgz", - "integrity": "sha512-jAXscXWMcCK8GgCoHOfIr0ODh5ai8mj63L2nWrjuAgXE6tDyYGnx4/8o/rCgU+B4JSyZBKbeZqzhtwtC3ovxjw==", + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/prebuild-install/-/prebuild-install-7.1.2.tgz", + "integrity": "sha512-UnNke3IQb6sgarcZIDU3gbMeTp/9SSU1DAIkil7PrqG1vZlBtY5msYccSKSHDqa3hNg436IXK+SNImReuA1wEQ==", "dev": true, "dependencies": { "detect-libc": "^2.0.0", @@ -17116,9 +17150,9 @@ } }, "node_modules/raw-body": { - "version": "2.5.1", - "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.1.tgz", - "integrity": "sha512-qqJBtEyVgS0ZmPGdCFPWJ3FreoqvG4MVQln/kCgF7Olq95IbOp0/BWyMwbdtn4VTvkM8Y7khCQ2Xgk/tcrCXig==", + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz", + "integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==", "dev": true, "dependencies": { "bytes": "3.1.2", @@ -17556,15 +17590,15 @@ } }, "node_modules/recast": { - "version": "0.23.4", - "resolved": "https://registry.npmjs.org/recast/-/recast-0.23.4.tgz", - "integrity": "sha512-qtEDqIZGVcSZCHniWwZWbRy79Dc6Wp3kT/UmDA2RJKBPg7+7k51aQBZirHmUGn5uvHf2rg8DkjizrN26k61ATw==", + "version": "0.23.5", + "resolved": "https://registry.npmjs.org/recast/-/recast-0.23.5.tgz", + "integrity": "sha512-M67zIddJiwXdfPQRYKJ0qZO1SLdH1I0hYeb0wzxA+pNOvAZiQHulWzuk+fYsEWRQ8VfZrgjyucqsCOtCyM01/A==", "dev": true, "dependencies": { - "assert": "^2.0.0", "ast-types": "^0.16.1", "esprima": "~4.0.0", "source-map": "~0.6.1", + "tiny-invariant": "^1.3.3", "tslib": "^2.0.1" }, "engines": { @@ -17581,9 +17615,9 @@ } }, "node_modules/recharts": { - "version": "2.12.1", - "resolved": "https://registry.npmjs.org/recharts/-/recharts-2.12.1.tgz", - "integrity": "sha512-35vUCEBPf+pM+iVgSgVTn86faKya5pc4JO6cYJL63qOK2zDEyzDn20Tdj+CDI/3z+VcpKyQ8ZBQ9OiQ+vuAbjg==", + "version": "2.12.2", + "resolved": "https://registry.npmjs.org/recharts/-/recharts-2.12.2.tgz", + "integrity": "sha512-9bpxjXSF5g81YsKkTSlaX7mM4b6oYI1mIYck6YkUcWuL3tomADccI51/6thY4LmvhYuRTwpfrOvE80Zc3oBRfQ==", "dependencies": { "clsx": "^2.0.0", "eventemitter3": "^4.0.1", @@ -18343,12 +18377,12 @@ } }, "node_modules/side-channel": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.5.tgz", - "integrity": "sha512-QcgiIWV4WV7qWExbN5llt6frQB/lBven9pqliLXfGPB+K9ZYXxDozp0wLkHS24kWCm+6YXH/f0HhnObZnZOBnQ==", + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz", + "integrity": "sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==", "dev": true, "dependencies": { - "call-bind": "^1.0.6", + "call-bind": "^1.0.7", "es-errors": "^1.3.0", "get-intrinsic": "^1.2.4", "object-inspect": "^1.13.1" @@ -20022,13 +20056,13 @@ } }, "node_modules/unplugin": { - "version": "1.7.1", - "resolved": 
"https://registry.npmjs.org/unplugin/-/unplugin-1.7.1.tgz", - "integrity": "sha512-JqzORDAPxxs8ErLV4x+LL7bk5pk3YlcWqpSNsIkAZj972KzFZLClc/ekppahKkOczGkwIG6ElFgdOgOlK4tXZw==", + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/unplugin/-/unplugin-1.8.0.tgz", + "integrity": "sha512-yGEQsodWICmgt7asHF7QzqDZYeEP9h14vyd9Lul98UnYf29pLZZLwI09z2QdTjwU/FCkum1SRvsK7cx232X8NA==", "dev": true, "dependencies": { "acorn": "^8.11.3", - "chokidar": "^3.5.3", + "chokidar": "^3.6.0", "webpack-sources": "^3.2.3", "webpack-virtual-modules": "^0.6.1" } diff --git a/ui/package.json b/ui/package.json index 41575fcd7e..1586141202 100644 --- a/ui/package.json +++ b/ui/package.json @@ -32,17 +32,17 @@ "@radix-ui/react-toggle": "^1.0.3", "@radix-ui/react-toggle-group": "^1.0.4", "@radix-ui/react-tooltip": "^1.0.7", - "@tremor/react": "^3.14.0", - "@types/node": "^20.11.20", - "@types/react": "^18.2.58", + "@tremor/react": "^3.14.1", + "@types/node": "^20.11.24", + "@types/react": "^18.2.62", "@types/react-dom": "^18.2.19", "classnames": "^2.5.1", "long": "^5.2.3", - "lucide-react": "^0.340.0", - "material-symbols": "^0.15.0", + "lucide-react": "^0.344.0", + "material-symbols": "^0.16.0", "moment": "^2.30.1", "moment-timezone": "^0.5.45", - "next": "^14.1.0", + "next": "^14.1.1", "next-auth": "^4.24.6", "prop-types": "^15.8.1", "protobufjs": "^7.2.6", @@ -65,10 +65,10 @@ "@storybook/nextjs": "^7.6.17", "@storybook/react": "^7.3.0", "@storybook/testing-library": "^0.2.2", - "autoprefixer": "^10.4.17", + "autoprefixer": "^10.4.18", "copy-webpack-plugin": "^12.0.2", "eslint": "^8.57.0", - "eslint-config-next": "^14.1.0", + "eslint-config-next": "^14.1.1", "eslint-config-prettier": "^9.1.0", "eslint-plugin-storybook": "^0.8.0", "gh-pages": "^6.1.1", From 6612e76d56c473f71755a7364f866e496ba32708 Mon Sep 17 00:00:00 2001 From: Amogh Bharadwaj Date: Tue, 5 Mar 2024 17:51:40 +0530 Subject: [PATCH 05/13] CDC Flow: fix state wipe (#1434) Pause mirror second time onwards was not being respected as the state was not being wiped if only idle timeout / batch size is edited, because when additionTables is empty we just return nil before setting state to nil. This made flowConfigUpdate perenially not nil, which caused the mirror to never enter the pause loop. 
This PR moves the setting of state to nil outside of processConfigUpdates and also adds logs --- flow/workflows/cdc_flow.go | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/flow/workflows/cdc_flow.go b/flow/workflows/cdc_flow.go index 419b11b7e8..2713190062 100644 --- a/flow/workflows/cdc_flow.go +++ b/flow/workflows/cdc_flow.go @@ -108,7 +108,9 @@ func (w *CDCFlowWorkflowExecution) processCDCFlowConfigUpdate(ctx workflow.Conte mirrorNameSearch map[string]interface{}, ) error { flowConfigUpdate := state.FlowConfigUpdate + if flowConfigUpdate != nil { + w.logger.Info("processing CDCFlowConfigUpdate", slog.Any("updatedState", flowConfigUpdate)) if len(flowConfigUpdate.AdditionalTables) == 0 { return nil } @@ -118,6 +120,7 @@ func (w *CDCFlowWorkflowExecution) processCDCFlowConfigUpdate(ctx workflow.Conte } state.CurrentFlowStatus = protos.FlowStatus_STATUS_SNAPSHOT + w.logger.Info("altering publication for additional tables") alterPublicationAddAdditionalTablesCtx := workflow.WithActivityOptions(ctx, workflow.ActivityOptions{ StartToCloseTimeout: 5 * time.Minute, }) @@ -130,6 +133,7 @@ func (w *CDCFlowWorkflowExecution) processCDCFlowConfigUpdate(ctx workflow.Conte return err } + w.logger.Info("additional tables added to publication") additionalTablesUUID := GetUUID(ctx) childAdditionalTablesCDCFlowID := GetChildWorkflowID("additional-cdc-flow", cfg.FlowJobName, additionalTablesUUID) additionalTablesCfg := proto.Clone(cfg).(*protos.FlowConnectionConfigs) @@ -163,9 +167,7 @@ func (w *CDCFlowWorkflowExecution) processCDCFlowConfigUpdate(ctx workflow.Conte maps.Copy(state.SyncFlowOptions.TableNameSchemaMapping, res.SyncFlowOptions.TableNameSchemaMapping) state.SyncFlowOptions.TableMappings = append(state.SyncFlowOptions.TableMappings, flowConfigUpdate.AdditionalTables...) - - // finished processing, wipe it - state.FlowConfigUpdate = nil + w.logger.Info("additional tables added to sync flow") } return nil } @@ -268,6 +270,9 @@ func CDCFlowWorkflow( if err != nil { return state, err } + w.logger.Info("wiping flow state after state update processing") + // finished processing, wipe it + state.FlowConfigUpdate = nil state.ActiveSignal = model.NoopSignal } } From 3e3e2ab6bac937830cd3dca0f553b9b863439f11 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philip=20Dub=C3=A9?= Date: Tue, 5 Mar 2024 15:05:26 +0000 Subject: [PATCH 06/13] Test qrep pause/unpause (#1388) As is apt to happen when adding tests, also fix pause/unpause for qrep/xmin 1. `waitForNewRows` would prevent pausing; use a selector to wait on signals while waiting on new rows while waiting on context cancelation 2. 
`CurrentFlowStatus` wasn't being set to RUNNING when exiting from PAUSED As an aside, it turns out all the qrep/xmin tests are doing `InitialCopyOnly` besides this new test Also remove `DisableWaitForNewRows` because it's unused & diverges testing from reality --- docker-compose-dev.yml | 2 +- docker-compose.yml | 2 +- flow/e2e/congen.go | 34 +++++------- flow/e2e/postgres/qrep_flow_pg_test.go | 63 +++++++++++++++++++++ flow/e2e/test_utils.go | 5 +- flow/workflows/qrep_flow.go | 76 +++++++++++--------------- flow/workflows/xmin_flow.go | 18 ++++-- protos/flow.proto | 2 +- 8 files changed, 127 insertions(+), 75 deletions(-) diff --git a/docker-compose-dev.yml b/docker-compose-dev.yml index 8a8181ba61..210fc507d9 100644 --- a/docker-compose-dev.yml +++ b/docker-compose-dev.yml @@ -56,7 +56,7 @@ services: catalog: condition: service_healthy environment: - - DB=postgres12 + - DB=postgresql - DB_PORT=5432 - POSTGRES_USER=postgres - POSTGRES_PWD=postgres diff --git a/docker-compose.yml b/docker-compose.yml index 7ac7e19bb5..2e0a83be2b 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -49,7 +49,7 @@ services: catalog: condition: service_healthy environment: - - DB=postgres12 + - DB=postgresql - DB_PORT=5432 - POSTGRES_USER=postgres - POSTGRES_PWD=postgres diff --git a/flow/e2e/congen.go b/flow/e2e/congen.go index 603c6f0b7e..82387c9ff3 100644 --- a/flow/e2e/congen.go +++ b/flow/e2e/congen.go @@ -218,25 +218,19 @@ type QRepFlowConnectionGenerationConfig struct { // GenerateQRepConfig generates a qrep config for testing. func (c *QRepFlowConnectionGenerationConfig) GenerateQRepConfig( query string, watermark string, -) (*protos.QRepConfig, error) { - ret := &protos.QRepConfig{} - ret.FlowJobName = c.FlowJobName - ret.WatermarkTable = c.WatermarkTable - ret.DestinationTableIdentifier = c.DestinationTableIdentifier - - postgresPeer := GeneratePostgresPeer() - ret.SourcePeer = postgresPeer - - ret.DestinationPeer = c.Destination - - ret.Query = query - ret.WatermarkColumn = watermark - - ret.StagingPath = c.StagingPath - ret.WriteMode = &protos.QRepWriteMode{ - WriteType: protos.QRepWriteType_QREP_WRITE_MODE_APPEND, +) *protos.QRepConfig { + return &protos.QRepConfig{ + FlowJobName: c.FlowJobName, + WatermarkTable: c.WatermarkTable, + DestinationTableIdentifier: c.DestinationTableIdentifier, + SourcePeer: GeneratePostgresPeer(), + DestinationPeer: c.Destination, + Query: query, + WatermarkColumn: watermark, + StagingPath: c.StagingPath, + WriteMode: &protos.QRepWriteMode{ + WriteType: protos.QRepWriteType_QREP_WRITE_MODE_APPEND, + }, + NumRowsPerPartition: 1000, } - ret.NumRowsPerPartition = 1000 - - return ret, nil } diff --git a/flow/e2e/postgres/qrep_flow_pg_test.go b/flow/e2e/postgres/qrep_flow_pg_test.go index a7a3672119..fb49d3d242 100644 --- a/flow/e2e/postgres/qrep_flow_pg_test.go +++ b/flow/e2e/postgres/qrep_flow_pg_test.go @@ -17,6 +17,7 @@ import ( "github.com/PeerDB-io/peer-flow/e2e" "github.com/PeerDB-io/peer-flow/e2eshared" "github.com/PeerDB-io/peer-flow/generated/protos" + "github.com/PeerDB-io/peer-flow/model" "github.com/PeerDB-io/peer-flow/shared" ) @@ -324,3 +325,65 @@ func (s PeerFlowE2ETestSuitePG) Test_No_Rows_QRep_PG() { e2e.EnvWaitForFinished(s.t, env, 3*time.Minute) require.NoError(s.t, env.Error()) } + +func (s PeerFlowE2ETestSuitePG) Test_Pause() { + numRows := 10 + + srcTable := "qrep_pause" + s.setupSourceTable(srcTable, numRows) + + dstTable := "qrep_pause_dst" + + srcSchemaQualified := fmt.Sprintf("%s_%s.%s", "e2e_test", s.suffix, srcTable) + 
dstSchemaQualified := fmt.Sprintf("%s_%s.%s", "e2e_test", s.suffix, dstTable) + + query := fmt.Sprintf("SELECT * FROM e2e_test_%s.%s WHERE updated_at BETWEEN {{.start}} AND {{.end}}", + s.suffix, srcTable) + + config, err := e2e.CreateQRepWorkflowConfig( + "test_qrep_pause_pg", + srcSchemaQualified, + dstSchemaQualified, + query, + e2e.GeneratePostgresPeer(), + "", + true, + "_PEERDB_SYNCED_AT", + ) + require.NoError(s.t, err) + config.InitialCopyOnly = false + + tc := e2e.NewTemporalClient(s.t) + env := e2e.RunQrepFlowWorkflow(tc, config) + e2e.SignalWorkflow(env, model.FlowSignal, model.PauseSignal) + + e2e.EnvWaitFor(s.t, env, 3*time.Minute, "pausing", func() bool { + response, err := env.Query(shared.QRepFlowStateQuery) + if err != nil { + s.t.Log(err) + return false + } + var state *protos.QRepFlowState + err = response.Get(&state) + if err != nil { + s.t.Fatal("decode failed", err) + } + return state.CurrentFlowStatus == protos.FlowStatus_STATUS_PAUSED + }) + e2e.SignalWorkflow(env, model.FlowSignal, model.NoopSignal) + e2e.EnvWaitFor(s.t, env, time.Minute, "unpausing", func() bool { + response, err := env.Query(shared.QRepFlowStateQuery) + if err != nil { + s.t.Fatal(err) + } + var state *protos.QRepFlowState + err = response.Get(&state) + if err != nil { + s.t.Fatal("decode failed", err) + } + return state.CurrentFlowStatus == protos.FlowStatus_STATUS_RUNNING + }) + + env.Cancel() + e2e.RequireEnvCanceled(s.t, env) +} diff --git a/flow/e2e/test_utils.go b/flow/e2e/test_utils.go index 92d04d1f3d..8592eb406e 100644 --- a/flow/e2e/test_utils.go +++ b/flow/e2e/test_utils.go @@ -396,10 +396,7 @@ func CreateQRepWorkflowConfig( watermark := "updated_at" - qrepConfig, err := connectionGen.GenerateQRepConfig(query, watermark) - if err != nil { - return nil, err - } + qrepConfig := connectionGen.GenerateQRepConfig(query, watermark) qrepConfig.InitialCopyOnly = true qrepConfig.SyncedAtColName = syncedAtCol qrepConfig.SetupWatermarkTableOnDestination = setupDst diff --git a/flow/workflows/qrep_flow.go b/flow/workflows/qrep_flow.go index c80e76c01f..839eab9ddc 100644 --- a/flow/workflows/qrep_flow.go +++ b/flow/workflows/qrep_flow.go @@ -46,19 +46,6 @@ func NewQRepFlowState() *protos.QRepFlowState { } } -// returns a new empty QRepFlowState -func NewQRepFlowStateForTesting() *protos.QRepFlowState { - return &protos.QRepFlowState{ - LastPartition: &protos.QRepPartition{ - PartitionId: "not-applicable-partition", - Range: nil, - }, - NumPartitionsProcessed: 0, - NeedsResync: true, - DisableWaitForNewRows: true, - } -} - // NewQRepFlowExecution creates a new instance of QRepFlowExecution. 
func NewQRepFlowExecution(ctx workflow.Context, config *protos.QRepConfig, runUUID string) *QRepFlowExecution { return &QRepFlowExecution{ @@ -99,7 +86,7 @@ func (q *QRepFlowExecution) SetupMetadataTables(ctx workflow.Context) error { } func (q *QRepFlowExecution) getTableSchema(ctx workflow.Context, tableName string) (*protos.TableSchema, error) { - q.logger.Info("fetching schema for table - ", tableName) + q.logger.Info("fetching schema for table", slog.String("table", tableName)) ctx = workflow.WithActivityOptions(ctx, workflow.ActivityOptions{ StartToCloseTimeout: 5 * time.Minute, @@ -291,20 +278,39 @@ func (q *QRepFlowExecution) consolidatePartitions(ctx workflow.Context) error { return nil } -func (q *QRepFlowExecution) waitForNewRows(ctx workflow.Context, lastPartition *protos.QRepPartition) error { +func (q *QRepFlowExecution) waitForNewRows( + ctx workflow.Context, + signalChan model.TypedReceiveChannel[model.CDCFlowSignal], + lastPartition *protos.QRepPartition, +) error { q.logger.Info("idling until new rows are detected") + var done bool + var doneErr error + selector := workflow.NewNamedSelector(ctx, "WaitForNewRows") + ctx = workflow.WithActivityOptions(ctx, workflow.ActivityOptions{ StartToCloseTimeout: 16 * 365 * 24 * time.Hour, // 16 years HeartbeatTimeout: time.Minute, }) + fWait := workflow.ExecuteActivity(ctx, flowable.QRepWaitUntilNewRows, q.config, lastPartition) + selector.AddReceive(ctx.Done(), func(_ workflow.ReceiveChannel, _ bool) {}) + selector.AddFuture(fWait, func(f workflow.Future) { + doneErr = f.Get(ctx, nil) + done = true + }) + signalChan.AddToSelector(selector, func(val model.CDCFlowSignal, _ bool) { + q.activeSignal = model.FlowSignalHandler(q.activeSignal, val, q.logger) + }) - if err := workflow.ExecuteActivity(ctx, flowable.QRepWaitUntilNewRows, q.config, - lastPartition).Get(ctx, nil); err != nil { - return fmt.Errorf("failed while idling for new rows: %w", err) + for ctx.Err() == nil && ((!done && q.activeSignal != model.PauseSignal) || selector.HasPending()) { + selector.Select(ctx) } - return nil + if err := ctx.Err(); err != nil { + return err + } + return doneErr } func (q *QRepFlowExecution) handleTableCreationForResync(ctx workflow.Context, state *protos.QRepFlowState) error { @@ -364,16 +370,6 @@ func (q *QRepFlowExecution) handleTableRenameForResync(ctx workflow.Context, sta return nil } -func (q *QRepFlowExecution) receiveAndHandleSignalAsync(signalChan model.TypedReceiveChannel[model.CDCFlowSignal]) { - for { - val, ok := signalChan.ReceiveAsync() - if !ok { - break - } - q.activeSignal = model.FlowSignalHandler(q.activeSignal, val, q.logger) - } -} - func setWorkflowQueries(ctx workflow.Context, state *protos.QRepFlowState) error { // Support an Update for the current status of the qrep flow. 
err := workflow.SetUpdateHandler(ctx, shared.FlowStatusUpdate, func(status *protos.FlowStatus) error { @@ -452,6 +448,7 @@ func QRepFlowWorkflow( return err } } + state.CurrentFlowStatus = protos.FlowStatus_STATUS_RUNNING } maxParallelWorkers := 16 @@ -475,13 +472,13 @@ func QRepFlowWorkflow( return err } - logger.Info("fetching partitions to replicate for peer flow - ", config.FlowJobName) + logger.Info("fetching partitions to replicate for peer flow") partitions, err := q.GetPartitions(ctx, state.LastPartition) if err != nil { return fmt.Errorf("failed to get partitions: %w", err) } - logger.Info("partitions to replicate - ", len(partitions.Partitions)) + logger.Info(fmt.Sprintf("%d partitions to replicate", len(partitions.Partitions))) if err := q.processPartitions(ctx, maxParallelWorkers, partitions.Partitions); err != nil { return err } @@ -501,29 +498,22 @@ func QRepFlowWorkflow( return err } - logger.Info("partitions processed - ", len(partitions.Partitions)) + logger.Info(fmt.Sprintf("%d partitions processed", len(partitions.Partitions))) state.NumPartitionsProcessed += uint64(len(partitions.Partitions)) if len(partitions.Partitions) > 0 { state.LastPartition = partitions.Partitions[len(partitions.Partitions)-1] } - if !state.DisableWaitForNewRows { - // sleep for a while and continue the workflow - err = q.waitForNewRows(ctx, state.LastPartition) - if err != nil { - return err - } + err = q.waitForNewRows(ctx, signalChan, state.LastPartition) + if err != nil { + return err } logger.Info("Continuing as new workflow", slog.Any("Last Partition", state.LastPartition), - slog.Any("Number of Partitions Processed", state.NumPartitionsProcessed)) + slog.Uint64("Number of Partitions Processed", state.NumPartitionsProcessed)) - q.receiveAndHandleSignalAsync(signalChan) - if err := ctx.Err(); err != nil { - return err - } if q.activeSignal == model.PauseSignal { state.CurrentFlowStatus = protos.FlowStatus_STATUS_PAUSED } diff --git a/flow/workflows/xmin_flow.go b/flow/workflows/xmin_flow.go index c5334221c6..777daba38b 100644 --- a/flow/workflows/xmin_flow.go +++ b/flow/workflows/xmin_flow.go @@ -47,6 +47,7 @@ func XminFlowWorkflow( return err } } + state.CurrentFlowStatus = protos.FlowStatus_STATUS_RUNNING } err = q.SetupWatermarkTableOnDestination(ctx) @@ -100,14 +101,21 @@ func XminFlowWorkflow( Range: &protos.PartitionRange{Range: &protos.PartitionRange_IntRange{IntRange: &protos.IntPartitionRange{Start: lastPartition}}}, } - logger.Info("Continuing as new workflow", - slog.Any("Last Partition", state.LastPartition), - slog.Any("Number of Partitions Processed", state.NumPartitionsProcessed)) - - q.receiveAndHandleSignalAsync(signalChan) if err := ctx.Err(); err != nil { return err } + for { + val, ok := signalChan.ReceiveAsync() + if !ok { + break + } + q.activeSignal = model.FlowSignalHandler(q.activeSignal, val, q.logger) + } + + logger.Info("Continuing as new workflow", + slog.Any("Last Partition", state.LastPartition), + slog.Uint64("Number of Partitions Processed", state.NumPartitionsProcessed)) + if q.activeSignal == model.PauseSignal { state.CurrentFlowStatus = protos.FlowStatus_STATUS_PAUSED } diff --git a/protos/flow.proto b/protos/flow.proto index f5d804ae71..0ab9b94a35 100644 --- a/protos/flow.proto +++ b/protos/flow.proto @@ -322,7 +322,7 @@ message QRepFlowState { QRepPartition last_partition = 1; uint64 num_partitions_processed = 2; bool needs_resync = 3; - bool disable_wait_for_new_rows = 4; + bool disable_wait_for_new_rows = 4; // deprecated FlowStatus current_flow_status 
= 5; } From 5ea04a80da1e3f55afb31ad9811e41f0517b7980 Mon Sep 17 00:00:00 2001 From: Kevin Biju <52661649+heavycrystal@users.noreply.github.com> Date: Tue, 5 Mar 2024 22:28:05 +0530 Subject: [PATCH 07/13] revert change to Temporal database plugin (#1437) `postgresql` appears to be a plugin that doesn't support Temporal [advanced visibility](https://docs.temporal.io/visibility#advanced-visibility) which we use in `flow-api`. It now crashes on startup. Reverting the change made in #1388 --- docker-compose-dev.yml | 2 +- docker-compose.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docker-compose-dev.yml b/docker-compose-dev.yml index 210fc507d9..8a8181ba61 100644 --- a/docker-compose-dev.yml +++ b/docker-compose-dev.yml @@ -56,7 +56,7 @@ services: catalog: condition: service_healthy environment: - - DB=postgresql + - DB=postgres12 - DB_PORT=5432 - POSTGRES_USER=postgres - POSTGRES_PWD=postgres diff --git a/docker-compose.yml b/docker-compose.yml index 2e0a83be2b..7ac7e19bb5 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -49,7 +49,7 @@ services: catalog: condition: service_healthy environment: - - DB=postgresql + - DB=postgres12 - DB_PORT=5432 - POSTGRES_USER=postgres - POSTGRES_PWD=postgres From 0a3241925fb6d52d2382e87e990603beea523d2d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philip=20Dub=C3=A9?= Date: Tue, 5 Mar 2024 18:24:43 +0000 Subject: [PATCH 08/13] Fix & test cdc rollover (#1436) Customer workflow recently got stuck after sync-stop, add test covering cdc_flow returning ContinueAsNew multiple times cdc_flow had custom retry logic to pass tests before they were switched to integration tests, get rid of that, it was racy with signals, rely on temporal retries for workflow errors & restarting cdc entirely if sync/normalize finish for whatever reason --- flow/e2e/postgres/peer_flow_pg_test.go | 45 +++++++- flow/e2e/snowflake/qrep_flow_sf_test.go | 4 +- flow/workflows/cdc_flow.go | 144 ++++++++++-------------- 3 files changed, 105 insertions(+), 88 deletions(-) diff --git a/flow/e2e/postgres/peer_flow_pg_test.go b/flow/e2e/postgres/peer_flow_pg_test.go index 192ee85ba9..fa55bbb3fc 100644 --- a/flow/e2e/postgres/peer_flow_pg_test.go +++ b/flow/e2e/postgres/peer_flow_pg_test.go @@ -1060,7 +1060,50 @@ func (s PeerFlowE2ETestSuitePG) Test_Supported_Mixed_Case_Table() { e2e.RequireEnvCanceled(s.t, env) } -// test don't work, make it work later +func (s PeerFlowE2ETestSuitePG) Test_ContinueAsNew() { + srcTableName := s.attachSchemaSuffix("test_continueasnew") + dstTableName := s.attachSchemaSuffix("test_continueasnew_dst") + + _, err := s.Conn().Exec(context.Background(), fmt.Sprintf(` + CREATE TABLE IF NOT EXISTS %s ( + id SERIAL PRIMARY KEY, + key TEXT NOT NULL, + value TEXT NOT NULL + ); + `, srcTableName)) + require.NoError(s.t, err) + + connectionGen := e2e.FlowConnectionGenerationConfig{ + FlowJobName: s.attachSuffix("test_continueasnew_flow"), + TableNameMapping: map[string]string{srcTableName: dstTableName}, + Destination: s.peer, + } + + flowConnConfig := connectionGen.GenerateFlowConnectionConfigs() + flowConnConfig.MaxBatchSize = 2 + flowConnConfig.IdleTimeoutSeconds = 10 + + tc := e2e.NewTemporalClient(s.t) + env := e2e.ExecutePeerflow(tc, peerflow.CDCFlowWorkflow, flowConnConfig, nil) + + e2e.SetupCDCFlowStatusQuery(s.t, env, connectionGen) + for i := range 144 { + testKey := fmt.Sprintf("test_key_%d", i) + testValue := fmt.Sprintf("test_value_%d", i) + _, err = s.Conn().Exec(context.Background(), fmt.Sprintf(` + INSERT INTO %s(key, value) VALUES 
($1, $2) + `, srcTableName), testKey, testValue) + e2e.EnvNoError(s.t, env, err) + } + s.t.Log("Inserted 144 rows into the source table") + + e2e.EnvWaitFor(s.t, env, 3*time.Minute, "normalize 72 syncs", func() bool { + return s.comparePGTables(srcTableName, dstTableName, "id,key,value") == nil + }) + env.Cancel() + + e2e.RequireEnvCanceled(s.t, env) +} func (s PeerFlowE2ETestSuitePG) Test_Dynamic_Mirror_Config_Via_Signals() { srcTable1Name := s.attachSchemaSuffix("test_dynconfig_1") diff --git a/flow/e2e/snowflake/qrep_flow_sf_test.go b/flow/e2e/snowflake/qrep_flow_sf_test.go index 7a417238a6..d7ce60d81b 100644 --- a/flow/e2e/snowflake/qrep_flow_sf_test.go +++ b/flow/e2e/snowflake/qrep_flow_sf_test.go @@ -149,7 +149,7 @@ func (s PeerFlowE2ETestSuiteSF) Test_Complete_QRep_Flow_Avro_SF_S3() { qrepConfig.SetupWatermarkTableOnDestination = true env := e2e.RunQrepFlowWorkflow(tc, qrepConfig) - e2e.EnvWaitForFinished(s.t, env, 3*time.Minute) + e2e.EnvWaitForFinished(s.t, env, 5*time.Minute) require.NoError(s.t, env.Error()) sel := e2e.GetOwnersSelectorStringsSF() @@ -226,7 +226,7 @@ func (s PeerFlowE2ETestSuiteSF) Test_Complete_QRep_Flow_Avro_SF_S3_Integration() qrepConfig.SetupWatermarkTableOnDestination = true env := e2e.RunQrepFlowWorkflow(tc, qrepConfig) - e2e.EnvWaitForFinished(s.t, env, 3*time.Minute) + e2e.EnvWaitForFinished(s.t, env, 5*time.Minute) require.NoError(s.t, env.Error()) sel := e2e.GetOwnersSelectorStringsSF() diff --git a/flow/workflows/cdc_flow.go b/flow/workflows/cdc_flow.go index 2713190062..14ca09092d 100644 --- a/flow/workflows/cdc_flow.go +++ b/flow/workflows/cdc_flow.go @@ -53,22 +53,6 @@ func NewCDCFlowWorkflowState(cfg *protos.FlowConnectionConfigs) *CDCFlowWorkflow } } -// CDCFlowWorkflowExecution represents the state for execution of a peer flow. -type CDCFlowWorkflowExecution struct { - flowExecutionID string - logger log.Logger - syncFlowFuture workflow.ChildWorkflowFuture - normFlowFuture workflow.ChildWorkflowFuture -} - -// NewCDCFlowWorkflowExecution creates a new instance of PeerFlowWorkflowExecution. 
-func NewCDCFlowWorkflowExecution(ctx workflow.Context, flowName string) *CDCFlowWorkflowExecution { - return &CDCFlowWorkflowExecution{ - flowExecutionID: workflow.GetInfo(ctx).WorkflowExecution.ID, - logger: log.With(workflow.GetLogger(ctx), slog.String(string(shared.FlowNameKey), flowName)), - } -} - func GetSideEffect[T any](ctx workflow.Context, f func(workflow.Context) T) T { sideEffect := workflow.SideEffect(ctx, func(ctx workflow.Context) interface{} { return f(ctx) @@ -103,24 +87,26 @@ const ( maxSyncsPerCdcFlow = 32 ) -func (w *CDCFlowWorkflowExecution) processCDCFlowConfigUpdate(ctx workflow.Context, +func processCDCFlowConfigUpdate( + ctx workflow.Context, + logger log.Logger, cfg *protos.FlowConnectionConfigs, state *CDCFlowWorkflowState, mirrorNameSearch map[string]interface{}, ) error { flowConfigUpdate := state.FlowConfigUpdate if flowConfigUpdate != nil { - w.logger.Info("processing CDCFlowConfigUpdate", slog.Any("updatedState", flowConfigUpdate)) + logger.Info("processing CDCFlowConfigUpdate", slog.Any("updatedState", flowConfigUpdate)) if len(flowConfigUpdate.AdditionalTables) == 0 { return nil } if shared.AdditionalTablesHasOverlap(state.SyncFlowOptions.TableMappings, flowConfigUpdate.AdditionalTables) { - w.logger.Warn("duplicate source/destination tables found in additionalTables") + logger.Warn("duplicate source/destination tables found in additionalTables") return nil } state.CurrentFlowStatus = protos.FlowStatus_STATUS_SNAPSHOT - w.logger.Info("altering publication for additional tables") + logger.Info("altering publication for additional tables") alterPublicationAddAdditionalTablesCtx := workflow.WithActivityOptions(ctx, workflow.ActivityOptions{ StartToCloseTimeout: 5 * time.Minute, }) @@ -129,11 +115,11 @@ func (w *CDCFlowWorkflowExecution) processCDCFlowConfigUpdate(ctx workflow.Conte flowable.AddTablesToPublication, cfg, flowConfigUpdate.AdditionalTables) if err := alterPublicationAddAdditionalTablesFuture.Get(ctx, nil); err != nil { - w.logger.Error("failed to alter publication for additional tables: ", err) + logger.Error("failed to alter publication for additional tables: ", err) return err } - w.logger.Info("additional tables added to publication") + logger.Info("additional tables added to publication") additionalTablesUUID := GetUUID(ctx) childAdditionalTablesCDCFlowID := GetChildWorkflowID("additional-cdc-flow", cfg.FlowJobName, additionalTablesUUID) additionalTablesCfg := proto.Clone(cfg).(*protos.FlowConnectionConfigs) @@ -167,13 +153,14 @@ func (w *CDCFlowWorkflowExecution) processCDCFlowConfigUpdate(ctx workflow.Conte maps.Copy(state.SyncFlowOptions.TableNameSchemaMapping, res.SyncFlowOptions.TableNameSchemaMapping) state.SyncFlowOptions.TableMappings = append(state.SyncFlowOptions.TableMappings, flowConfigUpdate.AdditionalTables...) - w.logger.Info("additional tables added to sync flow") + logger.Info("additional tables added to sync flow") } return nil } -func (w *CDCFlowWorkflowExecution) addCdcPropertiesSignalListener( +func addCdcPropertiesSignalListener( ctx workflow.Context, + logger log.Logger, selector workflow.Selector, state *CDCFlowWorkflowState, ) { @@ -189,21 +176,13 @@ func (w *CDCFlowWorkflowExecution) addCdcPropertiesSignalListener( // do this irrespective of additional tables being present, for auto unpausing state.FlowConfigUpdate = cdcConfigUpdate - w.logger.Info("CDC Signal received. Parameters on signal reception:", + logger.Info("CDC Signal received. 
Parameters on signal reception:", slog.Int("BatchSize", int(state.SyncFlowOptions.BatchSize)), slog.Int("IdleTimeout", int(state.SyncFlowOptions.IdleTimeoutSeconds)), slog.Any("AdditionalTables", cdcConfigUpdate.AdditionalTables)) }) } -func (w *CDCFlowWorkflowExecution) startSyncFlow(ctx workflow.Context, config *protos.FlowConnectionConfigs, options *protos.SyncFlowOptions) { - w.syncFlowFuture = workflow.ExecuteChildWorkflow(ctx, SyncFlowWorkflow, config, options) -} - -func (w *CDCFlowWorkflowExecution) startNormFlow(ctx workflow.Context, config *protos.FlowConnectionConfigs) { - w.normFlowFuture = workflow.ExecuteChildWorkflow(ctx, NormalizeFlowWorkflow, config, nil) -} - func CDCFlowWorkflow( ctx workflow.Context, cfg *protos.FlowConnectionConfigs, @@ -217,7 +196,7 @@ func CDCFlowWorkflow( state = NewCDCFlowWorkflowState(cfg) } - w := NewCDCFlowWorkflowExecution(ctx, cfg.FlowJobName) + logger := log.With(workflow.GetLogger(ctx), slog.String(string(shared.FlowNameKey), cfg.FlowJobName)) flowSignalChan := model.FlowSignal.GetSignalChannel(ctx) err := workflow.SetQueryHandler(ctx, shared.CDCFlowStateQuery, func() (CDCFlowWorkflowState, error) { @@ -248,9 +227,9 @@ func CDCFlowWorkflow( selector := workflow.NewNamedSelector(ctx, "PauseLoop") selector.AddReceive(ctx.Done(), func(_ workflow.ReceiveChannel, _ bool) {}) flowSignalChan.AddToSelector(selector, func(val model.CDCFlowSignal, _ bool) { - state.ActiveSignal = model.FlowSignalHandler(state.ActiveSignal, val, w.logger) + state.ActiveSignal = model.FlowSignalHandler(state.ActiveSignal, val, logger) }) - w.addCdcPropertiesSignalListener(ctx, selector, state) + addCdcPropertiesSignalListener(ctx, logger, selector, state) startTime := workflow.Now(ctx) state.CurrentFlowStatus = protos.FlowStatus_STATUS_PAUSED @@ -258,7 +237,7 @@ func CDCFlowWorkflow( for state.ActiveSignal == model.PauseSignal { // only place we block on receive, so signal processing is immediate for state.ActiveSignal == model.PauseSignal && state.FlowConfigUpdate == nil && ctx.Err() == nil { - w.logger.Info("mirror has been paused", slog.Any("duration", time.Since(startTime))) + logger.Info("mirror has been paused", slog.Any("duration", time.Since(startTime))) selector.Select(ctx) } if err := ctx.Err(); err != nil { @@ -266,18 +245,18 @@ func CDCFlowWorkflow( } if state.FlowConfigUpdate != nil { - err = w.processCDCFlowConfigUpdate(ctx, cfg, state, mirrorNameSearch) + err = processCDCFlowConfigUpdate(ctx, logger, cfg, state, mirrorNameSearch) if err != nil { return state, err } - w.logger.Info("wiping flow state after state update processing") + logger.Info("wiping flow state after state update processing") // finished processing, wipe it state.FlowConfigUpdate = nil state.ActiveSignal = model.NoopSignal } } - w.logger.Info("mirror has been resumed after ", time.Since(startTime)) + logger.Info("mirror has been resumed after ", time.Since(startTime)) state.CurrentFlowStatus = protos.FlowStatus_STATUS_RUNNING } @@ -346,7 +325,7 @@ func CDCFlowWorkflow( state.SyncFlowOptions.TableNameSchemaMapping, ) if err := snapshotFlowFuture.Get(snapshotFlowCtx, nil); err != nil { - w.logger.Error("snapshot flow failed", slog.Any("error", err)) + logger.Error("snapshot flow failed", slog.Any("error", err)) return state, fmt.Errorf("failed to execute snapshot workflow: %w", err) } @@ -385,7 +364,7 @@ func CDCFlowWorkflow( } state.CurrentFlowStatus = protos.FlowStatus_STATUS_RUNNING - w.logger.Info("executed setup flow and snapshot flow") + logger.Info("executed setup flow and snapshot 
flow") // if initial_copy_only is opted for, we end the flow here. if cfg.InitialSnapshotOnly { @@ -424,70 +403,56 @@ func CDCFlowWorkflow( handleError := func(name string, err error) { var panicErr *temporal.PanicError if errors.As(err, &panicErr) { - w.logger.Error( + logger.Error( "panic in flow", slog.String("name", name), slog.Any("error", panicErr.Error()), slog.String("stack", panicErr.StackTrace()), ) } else { - w.logger.Error("error in flow", slog.String("name", name), slog.Any("error", err)) + logger.Error("error in flow", slog.String("name", name), slog.Any("error", err)) } } - finishSyncNormalize := func() { - restart = true - _ = model.SyncStopSignal.SignalChildWorkflow(ctx, w.syncFlowFuture, struct{}{}).Get(ctx, nil) - } + syncFlowFuture := workflow.ExecuteChildWorkflow(syncCtx, SyncFlowWorkflow, cfg, state.SyncFlowOptions) + normFlowFuture := workflow.ExecuteChildWorkflow(normCtx, NormalizeFlowWorkflow, cfg, nil) mainLoopSelector := workflow.NewNamedSelector(ctx, "MainLoop") mainLoopSelector.AddReceive(ctx.Done(), func(_ workflow.ReceiveChannel, _ bool) {}) - - var handleNormFlow, handleSyncFlow func(workflow.Future) - handleSyncFlow = func(f workflow.Future) { + mainLoopSelector.AddFuture(syncFlowFuture, func(f workflow.Future) { err := f.Get(ctx, nil) if err != nil { handleError("sync", err) } - if restart { - w.logger.Info("sync finished, finishing normalize") - w.syncFlowFuture = nil - _ = model.NormalizeSignal.SignalChildWorkflow(ctx, w.normFlowFuture, model.NormalizePayload{ + logger.Info("sync finished, finishing normalize") + syncFlowFuture = nil + restart = true + if normFlowFuture != nil { + err = model.NormalizeSignal.SignalChildWorkflow(ctx, normFlowFuture, model.NormalizePayload{ Done: true, SyncBatchID: -1, }).Get(ctx, nil) - } else { - w.logger.Warn("sync flow ended, restarting", slog.Any("error", err)) - w.startSyncFlow(syncCtx, cfg, state.SyncFlowOptions) - mainLoopSelector.AddFuture(w.syncFlowFuture, handleSyncFlow) + if err != nil { + logger.Warn("failed to signal normalize done, finishing", slog.Any("error", err)) + finished = true + } } - } - handleNormFlow = func(f workflow.Future) { + }) + mainLoopSelector.AddFuture(normFlowFuture, func(f workflow.Future) { err := f.Get(ctx, nil) if err != nil { handleError("normalize", err) } - if restart { - w.logger.Info("normalize finished") - w.normFlowFuture = nil - finished = true - } else { - w.logger.Warn("normalize flow ended, restarting", slog.Any("error", err)) - w.startNormFlow(normCtx, cfg) - mainLoopSelector.AddFuture(w.normFlowFuture, handleNormFlow) - } - } - - w.startSyncFlow(syncCtx, cfg, state.SyncFlowOptions) - mainLoopSelector.AddFuture(w.syncFlowFuture, handleSyncFlow) - - w.startNormFlow(normCtx, cfg) - mainLoopSelector.AddFuture(w.normFlowFuture, handleNormFlow) + logger.Info("normalize finished, finishing") + normFlowFuture = nil + restart = true + finished = true + }) flowSignalChan.AddToSelector(mainLoopSelector, func(val model.CDCFlowSignal, _ bool) { - state.ActiveSignal = model.FlowSignalHandler(state.ActiveSignal, val, w.logger) + state.ActiveSignal = model.FlowSignalHandler(state.ActiveSignal, val, logger) }) syncResultChan := model.SyncResultSignal.GetSignalChannel(ctx) @@ -504,7 +469,9 @@ func CDCFlowWorkflow( normChan := model.NormalizeSignal.GetSignalChannel(ctx) normChan.AddToSelector(mainLoopSelector, func(payload model.NormalizePayload, _ bool) { - _ = model.NormalizeSignal.SignalChildWorkflow(ctx, w.normFlowFuture, payload).Get(ctx, nil) + if normFlowFuture != nil { + _ = 
model.NormalizeSignal.SignalChildWorkflow(ctx, normFlowFuture, payload).Get(ctx, nil) + } maps.Copy(state.SyncFlowOptions.TableNameSchemaMapping, payload.TableNameSchemaMapping) }) @@ -514,13 +481,13 @@ func CDCFlowWorkflow( if !parallel { normDoneChan := model.NormalizeDoneSignal.GetSignalChannel(ctx) normDoneChan.AddToSelector(mainLoopSelector, func(x struct{}, _ bool) { - if w.syncFlowFuture != nil { - _ = model.NormalizeDoneSignal.SignalChildWorkflow(ctx, w.syncFlowFuture, x).Get(ctx, nil) + if syncFlowFuture != nil { + _ = model.NormalizeDoneSignal.SignalChildWorkflow(ctx, syncFlowFuture, x).Get(ctx, nil) } }) } - w.addCdcPropertiesSignalListener(ctx, mainLoopSelector, state) + addCdcPropertiesSignalListener(ctx, logger, mainLoopSelector, state) state.CurrentFlowStatus = protos.FlowStatus_STATUS_RUNNING for { @@ -529,12 +496,19 @@ func CDCFlowWorkflow( mainLoopSelector.Select(ctx) } if err := ctx.Err(); err != nil { - w.logger.Info("mirror canceled", slog.Any("error", err)) + logger.Info("mirror canceled", slog.Any("error", err)) return state, err } if state.ActiveSignal == model.PauseSignal || syncCount >= maxSyncsPerCdcFlow { - finishSyncNormalize() + restart = true + if syncFlowFuture != nil { + err := model.SyncStopSignal.SignalChildWorkflow(ctx, syncFlowFuture, struct{}{}).Get(ctx, nil) + if err != nil { + logger.Warn("failed to send sync-stop, finishing", slog.Any("error", err)) + finished = true + } + } } if restart { @@ -547,7 +521,7 @@ func CDCFlowWorkflow( } if err := ctx.Err(); err != nil { - w.logger.Info("mirror canceled", slog.Any("error", err)) + logger.Info("mirror canceled", slog.Any("error", err)) return nil, err } From f8f8d642690d5b1d9e368e3ce97c49c78e137765 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philip=20Dub=C3=A9?= Date: Wed, 6 Mar 2024 15:07:45 +0000 Subject: [PATCH 09/13] Enable all govet lints besides fieldalignment/shadow (#1441) shadow would need to ignore shadowing `err`, & fieldalignment raises many warnings which can be evaluated in a future PR to reduce memory footprint --- flow/.golangci.yml | 5 +++++ flow/cmd/handler.go | 4 ++-- flow/cmd/peer_data.go | 4 ++-- flow/connectors/clickhouse/qrep_avro_sync.go | 3 --- 4 files changed, 9 insertions(+), 7 deletions(-) diff --git a/flow/.golangci.yml b/flow/.golangci.yml index 50582063f1..1b03b3c3a3 100644 --- a/flow/.golangci.yml +++ b/flow/.golangci.yml @@ -57,6 +57,11 @@ linters-settings: settings: hugeParam: sizeThreshold: 512 + govet: + enable-all: true + disable: + - fieldalignment + - shadow stylecheck: checks: - all diff --git a/flow/cmd/handler.go b/flow/cmd/handler.go index f4a7a7cc62..554e177dc1 100644 --- a/flow/cmd/handler.go +++ b/flow/cmd/handler.go @@ -373,8 +373,8 @@ func (h *FlowRequestHandler) ShutdownFlow( } if req.RemoveFlowEntry { - delErr := h.removeFlowEntryInCatalog(ctx, req.FlowJobName) - if delErr != nil { + err := h.removeFlowEntryInCatalog(ctx, req.FlowJobName) + if err != nil { slog.Error("unable to remove flow job entry", slog.String(string(shared.FlowNameKey), req.FlowJobName), slog.Any("error", err), diff --git a/flow/cmd/peer_data.go b/flow/cmd/peer_data.go index f4846280ed..7ec28329e2 100644 --- a/flow/cmd/peer_data.go +++ b/flow/cmd/peer_data.go @@ -23,9 +23,9 @@ func (h *FlowRequestHandler) getPGPeerConfig(ctx context.Context, peerName strin return nil, err } - unmarshalErr := proto.Unmarshal(pgPeerOptions, &pgPeerConfig) + err = proto.Unmarshal(pgPeerOptions, &pgPeerConfig) if err != nil { - return nil, unmarshalErr + return nil, err } return &pgPeerConfig, nil diff --git 
a/flow/connectors/clickhouse/qrep_avro_sync.go b/flow/connectors/clickhouse/qrep_avro_sync.go
index bf119143cc..f4631ca4f4 100644
--- a/flow/connectors/clickhouse/qrep_avro_sync.go
+++ b/flow/connectors/clickhouse/qrep_avro_sync.go
@@ -43,9 +43,6 @@ func (s *ClickhouseAvroSyncMethod) CopyStageToDestination(ctx context.Context, a
 	avroFileUrl := fmt.Sprintf("https://%s.s3.%s.amazonaws.com/%s", s3o.Bucket,
 		s.connector.creds.Region, avroFile.FilePath)
 
-	if err != nil {
-		return err
-	}
 	//nolint:gosec
 	query := fmt.Sprintf("INSERT INTO %s SELECT * FROM s3('%s','%s','%s', 'Avro')",
 		s.config.DestinationTableIdentifier, avroFileUrl,

From 373bfb2621765dd95c7003b943bc0fd734eac36e Mon Sep 17 00:00:00 2001
From: Amogh Bharadwaj
Date: Thu, 7 Mar 2024 01:58:41 +0530
Subject: [PATCH 10/13] Clickhouse: specify columns more in insert select
 (#1442)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This PR is an attempt to fix #1439, by explicitly specifying the columns
in `select * from s3(...)` in Qrep for Clickhouse.

Functionally tested

Co-authored-by: Philip Dubé
---
 flow/connectors/clickhouse/qrep_avro_sync.go | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/flow/connectors/clickhouse/qrep_avro_sync.go b/flow/connectors/clickhouse/qrep_avro_sync.go
index f4631ca4f4..2a6d1f217e 100644
--- a/flow/connectors/clickhouse/qrep_avro_sync.go
+++ b/flow/connectors/clickhouse/qrep_avro_sync.go
@@ -130,12 +130,12 @@ func (s *ClickhouseAvroSyncMethod) SyncQRepRecords(
 			continue
 		}
 
-		selector = append(selector, colName)
+		selector = append(selector, "`"+colName+"`")
 	}
 	selectorStr := strings.Join(selector, ",")
 	//nolint:gosec
-	query := fmt.Sprintf("INSERT INTO %s(%s) SELECT * FROM s3('%s','%s','%s', 'Avro')",
-		config.DestinationTableIdentifier, selectorStr, avroFileUrl,
+	query := fmt.Sprintf("INSERT INTO %s(%s) SELECT %s FROM s3('%s','%s','%s', 'Avro')",
+		config.DestinationTableIdentifier, selectorStr, selectorStr, avroFileUrl,
 		s.connector.creds.AccessKeyID, s.connector.creds.SecretAccessKey)
 
 	_, err = s.connector.database.ExecContext(ctx, query)

From 6b47812b9ff398a6018719b503c7478867a83ce9 Mon Sep 17 00:00:00 2001
From: Kunal Gupta <39487888+iamKunalGupta@users.noreply.github.com>
Date: Thu, 7 Mar 2024 05:01:07 +0530
Subject: [PATCH 11/13] fix(telemetry): issue with aws sns (#1444)

`subject[:100]` is byte-based, not character-based, causing AWS to reject
control characters

Properly limit to 100 characters, rather than 100 bytes
---
 flow/shared/telemetry/sns_message_sender.go | 17 +++++++++++++++--
 1 file changed, 15 insertions(+), 2 deletions(-)

diff --git a/flow/shared/telemetry/sns_message_sender.go b/flow/shared/telemetry/sns_message_sender.go
index 42bdd026a7..67cf3eebae 100644
--- a/flow/shared/telemetry/sns_message_sender.go
+++ b/flow/shared/telemetry/sns_message_sender.go
@@ -5,6 +5,7 @@ import (
 	"crypto/sha256"
 	"encoding/hex"
 	"strings"
+	"unicode"
 
 	"github.com/aws/aws-sdk-go-v2/aws"
 	"github.com/aws/aws-sdk-go-v2/config"
@@ -37,7 +38,19 @@ func (s *SNSMessageSenderImpl) SendMessage(ctx context.Context, subject string,
 	h := sha256.New()
 	h.Write([]byte(deduplicationString))
 	deduplicationHash := hex.EncodeToString(h.Sum(nil))
-
+	// AWS SNS Subject constraints
+	var messageSubjectBuilder strings.Builder
+	maxSubjectSize := 99
+	for currentLength, char := range subject {
+		if unicode.IsPrint(char) {
+			messageSubjectBuilder.WriteRune(char)
+		} else {
+			messageSubjectBuilder.WriteRune(' ')
+		}
+		if currentLength > maxSubjectSize {
+			break
+		}
+	}
publish, err := s.client.Publish(ctx, &sns.PublishInput{ Message: aws.String(body), MessageAttributes: map[string]types.MessageAttributeValue{ @@ -66,7 +79,7 @@ func (s *SNSMessageSenderImpl) SendMessage(ctx context.Context, subject string, StringValue: aws.String(deduplicationHash), }, }, - Subject: aws.String(subject[:100]), + Subject: aws.String(messageSubjectBuilder.String()), TopicArn: aws.String(s.topic), }) if err != nil { From b2135280ae797fdafa80ea88d556a72004f37931 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philip=20Dub=C3=A9?= Date: Thu, 7 Mar 2024 14:02:07 +0000 Subject: [PATCH 12/13] POC: generic Test_Simple_Flow (#1440) Lay out basic design for generic connector testing, replacing `Test_Simple_Flow` with a single definition used for PG/SF/BQ More can be ported in followup PRs, so when new connectors are added they only implement `GenericSuite` to have e2e tests Also fix hstore comparison so it works with Snowflake --- .github/workflows/flow.yml | 4 +- .../postgres/postgres_schema_delta_test.go | 40 ++--- flow/e2e/bigquery/bigquery.go | 102 ++++++++++++ flow/e2e/bigquery/peer_flow_bq_test.go | 149 +---------------- flow/e2e/bigquery/qrep_flow_bq_test.go | 11 +- flow/e2e/congen.go | 36 ++-- flow/e2e/generic/peer_flow_test.go | 82 +++++++++ flow/e2e/postgres/peer_flow_pg_test.go | 48 +----- flow/e2e/postgres/postgres.go | 79 +++++++++ flow/e2e/postgres/qrep_flow_pg_test.go | 51 +----- flow/e2e/s3/qrep_flow_s3_test.go | 30 ++-- flow/e2e/snowflake/peer_flow_sf_test.go | 155 +----------------- flow/e2e/snowflake/snowflake.go | 110 +++++++++++++ .../snowflake/snowflake_schema_delta_test.go | 10 +- .../e2e/sqlserver/qrep_flow_sqlserver_test.go | 24 +-- flow/e2e/test_utils.go | 37 ++++- flow/e2eshared/e2eshared.go | 8 +- flow/model/qvalue/qvalue.go | 6 +- flow/workflows/cdc_flow.go | 1 - flow/workflows/setup_flow.go | 2 +- 20 files changed, 495 insertions(+), 490 deletions(-) create mode 100644 flow/e2e/bigquery/bigquery.go create mode 100644 flow/e2e/generic/peer_flow_test.go create mode 100644 flow/e2e/postgres/postgres.go create mode 100644 flow/e2e/snowflake/snowflake.go diff --git a/.github/workflows/flow.yml b/.github/workflows/flow.yml index 5f6899b15b..38bc636f51 100644 --- a/.github/workflows/flow.yml +++ b/.github/workflows/flow.yml @@ -12,7 +12,7 @@ jobs: matrix: runner: [ubicloud-standard-16-ubuntu-2204-arm] runs-on: ${{ matrix.runner }} - timeout-minutes: 40 + timeout-minutes: 30 services: catalog: image: imresamu/postgis:15-3.4-alpine @@ -96,7 +96,7 @@ jobs: temporal operator search-attribute create --name MirrorName --type Text --namespace default ./peer-flow worker & ./peer-flow snapshot-worker & - go test -p 32 ./... -timeout 1200s + go test -p 32 ./... 
-timeout 900s working-directory: ./flow env: AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} diff --git a/flow/connectors/postgres/postgres_schema_delta_test.go b/flow/connectors/postgres/postgres_schema_delta_test.go index c059c36836..e5603b02d9 100644 --- a/flow/connectors/postgres/postgres_schema_delta_test.go +++ b/flow/connectors/postgres/postgres_schema_delta_test.go @@ -196,23 +196,25 @@ func (s PostgresSchemaDeltaTestSuite) TestAddDropWhitespaceColumnNames() { } func TestPostgresSchemaDeltaTestSuite(t *testing.T) { - e2eshared.RunSuite(t, SetupSuite, func(s PostgresSchemaDeltaTestSuite) { - teardownTx, err := s.connector.conn.Begin(context.Background()) - require.NoError(s.t, err) - defer func() { - err := teardownTx.Rollback(context.Background()) - if err != pgx.ErrTxClosed { - require.NoError(s.t, err) - } - }() - _, err = teardownTx.Exec(context.Background(), fmt.Sprintf("DROP SCHEMA IF EXISTS %s CASCADE", - s.schema)) - require.NoError(s.t, err) - err = teardownTx.Commit(context.Background()) - require.NoError(s.t, err) - - require.NoError(s.t, s.connector.ConnectionActive(context.Background())) - require.NoError(s.t, s.connector.Close()) - require.Error(s.t, s.connector.ConnectionActive(context.Background())) - }) + e2eshared.RunSuite(t, SetupSuite) +} + +func (s PostgresSchemaDeltaTestSuite) Teardown() { + teardownTx, err := s.connector.conn.Begin(context.Background()) + require.NoError(s.t, err) + defer func() { + err := teardownTx.Rollback(context.Background()) + if err != pgx.ErrTxClosed { + require.NoError(s.t, err) + } + }() + _, err = teardownTx.Exec(context.Background(), fmt.Sprintf("DROP SCHEMA IF EXISTS %s CASCADE", + s.schema)) + require.NoError(s.t, err) + err = teardownTx.Commit(context.Background()) + require.NoError(s.t, err) + + require.NoError(s.t, s.connector.ConnectionActive(context.Background())) + require.NoError(s.t, s.connector.Close()) + require.Error(s.t, s.connector.ConnectionActive(context.Background())) } diff --git a/flow/e2e/bigquery/bigquery.go b/flow/e2e/bigquery/bigquery.go new file mode 100644 index 0000000000..73f5c38d6e --- /dev/null +++ b/flow/e2e/bigquery/bigquery.go @@ -0,0 +1,102 @@ +package e2e_bigquery + +import ( + "fmt" + "strings" + "testing" + "time" + + "github.com/jackc/pgx/v5" + + connpostgres "github.com/PeerDB-io/peer-flow/connectors/postgres" + "github.com/PeerDB-io/peer-flow/e2e" + "github.com/PeerDB-io/peer-flow/generated/protos" + "github.com/PeerDB-io/peer-flow/model" + "github.com/PeerDB-io/peer-flow/shared" +) + +type PeerFlowE2ETestSuiteBQ struct { + t *testing.T + + bqSuffix string + conn *connpostgres.PostgresConnector + bqHelper *BigQueryTestHelper +} + +func (s PeerFlowE2ETestSuiteBQ) T() *testing.T { + return s.t +} + +func (s PeerFlowE2ETestSuiteBQ) Conn() *pgx.Conn { + return s.conn.Conn() +} + +func (s PeerFlowE2ETestSuiteBQ) Connector() *connpostgres.PostgresConnector { + return s.conn +} + +func (s PeerFlowE2ETestSuiteBQ) Suffix() string { + return s.bqSuffix +} + +func (s PeerFlowE2ETestSuiteBQ) Peer() *protos.Peer { + return s.bqHelper.Peer +} + +func (s PeerFlowE2ETestSuiteBQ) DestinationTable(table string) string { + return table +} + +func (s PeerFlowE2ETestSuiteBQ) GetRows(tableName string, colsString string) (*model.QRecordBatch, error) { + s.t.Helper() + qualifiedTableName := fmt.Sprintf("`%s.%s`", s.bqHelper.Config.DatasetId, tableName) + bqSelQuery := fmt.Sprintf("SELECT %s FROM %s ORDER BY id", colsString, qualifiedTableName) + s.t.Logf("running query on bigquery: %s", bqSelQuery) + return 
s.bqHelper.ExecuteAndProcessQuery(bqSelQuery) +} + +func (s PeerFlowE2ETestSuiteBQ) GetRowsWhere(tableName string, colsString string, where string) (*model.QRecordBatch, error) { + s.t.Helper() + qualifiedTableName := fmt.Sprintf("`%s.%s`", s.bqHelper.Config.DatasetId, tableName) + bqSelQuery := fmt.Sprintf("SELECT %s FROM %s WHERE %s ORDER BY id", colsString, qualifiedTableName, where) + s.t.Logf("running query on bigquery: %s", bqSelQuery) + return s.bqHelper.ExecuteAndProcessQuery(bqSelQuery) +} + +func (s PeerFlowE2ETestSuiteBQ) Teardown() { + e2e.TearDownPostgres(s) + + err := s.bqHelper.DropDataset(s.bqHelper.Config.DatasetId) + if err != nil { + s.t.Fatalf("failed to tear down bigquery: %v", err) + } +} + +func SetupSuite(t *testing.T) PeerFlowE2ETestSuiteBQ { + t.Helper() + + suffix := shared.RandomString(8) + tsSuffix := time.Now().Format("20060102150405") + bqSuffix := fmt.Sprintf("bq_%s_%s", strings.ToLower(suffix), tsSuffix) + conn, err := e2e.SetupPostgres(t, bqSuffix) + if err != nil || conn == nil { + t.Fatalf("failed to setup postgres: %v", err) + } + + bqHelper, err := NewBigQueryTestHelper() + if err != nil { + t.Fatalf("Failed to create helper: %v", err) + } + + err = bqHelper.RecreateDataset() + if err != nil { + t.Fatalf("Failed to recreate dataset: %v", err) + } + + return PeerFlowE2ETestSuiteBQ{ + t: t, + bqSuffix: bqSuffix, + conn: conn, + bqHelper: bqHelper, + } +} diff --git a/flow/e2e/bigquery/peer_flow_bq_test.go b/flow/e2e/bigquery/peer_flow_bq_test.go index 14e95deda2..ec28b5f97b 100644 --- a/flow/e2e/bigquery/peer_flow_bq_test.go +++ b/flow/e2e/bigquery/peer_flow_bq_test.go @@ -4,76 +4,23 @@ import ( "context" "errors" "fmt" - "strings" "testing" "time" "github.com/jackc/pgerrcode" - "github.com/jackc/pgx/v5" "github.com/jackc/pgx/v5/pgconn" - "github.com/joho/godotenv" "github.com/stretchr/testify/require" - connpostgres "github.com/PeerDB-io/peer-flow/connectors/postgres" "github.com/PeerDB-io/peer-flow/connectors/utils" "github.com/PeerDB-io/peer-flow/e2e" "github.com/PeerDB-io/peer-flow/e2eshared" "github.com/PeerDB-io/peer-flow/generated/protos" - "github.com/PeerDB-io/peer-flow/model" "github.com/PeerDB-io/peer-flow/model/qvalue" - "github.com/PeerDB-io/peer-flow/shared" peerflow "github.com/PeerDB-io/peer-flow/workflows" ) -type PeerFlowE2ETestSuiteBQ struct { - t *testing.T - - bqSuffix string - conn *connpostgres.PostgresConnector - bqHelper *BigQueryTestHelper -} - -func (s PeerFlowE2ETestSuiteBQ) T() *testing.T { - return s.t -} - -func (s PeerFlowE2ETestSuiteBQ) Conn() *pgx.Conn { - return s.conn.Conn() -} - -func (s PeerFlowE2ETestSuiteBQ) Connector() *connpostgres.PostgresConnector { - return s.conn -} - -func (s PeerFlowE2ETestSuiteBQ) Suffix() string { - return s.bqSuffix -} - -func (s PeerFlowE2ETestSuiteBQ) GetRows(tableName string, colsString string) (*model.QRecordBatch, error) { - s.t.Helper() - qualifiedTableName := fmt.Sprintf("`%s.%s`", s.bqHelper.Config.DatasetId, tableName) - bqSelQuery := fmt.Sprintf("SELECT %s FROM %s ORDER BY id", colsString, qualifiedTableName) - s.t.Logf("running query on bigquery: %s", bqSelQuery) - return s.bqHelper.ExecuteAndProcessQuery(bqSelQuery) -} - -func (s PeerFlowE2ETestSuiteBQ) GetRowsWhere(tableName string, colsString string, where string) (*model.QRecordBatch, error) { - s.t.Helper() - qualifiedTableName := fmt.Sprintf("`%s.%s`", s.bqHelper.Config.DatasetId, tableName) - bqSelQuery := fmt.Sprintf("SELECT %s FROM %s WHERE %s ORDER BY id", colsString, qualifiedTableName, where) - s.t.Logf("running 
query on bigquery: %s", bqSelQuery) - return s.bqHelper.ExecuteAndProcessQuery(bqSelQuery) -} - func TestPeerFlowE2ETestSuiteBQ(t *testing.T) { - e2eshared.RunSuite(t, setupSuite, func(s PeerFlowE2ETestSuiteBQ) { - e2e.TearDownPostgres(s) - - err := s.bqHelper.DropDataset(s.bqHelper.Config.DatasetId) - if err != nil { - s.t.Fatalf("failed to tear down bigquery: %v", err) - } - }) + e2eshared.RunSuite(t, SetupSuite) } func (s PeerFlowE2ETestSuiteBQ) checkJSONValue(tableName, colName, fieldName, value string) error { @@ -147,52 +94,6 @@ func (s *PeerFlowE2ETestSuiteBQ) checkPeerdbColumns(dstQualified string, softDel return nil } -// setupBigQuery sets up the bigquery connection. -func setupBigQuery(t *testing.T) *BigQueryTestHelper { - t.Helper() - - bqHelper, err := NewBigQueryTestHelper() - if err != nil { - t.Fatalf("Failed to create helper: %v", err) - } - - err = bqHelper.RecreateDataset() - if err != nil { - t.Fatalf("Failed to recreate dataset: %v", err) - } - - return bqHelper -} - -// Implement SetupAllSuite interface to setup the test suite -func setupSuite(t *testing.T) PeerFlowE2ETestSuiteBQ { - t.Helper() - - err := godotenv.Load() - if err != nil { - // it's okay if the .env file is not present - // we will use the default values - t.Log("Unable to load .env file, using default values from env") - } - - suffix := shared.RandomString(8) - tsSuffix := time.Now().Format("20060102150405") - bqSuffix := fmt.Sprintf("bq_%s_%s", strings.ToLower(suffix), tsSuffix) - conn, err := e2e.SetupPostgres(t, bqSuffix) - if err != nil || conn == nil { - t.Fatalf("failed to setup postgres: %v", err) - } - - bq := setupBigQuery(t) - - return PeerFlowE2ETestSuiteBQ{ - t: t, - bqSuffix: bqSuffix, - conn: conn, - bqHelper: bq, - } -} - func (s PeerFlowE2ETestSuiteBQ) Test_Invalid_Connection_Config() { tc := e2e.NewTemporalClient(s.t) @@ -268,54 +169,6 @@ func (s PeerFlowE2ETestSuiteBQ) Test_Char_ColType_Error() { e2e.RequireEnvCanceled(s.t, env) } -// Test_Complete_Simple_Flow_BQ tests a complete flow with data in the source table. -// The test inserts 10 rows into the source table and verifies that the data is -// correctly synced to the destination table after sync flow completes. 
-func (s PeerFlowE2ETestSuiteBQ) Test_Complete_Simple_Flow_BQ() { - tc := e2e.NewTemporalClient(s.t) - - srcTableName := s.attachSchemaSuffix("test_simple_flow_bq") - dstTableName := "test_simple_flow_bq" - - _, err := s.Conn().Exec(context.Background(), fmt.Sprintf(` - CREATE TABLE IF NOT EXISTS %s ( - id SERIAL PRIMARY KEY, - key TEXT NOT NULL, - value TEXT NOT NULL - ); - `, srcTableName)) - require.NoError(s.t, err) - - connectionGen := e2e.FlowConnectionGenerationConfig{ - FlowJobName: s.attachSuffix("test_complete_simple_flow"), - TableNameMapping: map[string]string{srcTableName: dstTableName}, - Destination: s.bqHelper.Peer, - CdcStagingPath: "", - } - - flowConnConfig := connectionGen.GenerateFlowConnectionConfigs() - flowConnConfig.MaxBatchSize = 100 - - env := e2e.ExecutePeerflow(tc, peerflow.CDCFlowWorkflow, flowConnConfig, nil) - e2e.SetupCDCFlowStatusQuery(s.t, env, connectionGen) - - // insert 10 rows into the source table - for i := range 10 { - testKey := fmt.Sprintf("test_key_%d", i) - testValue := fmt.Sprintf("test_value_%d", i) - _, err = s.Conn().Exec(context.Background(), fmt.Sprintf(` - INSERT INTO %s(key, value) VALUES ($1, $2) - `, srcTableName), testKey, testValue) - e2e.EnvNoError(s.t, env, err) - } - s.t.Log("Inserted 10 rows into the source table") - - e2e.EnvWaitForEqualTables(env, s, "normalize inserts", dstTableName, "id,key,value") - - env.Cancel() - e2e.RequireEnvCanceled(s.t, env) -} - func (s PeerFlowE2ETestSuiteBQ) Test_Toast_BQ() { tc := e2e.NewTemporalClient(s.t) diff --git a/flow/e2e/bigquery/qrep_flow_bq_test.go b/flow/e2e/bigquery/qrep_flow_bq_test.go index c7f6a5c7f8..ae0841f153 100644 --- a/flow/e2e/bigquery/qrep_flow_bq_test.go +++ b/flow/e2e/bigquery/qrep_flow_bq_test.go @@ -45,14 +45,9 @@ func (s PeerFlowE2ETestSuiteBQ) setupTimeTable(tableName string) { rows = append(rows, row) _, err = s.Conn().Exec(context.Background(), fmt.Sprintf(` - INSERT INTO e2e_test_%s.%s ( - watermark_ts, - mytimestamp, - mytztimestamp, - medieval, - mybaddate, - mydate - ) VALUES %s; + INSERT INTO e2e_test_%s.%s ( + watermark_ts, mytimestamp, mytztimestamp, medieval, mybaddate, mydate + ) VALUES %s; `, s.bqSuffix, tableName, strings.Join(rows, ","))) require.NoError(s.t, err) } diff --git a/flow/e2e/congen.go b/flow/e2e/congen.go index 82387c9ff3..2fd3c180e5 100644 --- a/flow/e2e/congen.go +++ b/flow/e2e/congen.go @@ -162,32 +162,36 @@ func GeneratePostgresPeer() *protos.Peer { type FlowConnectionGenerationConfig struct { FlowJobName string + TableMappings []*protos.TableMapping TableNameMapping map[string]string Destination *protos.Peer CdcStagingPath string SoftDelete bool } -// GenerateSnowflakePeer generates a snowflake peer config for testing. 
-func GenerateSnowflakePeer(snowflakeConfig *protos.SnowflakeConfig) (*protos.Peer, error) { - ret := &protos.Peer{} - ret.Name = "test_snowflake_peer" - ret.Type = protos.DBType_SNOWFLAKE - - ret.Config = &protos.Peer_SnowflakeConfig{ - SnowflakeConfig: snowflakeConfig, +func TableMappings(s GenericSuite, tables ...string) []*protos.TableMapping { + if len(tables)&1 != 0 { + panic("must receive even number of table names") } - - return ret, nil + tm := make([]*protos.TableMapping, 0, len(tables)/2) + for i := 0; i < len(tables); i += 2 { + tm = append(tm, &protos.TableMapping{ + SourceTableIdentifier: AttachSchema(s, tables[i]), + DestinationTableIdentifier: s.DestinationTable(tables[i+1]), + }) + } + return tm } func (c *FlowConnectionGenerationConfig) GenerateFlowConnectionConfigs() *protos.FlowConnectionConfigs { - tblMappings := []*protos.TableMapping{} - for k, v := range c.TableNameMapping { - tblMappings = append(tblMappings, &protos.TableMapping{ - SourceTableIdentifier: k, - DestinationTableIdentifier: v, - }) + tblMappings := c.TableMappings + if tblMappings == nil { + for k, v := range c.TableNameMapping { + tblMappings = append(tblMappings, &protos.TableMapping{ + SourceTableIdentifier: k, + DestinationTableIdentifier: v, + }) + } } ret := &protos.FlowConnectionConfigs{ diff --git a/flow/e2e/generic/peer_flow_test.go b/flow/e2e/generic/peer_flow_test.go new file mode 100644 index 0000000000..20c5847df4 --- /dev/null +++ b/flow/e2e/generic/peer_flow_test.go @@ -0,0 +1,82 @@ +package e2e_generic + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/PeerDB-io/peer-flow/e2e" + "github.com/PeerDB-io/peer-flow/e2e/bigquery" + "github.com/PeerDB-io/peer-flow/e2e/postgres" + "github.com/PeerDB-io/peer-flow/e2e/snowflake" + "github.com/PeerDB-io/peer-flow/e2eshared" + peerflow "github.com/PeerDB-io/peer-flow/workflows" +) + +func TestGenericPG(t *testing.T) { + e2eshared.RunSuite(t, SetupGenericSuite(e2e_postgres.SetupSuite)) +} + +func TestGenericSF(t *testing.T) { + e2eshared.RunSuite(t, SetupGenericSuite(e2e_snowflake.SetupSuite)) +} + +func TestGenericBQ(t *testing.T) { + e2eshared.RunSuite(t, SetupGenericSuite(e2e_bigquery.SetupSuite)) +} + +type Generic struct { + e2e.GenericSuite +} + +func SetupGenericSuite[T e2e.GenericSuite](f func(t *testing.T) T) func(t *testing.T) Generic { + return func(t *testing.T) Generic { + t.Helper() + return Generic{f(t)} + } +} + +func (s Generic) Test_Simple_Flow() { + t := s.T() + srcTable := "test_simple" + dstTable := "test_simple_dst" + srcSchemaTable := e2e.AttachSchema(s, srcTable) + + _, err := s.Connector().Conn().Exec(context.Background(), fmt.Sprintf(` + CREATE TABLE IF NOT EXISTS %s ( + id SERIAL PRIMARY KEY, + key TEXT NOT NULL, + value TEXT NOT NULL, + myh HSTORE NOT NULL + ); + `, srcSchemaTable)) + require.NoError(t, err) + + connectionGen := e2e.FlowConnectionGenerationConfig{ + FlowJobName: e2e.AddSuffix(s, "test_simple"), + TableMappings: e2e.TableMappings(s, srcTable, dstTable), + Destination: s.Peer(), + } + flowConnConfig := connectionGen.GenerateFlowConnectionConfigs() + + tc := e2e.NewTemporalClient(t) + env := e2e.ExecutePeerflow(tc, peerflow.CDCFlowWorkflow, flowConnConfig, nil) + + e2e.SetupCDCFlowStatusQuery(t, env, connectionGen) + // insert 10 rows into the source table + for i := range 10 { + testKey := fmt.Sprintf("test_key_%d", i) + testValue := fmt.Sprintf("test_value_%d", i) + _, err = s.Connector().Conn().Exec(context.Background(), fmt.Sprintf(` + INSERT INTO 
%s(key, value, myh) VALUES ($1, $2, '"a"=>"b"') + `, srcSchemaTable), testKey, testValue) + e2e.EnvNoError(t, env, err) + } + t.Log("Inserted 10 rows into the source table") + + e2e.EnvWaitForEqualTablesWithNames(env, s, "normalizing 10 rows", srcTable, dstTable, `id,key,value,myh`) + env.Cancel() + e2e.RequireEnvCanceled(t, env) +} diff --git a/flow/e2e/postgres/peer_flow_pg_test.go b/flow/e2e/postgres/peer_flow_pg_test.go index fa55bbb3fc..2e69376b01 100644 --- a/flow/e2e/postgres/peer_flow_pg_test.go +++ b/flow/e2e/postgres/peer_flow_pg_test.go @@ -77,52 +77,6 @@ func (s PeerFlowE2ETestSuitePG) WaitForSchema( }) } -func (s PeerFlowE2ETestSuitePG) Test_Simple_Flow_PG() { - srcTableName := s.attachSchemaSuffix("test_simple_flow") - dstTableName := s.attachSchemaSuffix("test_simple_flow_dst") - - _, err := s.Conn().Exec(context.Background(), fmt.Sprintf(` - CREATE TABLE IF NOT EXISTS %s ( - id SERIAL PRIMARY KEY, - key TEXT NOT NULL, - value TEXT NOT NULL, - myh HSTORE NOT NULL - ); - `, srcTableName)) - require.NoError(s.t, err) - - connectionGen := e2e.FlowConnectionGenerationConfig{ - FlowJobName: s.attachSuffix("test_simple_flow"), - TableNameMapping: map[string]string{srcTableName: dstTableName}, - Destination: s.peer, - } - - flowConnConfig := connectionGen.GenerateFlowConnectionConfigs() - flowConnConfig.MaxBatchSize = 100 - - tc := e2e.NewTemporalClient(s.t) - env := e2e.ExecutePeerflow(tc, peerflow.CDCFlowWorkflow, flowConnConfig, nil) - - e2e.SetupCDCFlowStatusQuery(s.t, env, connectionGen) - // insert 10 rows into the source table - for i := range 10 { - testKey := fmt.Sprintf("test_key_%d", i) - testValue := fmt.Sprintf("test_value_%d", i) - _, err = s.Conn().Exec(context.Background(), fmt.Sprintf(` - INSERT INTO %s(key, value, myh) VALUES ($1, $2, '"a"=>"b"') - `, srcTableName), testKey, testValue) - e2e.EnvNoError(s.t, env, err) - } - s.t.Log("Inserted 10 rows into the source table") - - e2e.EnvWaitFor(s.t, env, 3*time.Minute, "normalize 10 rows", func() bool { - return s.comparePGTables(srcTableName, dstTableName, "id,key,value") == nil - }) - env.Cancel() - - e2e.RequireEnvCanceled(s.t, env) -} - func (s PeerFlowE2ETestSuitePG) Test_Geospatial_PG() { srcTableName := s.attachSchemaSuffix("test_geospatial_pg") dstTableName := s.attachSchemaSuffix("test_geospatial_pg_dst") @@ -1097,7 +1051,7 @@ func (s PeerFlowE2ETestSuitePG) Test_ContinueAsNew() { } s.t.Log("Inserted 144 rows into the source table") - e2e.EnvWaitFor(s.t, env, 3*time.Minute, "normalize 72 syncs", func() bool { + e2e.EnvWaitFor(s.t, env, 4*time.Minute, "normalize 72 syncs", func() bool { return s.comparePGTables(srcTableName, dstTableName, "id,key,value") == nil }) env.Cancel() diff --git a/flow/e2e/postgres/postgres.go b/flow/e2e/postgres/postgres.go new file mode 100644 index 0000000000..23ca778c8d --- /dev/null +++ b/flow/e2e/postgres/postgres.go @@ -0,0 +1,79 @@ +package e2e_postgres + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/jackc/pgx/v5" + "github.com/stretchr/testify/require" + + connpostgres "github.com/PeerDB-io/peer-flow/connectors/postgres" + "github.com/PeerDB-io/peer-flow/e2e" + "github.com/PeerDB-io/peer-flow/generated/protos" + "github.com/PeerDB-io/peer-flow/model" + "github.com/PeerDB-io/peer-flow/shared" +) + +type PeerFlowE2ETestSuitePG struct { + t *testing.T + + conn *connpostgres.PostgresConnector + peer *protos.Peer + suffix string +} + +func (s PeerFlowE2ETestSuitePG) T() *testing.T { + return s.t +} + +func (s PeerFlowE2ETestSuitePG) Connector() 
*connpostgres.PostgresConnector { + return s.conn +} + +func (s PeerFlowE2ETestSuitePG) Conn() *pgx.Conn { + return s.conn.Conn() +} + +func (s PeerFlowE2ETestSuitePG) Suffix() string { + return s.suffix +} + +func (s PeerFlowE2ETestSuitePG) Peer() *protos.Peer { + return s.peer +} + +func (s PeerFlowE2ETestSuitePG) DestinationTable(table string) string { + return e2e.AttachSchema(s, table) +} + +func (s PeerFlowE2ETestSuitePG) GetRows(table string, cols string) (*model.QRecordBatch, error) { + s.t.Helper() + pgQueryExecutor := s.conn.NewQRepQueryExecutor("testflow", "testpart") + pgQueryExecutor.SetTestEnv(true) + + return pgQueryExecutor.ExecuteAndProcessQuery( + context.Background(), + fmt.Sprintf(`SELECT %s FROM e2e_test_%s.%s ORDER BY id`, cols, s.suffix, connpostgres.QuoteIdentifier(table)), + ) +} + +func SetupSuite(t *testing.T) PeerFlowE2ETestSuitePG { + t.Helper() + + suffix := "pg_" + strings.ToLower(shared.RandomString(8)) + conn, err := e2e.SetupPostgres(t, suffix) + require.NoError(t, err, "failed to setup postgres") + + return PeerFlowE2ETestSuitePG{ + t: t, + conn: conn, + peer: e2e.GeneratePostgresPeer(), + suffix: suffix, + } +} + +func (s PeerFlowE2ETestSuitePG) Teardown() { + e2e.TearDownPostgres(s) +} diff --git a/flow/e2e/postgres/qrep_flow_pg_test.go b/flow/e2e/postgres/qrep_flow_pg_test.go index fb49d3d242..abb7867d24 100644 --- a/flow/e2e/postgres/qrep_flow_pg_test.go +++ b/flow/e2e/postgres/qrep_flow_pg_test.go @@ -10,7 +10,6 @@ import ( "github.com/jackc/pgx/v5" "github.com/jackc/pgx/v5/pgtype" - "github.com/joho/godotenv" "github.com/stretchr/testify/require" connpostgres "github.com/PeerDB-io/peer-flow/connectors/postgres" @@ -21,56 +20,8 @@ import ( "github.com/PeerDB-io/peer-flow/shared" ) -type PeerFlowE2ETestSuitePG struct { - t *testing.T - - conn *connpostgres.PostgresConnector - peer *protos.Peer - suffix string -} - -func (s PeerFlowE2ETestSuitePG) T() *testing.T { - return s.t -} - -func (s PeerFlowE2ETestSuitePG) Conn() *pgx.Conn { - return s.conn.Conn() -} - -func (s PeerFlowE2ETestSuitePG) Connector() *connpostgres.PostgresConnector { - return s.conn -} - -func (s PeerFlowE2ETestSuitePG) Suffix() string { - return s.suffix -} - func TestPeerFlowE2ETestSuitePG(t *testing.T) { - e2eshared.RunSuite(t, SetupSuite, func(s PeerFlowE2ETestSuitePG) { - e2e.TearDownPostgres(s) - }) -} - -func SetupSuite(t *testing.T) PeerFlowE2ETestSuitePG { - t.Helper() - - err := godotenv.Load() - if err != nil { - // it's okay if the .env file is not present - // we will use the default values - t.Log("Unable to load .env file, using default values from env") - } - - suffix := "pg_" + strings.ToLower(shared.RandomString(8)) - conn, err := e2e.SetupPostgres(t, suffix) - require.NoError(t, err, "failed to setup postgres") - - return PeerFlowE2ETestSuitePG{ - t: t, - conn: conn, - peer: e2e.GeneratePostgresPeer(), - suffix: suffix, - } + e2eshared.RunSuite(t, SetupSuite) } func (s PeerFlowE2ETestSuitePG) setupSourceTable(tableName string, rowCount int) { diff --git a/flow/e2e/s3/qrep_flow_s3_test.go b/flow/e2e/s3/qrep_flow_s3_test.go index 240f3a78b9..d59378d52c 100644 --- a/flow/e2e/s3/qrep_flow_s3_test.go +++ b/flow/e2e/s3/qrep_flow_s3_test.go @@ -7,7 +7,6 @@ import ( "testing" "time" - "github.com/joho/godotenv" "github.com/stretchr/testify/require" connpostgres "github.com/PeerDB-io/peer-flow/connectors/postgres" @@ -36,21 +35,12 @@ func (s PeerFlowE2ETestSuiteS3) Suffix() string { return s.suffix } -func tearDownSuite(s PeerFlowE2ETestSuiteS3) { - 
e2e.TearDownPostgres(s) - - err := s.s3Helper.CleanUp(context.Background()) - if err != nil { - require.Fail(s.t, "failed to clean up s3", err) - } -} - func TestPeerFlowE2ETestSuiteS3(t *testing.T) { - e2eshared.RunSuite(t, SetupSuiteS3, tearDownSuite) + e2eshared.RunSuite(t, SetupSuiteS3) } func TestPeerFlowE2ETestSuiteGCS(t *testing.T) { - e2eshared.RunSuite(t, SetupSuiteGCS, tearDownSuite) + e2eshared.RunSuite(t, SetupSuiteGCS) } func (s PeerFlowE2ETestSuiteS3) setupSourceTable(tableName string, rowCount int) { @@ -63,13 +53,6 @@ func (s PeerFlowE2ETestSuiteS3) setupSourceTable(tableName string, rowCount int) func setupSuite(t *testing.T, gcs bool) PeerFlowE2ETestSuiteS3 { t.Helper() - err := godotenv.Load() - if err != nil { - // it's okay if the .env file is not present - // we will use the default values - t.Log("Unable to load .env file, using default values from env") - } - suffix := "s3_" + strings.ToLower(shared.RandomString(8)) conn, err := e2e.SetupPostgres(t, suffix) if err != nil || conn == nil { @@ -89,6 +72,15 @@ func setupSuite(t *testing.T, gcs bool) PeerFlowE2ETestSuiteS3 { } } +func (s PeerFlowE2ETestSuiteS3) Teardown() { + e2e.TearDownPostgres(s) + + err := s.s3Helper.CleanUp(context.Background()) + if err != nil { + require.Fail(s.t, "failed to clean up s3", err) + } +} + func SetupSuiteS3(t *testing.T) PeerFlowE2ETestSuiteS3 { t.Helper() return setupSuite(t, false) diff --git a/flow/e2e/snowflake/peer_flow_sf_test.go b/flow/e2e/snowflake/peer_flow_sf_test.go index 9eaf491e47..525d2c7256 100644 --- a/flow/e2e/snowflake/peer_flow_sf_test.go +++ b/flow/e2e/snowflake/peer_flow_sf_test.go @@ -9,176 +9,27 @@ import ( "time" "github.com/jackc/pgerrcode" - "github.com/jackc/pgx/v5" "github.com/jackc/pgx/v5/pgconn" - "github.com/joho/godotenv" "github.com/stretchr/testify/require" - connpostgres "github.com/PeerDB-io/peer-flow/connectors/postgres" - connsnowflake "github.com/PeerDB-io/peer-flow/connectors/snowflake" "github.com/PeerDB-io/peer-flow/connectors/utils" "github.com/PeerDB-io/peer-flow/e2e" "github.com/PeerDB-io/peer-flow/e2eshared" "github.com/PeerDB-io/peer-flow/generated/protos" - "github.com/PeerDB-io/peer-flow/model" "github.com/PeerDB-io/peer-flow/model/qvalue" - "github.com/PeerDB-io/peer-flow/shared" peerflow "github.com/PeerDB-io/peer-flow/workflows" ) -type PeerFlowE2ETestSuiteSF struct { - t *testing.T - - pgSuffix string - conn *connpostgres.PostgresConnector - sfHelper *SnowflakeTestHelper - connector *connsnowflake.SnowflakeConnector -} - -func (s PeerFlowE2ETestSuiteSF) T() *testing.T { - return s.t -} - -func (s PeerFlowE2ETestSuiteSF) Connector() *connpostgres.PostgresConnector { - return s.conn -} - -func (s PeerFlowE2ETestSuiteSF) Conn() *pgx.Conn { - return s.Connector().Conn() -} - -func (s PeerFlowE2ETestSuiteSF) Suffix() string { - return s.pgSuffix -} - -func (s PeerFlowE2ETestSuiteSF) GetRows(tableName string, sfSelector string) (*model.QRecordBatch, error) { - s.t.Helper() - qualifiedTableName := fmt.Sprintf(`%s.%s.%s`, s.sfHelper.testDatabaseName, s.sfHelper.testSchemaName, tableName) - sfSelQuery := fmt.Sprintf(`SELECT %s FROM %s ORDER BY id`, sfSelector, qualifiedTableName) - s.t.Logf("running query on snowflake: %s", sfSelQuery) - return s.sfHelper.ExecuteAndProcessQuery(sfSelQuery) -} - func TestPeerFlowE2ETestSuiteSF(t *testing.T) { - e2eshared.RunSuite(t, SetupSuite, func(s PeerFlowE2ETestSuiteSF) { - e2e.TearDownPostgres(s) - - if s.sfHelper != nil { - err := s.sfHelper.Cleanup() - if err != nil { - s.t.Fatalf("failed to tear down 
Snowflake: %v", err) - } - } - - err := s.connector.Close() - if err != nil { - s.t.Fatalf("failed to close Snowflake connector: %v", err) - } - }) + e2eshared.RunSuite(t, SetupSuite) } func (s PeerFlowE2ETestSuiteSF) attachSchemaSuffix(tableName string) string { - return fmt.Sprintf("e2e_test_%s.%s", s.pgSuffix, tableName) + return e2e.AttachSchema(s, tableName) } func (s PeerFlowE2ETestSuiteSF) attachSuffix(input string) string { - return fmt.Sprintf("%s_%s", input, s.pgSuffix) -} - -func SetupSuite(t *testing.T) PeerFlowE2ETestSuiteSF { - t.Helper() - - err := godotenv.Load() - if err != nil { - // it's okay if the .env file is not present - // we will use the default values - t.Log("Unable to load .env file, using default values from env") - } - - suffix := shared.RandomString(8) - tsSuffix := time.Now().Format("20060102150405") - pgSuffix := fmt.Sprintf("sf_%s_%s", strings.ToLower(suffix), tsSuffix) - - conn, err := e2e.SetupPostgres(t, pgSuffix) - if err != nil || conn == nil { - t.Fatalf("failed to setup Postgres: %v", err) - } - - sfHelper, err := NewSnowflakeTestHelper() - if err != nil { - t.Fatalf("failed to setup Snowflake: %v", err) - } - - connector, err := connsnowflake.NewSnowflakeConnector( - context.Background(), - sfHelper.Config, - ) - require.NoError(t, err) - - suite := PeerFlowE2ETestSuiteSF{ - t: t, - pgSuffix: pgSuffix, - conn: conn, - sfHelper: sfHelper, - connector: connector, - } - - return suite -} - -func (s PeerFlowE2ETestSuiteSF) Test_Complete_Simple_Flow_SF() { - tc := e2e.NewTemporalClient(s.t) - - tableName := "test_simple_flow_sf" - srcTableName := s.attachSchemaSuffix(tableName) - dstTableName := fmt.Sprintf("%s.%s", s.sfHelper.testSchemaName, tableName) - - _, err := s.Conn().Exec(context.Background(), fmt.Sprintf(` - CREATE TABLE IF NOT EXISTS %s ( - id SERIAL PRIMARY KEY, - key TEXT NOT NULL, - value TEXT NOT NULL - ); - `, srcTableName)) - require.NoError(s.t, err) - - connectionGen := e2e.FlowConnectionGenerationConfig{ - FlowJobName: s.attachSuffix(tableName), - TableNameMapping: map[string]string{srcTableName: dstTableName}, - Destination: s.sfHelper.Peer, - } - - flowConnConfig := connectionGen.GenerateFlowConnectionConfigs() - flowConnConfig.MaxBatchSize = 100 - - env := e2e.ExecutePeerflow(tc, peerflow.CDCFlowWorkflow, flowConnConfig, nil) - // wait for PeerFlowStatusQuery to finish setup - // and then insert 20 rows into the source table - e2e.SetupCDCFlowStatusQuery(s.t, env, connectionGen) - // insert 20 rows into the source table - for i := range 20 { - testKey := fmt.Sprintf("test_key_%d", i) - testValue := fmt.Sprintf("test_value_%d", i) - _, err = s.Conn().Exec(context.Background(), fmt.Sprintf(` - INSERT INTO %s (key, value) VALUES ($1, $2) - `, srcTableName), testKey, testValue) - e2e.EnvNoError(s.t, env, err) - } - s.t.Log("Inserted 20 rows into the source table") - e2e.EnvWaitForEqualTables(env, s, "normalize table", tableName, "id,key,value") - - env.Cancel() - - e2e.RequireEnvCanceled(s.t, env) - - // check the number of rows where _PEERDB_SYNCED_AT is newer than 5 mins ago - // it should match the count. 
- newerSyncedAtQuery := fmt.Sprintf(` - SELECT COUNT(*) FROM %s WHERE _PEERDB_SYNCED_AT > CURRENT_TIMESTAMP() - INTERVAL '30 MINUTE' - `, dstTableName) - numNewRows, err := s.sfHelper.RunIntQuery(newerSyncedAtQuery) - require.NoError(s.t, err) - require.Equal(s.t, 20, numNewRows) + return e2e.AddSuffix(s, input) } func (s PeerFlowE2ETestSuiteSF) Test_Flow_ReplicaIdentity_Index_No_Pkey() { diff --git a/flow/e2e/snowflake/snowflake.go b/flow/e2e/snowflake/snowflake.go new file mode 100644 index 0000000000..45132ef601 --- /dev/null +++ b/flow/e2e/snowflake/snowflake.go @@ -0,0 +1,110 @@ +package e2e_snowflake + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + "github.com/jackc/pgx/v5" + "github.com/stretchr/testify/require" + + connpostgres "github.com/PeerDB-io/peer-flow/connectors/postgres" + connsnowflake "github.com/PeerDB-io/peer-flow/connectors/snowflake" + "github.com/PeerDB-io/peer-flow/e2e" + "github.com/PeerDB-io/peer-flow/generated/protos" + "github.com/PeerDB-io/peer-flow/model" + "github.com/PeerDB-io/peer-flow/shared" +) + +type PeerFlowE2ETestSuiteSF struct { + t *testing.T + + pgSuffix string + conn *connpostgres.PostgresConnector + sfHelper *SnowflakeTestHelper + connector *connsnowflake.SnowflakeConnector +} + +func (s PeerFlowE2ETestSuiteSF) T() *testing.T { + return s.t +} + +func (s PeerFlowE2ETestSuiteSF) Connector() *connpostgres.PostgresConnector { + return s.conn +} + +func (s PeerFlowE2ETestSuiteSF) Conn() *pgx.Conn { + return s.Connector().Conn() +} + +func (s PeerFlowE2ETestSuiteSF) Suffix() string { + return s.pgSuffix +} + +func (s PeerFlowE2ETestSuiteSF) Peer() *protos.Peer { + return s.sfHelper.Peer +} + +func (s PeerFlowE2ETestSuiteSF) DestinationTable(table string) string { + return fmt.Sprintf("%s.%s", s.sfHelper.testSchemaName, table) +} + +func (s PeerFlowE2ETestSuiteSF) GetRows(tableName string, sfSelector string) (*model.QRecordBatch, error) { + s.t.Helper() + qualifiedTableName := fmt.Sprintf(`%s.%s.%s`, s.sfHelper.testDatabaseName, s.sfHelper.testSchemaName, tableName) + sfSelQuery := fmt.Sprintf(`SELECT %s FROM %s ORDER BY id`, sfSelector, qualifiedTableName) + s.t.Logf("running query on snowflake: %s", sfSelQuery) + return s.sfHelper.ExecuteAndProcessQuery(sfSelQuery) +} + +func SetupSuite(t *testing.T) PeerFlowE2ETestSuiteSF { + t.Helper() + + suffix := shared.RandomString(8) + tsSuffix := time.Now().Format("20060102150405") + pgSuffix := fmt.Sprintf("sf_%s_%s", strings.ToLower(suffix), tsSuffix) + + conn, err := e2e.SetupPostgres(t, pgSuffix) + if err != nil || conn == nil { + t.Fatalf("failed to setup Postgres: %v", err) + } + + sfHelper, err := NewSnowflakeTestHelper() + if err != nil { + t.Fatalf("failed to setup Snowflake: %v", err) + } + + connector, err := connsnowflake.NewSnowflakeConnector( + context.Background(), + sfHelper.Config, + ) + require.NoError(t, err) + + suite := PeerFlowE2ETestSuiteSF{ + t: t, + pgSuffix: pgSuffix, + conn: conn, + sfHelper: sfHelper, + connector: connector, + } + + return suite +} + +func (s PeerFlowE2ETestSuiteSF) Teardown() { + e2e.TearDownPostgres(s) + + if s.sfHelper != nil { + err := s.sfHelper.Cleanup() + if err != nil { + s.t.Fatalf("failed to tear down Snowflake: %v", err) + } + } + + err := s.connector.Close() + if err != nil { + s.t.Fatalf("failed to close Snowflake connector: %v", err) + } +} diff --git a/flow/e2e/snowflake/snowflake_schema_delta_test.go b/flow/e2e/snowflake/snowflake_schema_delta_test.go index 68c70e56aa..d607e90451 100644 --- 
a/flow/e2e/snowflake/snowflake_schema_delta_test.go +++ b/flow/e2e/snowflake/snowflake_schema_delta_test.go @@ -304,9 +304,11 @@ func (s SnowflakeSchemaDeltaTestSuite) TestAddWhitespaceColumnNames() { require.Equal(s.t, expectedTableSchema, output.TableNameSchemaMapping[tableName]) } +func (s SnowflakeSchemaDeltaTestSuite) Teardown() { + require.NoError(s.t, s.sfTestHelper.Cleanup()) + require.NoError(s.t, s.connector.Close()) +} + func TestSnowflakeSchemaDeltaTestSuite(t *testing.T) { - e2eshared.RunSuite(t, setupSchemaDeltaSuite, func(s SnowflakeSchemaDeltaTestSuite) { - require.NoError(s.t, s.sfTestHelper.Cleanup()) - require.NoError(s.t, s.connector.Close()) - }) + e2eshared.RunSuite(t, setupSchemaDeltaSuite) } diff --git a/flow/e2e/sqlserver/qrep_flow_sqlserver_test.go b/flow/e2e/sqlserver/qrep_flow_sqlserver_test.go index 448d0f32b7..153fa07cc6 100644 --- a/flow/e2e/sqlserver/qrep_flow_sqlserver_test.go +++ b/flow/e2e/sqlserver/qrep_flow_sqlserver_test.go @@ -11,7 +11,6 @@ import ( "github.com/jackc/pgx/v5" "github.com/jackc/pgx/v5/pgtype" - "github.com/joho/godotenv" "github.com/stretchr/testify/require" "github.com/PeerDB-io/peer-flow/connectors/postgres" @@ -48,26 +47,21 @@ func (s PeerFlowE2ETestSuiteSQLServer) Suffix() string { } func TestCDCFlowE2ETestSuiteSQLServer(t *testing.T) { - e2eshared.RunSuite(t, SetupSuite, func(s PeerFlowE2ETestSuiteSQLServer) { - e2e.TearDownPostgres(s) + e2eshared.RunSuite(t, SetupSuite) +} - if s.sqlsHelper != nil { - err := s.sqlsHelper.CleanUp() - require.NoError(s.t, err) - } - }) +func (s PeerFlowE2ETestSuiteSQLServer) Teardown() { + e2e.TearDownPostgres(s) + + if s.sqlsHelper != nil { + err := s.sqlsHelper.CleanUp() + require.NoError(s.t, err) + } } func SetupSuite(t *testing.T) PeerFlowE2ETestSuiteSQLServer { t.Helper() - err := godotenv.Load() - if err != nil { - // it's okay if the .env file is not present - // we will use the default values - t.Log("Unable to load .env file, using default values from env") - } - suffix := "sqls_" + strings.ToLower(shared.RandomString(8)) conn, err := e2e.SetupPostgres(t, suffix) if err != nil { diff --git a/flow/e2e/test_utils.go b/flow/e2e/test_utils.go index 8592eb406e..c018f32df2 100644 --- a/flow/e2e/test_utils.go +++ b/flow/e2e/test_utils.go @@ -15,6 +15,7 @@ import ( "github.com/jackc/pgerrcode" "github.com/jackc/pgx/v5" "github.com/jackc/pgx/v5/pgconn" + "github.com/joho/godotenv" "github.com/stretchr/testify/require" "go.temporal.io/api/enums/v1" "go.temporal.io/sdk/client" @@ -33,7 +34,14 @@ import ( peerflow "github.com/PeerDB-io/peer-flow/workflows" ) +func init() { + // it's okay if the .env file is not present + // we will use the default values + _ = godotenv.Load() +} + type Suite interface { + e2eshared.Suite T() *testing.T Connector() *connpostgres.PostgresConnector Suffix() string @@ -44,6 +52,20 @@ type RowSource interface { GetRows(table, cols string) (*model.QRecordBatch, error) } +type GenericSuite interface { + RowSource + Peer() *protos.Peer + DestinationTable(table string) string +} + +func AttachSchema(s Suite, table string) string { + return fmt.Sprintf("e2e_test_%s.%s", s.Suffix(), table) +} + +func AddSuffix(s Suite, str string) string { + return fmt.Sprintf("%s_%s", str, s.Suffix()) +} + // Helper function to assert errors in go routines running concurrent to workflows // This achieves two goals: // 1. 
cancel workflow to avoid waiting on goroutine which has failed @@ -129,11 +151,13 @@ func EnvWaitForEqualTablesWithNames( pgRows, err := GetPgRows(suite.Connector(), suite.Suffix(), srcTable, cols) if err != nil { + t.Log(err) return false } rows, err := suite.GetRows(dstTable, cols) if err != nil { + t.Log(err) return false } @@ -163,18 +187,21 @@ func SetupCDCFlowStatusQuery(t *testing.T, env WorkflowRun, connectionGen FlowCo for { time.Sleep(time.Second) counter++ - response, err := env.Query(shared.CDCFlowStateQuery, connectionGen.FlowJobName) + response, err := env.Query(shared.FlowStatusQuery, connectionGen.FlowJobName) if err == nil { - var state peerflow.CDCFlowWorkflowState - err = response.Get(&state) + var status protos.FlowStatus + err = response.Get(&status) if err != nil { t.Fatal(err) - } else if state.CurrentFlowStatus == protos.FlowStatus_STATUS_RUNNING { + } else if status == protos.FlowStatus_STATUS_RUNNING { return + } else if counter > 30 { + env.Cancel() + t.Fatal("UNEXPECTED STATUS TIMEOUT", status) } } else if counter > 15 { env.Cancel() - t.Fatal("UNEXPECTED SETUP CDC TIMEOUT", err.Error()) + t.Fatal("UNEXPECTED STATUS QUERY TIMEOUT", err.Error()) } else if counter > 5 { // log the error for informational purposes t.Log(err.Error()) diff --git a/flow/e2eshared/e2eshared.go b/flow/e2eshared/e2eshared.go index b242ebb1ea..087ff58014 100644 --- a/flow/e2eshared/e2eshared.go +++ b/flow/e2eshared/e2eshared.go @@ -12,7 +12,11 @@ import ( "github.com/PeerDB-io/peer-flow/model/qvalue" ) -func RunSuite[T any](t *testing.T, setup func(t *testing.T) T, teardown func(T)) { +type Suite interface { + Teardown() +} + +func RunSuite[T Suite](t *testing.T, setup func(t *testing.T) T) { t.Helper() t.Parallel() @@ -26,7 +30,7 @@ func RunSuite[T any](t *testing.T, setup func(t *testing.T) T, teardown func(T)) subtest.Parallel() suite := setup(subtest) subtest.Cleanup(func() { - teardown(suite) + suite.Teardown() }) m.Func.Call([]reflect.Value{reflect.ValueOf(suite)}) }) diff --git a/flow/model/qvalue/qvalue.go b/flow/model/qvalue/qvalue.go index 972063d494..013e0ca9e1 100644 --- a/flow/model/qvalue/qvalue.go +++ b/flow/model/qvalue/qvalue.go @@ -8,6 +8,7 @@ import ( "math/big" "reflect" "strconv" + "strings" "time" "cloud.google.com/go/civil" @@ -271,11 +272,14 @@ func compareHstore(value1, value2 interface{}) bool { } return string(bytes) == str2 case string: + if v1 == str2 { + return true + } parsedHStore1, err := hstore_util.ParseHstore(v1) if err != nil { panic(err) } - return parsedHStore1 == str2 + return parsedHStore1 == strings.ReplaceAll(strings.ReplaceAll(str2, " ", ""), "\n", "") default: panic(fmt.Sprintf("invalid hstore value type %T: %v", value1, value1)) } diff --git a/flow/workflows/cdc_flow.go b/flow/workflows/cdc_flow.go index 14ca09092d..bab81b3b23 100644 --- a/flow/workflows/cdc_flow.go +++ b/flow/workflows/cdc_flow.go @@ -41,7 +41,6 @@ func NewCDCFlowWorkflowState(cfg *protos.FlowConnectionConfigs) *CDCFlowWorkflow tableMappings = append(tableMappings, proto.Clone(tableMapping).(*protos.TableMapping)) } return &CDCFlowWorkflowState{ - // 1 more than the limit of 10 ActiveSignal: model.NoopSignal, CurrentFlowStatus: protos.FlowStatus_STATUS_SETUP, FlowConfigUpdate: nil, diff --git a/flow/workflows/setup_flow.go b/flow/workflows/setup_flow.go index ccb699d8a4..ac3e66f93c 100644 --- a/flow/workflows/setup_flow.go +++ b/flow/workflows/setup_flow.go @@ -227,7 +227,7 @@ func (s *SetupFlowExecution) fetchTableSchemaAndSetupNormalizedTables( } 
normalizedTableMapping[normalizedTableName] = tableSchema
 
-		s.logger.Info("normalized table schema: ", normalizedTableName, " -> ", tableSchema)
+		s.logger.Info("normalized table schema", slog.String("table", normalizedTableName), slog.Any("schema", tableSchema))
 	}
 
 	// now setup the normalized tables on the destination peer

From d8a7bd61046b9463846e793dc686c55ec4f2612e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Philip=20Dub=C3=A9?=
Date: Thu, 7 Mar 2024 14:56:43 +0000
Subject: [PATCH 13/13] Fix cases of !BADKEY in datadog (#1443)

Explicitly passing the flow name is no longer necessary after #1357
---
 flow/activities/flowable.go          | 10 +++++-----
 flow/activities/snapshot_activity.go |  6 +++---
 flow/cmd/mirror_status.go            |  3 ++-
 flow/workflows/cdc_flow.go           |  4 ++--
 flow/workflows/qrep_flow.go          |  6 +++---
 flow/workflows/setup_flow.go         |  2 +-
 flow/workflows/snapshot_flow.go      | 10 +++++-----
 flow/workflows/xmin_flow.go          |  2 +-
 8 files changed, 22 insertions(+), 21 deletions(-)

diff --git a/flow/activities/flowable.go b/flow/activities/flowable.go
index 3900d07385..5d9c4e690f 100644
--- a/flow/activities/flowable.go
+++ b/flow/activities/flowable.go
@@ -301,7 +301,7 @@ func (a *FlowableActivity) SyncFlow(
 	}
 
 	shutdown := utils.HeartbeatRoutine(ctx, func() string {
-		return "transferring records for job - " + flowName
+		return "transferring records for job"
 	})
 	defer shutdown()
@@ -474,7 +474,7 @@ func (a *FlowableActivity) StartNormalize(
 	defer connectors.CloseConnector(ctx, dstConn)
 
 	shutdown := utils.HeartbeatRoutine(ctx, func() string {
-		return "normalizing records from batch for job - " + input.FlowConnectionConfigs.FlowJobName
+		return "normalizing records from batch for job"
 	})
 	defer shutdown()
@@ -542,7 +542,7 @@ func (a *FlowableActivity) GetQRepPartitions(ctx context.Context,
 	defer connectors.CloseConnector(ctx, srcConn)
 
 	shutdown := utils.HeartbeatRoutine(ctx, func() string {
-		return "getting partitions for job - " + config.FlowJobName
+		return "getting partitions for job"
 	})
 	defer shutdown()
@@ -725,7 +725,7 @@ func (a *FlowableActivity) ConsolidateQRepPartitions(ctx context.Context, config
 	defer connectors.CloseConnector(ctx, dstConn)
 
 	shutdown := utils.HeartbeatRoutine(ctx, func() string {
-		return "consolidating partitions for job - " + config.FlowJobName
+		return "consolidating partitions for job"
 	})
 	defer shutdown()
@@ -980,7 +980,7 @@ func (a *FlowableActivity) RenameTables(ctx context.Context, config *protos.Rena
 	defer connectors.CloseConnector(ctx, dstConn)
 
 	shutdown := utils.HeartbeatRoutine(ctx, func() string {
-		return "renaming tables for job - " + config.FlowJobName
+		return "renaming tables for job"
 	})
 	defer shutdown()
diff --git a/flow/activities/snapshot_activity.go b/flow/activities/snapshot_activity.go
index 262d3d0dbd..84df6ecfab 100644
--- a/flow/activities/snapshot_activity.go
+++ b/flow/activities/snapshot_activity.go
@@ -32,7 +32,7 @@ func (a *SnapshotActivity) CloseSlotKeepAlive(ctx context.Context, flowJobName s
 		connectors.CloseConnector(ctx, s.connector)
 		delete(a.SnapshotConnections, flowJobName)
 	}
-	a.Alerter.LogFlowEvent(ctx, flowJobName, "Ended Snapshot Flow Job - "+flowJobName)
+	a.Alerter.LogFlowEvent(ctx, flowJobName, "Ended Snapshot Flow Job")
 
 	return nil
 }
@@ -50,7 +50,7 @@ func (a *SnapshotActivity) SetupReplication(
 		return nil, nil
 	}
 
-	a.Alerter.LogFlowEvent(ctx, config.FlowJobName, "Started Snapshot Flow Job - "+config.FlowJobName)
+	a.Alerter.LogFlowEvent(ctx, config.FlowJobName, "Started Snapshot Flow Job")
 
 	conn, err := connectors.GetCDCPullConnector(ctx, 
config.PeerConnectionConfig) if err != nil { @@ -84,7 +84,7 @@ func (a *SnapshotActivity) SetupReplication( var slotInfo connpostgres.SlotCreationResult select { case slotInfo = <-slotSignal.SlotCreated: - logger.Info("slot created", slotInfo.SlotName) + logger.Info("slot created", slog.String("SlotName", slotInfo.SlotName)) case err := <-replicationErr: closeConnectionForError(err) return nil, fmt.Errorf("failed to setup replication: %w", err) diff --git a/flow/cmd/mirror_status.go b/flow/cmd/mirror_status.go index e8160277f5..f72cd5311e 100644 --- a/flow/cmd/mirror_status.go +++ b/flow/cmd/mirror_status.go @@ -144,7 +144,8 @@ func (h *FlowRequestHandler) cloneTableSummary( rows, err := h.pool.Query(ctx, q, "clone_"+flowJobName+"_%") if err != nil { - slog.Error("unable to query initial load partition - "+flowJobName, slog.Any("error", err)) + slog.Error("unable to query initial load partition", + slog.String(string(shared.FlowNameKey), flowJobName), slog.Any("error", err)) return nil, fmt.Errorf("unable to query initial load partition - %s: %w", flowJobName, err) } diff --git a/flow/workflows/cdc_flow.go b/flow/workflows/cdc_flow.go index bab81b3b23..93e1d4659b 100644 --- a/flow/workflows/cdc_flow.go +++ b/flow/workflows/cdc_flow.go @@ -236,7 +236,7 @@ func CDCFlowWorkflow( for state.ActiveSignal == model.PauseSignal { // only place we block on receive, so signal processing is immediate for state.ActiveSignal == model.PauseSignal && state.FlowConfigUpdate == nil && ctx.Err() == nil { - logger.Info("mirror has been paused", slog.Any("duration", time.Since(startTime))) + logger.Info(fmt.Sprintf("mirror has been paused for %s", time.Since(startTime).Round(time.Second))) selector.Select(ctx) } if err := ctx.Err(); err != nil { @@ -255,7 +255,7 @@ func CDCFlowWorkflow( } } - logger.Info("mirror has been resumed after ", time.Since(startTime)) + logger.Info(fmt.Sprintf("mirror has been resumed after %s", time.Since(startTime).Round(time.Second))) state.CurrentFlowStatus = protos.FlowStatus_STATUS_RUNNING } diff --git a/flow/workflows/qrep_flow.go b/flow/workflows/qrep_flow.go index 839eab9ddc..2f0124e717 100644 --- a/flow/workflows/qrep_flow.go +++ b/flow/workflows/qrep_flow.go @@ -119,7 +119,7 @@ func (q *QRepFlowExecution) SetupWatermarkTableOnDestination(ctx workflow.Contex // fetch the schema for the watermark table watermarkTableSchema, err := q.getTableSchema(ctx, q.config.WatermarkTable) if err != nil { - q.logger.Error("failed to fetch schema for watermark table: ", err) + q.logger.Error("failed to fetch schema for watermark table", slog.Any("error", err)) return fmt.Errorf("failed to fetch schema for watermark table: %w", err) } @@ -161,7 +161,7 @@ func (q *QRepFlowExecution) GetPartitions( return nil, fmt.Errorf("failed to fetch partitions to replicate: %w", err) } - q.logger.Info("partitions to replicate - ", slog.Int("num_partitions", len(partitions.Partitions))) + q.logger.Info("partitions to replicate", slog.Int("num_partitions", len(partitions.Partitions))) return partitions, nil } @@ -439,7 +439,7 @@ func QRepFlowWorkflow( state.CurrentFlowStatus = protos.FlowStatus_STATUS_PAUSED for q.activeSignal == model.PauseSignal { - logger.Info("mirror has been paused", slog.Any("duration", time.Since(startTime))) + logger.Info(fmt.Sprintf("mirror has been paused for %s", time.Since(startTime).Round(time.Second))) // only place we block on receive, so signal processing is immediate val, ok, _ := signalChan.ReceiveWithTimeout(ctx, 1*time.Minute) if ok { diff --git 
a/flow/workflows/setup_flow.go b/flow/workflows/setup_flow.go index ac3e66f93c..0574f0d24e 100644 --- a/flow/workflows/setup_flow.go +++ b/flow/workflows/setup_flow.go @@ -192,7 +192,7 @@ func (s *SetupFlowExecution) fetchTableSchemaAndSetupNormalizedTables( var tblSchemaOutput *protos.GetTableSchemaBatchOutput if err := future.Get(ctx, &tblSchemaOutput); err != nil { - s.logger.Error("failed to fetch schema for source tables: ", err) + s.logger.Error("failed to fetch schema for source tables", slog.Any("error", err)) return nil, fmt.Errorf("failed to fetch schema for source table %s: %w", sourceTables, err) } diff --git a/flow/workflows/snapshot_flow.go b/flow/workflows/snapshot_flow.go index 050bc604e5..ce9ab27d78 100644 --- a/flow/workflows/snapshot_flow.go +++ b/flow/workflows/snapshot_flow.go @@ -30,7 +30,7 @@ func (s *SnapshotFlowExecution) setupReplication( ctx workflow.Context, ) (*protos.SetupReplicationOutput, error) { flowName := s.config.FlowJobName - s.logger.Info("setting up replication on source for peer flow - ", flowName) + s.logger.Info("setting up replication on source for peer flow") ctx = workflow.WithActivityOptions(ctx, workflow.ActivityOptions{ StartToCloseTimeout: 4 * time.Hour, @@ -59,7 +59,7 @@ func (s *SnapshotFlowExecution) setupReplication( return nil, fmt.Errorf("failed to setup replication on source peer: %w", err) } - s.logger.Info("replication slot live for on source for peer flow - ", flowName) + s.logger.Info("replication slot live for on source for peer flow") return res, nil } @@ -68,7 +68,7 @@ func (s *SnapshotFlowExecution) closeSlotKeepAlive( ctx workflow.Context, ) error { flowName := s.config.FlowJobName - s.logger.Info("closing slot keep alive for peer flow - ", flowName) + s.logger.Info("closing slot keep alive for peer flow") ctx = workflow.WithActivityOptions(ctx, workflow.ActivityOptions{ StartToCloseTimeout: 15 * time.Minute, @@ -78,7 +78,7 @@ func (s *SnapshotFlowExecution) closeSlotKeepAlive( return fmt.Errorf("failed to close slot keep alive for peer flow: %w", err) } - s.logger.Info("closed slot keep alive for peer flow - ", flowName) + s.logger.Info("closed slot keep alive for peer flow") return nil } @@ -226,7 +226,7 @@ func (s *SnapshotFlowExecution) cloneTablesWithSlot( return fmt.Errorf("failed to setup replication: %w", err) } - logger.Info("cloning tables in parallel: ", numTablesInParallel) + logger.Info(fmt.Sprintf("cloning %d tables in parallel", numTablesInParallel)) if err := s.cloneTables(ctx, slotInfo, numTablesInParallel); err != nil { return fmt.Errorf("failed to clone tables: %w", err) } diff --git a/flow/workflows/xmin_flow.go b/flow/workflows/xmin_flow.go index 777daba38b..4cd6deece7 100644 --- a/flow/workflows/xmin_flow.go +++ b/flow/workflows/xmin_flow.go @@ -38,7 +38,7 @@ func XminFlowWorkflow( state.CurrentFlowStatus = protos.FlowStatus_STATUS_PAUSED for q.activeSignal == model.PauseSignal { - logger.Info("mirror has been paused", slog.Any("duration", time.Since(startTime))) + logger.Info(fmt.Sprintf("mirror has been paused for %s", time.Since(startTime).Round(time.Second))) // only place we block on receive, so signal processing is immediate val, ok, _ := signalChan.ReceiveWithTimeout(ctx, 1*time.Minute) if ok {
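
A note on the truncation logic in PATCH 11: ranging over a Go string yields byte offsets, not character counts, so the patched loop's `currentLength` is still a byte index and the bound is still roughly 100 bytes; `range` does, however, decode whole runes, so a multi-byte character is never split the way `subject[:100]` could split one. Below is a standalone variant that counts characters explicitly, with the same non-printable replacement. This is a sketch, not the PeerDB code: `sanitizeSubject` and `maxRunes` are illustrative names only.

    package main

    import (
        "fmt"
        "strings"
        "unicode"
    )

    // sanitizeSubject keeps at most maxRunes characters of subject,
    // replacing non-printable runes with spaces. Ranging over a string
    // decodes runes, so multi-byte UTF-8 characters are never split.
    func sanitizeSubject(subject string, maxRunes int) string {
        var b strings.Builder
        written := 0
        for _, r := range subject {
            if written == maxRunes {
                break
            }
            if unicode.IsPrint(r) {
                b.WriteRune(r)
            } else {
                b.WriteRune(' ')
            }
            written++
        }
        return b.String()
    }

    func main() {
        // The newline becomes a space; the multi-byte é survives intact.
        fmt.Println(sanitizeSubject("héllo\nworld", 100)) // héllo world
    }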
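On PATCH 12's suite runner: `e2eshared.RunSuite` now takes only a setup function and calls the suite's own `Teardown`, discovering `Test*` methods by reflection (the `m.Func.Call([]reflect.Value{reflect.ValueOf(suite)})` visible in the e2eshared.go hunk). A stripped-down sketch of that discovery pattern outside the testing framework; `demoSuite` is an illustrative stand-in, and the real RunSuite additionally runs each method as a parallel subtest with cleanup.

    package main

    import (
        "fmt"
        "reflect"
        "strings"
    )

    type demoSuite struct{ name string }

    func (s demoSuite) TestOne() { fmt.Println("TestOne for", s.name) }
    func (s demoSuite) TestTwo() { fmt.Println("TestTwo for", s.name) }

    func (s demoSuite) Teardown() { fmt.Println("Teardown for", s.name) }

    func main() {
        s := demoSuite{name: "demo"}
        typ := reflect.TypeOf(s)
        for i := 0; i < typ.NumMethod(); i++ {
            m := typ.Method(i)
            if strings.HasPrefix(m.Name, "Test") {
                // Method funcs take the receiver as their first argument.
                m.Func.Call([]reflect.Value{reflect.ValueOf(s)})
            }
        }
        s.Teardown()
    }

The design choice this enables: a new connector gets e2e coverage by implementing the suite interface and a setup function, with no per-connector runner code.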
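On PATCH 13's title: slog renders a dangling positional argument with the `!BADKEY` placeholder, which is how the flow names were showing up in Datadog. Trailing arguments to `Logger.Info` must form alternating key/value pairs or be attributes. A minimal sketch of the failure mode and the structured fix, using only the standard library:

    package main

    import "log/slog"

    func main() {
        flowName := "my_flow"

        // Dangling positional argument: slog expects key/value pairs after
        // the message, so flowName is logged as !BADKEY=my_flow.
        slog.Info("slot created", flowName)

        // Structured fix: give the value an explicit key, as the patch does
        // with slog.String/slog.Any at the affected call sites.
        slog.Info("slot created", slog.String("SlotName", flowName))
    }

The patch applies one of two remedies per call site: an explicit attribute as above, or folding the value into the message string with `fmt.Sprintf`.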
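The pause/resume log lines in the same patch also round the elapsed duration before formatting it; `time.Duration.Round` trims the nanosecond tail that otherwise clutters the message. A quick illustration with an arbitrary value:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        elapsed := 90*time.Second + 123456789*time.Nanosecond
        fmt.Println(elapsed)                    // 1m30.123456789s
        fmt.Println(elapsed.Round(time.Second)) // 1m30s
    }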