From 3da8ac6af64379dbb9e6e1aba5cb4ed2b3699f09 Mon Sep 17 00:00:00 2001
From: Amogh-Bharadwaj
Date: Thu, 7 Dec 2023 18:55:45 +0530
Subject: [PATCH] remove unused files and functions

---
 flow/connectors/postgres/bench_test.sql  | 60 -------------------
 .../postgres/qrep_partition_test.go      | 28 ---------
 flow/e2e/postgres/timescale.sql          | 53 ----------------
 3 files changed, 141 deletions(-)
 delete mode 100644 flow/connectors/postgres/bench_test.sql
 delete mode 100644 flow/e2e/postgres/timescale.sql

diff --git a/flow/connectors/postgres/bench_test.sql b/flow/connectors/postgres/bench_test.sql
deleted file mode 100644
index b7e4fcbbcf..0000000000
--- a/flow/connectors/postgres/bench_test.sql
+++ /dev/null
@@ -1,60 +0,0 @@
-CREATE SCHEMA IF NOT EXISTS bench;
-
-CREATE TABLE bench.large_table (
-    id bigserial PRIMARY KEY,
-    text1 text,
-    text2 text,
-    text3 text,
-    text4 text,
-    uuid1 uuid,
-    uuid2 uuid,
-    uuid3 uuid,
-    float1 float8,
-    float2 float8,
-    float3 float8,
-    float4 float8,
-    int1 int,
-    int2 int,
-    int3 int,
-    int4 int
-);
-
-DO
-$do$
-DECLARE
-    counter bigint := 0;
-BEGIN
-    WHILE counter < 5000000 LOOP -- adjust the number based on your needs
-        INSERT INTO bench.large_table(
-            text1, text2, text3, text4,
-            uuid1, uuid2, uuid3,
-            float1, float2, float3, float4,
-            int1, int2, int3, int4
-        )
-        VALUES (
-            md5(random()::text),
-            md5(random()::text),
-            md5(random()::text),
-            md5(random()::text),
-            gen_random_uuid(),
-            gen_random_uuid(),
-            gen_random_uuid(),
-            random() * 1000000,
-            random() * 1000000,
-            random() * 1000000,
-            random() * 1000000,
-            floor(random() * 100000)::int,
-            floor(random() * 100000)::int,
-            floor(random() * 100000)::int,
-            floor(random() * 100000)::int
-        );
-        counter := counter + 1;
-
-        -- Print progress every 1000 rows
-        IF counter % 1000 = 0 THEN
-            RAISE NOTICE 'Inserted % rows', counter;
-        END IF;
-    END LOOP;
-END
-$do$
-;
diff --git a/flow/connectors/postgres/qrep_partition_test.go b/flow/connectors/postgres/qrep_partition_test.go
index a4e90a8544..9eead5ec34 100644
--- a/flow/connectors/postgres/qrep_partition_test.go
+++ b/flow/connectors/postgres/qrep_partition_test.go
@@ -10,7 +10,6 @@ import (
 	util "github.com/PeerDB-io/peer-flow/utils"
 	"github.com/jackc/pgx/v5/pgxpool"
 	"github.com/stretchr/testify/assert"
-	"google.golang.org/protobuf/types/known/timestamppb"
 )
 
 type testCase struct {
@@ -60,33 +59,6 @@ func newTestCaseForCTID(schema string, name string, rows uint32, expectedNum int
 	}
 }
 
-func (tc *testCase) appendPartition(start time.Time, end time.Time) *testCase {
-	tsRange := &protos.PartitionRange_TimestampRange{
-		TimestampRange: &protos.TimestampPartitionRange{
-			Start: timestamppb.New(start),
-			End:   timestamppb.New(end),
-		},
-	}
-	tc.want = append(tc.want, &protos.QRepPartition{
-		PartitionId: "test_uuid",
-		Range: &protos.PartitionRange{
-			Range: tsRange,
-		},
-	})
-	return tc
-}
-
-func (tc *testCase) appendPartitions(start, end time.Time, numPartitions int) *testCase {
-	duration := end.Sub(start)
-	partitionDuration := duration / time.Duration(numPartitions)
-	for i := 0; i < numPartitions; i++ {
-		partitionStart := start.Add(time.Duration(i) * partitionDuration)
-		partitionEnd := start.Add(time.Duration(i+1) * partitionDuration)
-		tc.appendPartition(partitionStart, partitionEnd)
-	}
-	return tc
-}
-
 func TestGetQRepPartitions(t *testing.T) {
 	// log.SetLevel(log.DebugLevel)
 
diff --git a/flow/e2e/postgres/timescale.sql b/flow/e2e/postgres/timescale.sql
deleted file mode 100644
index e635cba839..0000000000
--- a/flow/e2e/postgres/timescale.sql
+++ /dev/null
@@ -1,53 +0,0 @@
-CREATE PEER source_pg_2 FROM POSTGRES WITH
-(
-    host = 'kevin-test-cluster.ctwiqpycdrx0.us-east-2.rds.amazonaws.com',
-    port = '5432',
-    user = 'postgres',
-    password = 'SUMM3RN!GHTS',
-    database = 'ts2'
-);
-
-CREATE PEER target_ts_2 FROM POSTGRES WITH
-(
-    host = '3.19.228.194',
-    port = '5432',
-    user = 'postgres',
-    password = 'T1mesc@l3',
-    database = 'dst2'
-);
-
-CREATE TABLE public.diagnostics (
-    id bigint,
-    "time" timestamp with time zone,
-    tags_id integer,
-    fuel_state double precision,
-    current_load double precision,
-    status double precision,
-    additional_tags jsonb,
-    primary key(id, "time")
-);
-
-SELECT create_hypertable('diagnostics', 'time', chunk_time_interval => INTERVAL '12 hours');
-
-CREATE TABLE public.readings (
-    id bigint,
-    "time" timestamp with time zone,
-    tags_id integer,
-    latitude double precision,
-    longitude double precision,
-    elevation double precision,
-    velocity double precision,
-    heading double precision,
-    grade double precision,
-    fuel_consumption double precision,
-    additional_tags jsonb,
-    primary key(id, "time")
-);
-
-SELECT create_hypertable('readings', 'time', chunk_time_interval => INTERVAL '12 hours');
-
-CREATE MIRROR tstsv4 FROM source_pg_2 TO target_ts_2 WITH TABLE MAPPING(public.diagnostics:public.diagnostics,public.readings:public.readings);
-
-flow_worker1 | time="2023-08-30T06:47:18Z" level=info msg="RelationMessage => RelationID: 16747, Namespace: public, RelationName: fss1, Columns: [0x400175e360 0x400175e380]"
-flow_worker1 | time="2023-08-30T06:47:18Z" level=info msg="23 1 id -1\n"
-flow_worker1 | time="2023-08-30T06:47:18Z" level=info msg="20 0 c1 -1\n"