From 5a93212fc2e2abd48435005e4a1c14170bcb7e73 Mon Sep 17 00:00:00 2001
From: "vitess-bot[bot]" <108069721+vitess-bot[bot]@users.noreply.github.com>
Date: Tue, 10 Dec 2024 10:52:44 +0100
Subject: [PATCH] Cherry-pick bad431deed15cf6ffbc43f4aca1781a58e5849a2 with
 conflicts

---
 .../backup/vtbackup/backup_only_test.go | 2 -
 go/test/endtoend/backup/vtbackup/main_test.go | 1 -
 .../backup/vtctlbackup/backup_utils.go | 148 +++++++-
 .../backup/vtctlbackup/pitr_test_framework.go | 3 -
 go/test/endtoend/cellalias/cell_alias_test.go | 3 -
 go/test/endtoend/cluster/cluster_process.go | 1 -
 go/test/endtoend/cluster/cluster_util.go | 9 -
 .../endtoend/clustertest/add_keyspace_test.go | 1 -
 go/test/endtoend/clustertest/etcd_test.go | 3 -
 go/test/endtoend/clustertest/main_test.go | 1 -
 go/test/endtoend/clustertest/vtctld_test.go | 3 -
 go/test/endtoend/clustertest/vtgate_test.go | 2 -
 go/test/endtoend/clustertest/vttablet_test.go | 4 -
 .../encrypted_replication_test.go | 1 -
 .../encrypted_transport_test.go | 1 -
 go/test/endtoend/keyspace/keyspace_test.go | 9 -
 go/test/endtoend/messaging/main_test.go | 1 -
 go/test/endtoend/messaging/message_test.go | 3 -
 go/test/endtoend/mysqlctl/mysqlctl_test.go | 3 -
 go/test/endtoend/mysqlctld/mysqlctld_test.go | 3 -
 go/test/endtoend/mysqlserver/main_test.go | 1 -
 .../endtoend/mysqlserver/mysql_server_test.go | 9 -
 .../onlineddl/flow/onlineddl_flow_test.go | 5 +-
 .../onlineddl/revert/onlineddl_revert_test.go | 5 +-
 .../scheduler/onlineddl_scheduler_test.go | 5 -
 .../onlineddl/vrepl/onlineddl_vrepl_test.go | 5 +-
 .../onlineddl_vrepl_mini_stress_test.go | 5 +-
 .../onlineddl_vrepl_stress_suite_test.go | 5 +-
 .../vrepl_suite/onlineddl_vrepl_suite_test.go | 5 +-
 go/test/endtoend/preparestmt/main_test.go | 1 -
 .../endtoend/preparestmt/stmt_methods_test.go | 13 -
 .../recovery/pitr/shardedpitr_test.go | 3 +-
 .../recovery/unshardedrecovery/recovery.go | 2 -
 .../reparent/emergencyreparent/ers_test.go | 13 -
 .../reparent/newfeaturetest/reparent_test.go | 9 +-
 .../reparent_range_based_test.go | 1 -
 .../reparent/plannedreparent/reparent_test.go | 12 -
 .../endtoend/reparent/prscomplex/main_test.go | 1 -
 .../reparent/prssettingspool/main_test.go | 1 -
 .../reparent/semisync/semi_sync_test.go | 8 +
 .../vrepl/schemadiff_vrepl_suite_test.go | 6 +-
 .../endtoend/sharded/sharded_keyspace_test.go | 2 -
 go/test/endtoend/stress/stress_test.go | 2 -
 .../buffer/buffer_test_helpers.go | 1 -
 go/test/endtoend/tabletgateway/main_test.go | 1 -
 go/test/endtoend/tabletgateway/vtgate_test.go | 5 -
 .../endtoend/tabletmanager/commands_test.go | 5 -
 .../tabletmanager/custom_rule_topo_test.go | 1 -
 .../tabletmanager/lock_unlock_test.go | 4 -
 go/test/endtoend/tabletmanager/main_test.go | 1 -
 .../tabletmanager/primary/tablet_test.go | 3 -
 go/test/endtoend/tabletmanager/qps_test.go | 2 -
 .../replication_manager/tablet_test.go | 1 -
 .../tabletmanager/tablegc/tablegc_test.go | 1 -
 .../tabletmanager/tablet_health_test.go | 4 -
 .../tablet_security_policy_test.go | 3 -
 go/test/endtoend/tabletmanager/tablet_test.go | 2 -
 .../throttler_topo/throttler_test.go | 8 -
 go/test/endtoend/topoconncache/main_test.go | 1 -
 .../topoconncache/topo_conn_cache_test.go | 1 -
 go/test/endtoend/topotest/consul/main_test.go | 2 -
 go/test/endtoend/topotest/etcd2/main_test.go | 2 -
 go/test/endtoend/topotest/zk2/main_test.go | 2 -
 .../transaction/twopc/fuzz/main_test.go | 128 +++++++
 .../endtoend/transaction/twopc/main_test.go | 347 ++++++++++++++++++
 .../transaction/twopc/metric/main_test.go | 114 ++++++
 .../transaction/twopc/stress/main_test.go | 127 +++++++
 go/test/endtoend/utils/mysql_test.go | 1 -
 .../endtoend/utils/mysqlvsvitess/main_test.go | 1 -
 go/test/endtoend/vault/vault_test.go | 1 -
 .../endtoend/versionupgrade/upgrade_test.go | 4 -
 go/test/endtoend/vtadmin/main_test.go | 100 +++++
 .../vtgate/concurrentdml/main_test.go | 5 -
 .../vtgate/connectiondrain/main_test.go | 185 ++++++++++
 .../endtoend/vtgate/consolidator/main_test.go | 1 -
 .../vtgate/createdb_plugin/main_test.go | 2 -
 .../vtgate/errors_as_warnings/main_test.go | 1 -
 .../endtoend/vtgate/foreignkey/main_test.go | 2 -
 .../foreignkey/stress/fk_stress_test.go | 2 -
 .../endtoend/vtgate/gen4/column_name_test.go | 2 -
 go/test/endtoend/vtgate/gen4/main_test.go | 2 -
 .../vtgate/gen4/system_schema_test.go | 11 -
 go/test/endtoend/vtgate/godriver/main_test.go | 2 -
 go/test/endtoend/vtgate/grpc_api/main_test.go | 1 -
 .../keyspace_watches/keyspace_watch_test.go | 2 -
 go/test/endtoend/vtgate/main_test.go | 2 -
 go/test/endtoend/vtgate/misc_test.go | 2 -
 go/test/endtoend/vtgate/mysql80/main_test.go | 1 -
 go/test/endtoend/vtgate/mysql80/misc_test.go | 4 -
 .../vtgate/partialfailure/main_test.go | 1 -
 .../endtoend/vtgate/plan_tests/main_test.go | 230 ++++++++++++
 .../endtoend/vtgate/prefixfanout/main_test.go | 4 -
 .../queries/aggregation/aggregation_test.go | 2 -
 .../vtgate/queries/aggregation/main_test.go | 1 -
 .../vtgate/queries/benchmark/main_test.go | 2 -
 .../vtgate/queries/derived/derived_test.go | 2 -
 .../vtgate/queries/derived/main_test.go | 1 -
 .../endtoend/vtgate/queries/dml/main_test.go | 2 -
 .../queries/foundrows/found_rows_test.go | 2 -
 .../vtgate/queries/foundrows/main_test.go | 1 -
 .../informationschema_test.go | 3 -
 .../queries/informationschema/main_test.go | 1 -
 .../endtoend/vtgate/queries/kill/main_test.go | 1 -
 .../queries/lookup_queries/main_test.go | 2 -
 .../endtoend/vtgate/queries/misc/main_test.go | 1 -
 .../endtoend/vtgate/queries/misc/misc_test.go | 2 -
 .../vtgate/queries/no_scatter/main_test.go | 1 -
 .../vtgate/queries/no_scatter/queries_test.go | 2 -
 .../vtgate/queries/normalize/main_test.go | 1 -
 .../vtgate/queries/orderby/main_test.go | 1 -
 .../vtgate/queries/orderby/orderby_test.go | 3 -
 .../without_schematracker/main_test.go | 1 -
 .../without_schematracker/orderby_test.go | 3 -
 .../vtgate/queries/random/main_test.go | 1 -
 .../vtgate/queries/random/random_test.go | 2 -
 .../vtgate/queries/reference/main_test.go | 1 -
 .../queries/reference/reference_test.go | 3 -
 .../vtgate/queries/subquery/main_test.go | 1 -
 .../vtgate/queries/subquery/subquery_test.go | 6 +-
 .../vtgate/queries/timeout/main_test.go | 1 -
 .../vtgate/queries/timeout/timeout_test.go | 5 +-
 .../endtoend/vtgate/queries/tpch/main_test.go | 1 -
 .../endtoend/vtgate/queries/tpch/tpch_test.go | 2 -
 .../vtgate/queries/union/main_test.go | 1 -
 .../vtgate/queries/union/union_test.go | 2 -
 .../vtgate/queries/vexplain/main_test.go | 1 -
 .../vtgate/queries/vexplain/vexplain_test.go | 2 -
 .../vtgate/readafterwrite/raw_test.go | 2 -
 .../endtoend/vtgate/reservedconn/main_test.go | 1 -
 .../reservedconn/reconnect1/main_test.go | 1 -
 .../reservedconn/reconnect2/main_test.go | 1 -
 .../reservedconn/reconnect3/main_test.go | 1 -
 .../reservedconn/reconnect4/main_test.go | 1 -
 .../vtgate/reservedconn/sysvar_test.go | 2 -
 .../endtoend/vtgate/reservedconn/udv_test.go | 3 -
 go/test/endtoend/vtgate/schema/schema_test.go | 2 -
 .../loadkeyspace/schema_load_keyspace_test.go | 2 -
 .../restarttablet/schema_restart_test.go | 3 -
 .../schematracker/sharded/st_sharded_test.go | 2 -
 .../sharded_prs/st_sharded_test.go | 2 -
 .../unsharded/st_unsharded_test.go | 3 -
 go/test/endtoend/vtgate/sec_vind/main_test.go | 2 -
 go/test/endtoend/vtgate/sequence/seq_test.go | 4 -
 .../tablet_healthcheck/reparent_test.go | 1 -
 .../correctness_test.go | 1 -
 .../vtgate/transaction/restart/main_test.go | 1 -
 .../rollback/txn_rollback_shutdown_test.go | 3 -
 .../vtgate/transaction/single/main_test.go | 1 -
 .../endtoend/vtgate/transaction/tx_test.go | 4 -
 .../endtoend/vtgate/unsharded/main_test.go | 10 -
 .../vtgate/vindex_bindvars/main_test.go | 3 -
 .../endtoend/vtgate/vschema/vschema_test.go | 1 -
 go/test/endtoend/vtorc/api/api_test.go | 1 -
 go/test/endtoend/vtorc/api/config_test.go | 203 ++++++++++
 go/test/endtoend/vtorc/api/main_test.go | 3 -
 go/test/endtoend/vtorc/general/main_test.go | 3 -
 go/test/endtoend/vtorc/general/vtorc_test.go | 110 +++++-
 .../vtorc/primaryfailure/main_test.go | 3 -
 .../primaryfailure/primary_failure_test.go | 12 -
 test/config.json | 11 -
 160 files changed, 1733 insertions(+), 390 deletions(-)
 create mode 100644 go/test/endtoend/transaction/twopc/fuzz/main_test.go
 create mode 100644 go/test/endtoend/transaction/twopc/main_test.go
 create mode 100644 go/test/endtoend/transaction/twopc/metric/main_test.go
 create mode 100644 go/test/endtoend/transaction/twopc/stress/main_test.go
 create mode 100644 go/test/endtoend/vtadmin/main_test.go
 create mode 100644 go/test/endtoend/vtgate/connectiondrain/main_test.go
 create mode 100644 go/test/endtoend/vtgate/plan_tests/main_test.go
 create mode 100644 go/test/endtoend/vtorc/api/config_test.go

diff --git a/go/test/endtoend/backup/vtbackup/backup_only_test.go b/go/test/endtoend/backup/vtbackup/backup_only_test.go
index 4e018986100..c7a09c70d13 100644
--- a/go/test/endtoend/backup/vtbackup/backup_only_test.go
+++ b/go/test/endtoend/backup/vtbackup/backup_only_test.go
@@ -58,7 +58,6 @@ func TestTabletInitialBackup(t *testing.T) {
 	// - Take a Second Backup
 	// - Bring up a second replica, and restore from the second backup
 	// - list the backups, remove them
-	defer cluster.PanicHandler(t)
 
 	waitForReplicationToCatchup([]cluster.Vttablet{*replica1, *replica2})
 
@@ -102,7 +101,6 @@ func TestTabletBackupOnly(t *testing.T) {
 	// - Take a Second Backup
 	// - Bring up a second replica, and restore from the second backup
 	// - list the backups, remove them
-	defer cluster.PanicHandler(t)
 
 	// Reset the tablet object values in order on init tablet in the next step.
 	primary.VttabletProcess.ServingStatus = "NOT_SERVING"
diff --git a/go/test/endtoend/backup/vtbackup/main_test.go b/go/test/endtoend/backup/vtbackup/main_test.go
index 367956c9827..6e1840b2979 100644
--- a/go/test/endtoend/backup/vtbackup/main_test.go
+++ b/go/test/endtoend/backup/vtbackup/main_test.go
@@ -52,7 +52,6 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitCode, err := func() (int, error) {
diff --git a/go/test/endtoend/backup/vtctlbackup/backup_utils.go b/go/test/endtoend/backup/vtctlbackup/backup_utils.go
index 9227ce39516..b5a8bf51ef1 100644
--- a/go/test/endtoend/backup/vtctlbackup/backup_utils.go
+++ b/go/test/endtoend/backup/vtctlbackup/backup_utils.go
@@ -389,7 +389,6 @@ func TestBackup(t *testing.T, setupType int, streamMode string, stripes int, cDe
 	},
 	// }
 
-	defer cluster.PanicHandler(t)
 	// setup cluster for the testing
 	code, err := LaunchCluster(setupType, streamMode, stripes, cDetails)
 	require.Nilf(t, err, "setup failed with status code %d", code)
@@ -1445,3 +1444,150 @@ func verifyTabletRestoreStats(t *testing.T, vars map[string]any) {
 	}
 	require.Contains(t, bd, "BackupStorage.File.File:Read")
 }
+<<<<<<< HEAD
+=======
+
+func getDefaultCommonArgs() []string {
+	return []string{
+		"--vreplication_retry_delay", "1s",
+		"--degraded_threshold", "5s",
+		"--lock_tables_timeout", "5s",
+		"--watch_replication_stream",
+		"--enable_replication_reporter",
+		"--serving_state_grace_period", "1s",
+	}
+}
+
+func setDefaultCommonArgs() { commonTabletArg = getDefaultCommonArgs() }
+
+// fetch the backup engine used on the last backup triggered by the end-to-end tests.
+func getBackupEngineOfLastBackup(t *testing.T) string {
+	lastBackup := getLastBackup(t)
+
+	manifest := readManifestFile(t, path.Join(localCluster.CurrentVTDATAROOT, "backups", keyspaceName, shardName, lastBackup))
+
+	return manifest.BackupMethod
+}
+
+func getLastBackup(t *testing.T) string {
+	backups, err := localCluster.ListBackups(shardKsName)
+	require.NoError(t, err)
+
+	return backups[len(backups)-1]
+}
+
+func TestBackupEngineSelector(t *testing.T) {
+	defer setDefaultCommonArgs()
+
+	// launch the cluster with xtrabackup as the default engine
+	code, err := LaunchCluster(XtraBackup, "xbstream", 0, &CompressionDetails{CompressorEngineName: "pgzip"})
+	require.Nilf(t, err, "setup failed with status code %d", code)
+
+	defer TearDownCluster()
+
+	localCluster.DisableVTOrcRecoveries(t)
+	defer func() {
+		localCluster.EnableVTOrcRecoveries(t)
+	}()
+	verifyInitialReplication(t)
+
+	t.Run("backup with backup-engine=builtin", func(t *testing.T) {
+		// first try to backup with an alternative engine (builtin)
+		err = localCluster.VtctldClientProcess.ExecuteCommand("Backup", "--allow-primary", "--backup-engine=builtin", primary.Alias)
+		require.NoError(t, err)
+		engineUsed := getBackupEngineOfLastBackup(t)
+		require.Equal(t, "builtin", engineUsed)
+	})
+
+	t.Run("backup with backup-engine=xtrabackup", func(t *testing.T) {
+		// then try to backup specifying the xtrabackup engine
+		err = localCluster.VtctldClientProcess.ExecuteCommand("Backup", "--allow-primary", "--backup-engine=xtrabackup", primary.Alias)
+		require.NoError(t, err)
+		engineUsed := getBackupEngineOfLastBackup(t)
+		require.Equal(t, "xtrabackup", engineUsed)
+	})
+
+	t.Run("backup without specifying backup-engine", func(t *testing.T) {
+		// check that by default we still use the xtrabackup engine if not specified
+		err = localCluster.VtctldClientProcess.ExecuteCommand("Backup", "--allow-primary", primary.Alias)
+		require.NoError(t, err)
+		engineUsed := getBackupEngineOfLastBackup(t)
+		require.Equal(t, "xtrabackup", engineUsed)
+	})
+}
+
+func TestRestoreAllowedBackupEngines(t *testing.T) {
+	defer setDefaultCommonArgs()
+
+	backupMsg := "right after xtrabackup backup"
+
+	cDetails := &CompressionDetails{CompressorEngineName: "pgzip"}
+
+	// launch the cluster with xtrabackup as the default engine
+	code, err := LaunchCluster(XtraBackup, "xbstream", 0, cDetails)
+	require.Nilf(t, err, "setup failed with status code %d", code)
+
+	defer TearDownCluster()
+
+	localCluster.DisableVTOrcRecoveries(t)
+	defer func() {
+		localCluster.EnableVTOrcRecoveries(t)
+	}()
+	verifyInitialReplication(t)
+
+	t.Run("generate backups", func(t *testing.T) {
+		// let's take two backups, each using a different backup engine
+		err = localCluster.VtctldClientProcess.ExecuteCommand("Backup", "--allow-primary", "--backup-engine=builtin", primary.Alias)
+		require.NoError(t, err)
+
+		err = localCluster.VtctldClientProcess.ExecuteCommand("Backup", "--allow-primary", "--backup-engine=xtrabackup", primary.Alias)
+		require.NoError(t, err)
+	})
+
+	// insert more data on the primary
+	_, err = primary.VttabletProcess.QueryTablet(fmt.Sprintf("insert into vt_insert_test (msg) values ('%s')", backupMsg), keyspaceName, true)
+	require.NoError(t, err)
+
+	t.Run("restore replica and verify data", func(t *testing.T) {
+		// now bring up another replica, letting it restore from backup.
+		restoreWaitForBackup(t, "replica", cDetails, true)
+		err = replica2.VttabletProcess.WaitForTabletStatusesForTimeout([]string{"SERVING"}, timeout)
+		require.NoError(t, err)
+
+		// check the new replica has the data
+		cluster.VerifyRowsInTablet(t, replica2, keyspaceName, 2)
+		result, err := replica2.VttabletProcess.QueryTablet(
+			fmt.Sprintf("select msg from vt_insert_test where msg='%s'", backupMsg), replica2.VttabletProcess.Keyspace, true)
+		require.NoError(t, err)
+		require.Equal(t, backupMsg, result.Named().Row().AsString("msg", ""))
+	})
+
+	t.Run("test broken restore", func(t *testing.T) {
+		// now let's break the last backup in the shard
+		err = os.Remove(path.Join(localCluster.CurrentVTDATAROOT,
+			"backups", keyspaceName, shardName,
+			getLastBackup(t), "backup.xbstream.gz"))
+		require.NoError(t, err)
+
+		// and try to restore from it
+		err = localCluster.VtctldClientProcess.ExecuteCommand("RestoreFromBackup", replica2.Alias)
+		require.Error(t, err) // this should fail
+	})
+
+	t.Run("test older working backup", func(t *testing.T) {
+		// now we retry but with the first backup
+		err = localCluster.VtctldClientProcess.ExecuteCommand("RestoreFromBackup", "--allowed-backup-engines=builtin", replica2.Alias)
+		require.NoError(t, err) // this should succeed
+
+		// make sure we are replicating after the restore is done
+		err = replica2.VttabletProcess.WaitForTabletStatusesForTimeout([]string{"SERVING"}, timeout)
+		require.NoError(t, err)
+		cluster.VerifyRowsInTablet(t, replica2, keyspaceName, 2)
+
+		result, err := replica2.VttabletProcess.QueryTablet(
+			fmt.Sprintf("select msg from vt_insert_test where msg='%s'", backupMsg), replica2.VttabletProcess.Keyspace, true)
+		require.NoError(t, err)
+		require.Equal(t, backupMsg, result.Named().Row().AsString("msg", ""))
+	})
+}
+>>>>>>> bad431deed (Remove broken panic handler (#17354))
diff --git a/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go b/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go
index 7f611d81ad6..87cfc53460c 100644
--- a/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go
+++ b/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go
@@ -92,7 +92,6 @@ func waitForReplica(t *testing.T, replicaIndex int) {
 // in between, it makes writes to the database, and takes notes: what data was available in what backup.
 // It then restores each and every one of those backups, in random order, and expects to find the specific data associated with the backup.
 func ExecTestIncrementalBackupAndRestoreToPos(t *testing.T, tcase *PITRTestCase) {
-	defer cluster.PanicHandler(t)
 	t.Run(tcase.Name, func(t *testing.T) {
 		// setup cluster for the testing
 
@@ -330,7 +329,6 @@ func ExecTestIncrementalBackupAndRestoreToPos(t *testing.T, tcase *PITRTestCase)
 
 // ExecTestIncrementalBackupAndRestoreToPos
 func ExecTestIncrementalBackupAndRestoreToTimestamp(t *testing.T, tcase *PITRTestCase) {
-	defer cluster.PanicHandler(t)
 	var lastInsertedRowTimestamp time.Time
 	insertRowOnPrimary := func(t *testing.T, hint string) {
 
@@ -590,7 +588,6 @@ func ExecTestIncrementalBackupAndRestoreToTimestamp(t *testing.T, tcase *PITRTes
 // Specifically, it's designed to test how incremental backups are taken by interleaved replicas, so that they successfully build on
 // one another.
 func ExecTestIncrementalBackupOnTwoTablets(t *testing.T, tcase *PITRTestCase) {
-	defer cluster.PanicHandler(t)
 	t.Run(tcase.Name, func(t *testing.T) {
 		// setup cluster for the testing
 
diff --git a/go/test/endtoend/cellalias/cell_alias_test.go b/go/test/endtoend/cellalias/cell_alias_test.go
index 07e8d687f4e..6e8f901a245 100644
--- a/go/test/endtoend/cellalias/cell_alias_test.go
+++ b/go/test/endtoend/cellalias/cell_alias_test.go
@@ -90,7 +90,6 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitcode, err := func() (int, error) {
@@ -232,7 +231,6 @@ func TestMain(m *testing.M) {
 }
 
 func TestAlias(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	insertInitialValues(t)
 	defer deleteInitialValues(t)
 
@@ -296,7 +294,6 @@ func TestAlias(t *testing.T) {
 }
 
 func TestAddAliasWhileVtgateUp(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	insertInitialValues(t)
 	defer deleteInitialValues(t)
 
diff --git a/go/test/endtoend/cluster/cluster_process.go b/go/test/endtoend/cluster/cluster_process.go
index 44636b3cdb6..215c440fd33 100644
--- a/go/test/endtoend/cluster/cluster_process.go
+++ b/go/test/endtoend/cluster/cluster_process.go
@@ -1021,7 +1021,6 @@ func (cluster *LocalProcessCluster) StreamTabletHealthUntil(ctx context.Context,
 
 // Teardown brings down the cluster by invoking teardown for individual processes
 func (cluster *LocalProcessCluster) Teardown() {
-	PanicHandler(nil)
 	cluster.mx.Lock()
 	defer cluster.mx.Unlock()
 	if cluster.teardownCompleted {
diff --git a/go/test/endtoend/cluster/cluster_util.go b/go/test/endtoend/cluster/cluster_util.go
index 061e632dde7..cfc2071a746 100644
--- a/go/test/endtoend/cluster/cluster_util.go
+++ b/go/test/endtoend/cluster/cluster_util.go
@@ -126,15 +126,6 @@ func VerifyRowsInTablet(t *testing.T, vttablet *Vttablet, ksName string, expecte
 	VerifyRowsInTabletForTable(t, vttablet, ksName, expectedRows, "vt_insert_test")
 }
 
-// PanicHandler handles the panic in the testcase.
-func PanicHandler(t testing.TB) {
-	err := recover()
-	if t == nil {
-		return
-	}
-	require.Nilf(t, err, "panic occured in testcase %v", t.Name())
-}
-
 // ListBackups Lists back preset in shard
 func (cluster LocalProcessCluster) ListBackups(shardKsName string) ([]string, error) {
 	output, err := cluster.VtctldClientProcess.ExecuteCommandWithOutput("GetBackups", shardKsName)
diff --git a/go/test/endtoend/clustertest/add_keyspace_test.go b/go/test/endtoend/clustertest/add_keyspace_test.go
index edee87d035e..b8422b52eb3 100644
--- a/go/test/endtoend/clustertest/add_keyspace_test.go
+++ b/go/test/endtoend/clustertest/add_keyspace_test.go
@@ -61,7 +61,6 @@ primary key (id)
 )
 
 func TestAddKeyspace(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	if err := clusterInstance.StartKeyspace(*testKeyspace, []string{"-80", "80-"}, 0, false); err != nil {
 		log.Errorf("failed to AddKeyspace %v: %v", *testKeyspace, err)
 		t.Fatal(err)
diff --git a/go/test/endtoend/clustertest/etcd_test.go b/go/test/endtoend/clustertest/etcd_test.go
index 5239d960c47..f47a002a3cf 100644
--- a/go/test/endtoend/clustertest/etcd_test.go
+++ b/go/test/endtoend/clustertest/etcd_test.go
@@ -24,12 +24,9 @@ import (
 
 	"github.com/stretchr/testify/require"
 	clientv3 "go.etcd.io/etcd/client/v3"
-
-	"vitess.io/vitess/go/test/endtoend/cluster"
 )
 
 func TestEtcdServer(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	// Confirm the basic etcd cluster health.
 	etcdHealthURL := fmt.Sprintf("http://%s:%d/health", clusterInstance.Hostname, clusterInstance.TopoPort)
 
diff --git a/go/test/endtoend/clustertest/main_test.go b/go/test/endtoend/clustertest/main_test.go
index 35da40a3edb..3fc2524208b 100644
--- a/go/test/endtoend/clustertest/main_test.go
+++ b/go/test/endtoend/clustertest/main_test.go
@@ -60,7 +60,6 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitCode := func() int {
diff --git a/go/test/endtoend/clustertest/vtctld_test.go b/go/test/endtoend/clustertest/vtctld_test.go
index c61f7820bb7..18d06cf3299 100644
--- a/go/test/endtoend/clustertest/vtctld_test.go
+++ b/go/test/endtoend/clustertest/vtctld_test.go
@@ -30,8 +30,6 @@ import (
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-
-	"vitess.io/vitess/go/test/endtoend/cluster"
 )
 
 var (
@@ -44,7 +42,6 @@ var (
 )
 
 func TestVtctldProcess(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	url := fmt.Sprintf("http://%s:%d/api/keyspaces/", clusterInstance.Hostname, clusterInstance.VtctldHTTPPort)
 	testURL(t, url, "keyspace url")
 
diff --git a/go/test/endtoend/clustertest/vtgate_test.go b/go/test/endtoend/clustertest/vtgate_test.go
index 2f72682a391..264b292f482 100644
--- a/go/test/endtoend/clustertest/vtgate_test.go
+++ b/go/test/endtoend/clustertest/vtgate_test.go
@@ -32,11 +32,9 @@ import (
 	"github.com/stretchr/testify/require"
 
 	"vitess.io/vitess/go/mysql"
-	"vitess.io/vitess/go/test/endtoend/cluster"
 )
 
 func TestVtgateProcess(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	verifyVtgateVariables(t, clusterInstance.VtgateProcess.VerifyURL)
 	ctx := context.Background()
 	conn, err := mysql.Connect(ctx, &vtParams)
diff --git a/go/test/endtoend/clustertest/vttablet_test.go b/go/test/endtoend/clustertest/vttablet_test.go
index 5e7d5e27182..86fc2a6983c 100644
--- a/go/test/endtoend/clustertest/vttablet_test.go
+++ b/go/test/endtoend/clustertest/vttablet_test.go
@@ -25,12 +25,9 @@ import (
 	"testing"
 
 	"github.com/stretchr/testify/require"
-
-	"vitess.io/vitess/go/test/endtoend/cluster"
 )
 
 func TestVttabletProcess(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	firstTabletPort := clusterInstance.Keyspaces[0].Shards[0].Vttablets[0].HTTPPort
 	testURL(t, fmt.Sprintf("http://localhost:%d/debug/vars/", firstTabletPort), "tablet debug var url")
 	resp, err := http.Get(fmt.Sprintf("http://localhost:%d/debug/vars", firstTabletPort))
@@ -48,7 +45,6 @@ func TestVttabletProcess(t *testing.T) {
 }
 
 func TestDeleteTablet(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	primary := clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet()
 	require.NotNil(t, primary)
 	_, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("DeleteTablets", "--allow-primary", primary.Alias)
diff --git a/go/test/endtoend/encryption/encryptedreplication/encrypted_replication_test.go b/go/test/endtoend/encryption/encryptedreplication/encrypted_replication_test.go
index 4c759ff577a..61b553a020d 100644
--- a/go/test/endtoend/encryption/encryptedreplication/encrypted_replication_test.go
+++ b/go/test/endtoend/encryption/encryptedreplication/encrypted_replication_test.go
@@ -42,7 +42,6 @@ var (
 
 // This test makes sure that we can use SSL replication with Vitess
 func TestSecure(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	testReplicationBase(t, true)
 	testReplicationBase(t, false)
 }
diff --git a/go/test/endtoend/encryption/encryptedtransport/encrypted_transport_test.go b/go/test/endtoend/encryption/encryptedtransport/encrypted_transport_test.go
index 9147b7b9080..789f93eb3ab 100644
--- a/go/test/endtoend/encryption/encryptedtransport/encrypted_transport_test.go
+++ b/go/test/endtoend/encryption/encryptedtransport/encrypted_transport_test.go
@@ -102,7 +102,6 @@ var (
 )
 
 func TestSecureTransport(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	flag.Parse()
 
 	// initialize cluster
diff --git a/go/test/endtoend/keyspace/keyspace_test.go b/go/test/endtoend/keyspace/keyspace_test.go
index 2a665c66214..f65301b9bb4 100644
--- a/go/test/endtoend/keyspace/keyspace_test.go
+++ b/go/test/endtoend/keyspace/keyspace_test.go
@@ -81,7 +81,6 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitCode := func() int {
@@ -167,7 +166,6 @@ func checkDurabilityPolicy(t *testing.T, durabilityPolicy string) {
 }
 
 func TestGetSrvKeyspaceNames(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	data, err := clusterForKSTest.VtctldClientProcess.ExecuteCommandWithOutput("GetSrvKeyspaceNames", cell)
 	require.Nil(t, err)
 
@@ -180,7 +178,6 @@ func TestGetSrvKeyspaceNames(t *testing.T) {
 }
 
 func TestGetSrvKeyspacePartitions(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	shardedSrvKeyspace := getSrvKeyspace(t, cell, keyspaceShardedName)
 	otherShardRefFound := false
 	for _, partition := range shardedSrvKeyspace.Partitions {
@@ -209,20 +206,17 @@ func TestGetSrvKeyspacePartitions(t *testing.T) {
 }
 
 func TestShardNames(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	output, err := clusterForKSTest.VtctldClientProcess.GetSrvKeyspaces(keyspaceShardedName, cell)
 	require.NoError(t, err)
 	require.NotNil(t, output[cell], "no srvkeyspace for cell %s", cell)
 }
 
 func TestGetKeyspace(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	_, err := clusterForKSTest.VtctldClientProcess.GetKeyspace(keyspaceUnshardedName)
 	require.Nil(t, err)
 }
 
 func TestDeleteKeyspace(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	_ = clusterForKSTest.VtctldClientProcess.CreateKeyspace("test_delete_keyspace", sidecar.DefaultName)
 	_ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("CreateShard", "test_delete_keyspace/0")
 	_ = clusterForKSTest.InitTablet(&cluster.Vttablet{
@@ -353,7 +347,6 @@ func TestDeleteKeyspace(t *testing.T) {
 }
 */
 func TestShardCountForAllKeyspaces(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	testShardCountForKeyspace(t, keyspaceUnshardedName, 1)
 	testShardCountForKeyspace(t, keyspaceShardedName, 2)
 }
@@ -370,7 +363,6 @@ func testShardCountForKeyspace(t *testing.T, keyspace string, count int) {
 }
 
 func TestShardNameForAllKeyspaces(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	testShardNameForKeyspace(t, keyspaceUnshardedName, []string{"test_ks_unsharded"})
 	testShardNameForKeyspace(t, keyspaceShardedName, []string{"-80", "80-"})
 }
@@ -389,7 +381,6 @@ func testShardNameForKeyspace(t *testing.T, keyspace string, shardNames []string
 }
 
 func TestKeyspaceToShardName(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	var id []byte
 	srvKeyspace := getSrvKeyspace(t, cell, keyspaceShardedName)
 
diff --git a/go/test/endtoend/messaging/main_test.go b/go/test/endtoend/messaging/main_test.go
index 49477ebe631..c654869316b 100644
--- a/go/test/endtoend/messaging/main_test.go
+++ b/go/test/endtoend/messaging/main_test.go
@@ -104,7 +104,6 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitcode, err := func() (int, error) {
diff --git a/go/test/endtoend/messaging/message_test.go b/go/test/endtoend/messaging/message_test.go
index 7e1190c16bb..e91a8dcc335 100644
--- a/go/test/endtoend/messaging/message_test.go
+++ b/go/test/endtoend/messaging/message_test.go
@@ -375,7 +375,6 @@ func TestUnsharded(t *testing.T) {
 
 // TestReparenting checks the client connection count after reparenting.
 func TestReparenting(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	name := "sharded_message"
 
 	ctx := context.Background()
@@ -435,7 +434,6 @@ func TestReparenting(t *testing.T) {
 }
 
 // TestConnection validate the connection count and message streaming.
 func TestConnection(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	name := "sharded_message"
 
@@ -494,7 +492,6 @@ func TestConnection(t *testing.T) {
 }
 
 func testMessaging(t *testing.T, name, ks string) {
-	defer cluster.PanicHandler(t)
 	ctx := context.Background()
 	stream, err := VtgateGrpcConn(ctx, clusterInstance)
 	require.Nil(t, err)
diff --git a/go/test/endtoend/mysqlctl/mysqlctl_test.go b/go/test/endtoend/mysqlctl/mysqlctl_test.go
index 6c3d65226e3..cc4e8c5dce8 100644
--- a/go/test/endtoend/mysqlctl/mysqlctl_test.go
+++ b/go/test/endtoend/mysqlctl/mysqlctl_test.go
@@ -40,7 +40,6 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitCode := func() int {
@@ -139,7 +138,6 @@ func initCluster(shardNames []string, totalTabletsRequired int) {
 }
 
 func TestRestart(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	err := primaryTablet.MysqlctlProcess.Stop()
 	require.NoError(t, err)
 	primaryTablet.MysqlctlProcess.CleanupFiles(primaryTablet.TabletUID)
@@ -148,7 +146,6 @@ func TestRestart(t *testing.T) {
 }
 
 func TestAutoDetect(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	err := clusterInstance.Keyspaces[0].Shards[0].Vttablets[0].VttabletProcess.Setup()
 	require.NoError(t, err)
 
diff --git a/go/test/endtoend/mysqlctld/mysqlctld_test.go b/go/test/endtoend/mysqlctld/mysqlctld_test.go
index 328bc563377..46a4d71435c 100644
--- a/go/test/endtoend/mysqlctld/mysqlctld_test.go
+++ b/go/test/endtoend/mysqlctld/mysqlctld_test.go
@@ -44,7 +44,6 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitCode := func() int {
@@ -141,7 +140,6 @@ func initCluster(shardNames []string, totalTabletsRequired int) error {
 }
 
 func TestRestart(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	err := primaryTablet.MysqlctldProcess.Stop()
 	require.Nil(t, err)
 	require.Truef(t, primaryTablet.MysqlctldProcess.WaitForMysqlCtldShutdown(), "Mysqlctld has not stopped...")
@@ -151,7 +149,6 @@ func TestRestart(t *testing.T) {
 }
 
 func TestAutoDetect(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	err := clusterInstance.Keyspaces[0].Shards[0].Vttablets[0].VttabletProcess.Setup()
 	require.Nil(t, err, "error should be nil")
 
diff --git a/go/test/endtoend/mysqlserver/main_test.go b/go/test/endtoend/mysqlserver/main_test.go
index 18b169e33d7..20da69e18e8 100644
--- a/go/test/endtoend/mysqlserver/main_test.go
+++ b/go/test/endtoend/mysqlserver/main_test.go
@@ -61,7 +61,6 @@ END;
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	// setting grpc max size
diff --git a/go/test/endtoend/mysqlserver/mysql_server_test.go b/go/test/endtoend/mysqlserver/mysql_server_test.go
index 6b691582c66..ee6e973593b 100644
--- a/go/test/endtoend/mysqlserver/mysql_server_test.go
+++ b/go/test/endtoend/mysqlserver/mysql_server_test.go
@@ -35,14 +35,12 @@ import (
 	"vitess.io/vitess/go/mysql/sqlerror"
 
 	"vitess.io/vitess/go/mysql"
-	"vitess.io/vitess/go/test/endtoend/cluster"
 
 	_ "github.com/go-sql-driver/mysql"
 )
 
 // TestMultiStmt checks that multiStatements=True and multiStatements=False work properly.
 func TestMultiStatement(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	ctx := context.Background()
 
 	// connect database with multiStatements=True
@@ -70,7 +68,6 @@ func TestMultiStatement(t *testing.T) {
 
 // TestLargeComment add large comment in insert stmt and validate the insert process.
 func TestLargeComment(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	ctx := context.Background()
 
 	conn, err := mysql.Connect(ctx, &vtParams)
@@ -89,7 +86,6 @@ func TestLargeComment(t *testing.T) {
 
 // TestInsertLargerThenGrpcLimit insert blob larger then grpc limit and verify the error.
 func TestInsertLargerThenGrpcLimit(t *testing.T) {
-	defer cluster.PanicHandler(t)
 
 	ctx := context.Background()
 
@@ -109,7 +105,6 @@ func TestInsertLargerThenGrpcLimit(t *testing.T) {
 
 // TestTimeout executes sleep(5) with query_timeout of 1 second, and verifies the error.
 func TestTimeout(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	ctx := context.Background()
 
 	conn, err := mysql.Connect(ctx, &vtParams)
@@ -125,7 +120,6 @@ func TestTimeout(t *testing.T) {
 
 // TestInvalidField tries to fetch invalid column and verifies the error.
 func TestInvalidField(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	ctx := context.Background()
 
 	conn, err := mysql.Connect(ctx, &vtParams)
@@ -141,7 +135,6 @@ func TestInvalidField(t *testing.T) {
 
 // TestWarnings validates the behaviour of SHOW WARNINGS.
 func TestWarnings(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	ctx := context.Background()
 
 	conn, err := mysql.Connect(ctx, &vtParams)
@@ -183,7 +176,6 @@ func TestWarnings(t *testing.T) {
 // TestSelectWithUnauthorizedUser verifies that an unauthorized user
 // is not able to read from the table.
 func TestSelectWithUnauthorizedUser(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	ctx := context.Background()
 
 	tmpVtParam := vtParams
@@ -202,7 +194,6 @@ func TestSelectWithUnauthorizedUser(t *testing.T) {
 
 // TestPartitionedTable validates that partitioned tables are recognized by schema engine
 func TestPartitionedTable(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	tablet := clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet()
 
diff --git a/go/test/endtoend/onlineddl/flow/onlineddl_flow_test.go b/go/test/endtoend/onlineddl/flow/onlineddl_flow_test.go
index 772a5fa6fd0..a1aec227224 100644
--- a/go/test/endtoend/onlineddl/flow/onlineddl_flow_test.go
+++ b/go/test/endtoend/onlineddl/flow/onlineddl_flow_test.go
@@ -121,7 +121,6 @@ const (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitcode, err := func() (int, error) {
@@ -200,8 +199,12 @@ func TestMain(m *testing.M) {
 
 }
 
+<<<<<<< HEAD
 func TestSchemaChange(t *testing.T) {
 	defer cluster.PanicHandler(t)
+=======
+func TestOnlineDDLFlow(t *testing.T) {
+>>>>>>> bad431deed (Remove broken panic handler (#17354))
 	ctx := context.Background()
 
 	require.NotNil(t, clusterInstance)
diff --git a/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go b/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go
index 0efed92f440..b7a59812cd7 100644
--- a/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go
+++ b/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go
@@ -134,7 +134,6 @@ type revertibleTestCase struct {
 }
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitcode, err := func() (int, error) {
@@ -200,8 +199,12 @@ func TestMain(m *testing.M) {
 
 }
 
+<<<<<<< HEAD
 func TestSchemaChange(t *testing.T) {
 	defer cluster.PanicHandler(t)
+=======
+func TestRevertSchemaChanges(t *testing.T) {
+>>>>>>> bad431deed (Remove broken panic handler (#17354))
 	shards = clusterInstance.Keyspaces[0].Shards
 	require.Equal(t, 1, len(shards))
 
diff --git a/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go b/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go
index 4362069af66..7579146bb9e 100644
--- a/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go
+++ b/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go
@@ -226,7 +226,6 @@ func waitForMessage(t *testing.T, uuid string, messageSubstring string) {
 }
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitcode, err := func() (int, error) {
@@ -307,7 +306,6 @@ func TestSchemaChange(t *testing.T) {
 }
 
 func testScheduler(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	shards = clusterInstance.Keyspaces[0].Shards
 	require.Equal(t, 1, len(shards))
 
@@ -1268,7 +1266,6 @@ func testScheduler(t *testing.T) {
 }
 
 func testSingleton(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	shards = clusterInstance.Keyspaces[0].Shards
 	require.Equal(t, 1, len(shards))
 
@@ -1503,7 +1500,6 @@ DROP TABLE IF EXISTS stress_test
 	})
 }
 func testDeclarative(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	shards = clusterInstance.Keyspaces[0].Shards
 	require.Equal(t, 1, len(shards))
 
@@ -2175,7 +2171,6 @@ func testDeclarative(t *testing.T) {
 }
 
 func testForeignKeys(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	var (
 		createStatements = []string{
diff --git a/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go b/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go
index e5df3051612..cab4459e993 100644
--- a/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go
+++ b/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go
@@ -156,7 +156,6 @@ const (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitcode, err := func() (int, error) {
@@ -220,8 +219,12 @@ func TestMain(m *testing.M) {
 
 }
 
+<<<<<<< HEAD
 func TestSchemaChange(t *testing.T) {
 	defer cluster.PanicHandler(t)
+=======
+func TestVreplSchemaChanges(t *testing.T) {
+>>>>>>> bad431deed (Remove broken panic handler (#17354))
 	shards = clusterInstance.Keyspaces[0].Shards
 	require.Equal(t, 2, len(shards))
 
diff --git a/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go b/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go
index 770f7f3ee93..b3da2718aaf 100644
--- a/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go
+++ b/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go
@@ -160,7 +160,6 @@ func nextOpOrder() int64 {
 }
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitcode, err := func() (int, error) {
@@ -229,8 +228,12 @@ func TestMain(m *testing.M) {
 
 }
 
+<<<<<<< HEAD
 func TestSchemaChange(t *testing.T) {
 	defer cluster.PanicHandler(t)
+=======
+func TestVreplMiniStressSchemaChanges(t *testing.T) {
+>>>>>>> bad431deed (Remove broken panic handler (#17354))
 	ctx := context.Background()
 
diff --git a/go/test/endtoend/onlineddl/vrepl_stress_suite/onlineddl_vrepl_stress_suite_test.go b/go/test/endtoend/onlineddl/vrepl_stress_suite/onlineddl_vrepl_stress_suite_test.go
index a3fa676d40b..f4448c7c098 100644
--- a/go/test/endtoend/onlineddl/vrepl_stress_suite/onlineddl_vrepl_stress_suite_test.go
+++ b/go/test/endtoend/onlineddl/vrepl_stress_suite/onlineddl_vrepl_stress_suite_test.go
@@ -408,7 +408,6 @@ func mysqlParams() *mysql.ConnParams {
 }
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitcode, err := func() (int, error) {
@@ -481,8 +480,12 @@ func TestMain(m *testing.M) {
 
 }
 
+<<<<<<< HEAD
 func TestSchemaChange(t *testing.T) {
 	defer cluster.PanicHandler(t)
+=======
+func TestVreplStressSchemaChanges(t *testing.T) {
+>>>>>>> bad431deed (Remove broken panic handler (#17354))
 	shards = clusterInstance.Keyspaces[0].Shards
 	require.Equal(t, 1, len(shards))
 
diff --git a/go/test/endtoend/onlineddl/vrepl_suite/onlineddl_vrepl_suite_test.go b/go/test/endtoend/onlineddl/vrepl_suite/onlineddl_vrepl_suite_test.go
index 6122a71aa44..a79bb98f303 100644
--- a/go/test/endtoend/onlineddl/vrepl_suite/onlineddl_vrepl_suite_test.go
+++ b/go/test/endtoend/onlineddl/vrepl_suite/onlineddl_vrepl_suite_test.go
@@ -63,7 +63,6 @@ const (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitcode, err := func() (int, error) {
@@ -126,8 +125,12 @@ func TestMain(m *testing.M) {
 
 }
 
+<<<<<<< HEAD
 func TestSchemaChange(t *testing.T) {
 	defer cluster.PanicHandler(t)
+=======
+func TestVreplSuiteSchemaChanges(t *testing.T) {
+>>>>>>> bad431deed (Remove broken panic handler (#17354))
 	shards := clusterInstance.Keyspaces[0].Shards
 	require.Equal(t, 1, len(shards))
 
diff --git a/go/test/endtoend/preparestmt/main_test.go b/go/test/endtoend/preparestmt/main_test.go
index 018e9d266fd..0e067062c94 100644
--- a/go/test/endtoend/preparestmt/main_test.go
+++ b/go/test/endtoend/preparestmt/main_test.go
@@ -162,7 +162,6 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitcode, err := func() (int, error) {
diff --git a/go/test/endtoend/preparestmt/stmt_methods_test.go b/go/test/endtoend/preparestmt/stmt_methods_test.go
index 24fb58bff81..5768c6eec7a 100644
--- a/go/test/endtoend/preparestmt/stmt_methods_test.go
+++ b/go/test/endtoend/preparestmt/stmt_methods_test.go
@@ -27,20 +27,16 @@ import (
 	"github.com/icrowley/fake"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-
-	"vitess.io/vitess/go/test/endtoend/cluster"
 )
 
 // TestSelect simple select the data without any condition.
 func TestSelect(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	dbo := Connect(t)
 	defer dbo.Close()
 	selectWhere(t, dbo, "")
 }
 
 func TestSelectDatabase(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	dbo := Connect(t)
 	defer dbo.Close()
 	prepare, err := dbo.Prepare("select database()")
@@ -58,7 +54,6 @@ func TestSelectDatabase(t *testing.T) {
 // TestInsertUpdateDelete validates all insert, update and
 // delete method on prepared statements.
 func TestInsertUpdateDelete(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	dbo := Connect(t)
 	defer dbo.Close()
 	// prepare insert statement
@@ -134,7 +129,6 @@ func testReplica(t *testing.T) {
 
 // testcount validates inserted rows count with expected count.
 func testcount(t *testing.T, dbo *sql.DB, except int) {
-	defer cluster.PanicHandler(t)
 	r, err := dbo.Query("SELECT count(1) FROM " + tableName)
 	require.Nil(t, err)
 
@@ -148,7 +142,6 @@ func testcount(t *testing.T, dbo *sql.DB, except int) {
 // TestAutoIncColumns test insertion of row without passing
 // the value of auto increment columns (here it is id).
 func TestAutoIncColumns(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	dbo := Connect(t)
 	defer dbo.Close()
 	// insert a row without id
@@ -227,7 +220,6 @@ func reconnectAndTest(t *testing.T) {
 // TestColumnParameter query database using column
 // parameter.
 func TestColumnParameter(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	dbo := Connect(t)
 	defer dbo.Close()
 
@@ -267,7 +259,6 @@ func TestColumnParameter(t *testing.T) {
 // TestWrongTableName query database using invalid
 // tablename and validate error.
 func TestWrongTableName(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	dbo := Connect(t)
 	defer dbo.Close()
 	execWithError(t, dbo, []uint16{1146}, "select * from teseting_table;")
@@ -319,7 +310,6 @@ func getStringToString(x sql.NullString) string {
 }
 
 func TestSelectDBA(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	dbo := Connect(t)
 	defer dbo.Close()
 
@@ -381,7 +371,6 @@ func TestSelectDBA(t *testing.T) {
 }
 
 func TestSelectLock(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	dbo := Connect(t)
 	defer dbo.Close()
 
@@ -417,7 +406,6 @@ func TestSelectLock(t *testing.T) {
 }
 
 func TestShowColumns(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	dbo := Connect(t)
 	defer dbo.Close()
 
@@ -438,7 +426,6 @@ func TestShowColumns(t *testing.T) {
 }
 
 func TestBinaryColumn(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	dbo := Connect(t, "interpolateParams=false")
 	defer dbo.Close()
 
diff --git a/go/test/endtoend/recovery/pitr/shardedpitr_test.go b/go/test/endtoend/recovery/pitr/shardedpitr_test.go
index 03fcf76b07c..453177f0983 100644
--- a/go/test/endtoend/recovery/pitr/shardedpitr_test.go
+++ b/go/test/endtoend/recovery/pitr/shardedpitr_test.go
@@ -126,7 +126,6 @@ var (
 // - asserting that restoring to restoreTime2 (going from 2 shards to 2 shards with past time) is working, it will assert for both shards
 // - asserting that restoring to restoreTime3 is working, we should get complete data after restoring, as we have in existing shards.
 func TestPITRRecovery(t *testing.T) {
-	defer cluster.PanicHandler(nil)
 	initializeCluster(t)
 	defer clusterInstance.Teardown()
 
@@ -525,7 +524,6 @@ func launchRecoveryTablet(t *testing.T, tablet *cluster.Vttablet, binlogServer *
 	tablet.MysqlctlProcess = *mysqlctlProcess
 	extraArgs := []string{"--db-credentials-file", dbCredentialFile}
 	tablet.MysqlctlProcess.InitDBFile = initDBFileWithPassword
-	tablet.VttabletProcess.DbPassword = mysqlPassword
 	tablet.MysqlctlProcess.ExtraArgs = extraArgs
 	err = tablet.MysqlctlProcess.Start()
 	require.NoError(t, err)
@@ -545,6 +543,7 @@ func launchRecoveryTablet(t *testing.T, tablet *cluster.Vttablet, binlogServer *
 		clusterInstance.VtTabletExtraArgs,
 		clusterInstance.DefaultCharset)
 	tablet.Alias = tablet.VttabletProcess.TabletPath
+	tablet.VttabletProcess.DbPassword = mysqlPassword
 	tablet.VttabletProcess.SupportsBackup = true
 	tablet.VttabletProcess.Keyspace = restoreKeyspaceName
 	tablet.VttabletProcess.ExtraArgs = []string{
diff --git a/go/test/endtoend/recovery/unshardedrecovery/recovery.go b/go/test/endtoend/recovery/unshardedrecovery/recovery.go
index 1ebb7c2647f..ae6b152271b 100644
--- a/go/test/endtoend/recovery/unshardedrecovery/recovery.go
+++ b/go/test/endtoend/recovery/unshardedrecovery/recovery.go
@@ -72,7 +72,6 @@ var (
 
 // TestMainImpl creates cluster for unsharded recovery testing.
 func TestMainImpl(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitCode, err := func() (int, error) {
@@ -201,7 +200,6 @@ func TestMainImpl(m *testing.M) {
 //
 // 7. check that vtgate queries work correctly
 func TestRecoveryImpl(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	defer tabletsTeardown()
 	verifyInitialReplication(t)
 
diff --git a/go/test/endtoend/reparent/emergencyreparent/ers_test.go b/go/test/endtoend/reparent/emergencyreparent/ers_test.go
index 584bccfdfb7..0d2eb8935d2 100644
--- a/go/test/endtoend/reparent/emergencyreparent/ers_test.go
+++ b/go/test/endtoend/reparent/emergencyreparent/ers_test.go
@@ -31,7 +31,6 @@ import (
 )
 
 func TestTrivialERS(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -56,7 +55,6 @@ func TestTrivialERS(t *testing.T) {
 }
 
 func TestReparentIgnoreReplicas(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -98,7 +96,6 @@ func TestReparentIgnoreReplicas(t *testing.T) {
 }
 
 func TestReparentDownPrimary(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -134,7 +131,6 @@ func TestReparentDownPrimary(t *testing.T) {
 }
 
 func TestReparentNoChoiceDownPrimary(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -170,7 +166,6 @@ func TestReparentNoChoiceDownPrimary(t *testing.T) {
 
 func TestSemiSyncSetupCorrectly(t *testing.T) {
 	t.Run("semi-sync enabled", func(t *testing.T) {
-		defer cluster.PanicHandler(t)
 		clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 		defer utils.TeardownCluster(clusterInstance)
 		tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -198,7 +193,6 @@ func TestSemiSyncSetupCorrectly(t *testing.T) {
 	})
 
 	t.Run("semi-sync disabled", func(t *testing.T) {
-		defer cluster.PanicHandler(t)
 		clusterInstance := utils.SetupReparentCluster(t, "none")
 		defer utils.TeardownCluster(clusterInstance)
 		tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -228,7 +222,6 @@ func TestSemiSyncSetupCorrectly(t *testing.T) {
 
 // TestERSPromoteRdonly tests that we never end up promoting a rdonly instance as the primary
 func TestERSPromoteRdonly(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -256,7 +249,6 @@ func TestERSPromoteRdonly(t *testing.T) {
 
 // TestERSPreventCrossCellPromotion tests that we promote a replica in the same cell as the previous primary if prevent cross cell promotion flag is set
 func TestERSPreventCrossCellPromotion(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -279,7 +271,6 @@ func TestERSPreventCrossCellPromotion(t *testing.T) {
 // TestPullFromRdonly tests that if a rdonly tablet is the most advanced, then our promoted primary should have
 // caught up to it by pulling transactions from it
 func TestPullFromRdonly(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -351,7 +342,6 @@ func TestPullFromRdonly(t *testing.T) {
 // replicas which do not have any replication status and also succeeds if the io thread
 // is stopped on the primary elect.
 func TestNoReplicationStatusAndIOThreadStopped(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -451,7 +441,6 @@ func TestERSForInitialization(t *testing.T) {
 }
 
 func TestRecoverWithMultipleFailures(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -479,7 +468,6 @@ func TestRecoverWithMultipleFailures(t *testing.T) {
 // TestERSFailFast tests that ERS will fail fast if it cannot find any tablet which can be safely promoted instead of promoting
 // a tablet and hanging while inserting a row in the reparent journal on getting semi-sync ACKs
 func TestERSFailFast(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -519,7 +507,6 @@ func TestERSFailFast(t *testing.T) {
 // TestReplicationStopped checks that ERS ignores the tablets that have sql thread stopped.
 // If there are more than 1, we also fail.
 func TestReplicationStopped(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
diff --git a/go/test/endtoend/reparent/newfeaturetest/reparent_test.go b/go/test/endtoend/reparent/newfeaturetest/reparent_test.go
index fb1aa0e49e6..034acb0f529 100644
--- a/go/test/endtoend/reparent/newfeaturetest/reparent_test.go
+++ b/go/test/endtoend/reparent/newfeaturetest/reparent_test.go
@@ -36,7 +36,6 @@ import (
 // The test takes down the vttablets of the primary and a rdonly tablet and runs ERS with the
 // default values of remote_operation_timeout, lock-timeout flags and wait_replicas_timeout subflag.
 func TestRecoverWithMultipleVttabletFailures(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -68,7 +67,6 @@ func TestRecoverWithMultipleVttabletFailures(t *testing.T) {
 // and ERS succeeds.
 func TestSingleReplicaERS(t *testing.T) {
 	// Set up a cluster with none durability policy
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "none")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -104,7 +102,6 @@ func TestSingleReplicaERS(t *testing.T) {
 
 // TestTabletRestart tests that a running tablet can be restarted and everything is still fine
 func TestTabletRestart(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -117,7 +114,6 @@ func TestTabletRestart(t *testing.T) {
 
 // Tests ensures that ChangeTabletType works even when semi-sync plugins are not loaded.
 func TestChangeTypeWithoutSemiSync(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "none")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -163,7 +159,6 @@ func TestChangeTypeWithoutSemiSync(t *testing.T) {
 // TestERSWithWriteInPromoteReplica tests that ERS doesn't fail even if there is a
 // write that happens when PromoteReplica is called.
 func TestERSWithWriteInPromoteReplica(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -181,8 +176,12 @@ func TestERSWithWriteInPromoteReplica(t *testing.T) {
 }
 
 func TestBufferingWithMultipleDisruptions(t *testing.T) {
+<<<<<<< HEAD
 	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupShardedReparentCluster(t)
+=======
+	clusterInstance := utils.SetupShardedReparentCluster(t, "semi_sync")
+>>>>>>> bad431deed (Remove broken panic handler (#17354))
 	defer utils.TeardownCluster(clusterInstance)
 
 	// Stop all VTOrc instances, so that they don't interfere with the test.
diff --git a/go/test/endtoend/reparent/plannedreparent/reparent_range_based_test.go b/go/test/endtoend/reparent/plannedreparent/reparent_range_based_test.go index 2d89893569d..91471b1cebb 100644 --- a/go/test/endtoend/reparent/plannedreparent/reparent_range_based_test.go +++ b/go/test/endtoend/reparent/plannedreparent/reparent_range_based_test.go @@ -28,7 +28,6 @@ import ( ) func TestReparentGracefulRangeBased(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() utils.ShardName = "0000000000000000-ffffffffffffffff" diff --git a/go/test/endtoend/reparent/plannedreparent/reparent_test.go b/go/test/endtoend/reparent/plannedreparent/reparent_test.go index e7fc7029ef0..0e564f02223 100644 --- a/go/test/endtoend/reparent/plannedreparent/reparent_test.go +++ b/go/test/endtoend/reparent/plannedreparent/reparent_test.go @@ -36,7 +36,6 @@ import ( ) func TestPrimaryToSpareStateChangeImpossible(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -48,7 +47,6 @@ func TestPrimaryToSpareStateChangeImpossible(t *testing.T) { } func TestReparentCrossCell(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -62,7 +60,6 @@ func TestReparentCrossCell(t *testing.T) { } func TestReparentGraceful(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -85,7 +82,6 @@ func TestReparentGraceful(t *testing.T) { // TestPRSWithDrainedLaggingTablet tests that PRS succeeds even if we have a lagging drained tablet func TestPRSWithDrainedLaggingTablet(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -112,7 +108,6 @@ func TestPRSWithDrainedLaggingTablet(t *testing.T) { } func TestReparentReplicaOffline(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -136,7 +131,6 @@ func TestReparentReplicaOffline(t *testing.T) { } func TestReparentAvoid(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -172,14 +166,12 @@ func TestReparentAvoid(t *testing.T) { } func TestReparentFromOutside(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) reparentFromOutside(t, clusterInstance, false) } func TestReparentFromOutsideWithNoPrimary(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -279,7 +271,6 @@ func reparentFromOutside(t *testing.T, clusterInstance *cluster.LocalProcessClus } func TestReparentWithDownReplica(t *testing.T) { - defer cluster.PanicHandler(t) 
clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -333,7 +324,6 @@ func TestReparentWithDownReplica(t *testing.T) { } func TestChangeTypeSemiSync(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -400,7 +390,6 @@ func TestChangeTypeSemiSync(t *testing.T) { // 1. When PRS is run with the cross_cell durability policy setup, then the semi-sync settings on all the tablets are as expected // 2. Bringing up a new vttablet should have its replication and semi-sync setup correctly without any manual intervention func TestCrossCellDurability(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "cross_cell") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -440,7 +429,6 @@ func TestCrossCellDurability(t *testing.T) { // TestFullStatus tests that the RPC FullStatus works as intended. func TestFullStatus(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets diff --git a/go/test/endtoend/reparent/prscomplex/main_test.go b/go/test/endtoend/reparent/prscomplex/main_test.go index 88e3d6c09fa..c2dafb8589f 100644 --- a/go/test/endtoend/reparent/prscomplex/main_test.go +++ b/go/test/endtoend/reparent/prscomplex/main_test.go @@ -44,7 +44,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/reparent/prssettingspool/main_test.go b/go/test/endtoend/reparent/prssettingspool/main_test.go index 872f1867c77..66d02e06c7e 100644 --- a/go/test/endtoend/reparent/prssettingspool/main_test.go +++ b/go/test/endtoend/reparent/prssettingspool/main_test.go @@ -43,7 +43,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/reparent/semisync/semi_sync_test.go b/go/test/endtoend/reparent/semisync/semi_sync_test.go index b0cb9cc1aec..5b798048e3c 100644 --- a/go/test/endtoend/reparent/semisync/semi_sync_test.go +++ b/go/test/endtoend/reparent/semisync/semi_sync_test.go @@ -28,7 +28,11 @@ import ( ) func TestSemiSyncUpgradeDowngrade(t *testing.T) { - defer cluster.PanicHandler(t) + ver, err := cluster.GetMajorVersion("vtgate") + require.NoError(t, err) + if ver != 21 { + t.Skip("We only want to run this test for v21 release") + } clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets diff --git a/go/test/endtoend/schemadiff/vrepl/schemadiff_vrepl_suite_test.go b/go/test/endtoend/schemadiff/vrepl/schemadiff_vrepl_suite_test.go index 79cb4a0174e..c6fff7d21f0 100644 --- a/go/test/endtoend/schemadiff/vrepl/schemadiff_vrepl_suite_test.go +++ b/go/test/endtoend/schemadiff/vrepl/schemadiff_vrepl_suite_test.go @@ -69,7 +69,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode, err := func() (int, error) { @@ -132,8 +131,7 @@ func TestMain(m *testing.M) { } -func
TestSchemaChange(t *testing.T) { - defer cluster.PanicHandler(t) +func TestSchemadiffSchemaChanges(t *testing.T) { shards := clusterInstance.Keyspaces[0].Shards require.Equal(t, 1, len(shards)) @@ -276,7 +279,6 @@ func testSingle(t *testing.T, testName string) { } // func TestRandomSchemaChanges(t *testing.T) { -// defer cluster.PanicHandler(t) // hints := &schemadiff.DiffHints{AutoIncrementStrategy: schemadiff.AutoIncrementIgnore} // // count := 20 diff --git a/go/test/endtoend/sharded/sharded_keyspace_test.go b/go/test/endtoend/sharded/sharded_keyspace_test.go index f311404ad7e..8ca6d644a61 100644 --- a/go/test/endtoend/sharded/sharded_keyspace_test.go +++ b/go/test/endtoend/sharded/sharded_keyspace_test.go @@ -73,7 +73,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode, err := func() (int, error) { @@ -102,7 +101,6 @@ func TestMain(m *testing.M) { } func TestShardedKeyspace(t *testing.T) { - defer cluster.PanicHandler(t) shard1 := clusterInstance.Keyspaces[0].Shards[0] shard2 := clusterInstance.Keyspaces[0].Shards[1] diff --git a/go/test/endtoend/stress/stress_test.go b/go/test/endtoend/stress/stress_test.go index 30a5ee69c1a..1bf716274d4 100644 --- a/go/test/endtoend/stress/stress_test.go +++ b/go/test/endtoend/stress/stress_test.go @@ -42,7 +42,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -84,7 +83,6 @@ func TestMain(m *testing.M) { // The stressor is started on its own goroutine while the end-to-end test // is executed on the same cluster. func TestSimpleStressTest(t *testing.T) { - defer cluster.PanicHandler(t) cfg := stress.DefaultConfig cfg.ConnParams = &vtParams diff --git a/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go b/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go index ca4fe5f6094..920e2193453 100644 --- a/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go +++ b/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go @@ -272,7 +272,6 @@ type BufferingTest struct { } func (bt *BufferingTest) Test(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance, exitCode := bt.createCluster() if exitCode != 0 { t.Fatal("failed to start cluster") diff --git a/go/test/endtoend/tabletgateway/main_test.go b/go/test/endtoend/tabletgateway/main_test.go index 354be6969d3..cf179c49845 100644 --- a/go/test/endtoend/tabletgateway/main_test.go +++ b/go/test/endtoend/tabletgateway/main_test.go @@ -61,7 +61,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/tabletgateway/vtgate_test.go b/go/test/endtoend/tabletgateway/vtgate_test.go index 1f4f8758e16..d9c87fdc7f3 100644 --- a/go/test/endtoend/tabletgateway/vtgate_test.go +++ b/go/test/endtoend/tabletgateway/vtgate_test.go @@ -40,7 +40,6 @@ import ( ) func TestVtgateHealthCheck(t *testing.T) { - defer cluster.PanicHandler(t) // Healthcheck interval on tablet is set to 1s, so sleep for 2s time.Sleep(2 * time.Second) verifyVtgateVariables(t, clusterInstance.VtgateProcess.VerifyURL) @@ -54,7 +53,6 @@ func TestVtgateHealthCheck(t *testing.T) { } func TestVtgateReplicationStatusCheck(t *testing.T) { - defer cluster.PanicHandler(t) // Healthcheck interval on tablet is set to 1s, so sleep for 2s time.Sleep(2 * time.Second) verifyVtgateVariables(t, clusterInstance.VtgateProcess.VerifyURL) @@ -104,7 +102,6 @@ func
TestVtgateReplicationStatusCheck(t *testing.T) { } func TestVtgateReplicationStatusCheckWithTabletTypeChange(t *testing.T) { - defer cluster.PanicHandler(t) // Healthcheck interval on tablet is set to 1s, so sleep for 2s time.Sleep(2 * time.Second) verifyVtgateVariables(t, clusterInstance.VtgateProcess.VerifyURL) @@ -180,7 +177,6 @@ func retryNTimes(t *testing.T, maxRetries int, f func() bool) { func TestReplicaTransactions(t *testing.T) { // TODO(deepthi): this test seems to depend on previous test. Fix tearDown so that tests are independent - defer cluster.PanicHandler(t) // Healthcheck interval on tablet is set to 1s, so sleep for 2s time.Sleep(2 * time.Second) ctx := context.Background() @@ -287,7 +283,6 @@ func TestReplicaTransactions(t *testing.T) { // TestStreamingRPCStuck tests that StreamExecute calls don't get stuck on the vttablets if a client stop reading from a stream. func TestStreamingRPCStuck(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtConn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) diff --git a/go/test/endtoend/tabletmanager/commands_test.go b/go/test/endtoend/tabletmanager/commands_test.go index 970e89c7037..52ba9cbd97f 100644 --- a/go/test/endtoend/tabletmanager/commands_test.go +++ b/go/test/endtoend/tabletmanager/commands_test.go @@ -29,7 +29,6 @@ import ( "vitess.io/vitess/go/json2" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" @@ -44,7 +43,6 @@ var ( // TabletCommands tests the basic tablet commands func TestTabletCommands(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &primaryTabletParams) @@ -167,7 +165,6 @@ func assertExecuteMultiFetch(t *testing.T, qr string) { func TestHook(t *testing.T) { // test a regular program works - defer cluster.PanicHandler(t) runHookAndAssert(t, []string{ "ExecuteHook", primaryTablet.Alias, "test.sh", "--", "--flag1", "--param1=hello"}, 0, false, "") @@ -208,7 +205,6 @@ func runHookAndAssert(t *testing.T, params []string, expectedStatus int64, expec func TestShardReplicationFix(t *testing.T) { // make sure the replica is in the replication graph, 2 nodes: 1 primary, 1 replica - defer cluster.PanicHandler(t) result, err := clusterInstance.VtctldClientProcess.GetShardReplication(keyspaceName, shardName, cell) require.Nil(t, err, "error should be Nil") require.NotNil(t, result[cell], "result should not be Nil") @@ -232,7 +228,6 @@ func TestShardReplicationFix(t *testing.T) { } func TestGetSchema(t *testing.T) { - defer cluster.PanicHandler(t) res, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("GetSchema", "--include-views", "--tables", "t1,v1", diff --git a/go/test/endtoend/tabletmanager/custom_rule_topo_test.go b/go/test/endtoend/tabletmanager/custom_rule_topo_test.go index 0c6e056af36..e692bf94de4 100644 --- a/go/test/endtoend/tabletmanager/custom_rule_topo_test.go +++ b/go/test/endtoend/tabletmanager/custom_rule_topo_test.go @@ -33,7 +33,6 @@ import ( func TestTopoCustomRule(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &primaryTabletParams) require.NoError(t, err) diff --git a/go/test/endtoend/tabletmanager/lock_unlock_test.go b/go/test/endtoend/tabletmanager/lock_unlock_test.go index 79286438698..f636f52c353 100644 --- a/go/test/endtoend/tabletmanager/lock_unlock_test.go +++ 
b/go/test/endtoend/tabletmanager/lock_unlock_test.go @@ -30,12 +30,10 @@ import ( "github.com/stretchr/testify/assert" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" ) // TestLockAndUnlock tests the lock ability by locking a replica and asserting it does not see changes func TestLockAndUnlock(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &primaryTabletParams) @@ -76,7 +74,6 @@ func TestLockAndUnlock(t *testing.T) { // TestStartReplicationUntilAfter tests by writing three rows, noting the gtid after each, and then replaying them one by one func TestStartReplicationUntilAfter(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &primaryTabletParams) @@ -130,7 +127,6 @@ func TestStartReplicationUntilAfter(t *testing.T) { // TestLockAndTimeout tests that the lock times out and updates can be seen after timeout func TestLockAndTimeout(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() primaryConn, err := mysql.Connect(ctx, &primaryTabletParams) diff --git a/go/test/endtoend/tabletmanager/main_test.go b/go/test/endtoend/tabletmanager/main_test.go index 52feaec0e17..eeace77972f 100644 --- a/go/test/endtoend/tabletmanager/main_test.go +++ b/go/test/endtoend/tabletmanager/main_test.go @@ -79,7 +79,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/tabletmanager/primary/tablet_test.go b/go/test/endtoend/tabletmanager/primary/tablet_test.go index 297e5540fac..aaff0ff00a0 100644 --- a/go/test/endtoend/tabletmanager/primary/tablet_test.go +++ b/go/test/endtoend/tabletmanager/primary/tablet_test.go @@ -69,7 +69,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -116,7 +115,6 @@ func TestMain(m *testing.M) { } func TestRepeatedInitShardPrimary(t *testing.T) { - defer cluster.PanicHandler(t) // Test that using InitShardPrimary can go back and forth between 2 hosts. // Make replica tablet as primary @@ -155,7 +153,6 @@ func TestRepeatedInitShardPrimary(t *testing.T) { } func TestPrimaryRestartSetsPTSTimestamp(t *testing.T) { - defer cluster.PanicHandler(t) // Test that PTS timestamp is set when we restart the PRIMARY vttablet. // PTS = PrimaryTermStart. // See StreamHealthResponse.primary_term_start_timestamp for details. 
diff --git a/go/test/endtoend/tabletmanager/qps_test.go b/go/test/endtoend/tabletmanager/qps_test.go index 0611feada12..0ce41f04a63 100644 --- a/go/test/endtoend/tabletmanager/qps_test.go +++ b/go/test/endtoend/tabletmanager/qps_test.go @@ -24,12 +24,10 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) func TestQPS(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ diff --git a/go/test/endtoend/tabletmanager/replication_manager/tablet_test.go b/go/test/endtoend/tabletmanager/replication_manager/tablet_test.go index 86b02244762..6620657c264 100644 --- a/go/test/endtoend/tabletmanager/replication_manager/tablet_test.go +++ b/go/test/endtoend/tabletmanager/replication_manager/tablet_test.go @@ -73,7 +73,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/tabletmanager/tablegc/tablegc_test.go b/go/test/endtoend/tabletmanager/tablegc/tablegc_test.go index 685c361cef7..b1abec3a6b7 100644 --- a/go/test/endtoend/tabletmanager/tablegc/tablegc_test.go +++ b/go/test/endtoend/tabletmanager/tablegc/tablegc_test.go @@ -83,7 +83,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/tabletmanager/tablet_health_test.go b/go/test/endtoend/tabletmanager/tablet_health_test.go index bf3747fde29..061528682d2 100644 --- a/go/test/endtoend/tabletmanager/tablet_health_test.go +++ b/go/test/endtoend/tabletmanager/tablet_health_test.go @@ -39,7 +39,6 @@ import ( // TabletReshuffle test if a vttablet can be pointed at an existing mysql func TestTabletReshuffle(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &primaryTabletParams) @@ -92,7 +91,6 @@ func TestTabletReshuffle(t *testing.T) { func TestHealthCheck(t *testing.T) { // Add one replica that starts not initialized - defer cluster.PanicHandler(t) ctx := context.Background() clusterInstance.DisableVTOrcRecoveries(t) defer clusterInstance.EnableVTOrcRecoveries(t) @@ -200,7 +198,6 @@ func TestHealthCheck(t *testing.T) { // TestHealthCheckSchemaChangeSignal tests the tables and views, which report their schemas have changed in the output of a StreamHealth. 
func TestHealthCheckSchemaChangeSignal(t *testing.T) { // Add one replica that starts not initialized - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := clusterInstance.GetVTParams(keyspaceName) @@ -381,7 +378,6 @@ func TestHealthCheckDrainedStateDoesNotShutdownQueryService(t *testing.T) { // - the query service won't be shutdown // Wait if tablet is not in service state - defer cluster.PanicHandler(t) clusterInstance.DisableVTOrcRecoveries(t) defer clusterInstance.EnableVTOrcRecoveries(t) err := rdonlyTablet.VttabletProcess.WaitForTabletStatus("SERVING") diff --git a/go/test/endtoend/tabletmanager/tablet_security_policy_test.go b/go/test/endtoend/tabletmanager/tablet_security_policy_test.go index b3b11405abb..90397a737ef 100644 --- a/go/test/endtoend/tabletmanager/tablet_security_policy_test.go +++ b/go/test/endtoend/tabletmanager/tablet_security_policy_test.go @@ -29,7 +29,6 @@ import ( ) func TestFallbackSecurityPolicy(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() mTablet := clusterInstance.NewVttabletInstance("replica", 0, "") @@ -84,7 +83,6 @@ func assertAllowedURLTest(t *testing.T, url string) { } func TestDenyAllSecurityPolicy(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() mTablet := clusterInstance.NewVttabletInstance("replica", 0, "") @@ -116,7 +114,6 @@ func TestDenyAllSecurityPolicy(t *testing.T) { } func TestReadOnlySecurityPolicy(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() mTablet := clusterInstance.NewVttabletInstance("replica", 0, "") diff --git a/go/test/endtoend/tabletmanager/tablet_test.go b/go/test/endtoend/tabletmanager/tablet_test.go index 1d8e897a4d2..2ded055230f 100644 --- a/go/test/endtoend/tabletmanager/tablet_test.go +++ b/go/test/endtoend/tabletmanager/tablet_test.go @@ -31,7 +31,6 @@ import ( // TestEnsureDB tests that vttablet creates the db as needed func TestEnsureDB(t *testing.T) { - defer cluster.PanicHandler(t) // Create new tablet tablet := clusterInstance.NewVttabletInstance("replica", 0, "") @@ -67,7 +66,6 @@ func TestEnsureDB(t *testing.T) { // TestResetReplicationParameters tests that the RPC ResetReplicationParameters works as intended. 
func TestResetReplicationParameters(t *testing.T) { - defer cluster.PanicHandler(t) // Create new tablet tablet := clusterInstance.NewVttabletInstance("replica", 0, "") diff --git a/go/test/endtoend/tabletmanager/throttler_topo/throttler_test.go b/go/test/endtoend/tabletmanager/throttler_topo/throttler_test.go index c8251846fe6..512a3051c02 100644 --- a/go/test/endtoend/tabletmanager/throttler_topo/throttler_test.go +++ b/go/test/endtoend/tabletmanager/throttler_topo/throttler_test.go @@ -98,7 +98,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -253,7 +252,6 @@ func vtgateExec(t *testing.T, query string, expectError string) *sqltypes.Result } func TestInitialThrottler(t *testing.T) { - defer cluster.PanicHandler(t) t.Run("validating OK response from disabled throttler", func(t *testing.T) { waitForThrottleCheckStatus(t, primaryTablet, http.StatusOK) @@ -360,7 +358,6 @@ func TestInitialThrottler(t *testing.T) { } func TestThrottleViaApplySchema(t *testing.T) { - defer cluster.PanicHandler(t) t.Run("throttling via ApplySchema", func(t *testing.T) { vtctlParams := &cluster.ApplySchemaParams{DDLStrategy: "online"} _, err := clusterInstance.VtctldClientProcess.ApplySchemaWithOutput( @@ -406,7 +403,6 @@ func TestThrottleViaApplySchema(t *testing.T) { } func TestThrottlerAfterMetricsCollected(t *testing.T) { - defer cluster.PanicHandler(t) // By this time metrics will have been collected. We expect no lag, and something like: // {"StatusCode":200,"Value":0.282278,"Threshold":1,"Message":""} @@ -435,7 +431,6 @@ func TestThrottlerAfterMetricsCollected(t *testing.T) { } func TestLag(t *testing.T) { - defer cluster.PanicHandler(t) // Temporarily disable VTOrc recoveries because we want to // STOP replication specifically in order to increase the // lag and we DO NOT want VTOrc to try and fix this. @@ -519,7 +514,6 @@ func TestLag(t *testing.T) { } func TestNoReplicas(t *testing.T) { - defer cluster.PanicHandler(t) t.Run("changing replica to RDONLY", func(t *testing.T) { err := clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", replicaTablet.Alias, "RDONLY") assert.NoError(t, err) @@ -537,7 +531,6 @@ func TestNoReplicas(t *testing.T) { } func TestCustomQuery(t *testing.T) { - defer cluster.PanicHandler(t) t.Run("enabling throttler with custom query and threshold", func(t *testing.T) { _, err := throttler.UpdateThrottlerTopoConfig(clusterInstance, true, false, customThreshold, customQuery, nil) @@ -604,7 +597,6 @@ func TestCustomQuery(t *testing.T) { } func TestRestoreDefaultQuery(t *testing.T) { - defer cluster.PanicHandler(t) // Validate going back from custom-query to default-query (replication lag) still works. t.Run("enabling throttler with default query and threshold", func(t *testing.T) { diff --git a/go/test/endtoend/topoconncache/main_test.go b/go/test/endtoend/topoconncache/main_test.go index 26eb3918a0b..074bf875165 100644 --- a/go/test/endtoend/topoconncache/main_test.go +++ b/go/test/endtoend/topoconncache/main_test.go @@ -97,7 +97,6 @@ Topology: We create a keyspace with two shards , having 3 tablets each. Primaries belong to 'zone1' and replicas/rdonly belongs to cell2.
*/ func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode, err := func() (int, error) { diff --git a/go/test/endtoend/topoconncache/topo_conn_cache_test.go b/go/test/endtoend/topoconncache/topo_conn_cache_test.go index 082ecc5717f..f676af318cd 100644 --- a/go/test/endtoend/topoconncache/topo_conn_cache_test.go +++ b/go/test/endtoend/topoconncache/topo_conn_cache_test.go @@ -37,7 +37,6 @@ import ( 4. 'ListAllTablets' should return all the new tablets. */ func TestVtctldListAllTablets(t *testing.T) { - defer cluster.PanicHandler(t) url := fmt.Sprintf("http://%s:%d/api/keyspaces/", clusterInstance.Hostname, clusterInstance.VtctldHTTPPort) testURL(t, url, "keyspace url") diff --git a/go/test/endtoend/topotest/consul/main_test.go b/go/test/endtoend/topotest/consul/main_test.go index 1c278864ced..69ecf3d0f8a 100644 --- a/go/test/endtoend/topotest/consul/main_test.go +++ b/go/test/endtoend/topotest/consul/main_test.go @@ -61,7 +61,6 @@ CREATE TABLE t1 ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -97,7 +96,6 @@ func TestMain(m *testing.M) { } func TestTopoRestart(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", diff --git a/go/test/endtoend/topotest/etcd2/main_test.go b/go/test/endtoend/topotest/etcd2/main_test.go index db34bd2ee86..fa580cb2749 100644 --- a/go/test/endtoend/topotest/etcd2/main_test.go +++ b/go/test/endtoend/topotest/etcd2/main_test.go @@ -61,7 +61,6 @@ CREATE TABLE t1 ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -96,7 +95,6 @@ func TestMain(m *testing.M) { } func TestTopoDownServingQuery(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", diff --git a/go/test/endtoend/topotest/zk2/main_test.go b/go/test/endtoend/topotest/zk2/main_test.go index 816bbc72d72..1fc3374c888 100644 --- a/go/test/endtoend/topotest/zk2/main_test.go +++ b/go/test/endtoend/topotest/zk2/main_test.go @@ -61,7 +61,6 @@ CREATE TABLE t1 ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -97,7 +96,6 @@ func TestMain(m *testing.M) { } func TestTopoDownServingQuery(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", diff --git a/go/test/endtoend/transaction/twopc/fuzz/main_test.go b/go/test/endtoend/transaction/twopc/fuzz/main_test.go new file mode 100644 index 00000000000..4d168fbdde0 --- /dev/null +++ b/go/test/endtoend/transaction/twopc/fuzz/main_test.go @@ -0,0 +1,128 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fuzz + +import ( + "context" + _ "embed" + "flag" + "fmt" + "os" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/transaction/twopc/utils" +) + +var ( + clusterInstance *cluster.LocalProcessCluster + vtParams mysql.ConnParams + vtgateGrpcAddress string + keyspaceName = "ks" + unshardedKeyspaceName = "uks" + cell = "zone1" + hostname = "localhost" + + //go:embed schema.sql + SchemaSQL string + + //go:embed vschema.json + VSchema string +) + +func TestMain(m *testing.M) { + flag.Parse() + + exitcode := func() int { + clusterInstance = cluster.NewCluster(cell, hostname) + defer clusterInstance.Teardown() + + // Start topo server + if err := clusterInstance.StartTopo(); err != nil { + return 1 + } + + // Reserve vtGate port in order to pass it to vtTablet + clusterInstance.VtgateGrpcPort = clusterInstance.GetAndReservePort() + + // Set extra args for twopc + clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, + "--transaction_mode", "TWOPC", + "--grpc_use_effective_callerid", + "--tablet_refresh_interval", "2s", + ) + clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, + "--twopc_abandon_age", "1", + "--migration_check_interval", "2s", + ) + + // Start keyspace + keyspace := &cluster.Keyspace{ + Name: keyspaceName, + SchemaSQL: SchemaSQL, + VSchema: VSchema, + DurabilityPolicy: "semi_sync", + } + if err := clusterInstance.StartKeyspace(*keyspace, []string{"-40", "40-80", "80-"}, 2, false); err != nil { + return 1 + } + + // Start an unsharded keyspace + unshardedKeyspace := &cluster.Keyspace{ + Name: unshardedKeyspaceName, + SchemaSQL: "", + VSchema: "{}", + DurabilityPolicy: "semi_sync", + } + if err := clusterInstance.StartUnshardedKeyspace(*unshardedKeyspace, 2, false); err != nil { + return 1 + } + + // Start Vtgate + if err := clusterInstance.StartVtgate(); err != nil { + return 1 + } + vtParams = clusterInstance.GetVTParams("") + vtgateGrpcAddress = fmt.Sprintf("%s:%d", clusterInstance.Hostname, clusterInstance.VtgateGrpcPort) + + return m.Run() + }() + os.Exit(exitcode) +} + +func start(t *testing.T) (*mysql.Conn, func()) { + ctx := context.Background() + conn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + cleanup(t) + + return conn, func() { + conn.Close() + cleanup(t) + } +} + +func cleanup(t *testing.T) { + + utils.ClearOutTable(t, vtParams, "twopc_fuzzer_insert") + utils.ClearOutTable(t, vtParams, "twopc_fuzzer_update") + utils.ClearOutTable(t, vtParams, "twopc_fuzzer_multi") + utils.ClearOutTable(t, vtParams, "twopc_t1") +} diff --git a/go/test/endtoend/transaction/twopc/main_test.go b/go/test/endtoend/transaction/twopc/main_test.go new file mode 100644 index 00000000000..6d09c174a4d --- /dev/null +++ b/go/test/endtoend/transaction/twopc/main_test.go @@ -0,0 +1,347 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package transaction + +import ( + "context" + _ "embed" + "encoding/json" + "flag" + "fmt" + "io" + "os" + "strings" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/test/endtoend/utils" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/endtoend/cluster" + twopcutil "vitess.io/vitess/go/test/endtoend/transaction/twopc/utils" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/vtgate/vtgateconn" +) + +var ( + clusterInstance *cluster.LocalProcessCluster + mysqlParams mysql.ConnParams + vtParams mysql.ConnParams + vtgateGrpcAddress string + keyspaceName = "ks" + cell = "zone1" + hostname = "localhost" + sidecarDBName = "vt_ks" + + //go:embed schema.sql + SchemaSQL string + + //go:embed vschema.json + VSchema string +) + +func TestMain(m *testing.M) { + flag.Parse() + + exitcode := func() int { + clusterInstance = cluster.NewCluster(cell, hostname) + defer clusterInstance.Teardown() + + // Start topo server + if err := clusterInstance.StartTopo(); err != nil { + return 1 + } + + // Reserve vtGate port in order to pass it to vtTablet + clusterInstance.VtgateGrpcPort = clusterInstance.GetAndReservePort() + + // Set extra args for twopc + clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, + "--transaction_mode", "TWOPC", + "--grpc_use_effective_callerid", + ) + clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, + "--twopc_abandon_age", "1", + "--queryserver-config-transaction-cap", "3", + "--queryserver-config-transaction-timeout", "400s", + "--queryserver-config-query-timeout", "9000s", + ) + + // Start keyspace + keyspace := &cluster.Keyspace{ + Name: keyspaceName, + SchemaSQL: SchemaSQL, + VSchema: VSchema, + SidecarDBName: sidecarDBName, + DurabilityPolicy: "semi_sync", + } + if err := clusterInstance.StartKeyspace(*keyspace, []string{"-40", "40-80", "80-"}, 2, false); err != nil { + return 1 + } + + // Start Vtgate + if err := clusterInstance.StartVtgate(); err != nil { + return 1 + } + vtParams = clusterInstance.GetVTParams(keyspaceName) + vtgateGrpcAddress = fmt.Sprintf("%s:%d", clusterInstance.Hostname, clusterInstance.VtgateGrpcPort) + + clusterInstance.NewVTAdminProcess() + if err := clusterInstance.VtadminProcess.Setup(); err != nil { + return 1 + } + + // create mysql instance and connection parameters + conn, closer, err := utils.NewMySQL(clusterInstance, keyspaceName, SchemaSQL) + if err != nil { + fmt.Println(err) + return 1 + } + defer closer() + mysqlParams = conn + + return m.Run() + }() + os.Exit(exitcode) +} + +func start(t *testing.T) (*mysql.Conn, func()) { + ctx := context.Background() + conn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + cleanup(t) + + return conn, func() { + conn.Close() + cleanup(t) + } +} + +func cleanup(t *testing.T) { + twopcutil.ClearOutTable(t, vtParams, "twopc_user") + twopcutil.ClearOutTable(t, vtParams, "twopc_t1") + twopcutil.ClearOutTable(t, vtParams, "twopc_lookup") + twopcutil.ClearOutTable(t, vtParams, "lookup_unique") + twopcutil.ClearOutTable(t, vtParams, "lookup") + sm.reset() +} + +func startWithMySQL(t *testing.T) (utils.MySQLCompare, func()) { + mcmp, err := utils.NewMySQLCompare(t, vtParams, mysqlParams) + require.NoError(t, err) + + deleteAll := func() { + tables := []string{"twopc_user"} + for _, table := range tables { + _, _ = 
mcmp.ExecAndIgnore("delete from " + table) + } + } + + deleteAll() + + return mcmp, func() { + deleteAll() + mcmp.Close() + } +} + +type extractInterestingValues func(dtidMap map[string]string, vals []sqltypes.Value) []sqltypes.Value + +var tables = map[string]extractInterestingValues{ + "ks.dt_state": func(dtidMap map[string]string, vals []sqltypes.Value) (out []sqltypes.Value) { + dtid := getDTID(dtidMap, vals[0].ToString()) + dtState := getDTState(vals[1]) + out = append(out, sqltypes.NewVarChar(dtid), sqltypes.NewVarChar(dtState.String())) + return + }, + "ks.dt_participant": func(dtidMap map[string]string, vals []sqltypes.Value) (out []sqltypes.Value) { + dtid := getDTID(dtidMap, vals[0].ToString()) + out = append([]sqltypes.Value{sqltypes.NewVarChar(dtid)}, vals[1:]...) + return + }, + "ks.redo_state": func(dtidMap map[string]string, vals []sqltypes.Value) (out []sqltypes.Value) { + dtid := getDTID(dtidMap, vals[0].ToString()) + dtState := getDTState(vals[1]) + out = append(out, sqltypes.NewVarChar(dtid), sqltypes.NewVarChar(dtState.String())) + return + }, + "ks.redo_statement": func(dtidMap map[string]string, vals []sqltypes.Value) (out []sqltypes.Value) { + dtid := getDTID(dtidMap, vals[0].ToString()) + stmt := getStatement(vals[2].ToString()) + out = append([]sqltypes.Value{sqltypes.NewVarChar(dtid)}, vals[1], sqltypes.TestValue(sqltypes.Blob, stmt)) + return + }, + "ks.twopc_user": func(_ map[string]string, vals []sqltypes.Value) []sqltypes.Value { return vals }, +} + +func getDTState(val sqltypes.Value) querypb.TransactionState { + s, _ := val.ToInt() + return querypb.TransactionState(s) +} + +func getDTID(dtidMap map[string]string, dtKey string) string { + dtid, exists := dtidMap[dtKey] + if !exists { + dtid = fmt.Sprintf("dtid-%d", len(dtidMap)+1) + dtidMap[dtKey] = dtid + } + return dtid +} + +func getStatement(stmt string) string { + var sKey string + var prefix string + switch { + case strings.HasPrefix(stmt, "savepoint"): + prefix = "savepoint-" + sKey = stmt[9:] + case strings.HasPrefix(stmt, "rollback to"): + prefix = "rollback-" + sKey = stmt[11:] + default: + return stmt + } + + sid, exists := sm.stmt[sKey] + if !exists { + sid = fmt.Sprintf("%d", len(sm.stmt)+1) + sm.stmt[sKey] = sid + } + return prefix + sid +} + +func runVStream(t *testing.T, ctx context.Context, ch chan *binlogdatapb.VEvent, vtgateConn *vtgateconn.VTGateConn) { + shards := []string{"-40", "40-80", "80-"} + shardGtids := make([]*binlogdatapb.ShardGtid, 0, len(shards)) + var seen = make(map[string]bool, len(shards)) + var wg sync.WaitGroup + for _, shard := range shards { + shardGtids = append(shardGtids, &binlogdatapb.ShardGtid{Keyspace: keyspaceName, Shard: shard, Gtid: "current"}) + seen[shard] = false + wg.Add(1) + } + vgtid := &binlogdatapb.VGtid{ShardGtids: shardGtids} + filter := &binlogdatapb.Filter{Rules: []*binlogdatapb.Rule{{Match: "/.*/"}}} + + vReader, err := vtgateConn.VStream(ctx, topodatapb.TabletType_PRIMARY, vgtid, filter, nil) + require.NoError(t, err) + + go func() { + for { + evs, err := vReader.Recv() + if err == io.EOF || ctx.Err() != nil { + return + } + require.NoError(t, err) + + for _, ev := range evs { + // Mark VGTID event from each shard seen. 
+ if ev.Type == binlogdatapb.VEventType_VGTID { + if !seen[ev.Shard] { + seen[ev.Shard] = true + wg.Done() + } + } + if ev.Type == binlogdatapb.VEventType_ROW || ev.Type == binlogdatapb.VEventType_FIELD { + ch <- ev + } + } + } + }() + + // Wait for VGTID event from all shards + wg.Wait() +} + +func retrieveTransitions(t *testing.T, ch chan *binlogdatapb.VEvent, tableMap map[string][]*querypb.Field, dtMap map[string]string) map[string][]string { + return retrieveTransitionsWithTimeout(t, ch, tableMap, dtMap, 1*time.Second) +} + +func retrieveTransitionsWithTimeout(t *testing.T, ch chan *binlogdatapb.VEvent, tableMap map[string][]*querypb.Field, dtMap map[string]string, timeout time.Duration) map[string][]string { + logTable := make(map[string][]string) + + keepWaiting := true + for keepWaiting { + select { + case re := <-ch: + if re.RowEvent != nil { + shard := re.RowEvent.Shard + tableName := re.RowEvent.TableName + fields, ok := tableMap[tableName] + require.Truef(t, ok, "table %s not found in fields map", tableName) + for _, rc := range re.RowEvent.RowChanges { + logEvent(logTable, dtMap, shard, tableName, fields, rc) + } + } + if re.FieldEvent != nil { + tableMap[re.FieldEvent.TableName] = re.FieldEvent.Fields + } + case <-time.After(timeout): + keepWaiting = false + } + } + return logTable +} + +func logEvent(logTable map[string][]string, dtMap map[string]string, shard string, tbl string, fields []*querypb.Field, rc *binlogdatapb.RowChange) { + key := fmt.Sprintf("%s:%s", tbl, shard) + + var eventType string + var vals []sqltypes.Value + switch { + case rc.Before == nil && rc.After == nil: + panic("do not expect row event with both before and after nil") + case rc.Before == nil: + eventType = "insert" + vals = sqltypes.MakeRowTrusted(fields, rc.After) + case rc.After == nil: + eventType = "delete" + vals = sqltypes.MakeRowTrusted(fields, rc.Before) + default: + eventType = "update" + vals = sqltypes.MakeRowTrusted(fields, rc.After) + } + execFunc, exists := tables[tbl] + if exists { + vals = execFunc(dtMap, vals) + } + logTable[key] = append(logTable[key], fmt.Sprintf("%s:%v", eventType, vals)) +} + +func prettyPrint(v interface{}) string { + b, err := json.MarshalIndent(v, "", " ") + if err != nil { + return fmt.Sprintf("got error marshalling: %v", err) + } + return string(b) +} + +type stmtMapper struct { + stmt map[string]string +} + +var sm = &stmtMapper{stmt: make(map[string]string)} + +func (sm *stmtMapper) reset() { + sm.stmt = make(map[string]string) +} diff --git a/go/test/endtoend/transaction/twopc/metric/main_test.go b/go/test/endtoend/transaction/twopc/metric/main_test.go new file mode 100644 index 00000000000..61a43017ef9 --- /dev/null +++ b/go/test/endtoend/transaction/twopc/metric/main_test.go @@ -0,0 +1,114 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package transaction + +import ( + "context" + _ "embed" + "flag" + "fmt" + "os" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/test/endtoend/cluster" + twopcutil "vitess.io/vitess/go/test/endtoend/transaction/twopc/utils" +) + +var ( + clusterInstance *cluster.LocalProcessCluster + vtParams mysql.ConnParams + vtgateGrpcAddress string + keyspaceName = "ks" + cell = "zone1" + hostname = "localhost" + sidecarDBName = "vt_ks" + + //go:embed schema.sql + SchemaSQL string + + //go:embed vschema.json + VSchema string +) + +func TestMain(m *testing.M) { + flag.Parse() + + exitcode := func() int { + clusterInstance = cluster.NewCluster(cell, hostname) + defer clusterInstance.Teardown() + + // Start topo server + if err := clusterInstance.StartTopo(); err != nil { + return 1 + } + + // Reserve vtGate port in order to pass it to vtTablet + clusterInstance.VtgateGrpcPort = clusterInstance.GetAndReservePort() + + // Set extra args for twopc + clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, + "--transaction_mode", "TWOPC", + "--grpc_use_effective_callerid", + ) + clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, + "--twopc_abandon_age", "1", + "--queryserver-config-transaction-cap", "100", + ) + + // Start keyspace + keyspace := &cluster.Keyspace{ + Name: keyspaceName, + SchemaSQL: SchemaSQL, + VSchema: VSchema, + SidecarDBName: sidecarDBName, + DurabilityPolicy: "semi_sync", + } + if err := clusterInstance.StartKeyspace(*keyspace, []string{"-40", "40-80", "80-"}, 2, false); err != nil { + return 1 + } + + // Start Vtgate + if err := clusterInstance.StartVtgate(); err != nil { + return 1 + } + vtParams = clusterInstance.GetVTParams(keyspaceName) + vtgateGrpcAddress = fmt.Sprintf("%s:%d", clusterInstance.Hostname, clusterInstance.VtgateGrpcPort) + + return m.Run() + }() + os.Exit(exitcode) +} + +func start(t *testing.T) (*mysql.Conn, func()) { + ctx := context.Background() + conn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + cleanup(t) + + return conn, func() { + conn.Close() + cleanup(t) + } +} + +func cleanup(t *testing.T) { + twopcutil.ClearOutTable(t, vtParams, "twopc_user") + twopcutil.ClearOutTable(t, vtParams, "twopc_t1") +} diff --git a/go/test/endtoend/transaction/twopc/stress/main_test.go b/go/test/endtoend/transaction/twopc/stress/main_test.go new file mode 100644 index 00000000000..4da4f86bdff --- /dev/null +++ b/go/test/endtoend/transaction/twopc/stress/main_test.go @@ -0,0 +1,127 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package stress + +import ( + "context" + _ "embed" + "flag" + "fmt" + "os" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/transaction/twopc/utils" +) + +var ( + clusterInstance *cluster.LocalProcessCluster + vtParams mysql.ConnParams + vtgateGrpcAddress string + keyspaceName = "ks" + unshardedKeyspaceName = "uks" + cell = "zone1" + hostname = "localhost" + + //go:embed schema.sql + SchemaSQL string + + //go:embed vschema.json + VSchema string +) + +func TestMain(m *testing.M) { + flag.Parse() + + exitcode := func() int { + clusterInstance = cluster.NewCluster(cell, hostname) + defer clusterInstance.Teardown() + + // Start topo server + if err := clusterInstance.StartTopo(); err != nil { + return 1 + } + + // Reserve vtGate port in order to pass it to vtTablet + clusterInstance.VtgateGrpcPort = clusterInstance.GetAndReservePort() + + // Set extra args for twopc + clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, + "--transaction_mode", "TWOPC", + "--grpc_use_effective_callerid", + "--tablet_refresh_interval", "2s", + ) + clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, + "--twopc_abandon_age", "1", + "--migration_check_interval", "2s", + "--onterm_timeout", "1s", + "--onclose_timeout", "1s", + ) + + // Start keyspace + keyspace := &cluster.Keyspace{ + Name: keyspaceName, + SchemaSQL: SchemaSQL, + VSchema: VSchema, + DurabilityPolicy: "semi_sync", + } + if err := clusterInstance.StartKeyspace(*keyspace, []string{"-40", "40-80", "80-"}, 2, false); err != nil { + return 1 + } + + // Start an unsharded keyspace + unshardedKeyspace := &cluster.Keyspace{ + Name: unshardedKeyspaceName, + SchemaSQL: "", + VSchema: "{}", + DurabilityPolicy: "semi_sync", + } + if err := clusterInstance.StartUnshardedKeyspace(*unshardedKeyspace, 2, false); err != nil { + return 1 + } + + // Start Vtgate + if err := clusterInstance.StartVtgate(); err != nil { + return 1 + } + vtParams = clusterInstance.GetVTParams("") + vtgateGrpcAddress = fmt.Sprintf("%s:%d", clusterInstance.Hostname, clusterInstance.VtgateGrpcPort) + + return m.Run() + }() + os.Exit(exitcode) +} + +func start(t *testing.T) (*mysql.Conn, func()) { + ctx := context.Background() + conn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + cleanup(t) + + return conn, func() { + conn.Close() + cleanup(t) + } +} + +func cleanup(t *testing.T) { + utils.ClearOutTable(t, vtParams, "twopc_t1") + utils.ClearOutTable(t, vtParams, "twopc_settings") +} diff --git a/go/test/endtoend/utils/mysql_test.go b/go/test/endtoend/utils/mysql_test.go index d2b10f1d0a8..c0ba9e8376c 100644 --- a/go/test/endtoend/utils/mysql_test.go +++ b/go/test/endtoend/utils/mysql_test.go @@ -50,7 +50,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) exitCode := func() int { clusterInstance = cluster.NewCluster(cell, "localhost") diff --git a/go/test/endtoend/utils/mysqlvsvitess/main_test.go b/go/test/endtoend/utils/mysqlvsvitess/main_test.go index 8f162fae41d..f064afb895d 100644 --- a/go/test/endtoend/utils/mysqlvsvitess/main_test.go +++ b/go/test/endtoend/utils/mysqlvsvitess/main_test.go @@ -64,7 +64,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) exitCode := func() int { clusterInstance = cluster.NewCluster(cell, "localhost") diff --git a/go/test/endtoend/vault/vault_test.go b/go/test/endtoend/vault/vault_test.go index f8e19c07a0c..aab68159ca3 
100644 --- a/go/test/endtoend/vault/vault_test.go +++ b/go/test/endtoend/vault/vault_test.go @@ -99,7 +99,6 @@ var ( ) func TestVaultAuth(t *testing.T) { - defer cluster.PanicHandler(nil) // Instantiate Vitess Cluster objects and start topo initializeClusterEarly(t) diff --git a/go/test/endtoend/versionupgrade/upgrade_test.go b/go/test/endtoend/versionupgrade/upgrade_test.go index 181b5dfc9ad..48e552c3a7c 100644 --- a/go/test/endtoend/versionupgrade/upgrade_test.go +++ b/go/test/endtoend/versionupgrade/upgrade_test.go @@ -72,7 +72,6 @@ var ( // TestMain is the main entry point func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode, err := func() (int, error) { @@ -131,12 +130,10 @@ func TestMain(m *testing.M) { } func TestShards(t *testing.T) { - defer cluster.PanicHandler(t) assert.Equal(t, 2, len(clusterInstance.Keyspaces[0].Shards)) } func TestDeploySchema(t *testing.T) { - defer cluster.PanicHandler(t) if clusterInstance.ReusingVTDATAROOT { // we assume data is already deployed @@ -163,7 +160,6 @@ func TestDeploySchema(t *testing.T) { } func TestTablesExist(t *testing.T) { - defer cluster.PanicHandler(t) checkTables(t, "", totalTableCount) } diff --git a/go/test/endtoend/vtadmin/main_test.go b/go/test/endtoend/vtadmin/main_test.go new file mode 100644 index 00000000000..9233cd5b0aa --- /dev/null +++ b/go/test/endtoend/vtadmin/main_test.go @@ -0,0 +1,100 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vtadmin + +import ( + _ "embed" + "flag" + "os" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/test/endtoend/cluster" +) + +var ( + clusterInstance *cluster.LocalProcessCluster + uks = "uks" + cell = "test_misc" + + uschemaSQL = ` +create table u_a +( + id bigint, + a bigint, + primary key (id) +) Engine = InnoDB; + +create table u_b +( + id bigint, + b varchar(50), + primary key (id) +) Engine = InnoDB; + +` +) + +func TestMain(m *testing.M) { + flag.Parse() + + exitCode := func() int { + clusterInstance = cluster.NewCluster(cell, "localhost") + defer clusterInstance.Teardown() + + // Start topo server + err := clusterInstance.StartTopo() + if err != nil { + return 1 + } + + // Start Unsharded keyspace + ukeyspace := &cluster.Keyspace{ + Name: uks, + SchemaSQL: uschemaSQL, + } + err = clusterInstance.StartUnshardedKeyspace(*ukeyspace, 0, false) + if err != nil { + return 1 + } + + err = clusterInstance.StartVtgate() + if err != nil { + return 1 + } + + clusterInstance.NewVTAdminProcess() + err = clusterInstance.VtadminProcess.Setup() + if err != nil { + return 1 + } + + return m.Run() + }() + os.Exit(exitCode) +} + +// TestVtadminAPIs tests the vtadmin APIs. 
+func TestVtadminAPIs(t *testing.T) { + + // Test the vtadmin APIs + t.Run("keyspaces api", func(t *testing.T) { + resp := clusterInstance.VtadminProcess.MakeAPICallRetry(t, "api/keyspaces") + require.Contains(t, resp, uks) + }) +} diff --git a/go/test/endtoend/vtgate/concurrentdml/main_test.go b/go/test/endtoend/vtgate/concurrentdml/main_test.go index 6ee5619b742..734962b0d33 100644 --- a/go/test/endtoend/vtgate/concurrentdml/main_test.go +++ b/go/test/endtoend/vtgate/concurrentdml/main_test.go @@ -66,7 +66,6 @@ INSERT INTO t1_seq (id, next_id, cache) values(0, 1, 1000); ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -108,7 +107,6 @@ func TestMain(m *testing.M) { } func TestInsertIgnoreOnLookupUniqueVindex(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -137,7 +135,6 @@ func TestInsertIgnoreOnLookupUniqueVindex(t *testing.T) { func TestOpenTxBlocksInSerial(t *testing.T) { t.Skip("Update and Insert in same transaction does not work with the unique consistent lookup having same value.") - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -169,7 +166,6 @@ func TestOpenTxBlocksInSerial(t *testing.T) { func TestOpenTxBlocksInConcurrent(t *testing.T) { t.Skip("Update and Insert in same transaction does not work with the unique consistent lookup having same value.") - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -207,7 +203,6 @@ func TestOpenTxBlocksInConcurrent(t *testing.T) { } func TestUpdateLookupUniqueVindex(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", diff --git a/go/test/endtoend/vtgate/connectiondrain/main_test.go b/go/test/endtoend/vtgate/connectiondrain/main_test.go new file mode 100644 index 00000000000..6257baf8e40 --- /dev/null +++ b/go/test/endtoend/vtgate/connectiondrain/main_test.go @@ -0,0 +1,185 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package connectiondrain + +import ( + "context" + _ "embed" + "flag" + "os" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/utils" +) + +var ( + keyspaceName = "ks" + cell = "zone-1" + + //go:embed schema.sql + schemaSQL string +) + +func TestMain(m *testing.M) { + flag.Parse() + os.Exit(m.Run()) +} + +func setupCluster(t *testing.T) (*cluster.LocalProcessCluster, mysql.ConnParams) { + clusterInstance := cluster.NewCluster(cell, "localhost") + + // Start topo server + err := clusterInstance.StartTopo() + require.NoError(t, err) + + // Start keyspace + keyspace := &cluster.Keyspace{ + Name: keyspaceName, + SchemaSQL: schemaSQL, + } + err = clusterInstance.StartUnshardedKeyspace(*keyspace, 0, false) + require.NoError(t, err) + + // Start vtgate + clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, "--mysql-server-drain-onterm", "--onterm_timeout", "30s") + err = clusterInstance.StartVtgate() + require.NoError(t, err) + + vtParams := clusterInstance.GetVTParams(keyspaceName) + return clusterInstance, vtParams +} + +func start(t *testing.T, vtParams mysql.ConnParams) (*mysql.Conn, func()) { + vtConn, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(t, err) + + deleteAll := func() { + _, _ = utils.ExecAllowError(t, vtConn, "set workload = oltp") + + tables := []string{"t1"} + for _, table := range tables { + _, _ = utils.ExecAllowError(t, vtConn, "delete from "+table) + } + } + + deleteAll() + + return vtConn, func() { + deleteAll() + vtConn.Close() + } +} + +func TestConnectionDrainCloseConnections(t *testing.T) { + clusterInstance, vtParams := setupCluster(t) + defer clusterInstance.Teardown() + + vtConn, closer := start(t, vtParams) + defer closer() + + // Create a second connection, this connection will be used to create a transaction. + vtConn2, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(t, err) + + // Start the transaction with the second connection + _, err = vtConn2.ExecuteFetch("BEGIN", 1, false) + require.NoError(t, err) + _, err = vtConn2.ExecuteFetch("select id1 from t1", 1, false) + require.NoError(t, err) + + _, err = vtConn.ExecuteFetch("select id1 from t1", 1, false) + require.NoError(t, err) + + // Tearing down vtgate here, from there on vtConn should still be able to conclude in-flight transaction and + // execute queries with idle connections. However, no new connections are allowed. + err = clusterInstance.VtgateProcess.Terminate() + require.NoError(t, err) + + // Give enough time to vtgate to receive and start processing the SIGTERM signal + time.Sleep(2 * time.Second) + + // Create a third connection, this connection should not be allowed. + // Set a connection timeout to 1s otherwise the connection will take forever + // and eventually vtgate will reach the --onterm_timeout. 
+ vtParams.ConnectTimeoutMs = 1000 + defer func() { + vtParams.ConnectTimeoutMs = 0 + }() + _, err = mysql.Connect(context.Background(), &vtParams) + require.Error(t, err) + + // Idle connections should be allowed to execute queries until they are drained + _, err = vtConn.ExecuteFetch("select id1 from t1", 1, false) + require.NoError(t, err) + + // Finish the transaction + _, err = vtConn2.ExecuteFetch("select id1 from t1", 1, false) + require.NoError(t, err) + _, err = vtConn2.ExecuteFetch("COMMIT", 1, false) + require.NoError(t, err) + vtConn2.Close() + + // vtgate should still be running + require.False(t, clusterInstance.VtgateProcess.IsShutdown()) + + // This connection should still be allowed + _, err = vtConn.ExecuteFetch("select id1 from t1", 1, false) + require.NoError(t, err) + vtConn.Close() + + // Give enough time for vtgate to finish all the onterm hooks without reaching the 30s of --onterm_timeout + time.Sleep(10 * time.Second) + + // By now the vtgate should have shut down on its own and without reaching --onterm_timeout + require.True(t, clusterInstance.VtgateProcess.IsShutdown()) +} + +func TestConnectionDrainOnTermTimeout(t *testing.T) { + clusterInstance, vtParams := setupCluster(t) + defer clusterInstance.Teardown() + + // Connect to vtgate again; this should work + vtConn, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(t, err) + vtConn2, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(t, err) + + defer func() { + vtConn.Close() + vtConn2.Close() + }() + + // Tearing down vtgate here; we want to reach the onterm_timeout of 30s + err = clusterInstance.VtgateProcess.Terminate() + require.NoError(t, err) + + // Run a busy query that returns only after the onterm_timeout is reached; this should fail when we reach the timeout + _, err = vtConn.ExecuteFetch("select sleep(40)", 1, false) + require.Error(t, err) + + // Running a query after we have reached the onterm_timeout should fail + _, err = vtConn2.ExecuteFetch("select id from t1", 1, false) + require.Error(t, err) + + // By now vtgate will have shut down because it reached its onterm_timeout, despite idle connections still being open + require.True(t, clusterInstance.VtgateProcess.IsShutdown()) +} diff --git a/go/test/endtoend/vtgate/consolidator/main_test.go b/go/test/endtoend/vtgate/consolidator/main_test.go index 021db7e513e..0d5eae3eca9 100644 --- a/go/test/endtoend/vtgate/consolidator/main_test.go +++ b/go/test/endtoend/vtgate/consolidator/main_test.go @@ -65,7 +65,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/createdb_plugin/main_test.go b/go/test/endtoend/vtgate/createdb_plugin/main_test.go index 5bfec3890b5..e2925bf928d 100644 --- a/go/test/endtoend/vtgate/createdb_plugin/main_test.go +++ b/go/test/endtoend/vtgate/createdb_plugin/main_test.go @@ -43,7 +43,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -81,7 +80,6 @@ func TestMain(m *testing.M) { } func TestDBDDLPlugin(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", diff --git a/go/test/endtoend/vtgate/errors_as_warnings/main_test.go b/go/test/endtoend/vtgate/errors_as_warnings/main_test.go index 71f4a2353f7..374a92f395f 100644 --- a/go/test/endtoend/vtgate/errors_as_warnings/main_test.go +++ b/go/test/endtoend/vtgate/errors_as_warnings/main_test.go @@ -66,7 +66,6
@@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/foreignkey/main_test.go b/go/test/endtoend/vtgate/foreignkey/main_test.go index b4d610785b5..fe418c1c0ea 100644 --- a/go/test/endtoend/vtgate/foreignkey/main_test.go +++ b/go/test/endtoend/vtgate/foreignkey/main_test.go @@ -98,7 +98,6 @@ type fkReference struct { } func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -197,7 +196,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go b/go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go index b9240f46605..9a65491261e 100644 --- a/go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go +++ b/go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go @@ -309,7 +309,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode, err := func() (int, error) { @@ -627,7 +626,6 @@ func ExecuteFKTest(t *testing.T, tcase *testCase) { } func TestStressFK(t *testing.T) { - defer cluster.PanicHandler(t) t.Run("validate replication health", func(t *testing.T) { validateReplicationIsHealthy(t, replicaNoFK) diff --git a/go/test/endtoend/vtgate/gen4/column_name_test.go b/go/test/endtoend/vtgate/gen4/column_name_test.go index d23c03c9f6b..0f5a83a5092 100644 --- a/go/test/endtoend/vtgate/gen4/column_name_test.go +++ b/go/test/endtoend/vtgate/gen4/column_name_test.go @@ -26,11 +26,9 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" ) func TestColumnNames(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) diff --git a/go/test/endtoend/vtgate/gen4/main_test.go b/go/test/endtoend/vtgate/gen4/main_test.go index 4c94e8e2ec8..e8280b3aa06 100644 --- a/go/test/endtoend/vtgate/gen4/main_test.go +++ b/go/test/endtoend/vtgate/gen4/main_test.go @@ -64,7 +64,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -152,6 +151,5 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/gen4/system_schema_test.go b/go/test/endtoend/vtgate/gen4/system_schema_test.go index fc4983935e9..d01d4d972a1 100644 --- a/go/test/endtoend/vtgate/gen4/system_schema_test.go +++ b/go/test/endtoend/vtgate/gen4/system_schema_test.go @@ -28,11 +28,9 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" ) func TestDbNameOverride(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.Nil(t, err) @@ -55,7 +53,6 @@ func TestDbNameOverride(t *testing.T) { } func TestInformationSchemaQuery(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) @@ -90,7 +87,6 @@ func assertSingleRowIsReturned(t *testing.T, conn *mysql.Conn, predicate string, } func TestInformationSchemaWithSubquery(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) @@ -101,7 +97,6 @@ func 
TestInformationSchemaWithSubquery(t *testing.T) { } func TestInformationSchemaQueryGetsRoutedToTheRightTableAndKeyspace(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) @@ -113,7 +108,6 @@ func TestInformationSchemaQueryGetsRoutedToTheRightTableAndKeyspace(t *testing.T } func TestFKConstraintUsingInformationSchema(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) @@ -131,7 +125,6 @@ func TestFKConstraintUsingInformationSchema(t *testing.T) { } func TestConnectWithSystemSchema(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() for _, dbname := range []string{"information_schema", "mysql", "performance_schema", "sys"} { connParams := vtParams @@ -144,7 +137,6 @@ func TestConnectWithSystemSchema(t *testing.T) { } func TestUseSystemSchema(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) @@ -156,7 +148,6 @@ func TestUseSystemSchema(t *testing.T) { } func TestSystemSchemaQueryWithoutQualifier(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) @@ -191,7 +182,6 @@ func TestSystemSchemaQueryWithoutQualifier(t *testing.T) { } func TestMultipleSchemaPredicates(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) @@ -217,7 +207,6 @@ func TestMultipleSchemaPredicates(t *testing.T) { } func TestQuerySystemTables(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) diff --git a/go/test/endtoend/vtgate/godriver/main_test.go b/go/test/endtoend/vtgate/godriver/main_test.go index 587c189d2ea..91605394cf1 100644 --- a/go/test/endtoend/vtgate/godriver/main_test.go +++ b/go/test/endtoend/vtgate/godriver/main_test.go @@ -86,7 +86,6 @@ create table my_message( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -125,7 +124,6 @@ func TestMain(m *testing.M) { } func TestStreamMessaging(t *testing.T) { - defer cluster.PanicHandler(t) cnf := vitessdriver.Configuration{ Protocol: "grpc", diff --git a/go/test/endtoend/vtgate/grpc_api/main_test.go b/go/test/endtoend/vtgate/grpc_api/main_test.go index 3c8605f79a0..87d30f4ce26 100644 --- a/go/test/endtoend/vtgate/grpc_api/main_test.go +++ b/go/test/endtoend/vtgate/grpc_api/main_test.go @@ -75,7 +75,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode := func() int { diff --git a/go/test/endtoend/vtgate/keyspace_watches/keyspace_watch_test.go b/go/test/endtoend/vtgate/keyspace_watches/keyspace_watch_test.go index 4971d03060b..1eb31663577 100644 --- a/go/test/endtoend/vtgate/keyspace_watches/keyspace_watch_test.go +++ b/go/test/endtoend/vtgate/keyspace_watches/keyspace_watch_test.go @@ -117,7 +117,6 @@ func createCluster(extraVTGateArgs []string) (*cluster.LocalProcessCluster, int) } func TestRoutingWithKeyspacesToWatch(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance, exitCode := createCluster(nil) defer clusterInstance.Teardown() @@ -141,7 +140,6 @@ func TestRoutingWithKeyspacesToWatch(t *testing.T) { } func TestVSchemaDDLWithKeyspacesToWatch(t *testing.T) { - defer cluster.PanicHandler(t) 
extraVTGateArgs := []string{ "--vschema_ddl_authorized_users", "%", diff --git a/go/test/endtoend/vtgate/main_test.go b/go/test/endtoend/vtgate/main_test.go index b276508f269..6c43a70632b 100644 --- a/go/test/endtoend/vtgate/main_test.go +++ b/go/test/endtoend/vtgate/main_test.go @@ -54,7 +54,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { clusterInstance = cluster.NewCluster(Cell, "localhost") @@ -122,6 +121,5 @@ func start(t *testing.T) (*mysql.Conn, func()) { return conn, func() { deleteAll() conn.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/misc_test.go b/go/test/endtoend/vtgate/misc_test.go index 3735c8c59ed..0a5416a1be2 100644 --- a/go/test/endtoend/vtgate/misc_test.go +++ b/go/test/endtoend/vtgate/misc_test.go @@ -28,7 +28,6 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/sqlerror" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) @@ -780,7 +779,6 @@ func TestJoinWithMergedRouteWithPredicate(t *testing.T) { func TestRowCountExceed(t *testing.T) { conn, _ := start(t) defer func() { - cluster.PanicHandler(t) // needs special delete logic as it exceeds row count. for i := 50; i <= 300; i += 50 { utils.Exec(t, conn, fmt.Sprintf("delete from t1 where id1 < %d", i)) diff --git a/go/test/endtoend/vtgate/mysql80/main_test.go b/go/test/endtoend/vtgate/mysql80/main_test.go index 4f5897d1f59..b970fb66b12 100644 --- a/go/test/endtoend/vtgate/mysql80/main_test.go +++ b/go/test/endtoend/vtgate/mysql80/main_test.go @@ -35,7 +35,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/mysql80/misc_test.go b/go/test/endtoend/vtgate/mysql80/misc_test.go index b29eb13ecdc..5132bf87aba 100644 --- a/go/test/endtoend/vtgate/mysql80/misc_test.go +++ b/go/test/endtoend/vtgate/mysql80/misc_test.go @@ -23,15 +23,12 @@ import ( "vitess.io/vitess/go/test/endtoend/utils" - "vitess.io/vitess/go/test/endtoend/cluster" - "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" ) func TestFunctionInDefault(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) @@ -223,7 +220,6 @@ func BenchmarkReservedConnWhenSettingSysVar(b *testing.B) { } func TestJsonFunctions(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) diff --git a/go/test/endtoend/vtgate/partialfailure/main_test.go b/go/test/endtoend/vtgate/partialfailure/main_test.go index 9e39e7b5dd5..d5b6a639c68 100644 --- a/go/test/endtoend/vtgate/partialfailure/main_test.go +++ b/go/test/endtoend/vtgate/partialfailure/main_test.go @@ -45,7 +45,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/plan_tests/main_test.go b/go/test/endtoend/vtgate/plan_tests/main_test.go new file mode 100644 index 00000000000..d3915af0c8d --- /dev/null +++ b/go/test/endtoend/vtgate/plan_tests/main_test.go @@ -0,0 +1,230 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package plan_tests + +import ( + "encoding/json" + "fmt" + "os" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/utils" + "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/planbuilder" +) + +var ( + clusterInstance *cluster.LocalProcessCluster + vtParams mysql.ConnParams + mysqlParams mysql.ConnParams + uks = "main" + sks = "user" + cell = "plantests" +) + +func TestMain(m *testing.M) { + vschema := readFile("vschemas/schema.json") + userVs := extractUserKS(vschema) + mainVs := extractMainKS(vschema) + sSQL := readFile("schemas/user.sql") + uSQL := readFile("schemas/main.sql") + + exitCode := func() int { + clusterInstance = cluster.NewCluster(cell, "localhost") + defer clusterInstance.Teardown() + + // Start topo server + err := clusterInstance.StartTopo() + if err != nil { + fmt.Println(err.Error()) + return 1 + } + + // Start unsharded keyspace + uKeyspace := &cluster.Keyspace{ + Name: uks, + SchemaSQL: uSQL, + VSchema: mainVs, + } + err = clusterInstance.StartUnshardedKeyspace(*uKeyspace, 0, false) + if err != nil { + fmt.Println(err.Error()) + return 1 + } + + // Start sharded keyspace + skeyspace := &cluster.Keyspace{ + Name: sks, + SchemaSQL: sSQL, + VSchema: userVs, + } + err = clusterInstance.StartKeyspace(*skeyspace, []string{"-80", "80-"}, 0, false) + if err != nil { + fmt.Println(err.Error()) + return 1 + } + + // TODO: (@GuptaManan100/@systay): Also run the tests with normalizer on. + clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, + "--normalize_queries=false", + "--schema_change_signal=false", + ) + + // Start vtgate + err = clusterInstance.StartVtgate() + if err != nil { + fmt.Println(err.Error()) + return 1 + } + + vtParams = clusterInstance.GetVTParams(sks) + + // create mysql instance and connection parameters + conn, closer, err := utils.NewMySQL(clusterInstance, sks, sSQL, uSQL) + if err != nil { + fmt.Println(err.Error()) + return 1 + } + defer closer() + mysqlParams = conn + + return m.Run() + }() + os.Exit(exitCode) +} + +func readFile(filename string) string { + schema, err := os.ReadFile(locateFile(filename)) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + return string(schema) +} + +func start(t *testing.T) (utils.MySQLCompare, func()) { + mcmp, err := utils.NewMySQLCompare(t, vtParams, mysqlParams) + require.NoError(t, err) + return mcmp, func() { + mcmp.Close() + } +} + +func readJSONTests(filename string) []planbuilder.PlanTest { + var output []planbuilder.PlanTest + file, err := os.Open(locateFile(filename)) + if err != nil { + panic(err) + } + defer file.Close() + dec := json.NewDecoder(file) + err = dec.Decode(&output) + if err != nil { + panic(err) + } + return output +} + +func locateFile(name string) string { + return "../../../../vt/vtgate/planbuilder/testdata/" + name +} + +// verifyTestExpectations verifies the expectations of the test. +func verifyTestExpectations(t *testing.T, pd engine.PrimitiveDescription, test planbuilder.PlanTest) { + // 1. 
Verify that the Join primitive sees at least 1 row on the left side.
+	engine.WalkPrimitiveDescription(pd, func(description engine.PrimitiveDescription) {
+		if description.OperatorType == "Join" {
+			require.NotZero(t, description.Inputs[0].RowsReceived[0])
+		}
+	})
+
+	// 2. Verify that the plan description matches the expected plan description.
+	planBytes, err := test.Plan.MarshalJSON()
+	require.NoError(t, err)
+	mp := make(map[string]any)
+	err = json.Unmarshal(planBytes, &mp)
+	require.NoError(t, err)
+	pdExpected, err := engine.PrimitiveDescriptionFromMap(mp["Instructions"].(map[string]any))
+	require.NoError(t, err)
+	require.Empty(t, pdExpected.Equals(pd), "Expected: %v\nGot: %v", string(planBytes), pd)
+}
+
+func extractUserKS(jsonString string) string {
+	var result map[string]any
+	if err := json.Unmarshal([]byte(jsonString), &result); err != nil {
+		panic(err.Error())
+	}
+
+	keyspaces, ok := result["keyspaces"].(map[string]any)
+	if !ok {
+		panic("Keyspaces not found")
+	}
+
+	user, ok := keyspaces["user"].(map[string]any)
+	if !ok {
+		panic("User keyspace not found")
+	}
+
+	tables, ok := user["tables"].(map[string]any)
+	if !ok {
+		panic("Tables not found")
+	}
+
+	userTbl, ok := tables["user"].(map[string]any)
+	if !ok {
+		panic("User table not found")
+	}
+
+	delete(userTbl, "auto_increment") // TODO: we should have an unsharded keyspace where this could live
+
+	// Marshal the inner part back to a JSON string
+	userJson, err := json.Marshal(user)
+	if err != nil {
+		panic(err.Error())
+	}
+
+	return string(userJson)
+}
+
+func extractMainKS(jsonString string) string {
+	var result map[string]any
+	if err := json.Unmarshal([]byte(jsonString), &result); err != nil {
+		panic(err.Error())
+	}
+
+	keyspaces, ok := result["keyspaces"].(map[string]any)
+	if !ok {
+		panic("Keyspaces not found")
+	}
+
+	main, ok := keyspaces["main"].(map[string]any)
+	if !ok {
+		panic("main keyspace not found")
+	}
+
+	// Marshal the inner part back to a JSON string
+	mainJson, err := json.Marshal(main)
+	if err != nil {
+		panic(err.Error())
+	}
+
+	return string(mainJson)
+}
diff --git a/go/test/endtoend/vtgate/prefixfanout/main_test.go b/go/test/endtoend/vtgate/prefixfanout/main_test.go
index 928808fd48e..a96ee4ce7f5 100644
--- a/go/test/endtoend/vtgate/prefixfanout/main_test.go
+++ b/go/test/endtoend/vtgate/prefixfanout/main_test.go
@@ -109,7 +109,6 @@ PRIMARY KEY (c1)
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitCode := func() int {
@@ -158,7 +157,6 @@ func TestMain(m *testing.M) {
 }
 
 func TestCFCPrefixQueryNoHash(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	ctx := context.Background()
 	vtParams := clusterInstance.GetVTParams(sKs)
 	conn, err := mysql.Connect(ctx, &vtParams)
@@ -196,7 +194,6 @@ func TestCFCPrefixQueryNoHash(t *testing.T) {
 }
 
 func TestCFCPrefixQueryWithHash(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	ctx := context.Background()
 
 	vtParams := clusterInstance.GetVTParams(sKsMD5)
@@ -239,7 +236,6 @@ func TestCFCPrefixQueryWithHash(t *testing.T) {
 }
 
 func TestCFCInsert(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	ctx := context.Background()
 
 	vtParams := clusterInstance.GetVTParams(sKs)
diff --git a/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go b/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go
index 2e46c1071a2..8519e4510f9 100644
--- a/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go
+++ b/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go
@@ -27,7 +27,6 @@ import (
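// A sketch (not part of this patch) of how the plan_tests helpers above are meant
// to compose. The testdata file name and the use of a vexplain trace to recover the
// executed plan are assumptions, not code from this change:
//
//	func TestSelectCases(t *testing.T) {
//		mcmp, closer := start(t)
//		defer closer()
//		for _, tc := range readJSONTests("select_cases.json") {
//			mcmp.Exec(tc.Query)
//			// Decode the executed plan into an engine.PrimitiveDescription pd,
//			// e.g. from the output of a `vexplain trace` of tc.Query, then:
//			// verifyTestExpectations(t, pd, tc)
//		}
//	}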
"github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) @@ -66,7 +65,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/queries/aggregation/main_test.go b/go/test/endtoend/vtgate/queries/aggregation/main_test.go index 02013a9b0e2..bd1c1aa3b7d 100644 --- a/go/test/endtoend/vtgate/queries/aggregation/main_test.go +++ b/go/test/endtoend/vtgate/queries/aggregation/main_test.go @@ -44,7 +44,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/benchmark/main_test.go b/go/test/endtoend/vtgate/queries/benchmark/main_test.go index 6978d0b9428..a4e4f4400a3 100644 --- a/go/test/endtoend/vtgate/queries/benchmark/main_test.go +++ b/go/test/endtoend/vtgate/queries/benchmark/main_test.go @@ -65,7 +65,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -119,6 +118,5 @@ func start(b *testing.B) (*mysql.Conn, func()) { return conn, func() { deleteAll() conn.Close() - cluster.PanicHandler(b) } } diff --git a/go/test/endtoend/vtgate/queries/derived/derived_test.go b/go/test/endtoend/vtgate/queries/derived/derived_test.go index cb106564b2f..fe467f31c20 100644 --- a/go/test/endtoend/vtgate/queries/derived/derived_test.go +++ b/go/test/endtoend/vtgate/queries/derived/derived_test.go @@ -21,7 +21,6 @@ import ( "github.com/stretchr/testify/require" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) @@ -44,7 +43,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/queries/derived/main_test.go b/go/test/endtoend/vtgate/queries/derived/main_test.go index 3b44811f95c..0bab24a966a 100644 --- a/go/test/endtoend/vtgate/queries/derived/main_test.go +++ b/go/test/endtoend/vtgate/queries/derived/main_test.go @@ -44,7 +44,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/dml/main_test.go b/go/test/endtoend/vtgate/queries/dml/main_test.go index 0c4d58aa614..bc72acc1159 100644 --- a/go/test/endtoend/vtgate/queries/dml/main_test.go +++ b/go/test/endtoend/vtgate/queries/dml/main_test.go @@ -66,7 +66,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -148,6 +147,5 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/queries/foundrows/found_rows_test.go b/go/test/endtoend/vtgate/queries/foundrows/found_rows_test.go index f52e2eff532..e50e9210c55 100644 --- a/go/test/endtoend/vtgate/queries/foundrows/found_rows_test.go +++ b/go/test/endtoend/vtgate/queries/foundrows/found_rows_test.go @@ -21,12 +21,10 @@ import ( "github.com/stretchr/testify/require" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) func TestFoundRows(t *testing.T) { - defer cluster.PanicHandler(t) mcmp, err := utils.NewMySQLCompare(t, vtParams, mysqlParams) require.NoError(t, err) defer mcmp.Close() diff --git a/go/test/endtoend/vtgate/queries/foundrows/main_test.go 
b/go/test/endtoend/vtgate/queries/foundrows/main_test.go index 8f992863008..248b6cd9434 100644 --- a/go/test/endtoend/vtgate/queries/foundrows/main_test.go +++ b/go/test/endtoend/vtgate/queries/foundrows/main_test.go @@ -46,7 +46,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go b/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go index c5568b2db49..e158bd96e33 100644 --- a/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go +++ b/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go @@ -25,7 +25,6 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) @@ -47,7 +46,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } @@ -114,7 +112,6 @@ func TestFKConstraintUsingInformationSchema(t *testing.T) { } func TestConnectWithSystemSchema(t *testing.T) { - defer cluster.PanicHandler(t) for _, dbname := range []string{"information_schema", "mysql", "performance_schema", "sys"} { vtConnParams := vtParams vtConnParams.DbName = dbname diff --git a/go/test/endtoend/vtgate/queries/informationschema/main_test.go b/go/test/endtoend/vtgate/queries/informationschema/main_test.go index 3696617281e..76d5f44ebae 100644 --- a/go/test/endtoend/vtgate/queries/informationschema/main_test.go +++ b/go/test/endtoend/vtgate/queries/informationschema/main_test.go @@ -52,7 +52,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/kill/main_test.go b/go/test/endtoend/vtgate/queries/kill/main_test.go index 99608030246..61ddec43589 100644 --- a/go/test/endtoend/vtgate/queries/kill/main_test.go +++ b/go/test/endtoend/vtgate/queries/kill/main_test.go @@ -50,7 +50,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/lookup_queries/main_test.go b/go/test/endtoend/vtgate/queries/lookup_queries/main_test.go index a587f124762..818b834511a 100644 --- a/go/test/endtoend/vtgate/queries/lookup_queries/main_test.go +++ b/go/test/endtoend/vtgate/queries/lookup_queries/main_test.go @@ -49,7 +49,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -119,7 +118,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/queries/misc/main_test.go b/go/test/endtoend/vtgate/queries/misc/main_test.go index f20072031a8..ee9be542634 100644 --- a/go/test/endtoend/vtgate/queries/misc/main_test.go +++ b/go/test/endtoend/vtgate/queries/misc/main_test.go @@ -48,7 +48,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/misc/misc_test.go b/go/test/endtoend/vtgate/queries/misc/misc_test.go index 46df8799528..bee98096fab 100644 --- a/go/test/endtoend/vtgate/queries/misc/misc_test.go +++ b/go/test/endtoend/vtgate/queries/misc/misc_test.go @@ -28,7 +28,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - 
"vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) @@ -48,7 +47,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/queries/no_scatter/main_test.go b/go/test/endtoend/vtgate/queries/no_scatter/main_test.go index c4b0974c24b..a1478dcd2ac 100644 --- a/go/test/endtoend/vtgate/queries/no_scatter/main_test.go +++ b/go/test/endtoend/vtgate/queries/no_scatter/main_test.go @@ -40,7 +40,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/no_scatter/queries_test.go b/go/test/endtoend/vtgate/queries/no_scatter/queries_test.go index 7bf702afc15..b302a0f4dc7 100644 --- a/go/test/endtoend/vtgate/queries/no_scatter/queries_test.go +++ b/go/test/endtoend/vtgate/queries/no_scatter/queries_test.go @@ -23,7 +23,6 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) @@ -43,7 +42,6 @@ func start(t *testing.T) (*mysql.Conn, func()) { return vtConn, func() { deleteAll() vtConn.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/queries/normalize/main_test.go b/go/test/endtoend/vtgate/queries/normalize/main_test.go index 8f4d97209dd..8c75d38284d 100644 --- a/go/test/endtoend/vtgate/queries/normalize/main_test.go +++ b/go/test/endtoend/vtgate/queries/normalize/main_test.go @@ -39,7 +39,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/orderby/main_test.go b/go/test/endtoend/vtgate/queries/orderby/main_test.go index 9f18377ee3f..353745722b7 100644 --- a/go/test/endtoend/vtgate/queries/orderby/main_test.go +++ b/go/test/endtoend/vtgate/queries/orderby/main_test.go @@ -44,7 +44,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/orderby/orderby_test.go b/go/test/endtoend/vtgate/queries/orderby/orderby_test.go index c36b52a4e6a..716f01fb5c7 100644 --- a/go/test/endtoend/vtgate/queries/orderby/orderby_test.go +++ b/go/test/endtoend/vtgate/queries/orderby/orderby_test.go @@ -22,8 +22,6 @@ import ( "vitess.io/vitess/go/test/endtoend/utils" "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/test/endtoend/cluster" ) func start(t *testing.T) (utils.MySQLCompare, func()) { @@ -44,7 +42,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/queries/orderby/without_schematracker/main_test.go b/go/test/endtoend/vtgate/queries/orderby/without_schematracker/main_test.go index 00221e9c9f3..373c1327074 100644 --- a/go/test/endtoend/vtgate/queries/orderby/without_schematracker/main_test.go +++ b/go/test/endtoend/vtgate/queries/orderby/without_schematracker/main_test.go @@ -44,7 +44,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/orderby/without_schematracker/orderby_test.go b/go/test/endtoend/vtgate/queries/orderby/without_schematracker/orderby_test.go index a20c7ad54c6..956815d2a0d 100644 --- a/go/test/endtoend/vtgate/queries/orderby/without_schematracker/orderby_test.go +++ 
b/go/test/endtoend/vtgate/queries/orderby/without_schematracker/orderby_test.go
@@ -22,8 +22,6 @@ import (
 	"vitess.io/vitess/go/test/endtoend/utils"
 
 	"github.com/stretchr/testify/require"
-
-	"vitess.io/vitess/go/test/endtoend/cluster"
 )
 
 func start(t *testing.T) (utils.MySQLCompare, func()) {
@@ -44,7 +42,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) {
 	return mcmp, func() {
 		deleteAll()
 		mcmp.Close()
-		cluster.PanicHandler(t)
 	}
 }
diff --git a/go/test/endtoend/vtgate/queries/random/main_test.go b/go/test/endtoend/vtgate/queries/random/main_test.go
index e3256f60796..85c8840924d 100644
--- a/go/test/endtoend/vtgate/queries/random/main_test.go
+++ b/go/test/endtoend/vtgate/queries/random/main_test.go
@@ -44,7 +44,6 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitCode := func() int {
diff --git a/go/test/endtoend/vtgate/queries/random/random_test.go b/go/test/endtoend/vtgate/queries/random/random_test.go
index 2d210ee7f99..20c7934d91f 100644
--- a/go/test/endtoend/vtgate/queries/random/random_test.go
+++ b/go/test/endtoend/vtgate/queries/random/random_test.go
@@ -27,7 +27,6 @@ import (
 
 	"github.com/stretchr/testify/require"
 
-	"vitess.io/vitess/go/test/endtoend/cluster"
 	"vitess.io/vitess/go/test/endtoend/utils"
 )
@@ -61,7 +60,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) {
 	return mcmp, func() {
 		deleteAll()
 		mcmp.Close()
-		cluster.PanicHandler(t)
 	}
 }
diff --git a/go/test/endtoend/vtgate/queries/reference/main_test.go b/go/test/endtoend/vtgate/queries/reference/main_test.go
index c350038bf6e..03ee429e4c0 100644
--- a/go/test/endtoend/vtgate/queries/reference/main_test.go
+++ b/go/test/endtoend/vtgate/queries/reference/main_test.go
@@ -53,7 +53,6 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitCode := func() int {
diff --git a/go/test/endtoend/vtgate/queries/reference/reference_test.go b/go/test/endtoend/vtgate/queries/reference/reference_test.go
index 0ddc0342991..f38e2ea9b05 100644
--- a/go/test/endtoend/vtgate/queries/reference/reference_test.go
+++ b/go/test/endtoend/vtgate/queries/reference/reference_test.go
@@ -24,8 +24,6 @@ import (
 	"vitess.io/vitess/go/mysql"
 	"vitess.io/vitess/go/test/endtoend/utils"
-
-	"vitess.io/vitess/go/test/endtoend/cluster"
 )
 
 func start(t *testing.T) (*mysql.Conn, func()) {
@@ -35,7 +33,6 @@ func start(t *testing.T) (*mysql.Conn, func()) {
 
 	return vtConn, func() {
 		vtConn.Close()
-		cluster.PanicHandler(t)
 	}
 }
diff --git a/go/test/endtoend/vtgate/queries/subquery/main_test.go b/go/test/endtoend/vtgate/queries/subquery/main_test.go
index 9eaf3b4caa0..bc8580bd38d 100644
--- a/go/test/endtoend/vtgate/queries/subquery/main_test.go
+++ b/go/test/endtoend/vtgate/queries/subquery/main_test.go
@@ -44,7 +44,6 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitCode := func() int {
diff --git a/go/test/endtoend/vtgate/queries/subquery/subquery_test.go b/go/test/endtoend/vtgate/queries/subquery/subquery_test.go
index 29bd256ea51..e68ced57f9c 100644
--- a/go/test/endtoend/vtgate/queries/subquery/subquery_test.go
+++ b/go/test/endtoend/vtgate/queries/subquery/subquery_test.go
@@ -23,7 +23,6 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
-	"vitess.io/vitess/go/test/endtoend/cluster"
 	"vitess.io/vitess/go/test/endtoend/utils"
 )
@@ -45,7 +44,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) {
 	return mcmp, func() {
 		deleteAll()
 		mcmp.Close()
-		cluster.PanicHandler(t)
 	}
 }
diff --git a/go/test/endtoend/vtgate/queries/timeout/main_test.go b/go/test/endtoend/vtgate/queries/timeout/main_test.go
index c265e824e88..c9d7e2487bb 100644
--- a/go/test/endtoend/vtgate/queries/timeout/main_test.go
+++ b/go/test/endtoend/vtgate/queries/timeout/main_test.go
@@ -48,7 +48,6 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitCode := func() int {
diff --git a/go/test/endtoend/vtgate/queries/timeout/timeout_test.go b/go/test/endtoend/vtgate/queries/timeout/timeout_test.go
index 09bbcf8cc96..312b4b3a9f8 100644
--- a/go/test/endtoend/vtgate/queries/timeout/timeout_test.go
+++ b/go/test/endtoend/vtgate/queries/timeout/timeout_test.go
@@ -22,7 +22,6 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
-	"vitess.io/vitess/go/test/endtoend/cluster"
 	"vitess.io/vitess/go/test/endtoend/utils"
 )
@@ -42,7 +41,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) {
 	return mcmp, func() {
 		deleteAll()
 		mcmp.Close()
-		cluster.PanicHandler(t)
 	}
 }
diff --git a/go/test/endtoend/vtgate/queries/tpch/main_test.go b/go/test/endtoend/vtgate/queries/tpch/main_test.go
index 103adb336ab..403ddd510ce 100644
--- a/go/test/endtoend/vtgate/queries/tpch/main_test.go
+++ b/go/test/endtoend/vtgate/queries/tpch/main_test.go
@@ -43,7 +43,6 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitCode := func() int {
diff --git a/go/test/endtoend/vtgate/queries/tpch/tpch_test.go b/go/test/endtoend/vtgate/queries/tpch/tpch_test.go
index c4bf71cafa1..efa322a5e1c 100644
--- a/go/test/endtoend/vtgate/queries/tpch/tpch_test.go
+++ b/go/test/endtoend/vtgate/queries/tpch/tpch_test.go
@@ -21,7 +21,6 @@ import (
 
 	"github.com/stretchr/testify/require"
 
-	"vitess.io/vitess/go/test/endtoend/cluster"
 	"vitess.io/vitess/go/test/endtoend/utils"
 )
@@ -43,7 +42,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) {
 	return mcmp, func() {
 		deleteAll()
 		mcmp.Close()
-		cluster.PanicHandler(t)
 	}
 }
diff --git a/go/test/endtoend/vtgate/queries/union/main_test.go b/go/test/endtoend/vtgate/queries/union/main_test.go
index 06ec07a6c2f..a5f45f84156 100644
--- a/go/test/endtoend/vtgate/queries/union/main_test.go
+++ b/go/test/endtoend/vtgate/queries/union/main_test.go
@@ -43,7 +43,6 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitCode := func() int {
diff --git a/go/test/endtoend/vtgate/queries/union/union_test.go b/go/test/endtoend/vtgate/queries/union/union_test.go
index 03f98950f44..26371af3c87 100644
--- a/go/test/endtoend/vtgate/queries/union/union_test.go
+++ b/go/test/endtoend/vtgate/queries/union/union_test.go
@@ -19,7 +19,6 @@ package union
 import (
 	"testing"
 
-	"vitess.io/vitess/go/test/endtoend/cluster"
 	"vitess.io/vitess/go/test/endtoend/utils"
 
 	"github.com/stretchr/testify/assert"
@@ -44,7 +43,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) {
 	return mcmp, func() {
 		deleteAll()
 		mcmp.Close()
-		cluster.PanicHandler(t)
 	}
 }
diff --git a/go/test/endtoend/vtgate/queries/vexplain/main_test.go b/go/test/endtoend/vtgate/queries/vexplain/main_test.go
index c1c401bc573..96b6a1c41d1 100644
--- a/go/test/endtoend/vtgate/queries/vexplain/main_test.go
+++ b/go/test/endtoend/vtgate/queries/vexplain/main_test.go
@@ -43,7 +43,6 @@ var (
 )
 
 func TestMain(m *testing.M) {
- defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/vexplain/vexplain_test.go b/go/test/endtoend/vtgate/queries/vexplain/vexplain_test.go index ed43d57b578..f95cb200934 100644 --- a/go/test/endtoend/vtgate/queries/vexplain/vexplain_test.go +++ b/go/test/endtoend/vtgate/queries/vexplain/vexplain_test.go @@ -23,7 +23,6 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/utils" @@ -48,7 +47,6 @@ func start(t *testing.T) (*mysql.Conn, func()) { return vtConn, func() { deleteAll() vtConn.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/readafterwrite/raw_test.go b/go/test/endtoend/vtgate/readafterwrite/raw_test.go index 0549a9b06b0..ce6db45d24e 100644 --- a/go/test/endtoend/vtgate/readafterwrite/raw_test.go +++ b/go/test/endtoend/vtgate/readafterwrite/raw_test.go @@ -100,7 +100,6 @@ CREATE TABLE test_vdx ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -143,7 +142,6 @@ func TestMain(m *testing.M) { } func TestRAWSettings(t *testing.T) { - defer cluster.PanicHandler(t) conn, err := mysql.Connect(context.Background(), &vtParams) require.NoError(t, err) defer conn.Close() diff --git a/go/test/endtoend/vtgate/reservedconn/main_test.go b/go/test/endtoend/vtgate/reservedconn/main_test.go index 528182a82e2..ce91d270c4b 100644 --- a/go/test/endtoend/vtgate/reservedconn/main_test.go +++ b/go/test/endtoend/vtgate/reservedconn/main_test.go @@ -103,7 +103,6 @@ CREATE TABLE test_vdx ( var enableSettingsPool bool func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() code := runAllTests(m) diff --git a/go/test/endtoend/vtgate/reservedconn/reconnect1/main_test.go b/go/test/endtoend/vtgate/reservedconn/reconnect1/main_test.go index 491ce6bc6ab..fe665d18930 100644 --- a/go/test/endtoend/vtgate/reservedconn/reconnect1/main_test.go +++ b/go/test/endtoend/vtgate/reservedconn/reconnect1/main_test.go @@ -65,7 +65,6 @@ var ( var enableSettingsPool bool func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() code := runAllTests(m) diff --git a/go/test/endtoend/vtgate/reservedconn/reconnect2/main_test.go b/go/test/endtoend/vtgate/reservedconn/reconnect2/main_test.go index a448574c282..09f7aa17cb8 100644 --- a/go/test/endtoend/vtgate/reservedconn/reconnect2/main_test.go +++ b/go/test/endtoend/vtgate/reservedconn/reconnect2/main_test.go @@ -66,7 +66,6 @@ var ( var enableSettingsPool bool func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() code := runAllTests(m) diff --git a/go/test/endtoend/vtgate/reservedconn/reconnect3/main_test.go b/go/test/endtoend/vtgate/reservedconn/reconnect3/main_test.go index 677c24666b2..dd594207e13 100644 --- a/go/test/endtoend/vtgate/reservedconn/reconnect3/main_test.go +++ b/go/test/endtoend/vtgate/reservedconn/reconnect3/main_test.go @@ -42,7 +42,6 @@ var ( var enableSettingsPool bool func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() code := runAllTests(m) diff --git a/go/test/endtoend/vtgate/reservedconn/reconnect4/main_test.go b/go/test/endtoend/vtgate/reservedconn/reconnect4/main_test.go index 1dc53a89506..79acce24cb0 100644 --- a/go/test/endtoend/vtgate/reservedconn/reconnect4/main_test.go +++ b/go/test/endtoend/vtgate/reservedconn/reconnect4/main_test.go @@ -42,7 +42,6 @@ var ( var enableSettingsPool bool 
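 // Judging by the TestMain bodies in these reservedconn suites, runAllTests executes
 // the whole package once with the settings pool disabled and, when that run passes,
 // TestMain sets enableSettingsPool = true and runs the suite a second time.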
func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() code := runAllTests(m) diff --git a/go/test/endtoend/vtgate/reservedconn/sysvar_test.go b/go/test/endtoend/vtgate/reservedconn/sysvar_test.go index 564cc671d5f..6ea97f84e9a 100644 --- a/go/test/endtoend/vtgate/reservedconn/sysvar_test.go +++ b/go/test/endtoend/vtgate/reservedconn/sysvar_test.go @@ -29,11 +29,9 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" ) func TestSetSysVarSingle(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() type queriesWithExpectations struct { name, expr string diff --git a/go/test/endtoend/vtgate/reservedconn/udv_test.go b/go/test/endtoend/vtgate/reservedconn/udv_test.go index 55f4c54c612..14b65dbcd35 100644 --- a/go/test/endtoend/vtgate/reservedconn/udv_test.go +++ b/go/test/endtoend/vtgate/reservedconn/udv_test.go @@ -31,11 +31,9 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" ) func TestSetUDV(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() type queriesWithExpectations struct { @@ -123,7 +121,6 @@ func TestSetUDV(t *testing.T) { } func TestMysqlDumpInitialLog(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) diff --git a/go/test/endtoend/vtgate/schema/schema_test.go b/go/test/endtoend/vtgate/schema/schema_test.go index 6b2e8ef7e61..4c28e29ca0d 100644 --- a/go/test/endtoend/vtgate/schema/schema_test.go +++ b/go/test/endtoend/vtgate/schema/schema_test.go @@ -55,7 +55,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode, err := func() (int, error) { @@ -101,7 +100,6 @@ func TestMain(m *testing.M) { } func TestSchemaChange(t *testing.T) { - defer cluster.PanicHandler(t) testWithInitialSchema(t) testWithAlterSchema(t) testWithAlterDatabase(t) diff --git a/go/test/endtoend/vtgate/schematracker/loadkeyspace/schema_load_keyspace_test.go b/go/test/endtoend/vtgate/schematracker/loadkeyspace/schema_load_keyspace_test.go index 9586206221e..ec201487887 100644 --- a/go/test/endtoend/vtgate/schematracker/loadkeyspace/schema_load_keyspace_test.go +++ b/go/test/endtoend/vtgate/schematracker/loadkeyspace/schema_load_keyspace_test.go @@ -57,7 +57,6 @@ var ( ) func TestLoadKeyspaceWithNoTablet(t *testing.T) { - defer cluster.PanicHandler(t) var err error clusterInstance = cluster.NewCluster(cell, hostname) @@ -100,7 +99,6 @@ func TestLoadKeyspaceWithNoTablet(t *testing.T) { } func TestNoInitialKeyspace(t *testing.T) { - defer cluster.PanicHandler(t) var err error clusterInstance = cluster.NewCluster(cell, hostname) diff --git a/go/test/endtoend/vtgate/schematracker/restarttablet/schema_restart_test.go b/go/test/endtoend/vtgate/schematracker/restarttablet/schema_restart_test.go index 3bb4f6dfd9f..1943fefa9d7 100644 --- a/go/test/endtoend/vtgate/schematracker/restarttablet/schema_restart_test.go +++ b/go/test/endtoend/vtgate/schematracker/restarttablet/schema_restart_test.go @@ -64,7 +64,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode := func() int { @@ -116,7 +115,6 @@ func TestMain(m *testing.M) { } func TestVSchemaTrackerInit(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) @@ -137,7 +135,6 @@ func TestVSchemaTrackerInit(t *testing.T) { // 
properly handles primary tablet restarts -- meaning that we maintain // the exact same vschema state as before the restart. func TestVSchemaTrackerKeyspaceReInit(t *testing.T) { - defer cluster.PanicHandler(t) primaryTablet := clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet() diff --git a/go/test/endtoend/vtgate/schematracker/sharded/st_sharded_test.go b/go/test/endtoend/vtgate/schematracker/sharded/st_sharded_test.go index 50042f3142a..5f82bd5d71a 100644 --- a/go/test/endtoend/vtgate/schematracker/sharded/st_sharded_test.go +++ b/go/test/endtoend/vtgate/schematracker/sharded/st_sharded_test.go @@ -48,7 +48,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -161,7 +160,6 @@ func TestNewTable(t *testing.T) { } func TestAmbiguousColumnJoin(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) diff --git a/go/test/endtoend/vtgate/schematracker/sharded_prs/st_sharded_test.go b/go/test/endtoend/vtgate/schematracker/sharded_prs/st_sharded_test.go index 09bd97eb9fe..6fbd3f3d33c 100644 --- a/go/test/endtoend/vtgate/schematracker/sharded_prs/st_sharded_test.go +++ b/go/test/endtoend/vtgate/schematracker/sharded_prs/st_sharded_test.go @@ -122,7 +122,6 @@ create table t8( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -208,7 +207,6 @@ func TestMain(m *testing.M) { } func TestAddColumn(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) diff --git a/go/test/endtoend/vtgate/schematracker/unsharded/st_unsharded_test.go b/go/test/endtoend/vtgate/schematracker/unsharded/st_unsharded_test.go index 5ecf89a5db7..4256727915d 100644 --- a/go/test/endtoend/vtgate/schematracker/unsharded/st_unsharded_test.go +++ b/go/test/endtoend/vtgate/schematracker/unsharded/st_unsharded_test.go @@ -49,7 +49,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -112,7 +111,6 @@ func TestMain(m *testing.M) { } func TestNewUnshardedTable(t *testing.T) { - defer cluster.PanicHandler(t) // create a sql connection ctx := context.Background() @@ -182,7 +180,6 @@ func TestNewUnshardedTable(t *testing.T) { // creating two tables having the same name differing only in casing, but other operating systems don't. // More information at https://dev.mysql.com/doc/refman/8.0/en/identifier-case-sensitivity.html#:~:text=Table%20names%20are%20stored%20in,lowercase%20on%20storage%20and%20lookup. 
func TestCaseSensitiveSchemaTracking(t *testing.T) { - defer cluster.PanicHandler(t) // create a sql connection ctx := context.Background() diff --git a/go/test/endtoend/vtgate/sec_vind/main_test.go b/go/test/endtoend/vtgate/sec_vind/main_test.go index 7aa5df76a83..7ec0d5c0682 100644 --- a/go/test/endtoend/vtgate/sec_vind/main_test.go +++ b/go/test/endtoend/vtgate/sec_vind/main_test.go @@ -44,7 +44,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -101,7 +100,6 @@ func start(t *testing.T) (*mysql.Conn, func()) { return conn, func() { deleteAll() conn.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/sequence/seq_test.go b/go/test/endtoend/vtgate/sequence/seq_test.go index 1bda37094b2..0fc1c810eb3 100644 --- a/go/test/endtoend/vtgate/sequence/seq_test.go +++ b/go/test/endtoend/vtgate/sequence/seq_test.go @@ -174,7 +174,6 @@ CREATE TABLE allDefaults ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -216,7 +215,6 @@ func TestMain(m *testing.M) { } func TestSeq(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -274,7 +272,6 @@ func TestSeq(t *testing.T) { } func TestDotTableSeq(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -297,7 +294,6 @@ func TestDotTableSeq(t *testing.T) { } func TestInsertAllDefaults(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", diff --git a/go/test/endtoend/vtgate/tablet_healthcheck/reparent_test.go b/go/test/endtoend/vtgate/tablet_healthcheck/reparent_test.go index d6357ce8f2a..77e9a58cf69 100644 --- a/go/test/endtoend/vtgate/tablet_healthcheck/reparent_test.go +++ b/go/test/endtoend/vtgate/tablet_healthcheck/reparent_test.go @@ -89,7 +89,6 @@ create table corder( // TestMain sets up the vitess cluster for any subsequent tests func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/tablet_healthcheck_cache/correctness_test.go b/go/test/endtoend/vtgate/tablet_healthcheck_cache/correctness_test.go index 50529d9fdf9..3457a2cab3c 100644 --- a/go/test/endtoend/vtgate/tablet_healthcheck_cache/correctness_test.go +++ b/go/test/endtoend/vtgate/tablet_healthcheck_cache/correctness_test.go @@ -86,7 +86,6 @@ create table corder( // TestMain sets up the vitess cluster for any subsequent tests func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/transaction/restart/main_test.go b/go/test/endtoend/vtgate/transaction/restart/main_test.go index 01185b5fa59..caa3111ad49 100644 --- a/go/test/endtoend/vtgate/transaction/restart/main_test.go +++ b/go/test/endtoend/vtgate/transaction/restart/main_test.go @@ -41,7 +41,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/transaction/rollback/txn_rollback_shutdown_test.go b/go/test/endtoend/vtgate/transaction/rollback/txn_rollback_shutdown_test.go index 2dff9f7b95f..c7bef098c05 100644 --- a/go/test/endtoend/vtgate/transaction/rollback/txn_rollback_shutdown_test.go +++ b/go/test/endtoend/vtgate/transaction/rollback/txn_rollback_shutdown_test.go @@ -48,7 +48,6 @@ var ( ) func TestMain(m *testing.M) { 
- defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -87,7 +86,6 @@ func TestMain(m *testing.M) { } func TestTransactionRollBackWhenShutDown(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) @@ -122,7 +120,6 @@ func TestTransactionRollBackWhenShutDown(t *testing.T) { } func TestErrorInAutocommitSession(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) diff --git a/go/test/endtoend/vtgate/transaction/single/main_test.go b/go/test/endtoend/vtgate/transaction/single/main_test.go index ec2dbd6378a..1eab3cea276 100644 --- a/go/test/endtoend/vtgate/transaction/single/main_test.go +++ b/go/test/endtoend/vtgate/transaction/single/main_test.go @@ -46,7 +46,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/transaction/tx_test.go b/go/test/endtoend/vtgate/transaction/tx_test.go index 8a004277b89..86b1ebdf486 100644 --- a/go/test/endtoend/vtgate/transaction/tx_test.go +++ b/go/test/endtoend/vtgate/transaction/tx_test.go @@ -47,7 +47,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode, err := func() (int, error) { @@ -96,7 +95,6 @@ func TestMain(m *testing.M) { // TestTransactionModes tests transactions using twopc mode func TestTransactionModes(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) @@ -142,7 +140,6 @@ func TestTransactionModes(t *testing.T) { // TestTransactionIsolation tests transaction isolation level. func TestTransactionIsolation(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) @@ -249,6 +246,5 @@ func start(t *testing.T) func() { return func() { deleteAll() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/unsharded/main_test.go b/go/test/endtoend/vtgate/unsharded/main_test.go index 0aaff58a65e..fc9bb808a08 100644 --- a/go/test/endtoend/vtgate/unsharded/main_test.go +++ b/go/test/endtoend/vtgate/unsharded/main_test.go @@ -149,7 +149,6 @@ END; var enableSettingsPool bool func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() code := runAllTests(m) @@ -207,7 +206,6 @@ func runAllTests(m *testing.M) int { func TestSelectIntoAndLoadFrom(t *testing.T) { // Test is skipped because it requires secure-file-priv variable to be set to not NULL or empty. 
t.Skip() - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -242,7 +240,6 @@ func TestSelectIntoAndLoadFrom(t *testing.T) { } func TestEmptyStatement(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -259,7 +256,6 @@ func TestEmptyStatement(t *testing.T) { } func TestTopoDownServingQuery(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -279,7 +275,6 @@ func TestTopoDownServingQuery(t *testing.T) { } func TestInsertAllDefaults(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -294,7 +289,6 @@ func TestInsertAllDefaults(t *testing.T) { } func TestDDLUnsharded(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -315,7 +309,6 @@ func TestDDLUnsharded(t *testing.T) { } func TestCallProcedure(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -362,7 +355,6 @@ func TestCallProcedure(t *testing.T) { } func TestTempTable(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -387,7 +379,6 @@ func TestTempTable(t *testing.T) { } func TestReservedConnDML(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -410,7 +401,6 @@ func TestReservedConnDML(t *testing.T) { } func TestNumericPrecisionScale(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", diff --git a/go/test/endtoend/vtgate/vindex_bindvars/main_test.go b/go/test/endtoend/vtgate/vindex_bindvars/main_test.go index 3251668e155..84c2c825784 100644 --- a/go/test/endtoend/vtgate/vindex_bindvars/main_test.go +++ b/go/test/endtoend/vtgate/vindex_bindvars/main_test.go @@ -265,7 +265,6 @@ CREATE TABLE thex ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -304,7 +303,6 @@ func TestMain(m *testing.M) { } func TestVindexHexTypes(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.Nil(t, err) @@ -326,7 +324,6 @@ func TestVindexHexTypes(t *testing.T) { } func TestVindexBindVarOverlap(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.Nil(t, err) diff --git a/go/test/endtoend/vtgate/vschema/vschema_test.go b/go/test/endtoend/vtgate/vschema/vschema_test.go index eec54f8f47f..82d08fa3056 100644 --- a/go/test/endtoend/vtgate/vschema/vschema_test.go +++ b/go/test/endtoend/vtgate/vschema/vschema_test.go @@ -97,7 +97,6 @@ func TestMain(m *testing.M) { } func TestVSchema(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) diff --git a/go/test/endtoend/vtorc/api/api_test.go b/go/test/endtoend/vtorc/api/api_test.go index 8fa24a39ac7..1a72be11aa7 100644 --- a/go/test/endtoend/vtorc/api/api_test.go +++ b/go/test/endtoend/vtorc/api/api_test.go @@ -33,7 +33,6 @@ import ( // TestAPIEndpoints tests the various API endpoints that VTOrc offers. 
func TestAPIEndpoints(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{
 		PreventCrossDataCenterPrimaryFailover: true,
 		RecoveryPeriodBlockSeconds:            5,
diff --git a/go/test/endtoend/vtorc/api/config_test.go b/go/test/endtoend/vtorc/api/config_test.go
new file mode 100644
index 00000000000..821b0f8071e
--- /dev/null
+++ b/go/test/endtoend/vtorc/api/config_test.go
@@ -0,0 +1,203 @@
+/*
+Copyright 2024 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+*/
+
+package api
+
+import (
+	"strings"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"vitess.io/vitess/go/test/endtoend/cluster"
+	"vitess.io/vitess/go/test/endtoend/vtorc/utils"
+)
+
+// TestDynamicConfigs tests the dynamic configurations that VTOrc offers.
+func TestDynamicConfigs(t *testing.T) {
+	utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{}, 1, "")
+	vtorc := clusterInfo.ClusterInstance.VTOrcProcesses[0]
+
+	// Restart VTOrc without any flag overrides so that all the configurations can be tested.
+	err := vtorc.TearDown()
+	require.NoError(t, err)
+	vtorc.Config = cluster.VTOrcConfiguration{}
+	vtorc.NoOverride = true
+	err = vtorc.Setup()
+	require.NoError(t, err)
+
+	// Call API with retry to ensure VTOrc is up
+	status, resp := utils.MakeAPICallRetry(t, vtorc, "/debug/health", func(code int, response string) bool {
+		return code != 200
+	})
+	// Verify when VTOrc is healthy, it has also run the first discovery.
+	assert.Equal(t, 200, status)
+	assert.Contains(t, resp, `"Healthy": true,`)
+
+	t.Run("InstancePollTime", func(t *testing.T) {
+		// Get configuration and verify the output.
+		waitForConfig(t, vtorc, `"instance-poll-time": 5000000000`)
+		// Update configuration and verify the output.
+		vtorc.Config.InstancePollTime = "10h"
+		err := vtorc.RewriteConfiguration()
+		assert.NoError(t, err)
+		// Wait until the config has been updated and seen.
+		waitForConfig(t, vtorc, `"instance-poll-time": "10h"`)
+	})
+
+	t.Run("PreventCrossCellFailover", func(t *testing.T) {
+		// Get configuration and verify the output.
+		waitForConfig(t, vtorc, `"prevent-cross-cell-failover": false`)
+		// Update configuration and verify the output.
+		vtorc.Config.PreventCrossCellFailover = true
+		err := vtorc.RewriteConfiguration()
+		assert.NoError(t, err)
+		// Wait until the config has been updated and seen.
+		waitForConfig(t, vtorc, `"prevent-cross-cell-failover": true`)
+	})
+
+	t.Run("SnapshotTopologyInterval", func(t *testing.T) {
+		// Get configuration and verify the output.
+		waitForConfig(t, vtorc, `"snapshot-topology-interval": 0`)
+		// Update configuration and verify the output.
+		vtorc.Config.SnapshotTopologyInterval = "10h"
+		err := vtorc.RewriteConfiguration()
+		assert.NoError(t, err)
+		// Wait until the config has been updated and seen.
+ waitForConfig(t, vtorc, `"snapshot-topology-interval": "10h"`) + }) + + t.Run("ReasonableReplicationLag", func(t *testing.T) { + // Get configuration and verify the output. + waitForConfig(t, vtorc, `"reasonable-replication-lag": 10000000000`) + // Update configuration and verify the output. + vtorc.Config.ReasonableReplicationLag = "10h" + err := vtorc.RewriteConfiguration() + assert.NoError(t, err) + // Wait until the config has been updated and seen. + waitForConfig(t, vtorc, `"reasonable-replication-lag": "10h"`) + }) + + t.Run("AuditToBackend", func(t *testing.T) { + // Get configuration and verify the output. + waitForConfig(t, vtorc, `"audit-to-backend": false`) + // Update configuration and verify the output. + vtorc.Config.AuditToBackend = true + err := vtorc.RewriteConfiguration() + assert.NoError(t, err) + // Wait until the config has been updated and seen. + waitForConfig(t, vtorc, `"audit-to-backend": true`) + }) + + t.Run("AuditToSyslog", func(t *testing.T) { + // Get configuration and verify the output. + waitForConfig(t, vtorc, `"audit-to-syslog": false`) + // Update configuration and verify the output. + vtorc.Config.AuditToSyslog = true + err := vtorc.RewriteConfiguration() + assert.NoError(t, err) + // Wait until the config has been updated and seen. + waitForConfig(t, vtorc, `"audit-to-syslog": true`) + }) + + t.Run("AuditPurgeDuration", func(t *testing.T) { + // Get configuration and verify the output. + waitForConfig(t, vtorc, `"audit-purge-duration": 604800000000000`) + // Update configuration and verify the output. + vtorc.Config.AuditPurgeDuration = "10h" + err := vtorc.RewriteConfiguration() + assert.NoError(t, err) + // Wait until the config has been updated and seen. + waitForConfig(t, vtorc, `"audit-purge-duration": "10h"`) + }) + + t.Run("WaitReplicasTimeout", func(t *testing.T) { + // Get configuration and verify the output. + waitForConfig(t, vtorc, `"wait-replicas-timeout": 30000000000`) + // Update configuration and verify the output. + vtorc.Config.WaitReplicasTimeout = "10h" + err := vtorc.RewriteConfiguration() + assert.NoError(t, err) + // Wait until the config has been updated and seen. + waitForConfig(t, vtorc, `"wait-replicas-timeout": "10h"`) + }) + + t.Run("TolerableReplicationLag", func(t *testing.T) { + // Get configuration and verify the output. + waitForConfig(t, vtorc, `"tolerable-replication-lag": 0`) + // Update configuration and verify the output. + vtorc.Config.TolerableReplicationLag = "10h" + err := vtorc.RewriteConfiguration() + assert.NoError(t, err) + // Wait until the config has been updated and seen. + waitForConfig(t, vtorc, `"tolerable-replication-lag": "10h"`) + }) + + t.Run("TopoInformationRefreshDuration", func(t *testing.T) { + // Get configuration and verify the output. + waitForConfig(t, vtorc, `"topo-information-refresh-duration": 15000000000`) + // Update configuration and verify the output. + vtorc.Config.TopoInformationRefreshDuration = "10h" + err := vtorc.RewriteConfiguration() + assert.NoError(t, err) + // Wait until the config has been updated and seen. + waitForConfig(t, vtorc, `"topo-information-refresh-duration": "10h"`) + }) + + t.Run("RecoveryPollDuration", func(t *testing.T) { + // Get configuration and verify the output. + waitForConfig(t, vtorc, `"recovery-poll-duration": 1000000000`) + // Update configuration and verify the output. + vtorc.Config.RecoveryPollDuration = "10h" + err := vtorc.RewriteConfiguration() + assert.NoError(t, err) + // Wait until the config has been updated and seen. 
+ waitForConfig(t, vtorc, `"recovery-poll-duration": "10h"`) + }) + + t.Run("AllowEmergencyReparent", func(t *testing.T) { + // Get configuration and verify the output. + waitForConfig(t, vtorc, `"allow-emergency-reparent": true`) + // Update configuration and verify the output. + vtorc.Config.AllowEmergencyReparent = "false" + err := vtorc.RewriteConfiguration() + assert.NoError(t, err) + // Wait until the config has been updated and seen. + waitForConfig(t, vtorc, `"allow-emergency-reparent": "false"`) + }) + + t.Run("ChangeTabletsWithErrantGtidToDrained", func(t *testing.T) { + // Get configuration and verify the output. + waitForConfig(t, vtorc, `"change-tablets-with-errant-gtid-to-drained": false`) + // Update configuration and verify the output. + vtorc.Config.ChangeTabletsWithErrantGtidToDrained = true + err := vtorc.RewriteConfiguration() + assert.NoError(t, err) + // Wait until the config has been updated and seen. + waitForConfig(t, vtorc, `"change-tablets-with-errant-gtid-to-drained": true`) + }) +} + +// waitForConfig waits for the expectedConfig to be present in the VTOrc configuration. +func waitForConfig(t *testing.T, vtorc *cluster.VTOrcProcess, expectedConfig string) { + t.Helper() + status, _ := utils.MakeAPICallRetry(t, vtorc, "/api/config", func(_ int, response string) bool { + return !strings.Contains(response, expectedConfig) + }) + require.EqualValues(t, 200, status) +} diff --git a/go/test/endtoend/vtorc/api/main_test.go b/go/test/endtoend/vtorc/api/main_test.go index f89326bc856..cc3e796b293 100644 --- a/go/test/endtoend/vtorc/api/main_test.go +++ b/go/test/endtoend/vtorc/api/main_test.go @@ -21,7 +21,6 @@ import ( "os" "testing" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/vtorc/utils" ) @@ -53,8 +52,6 @@ func TestMain(m *testing.M) { return m.Run(), nil }() - cluster.PanicHandler(nil) - if clusterInfo != nil { // stop vtorc first otherwise its logs get polluted // with instances being unreachable triggering unnecessary operations diff --git a/go/test/endtoend/vtorc/general/main_test.go b/go/test/endtoend/vtorc/general/main_test.go index 6db0792de3a..0cd88cd378c 100644 --- a/go/test/endtoend/vtorc/general/main_test.go +++ b/go/test/endtoend/vtorc/general/main_test.go @@ -21,7 +21,6 @@ import ( "os" "testing" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/vtorc/utils" ) @@ -47,8 +46,6 @@ func TestMain(m *testing.M) { return m.Run(), nil }() - cluster.PanicHandler(nil) - if clusterInfo != nil { // stop vtorc first otherwise its logs get polluted // with instances being unreachable triggering unnecessary operations diff --git a/go/test/endtoend/vtorc/general/vtorc_test.go b/go/test/endtoend/vtorc/general/vtorc_test.go index 88cd7b65d63..bd1e0007079 100644 --- a/go/test/endtoend/vtorc/general/vtorc_test.go +++ b/go/test/endtoend/vtorc/general/vtorc_test.go @@ -40,7 +40,6 @@ import ( // verify that with multiple vtorc instances, we still only have 1 PlannedReparentShard call func TestPrimaryElection(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{ PreventCrossDataCenterPrimaryFailover: true, }, 2, "") @@ -62,13 +61,69 @@ func TestPrimaryElection(t *testing.T) { require.Len(t, res.Rows, 1, "There should only be 1 primary tablet which was elected") } +// TestErrantGTIDOnPreviousPrimary tests that VTOrc is able to detect errant
GTIDs on a previously demoted primary +// if it has an errant GTID. +func TestErrantGTIDOnPreviousPrimary(t *testing.T) { + defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) + utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 3, 0, []string{"--change-tablets-with-errant-gtid-to-drained"}, cluster.VTOrcConfiguration{}, 1, "") + keyspace := &clusterInfo.ClusterInstance.Keyspaces[0] + shard0 := &keyspace.Shards[0] + + // find primary from topo + curPrimary := utils.ShardPrimaryTablet(t, clusterInfo, keyspace, shard0) + assert.NotNil(t, curPrimary, "should have elected a primary") + vtOrcProcess := clusterInfo.ClusterInstance.VTOrcProcesses[0] + utils.WaitForSuccessfulRecoveryCount(t, vtOrcProcess, logic.ElectNewPrimaryRecoveryName, 1) + utils.WaitForSuccessfulPRSCount(t, vtOrcProcess, keyspace.Name, shard0.Name, 1) + + var replica, otherReplica *cluster.Vttablet + for _, tablet := range shard0.Vttablets { + // the two tablets other than the current primary must be the replicas + if tablet.Alias != curPrimary.Alias { + if replica == nil { + replica = tablet + } else { + otherReplica = tablet + } + } + } + require.NotNil(t, replica, "should be able to find a replica") + require.NotNil(t, otherReplica, "should be able to find 2nd replica") + utils.CheckReplication(t, clusterInfo, curPrimary, []*cluster.Vttablet{replica, otherReplica}, 15*time.Second) + + // Disable global recoveries for the cluster. + vtOrcProcess.DisableGlobalRecoveries(t) + + // Run PRS to promote a different replica. + output, err := clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommandWithOutput( + "PlannedReparentShard", + fmt.Sprintf("%s/%s", keyspace.Name, shard0.Name), + "--new-primary", replica.Alias) + require.NoError(t, err, "error in PlannedReparentShard output - %s", output) + + // Stop replication on the previous primary to simulate it not reparenting properly. + // Also insert an errant GTID on the previous primary. + err = utils.RunSQLs(t, []string{ + "STOP REPLICA", + "RESET REPLICA ALL", + "set global read_only=OFF", + "insert into vt_ks.vt_insert_test(id, msg) values (10173, 'test 178342')", + }, curPrimary, "") + require.NoError(t, err) + + // Re-enable global recoveries so that VTOrc can detect the errant GTID and change the tablet to a drained type. + vtOrcProcess.EnableGlobalRecoveries(t) + + // Wait for the tablet to be drained. + utils.WaitForTabletType(t, curPrimary, "drained") +} + // Cases to test: // 1. create cluster with 1 replica and 1 rdonly, let orc choose primary // verify rdonly is not elected, only replica // verify replication is setup func TestSingleKeyspace(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 1, 1, []string{"--clusters_to_watch", "ks"}, cluster.VTOrcConfiguration{ PreventCrossDataCenterPrimaryFailover: true, }, 1, "") @@ -87,7 +142,6 @@ // verify replication is setup func TestKeyspaceShard(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 1, 1, []string{"--clusters_to_watch", "ks/0"}, cluster.VTOrcConfiguration{ PreventCrossDataCenterPrimaryFailover: true, }, 1, "") @@ -109,7 +163,6 @@ // 6. disable recoveries and make sure the detected problems are set correctly.
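TestErrantGTIDOnPreviousPrimary above manufactures an errant GTID by letting a demoted primary take a local write after replication has been reset. Conceptually, a GTID is errant when a tablet has executed a transaction that no other server in the shard covers, so detection reduces to a set difference over GTID sets. A minimal, self-contained sketch of that idea follows; the gtidSet type and errantIn helper are illustrative only (real MySQL GTID sets track interval lists per server UUID, and VTOrc's actual detection logic lives elsewhere in the tree):

package main

import "fmt"

// gtidSet is a simplified GTID set: server UUID -> highest sequence executed.
// A single high-water mark per UUID is enough to illustrate set difference.
type gtidSet map[string]int64

// errantIn returns the part of replica's set that no other server covers.
func errantIn(replica gtidSet, others []gtidSet) gtidSet {
	errant := gtidSet{}
	for uuid, seq := range replica {
		covered := false
		for _, other := range others {
			if other[uuid] >= seq {
				covered = true
				break
			}
		}
		if !covered {
			errant[uuid] = seq
		}
	}
	return errant
}

func main() {
	// The demoted primary executed a local write (uuid-local:1) that the
	// new primary never saw, mirroring the INSERT in the test above.
	demoted := gtidSet{"uuid-primary": 100, "uuid-local": 1}
	newPrimary := gtidSet{"uuid-primary": 100}
	fmt.Println(errantIn(demoted, []gtidSet{newPrimary})) // map[uuid-local:1]
}

Under --change-tablets-with-errant-gtid-to-drained, a non-empty difference like this is what justifies moving the offending tablet to the drained type, which is exactly what the test waits for.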
func TestVTOrcRepairs(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 3, 0, []string{"--change-tablets-with-errant-gtid-to-drained"}, cluster.VTOrcConfiguration{ PreventCrossDataCenterPrimaryFailover: true, }, 1, "") @@ -288,7 +341,6 @@ func TestRepairAfterTER(t *testing.T) { // test fails intermittently on CI, skip until it can be fixed. t.SkipNow() defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 0, nil, cluster.VTOrcConfiguration{ PreventCrossDataCenterPrimaryFailover: true, }, 1, "") @@ -422,7 +474,6 @@ func TestSemiSync(t *testing.T) { // TestVTOrcWithPrs tests that VTOrc works fine even when PRS is called from vtctld func TestVTOrcWithPrs(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 4, 0, nil, cluster.VTOrcConfiguration{ PreventCrossDataCenterPrimaryFailover: true, }, 1, "") @@ -474,7 +525,6 @@ func TestVTOrcWithPrs(t *testing.T) { // TestMultipleDurabilities tests that VTOrc works with 2 keyspaces having 2 different durability policies func TestMultipleDurabilities(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) // Setup a normal cluster and start vtorc utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 1, 1, nil, cluster.VTOrcConfiguration{}, 1, "") // Setup a semi-sync cluster @@ -492,6 +542,45 @@ func TestMultipleDurabilities(t *testing.T) { assert.NotNil(t, primary, "should have elected a primary") } +// TestDrainedTablet tests that we don't forget drained tablets and they still show up in the vtorc output. +func TestDrainedTablet(t *testing.T) { + defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) + + // Setup a normal cluster and start vtorc + utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 0, nil, cluster.VTOrcConfiguration{}, 1, "") + keyspace := &clusterInfo.ClusterInstance.Keyspaces[0] + shard0 := &keyspace.Shards[0] + + // find primary from topo + curPrimary := utils.ShardPrimaryTablet(t, clusterInfo, keyspace, shard0) + assert.NotNil(t, curPrimary, "should have elected a primary") + vtOrcProcess := clusterInfo.ClusterInstance.VTOrcProcesses[0] + + // find any replica tablet other than the current primary + var replica *cluster.Vttablet + for _, tablet := range shard0.Vttablets { + if tablet.Alias != curPrimary.Alias { + replica = tablet + break + } + } + require.NotNil(t, replica, "could not find any replica tablet") + + output, err := clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommandWithOutput( + "ChangeTabletType", replica.Alias, "DRAINED") + require.NoError(t, err, "error in changing tablet type output - %s", output) + + // Make sure VTOrc sees the drained tablets and doesn't forget them. + utils.WaitForDrainedTabletInVTOrc(t, vtOrcProcess, 1) + + output, err = clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommandWithOutput( + "ChangeTabletType", replica.Alias, "REPLICA") + require.NoError(t, err, "error in changing tablet type output - %s", output) + + // We have no drained tablets anymore. Wait for VTOrc to have processed that.
+ utils.WaitForDrainedTabletInVTOrc(t, vtOrcProcess, 0) +} + // TestDurabilityPolicySetLater tests that VTOrc works even if the durability policy of the keyspace is // set after VTOrc has been started. func TestDurabilityPolicySetLater(t *testing.T) { @@ -543,7 +632,6 @@ func TestFullStatusConnectionPooling(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 4, 0, []string{ "--tablet_manager_grpc_concurrency=1", }, cluster.VTOrcConfiguration{ diff --git a/go/test/endtoend/vtorc/primaryfailure/main_test.go b/go/test/endtoend/vtorc/primaryfailure/main_test.go index a3e50bd0cc9..cd03df01bd6 100644 --- a/go/test/endtoend/vtorc/primaryfailure/main_test.go +++ b/go/test/endtoend/vtorc/primaryfailure/main_test.go @@ -21,7 +21,6 @@ import ( "os" "testing" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/vtorc/utils" ) @@ -53,8 +52,6 @@ func TestMain(m *testing.M) { return m.Run(), nil }() - cluster.PanicHandler(nil) - if clusterInfo != nil { // stop vtorc first otherwise its logs get polluted // with instances being unreachable triggering unnecessary operations diff --git a/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go b/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go index 645b413799c..0bd34807df7 100644 --- a/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go +++ b/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go @@ -39,7 +39,6 @@ import ( // Also tests that VTOrc can handle multiple failures, if the durability policies allow it func TestDownPrimary(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) // We specify the --wait-replicas-timeout to a small value because we spawn a cross-cell replica later in the test. // If that replica is more advanced than the same-cell-replica, then we try to promote the cross-cell replica as an intermediate source. // If we don't specify a small value of --wait-replicas-timeout, then we would end up waiting for 30 seconds for the dead-primary to respond, failing this test. @@ -122,7 +121,6 @@ func TestDownPrimary(t *testing.T) { // bring down primary before VTOrc has started, let vtorc repair. func TestDownPrimaryBeforeVTOrc(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{}, 0, "none") keyspace := &clusterInfo.ClusterInstance.Keyspaces[0] shard0 := &keyspace.Shards[0] @@ -178,7 +176,6 @@ func TestDownPrimaryBeforeVTOrc(t *testing.T) { // delete the primary record and let vtorc repair. func TestDeletedPrimaryTablet(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, []string{"--remote_operation_timeout=10s"}, cluster.VTOrcConfiguration{}, 1, "none") keyspace := &clusterInfo.ClusterInstance.Keyspaces[0] shard0 := &keyspace.Shards[0] @@ -245,7 +242,6 @@ // that primary is unreachable. This helps us save a few seconds depending on the value of the `RemoteOperationTimeout` flag.
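A recurring pattern in these tests is converging on cluster state by polling: waitForConfig, utils.WaitForDrainedTabletInVTOrc, and utils.MakeAPICallRetry all retry an observation until a predicate holds. Where such a bespoke helper is not available, the same shape can be written with testify's assert.Eventually; this is a sketch under the assumption that the test can observe the value it waits on (countDrained below is a hypothetical observer, not a Vitess API):

package example

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

// waitForDrainedCount polls until the observed number of drained tablets
// matches want, failing the test if it never converges within 30 seconds.
func waitForDrainedCount(t *testing.T, want int, countDrained func() int) {
	t.Helper()
	assert.Eventually(t, func() bool {
		return countDrained() == want
	}, 30*time.Second, 100*time.Millisecond,
		"expected %d drained tablet(s)", want)
}

Polling with a bounded deadline, rather than a fixed sleep, is what keeps these end-to-end tests both fast on the happy path and tolerant of slow CI machines.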
func TestDeadPrimaryRecoversImmediately(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) // We specify the --wait-replicas-timeout to a small value because we spawn a cross-cell replica later in the test. // If that replica is more advanced than the same-cell-replica, then we try to promote the cross-cell replica as an intermediate source. // If we don't specify a small value of --wait-replicas-timeout, then we would end up waiting for 30 seconds for the dead-primary to respond, failing this test. @@ -328,7 +324,6 @@ func TestDeadPrimaryRecoversImmediately(t *testing.T) { // covers part of the test case master-failover-lost-replicas from orchestrator func TestCrossDataCenterFailure(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{ PreventCrossDataCenterPrimaryFailover: true, }, 1, "") @@ -374,7 +369,6 @@ func TestCrossDataCenterFailure(t *testing.T) { // In case of no viable candidates, we should error out func TestCrossDataCenterFailureError(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 1, 1, nil, cluster.VTOrcConfiguration{ PreventCrossDataCenterPrimaryFailover: true, }, 1, "") @@ -421,7 +415,6 @@ func TestLostRdonlyOnPrimaryFailure(t *testing.T) { // were detected by vtorc and could be configured to have their sources detached t.Skip() defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 2, nil, cluster.VTOrcConfiguration{ PreventCrossDataCenterPrimaryFailover: true, }, 1, "") @@ -501,7 +494,6 @@ func TestLostRdonlyOnPrimaryFailure(t *testing.T) { // covers the test case master-failover-fail-promotion-lag-minutes-success from orchestrator func TestPromotionLagSuccess(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{ ReplicationLagQuery: "select 59", FailPrimaryPromotionOnLagMinutes: 1, @@ -551,7 +543,6 @@ func TestPromotionLagFailure(t *testing.T) { // was smaller than the configured value, otherwise it would fail the promotion t.Skip() defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 3, 1, nil, cluster.VTOrcConfiguration{ ReplicationLagQuery: "select 61", FailPrimaryPromotionOnLagMinutes: 1, @@ -604,7 +595,6 @@ func TestPromotionLagFailure(t *testing.T) { // That is the replica which should be promoted in case of primary failure func TestDownPrimaryPromotionRule(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{ LockShardTimeoutSeconds: 5, }, 1, "test") @@ -652,7 +642,6 @@ func TestDownPrimaryPromotionRule(t *testing.T) { // It should also be caught up when it is promoted func TestDownPrimaryPromotionRuleWithLag(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{ LockShardTimeoutSeconds: 5, }, 1, "test") @@ -732,7 +721,6 @@ 
func TestDownPrimaryPromotionRuleWithLag(t *testing.T) { // It should also be caught up when it is promoted func TestDownPrimaryPromotionRuleWithLagCrossCenter(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{ LockShardTimeoutSeconds: 5, PreventCrossDataCenterPrimaryFailover: true, diff --git a/test/config.json b/test/config.json index 5f8b3a56f1e..8e68a41397a 100644 --- a/test/config.json +++ b/test/config.json @@ -331,17 +331,6 @@ "RetryMax": 1, "Tags": [] }, - "pitr": { - "File": "unused.go", - "Args": ["vitess.io/vitess/go/test/endtoend/recovery/pitr"], - "Command": [], - "Manual": false, - "Shard": "10", - "RetryMax": 1, - "Tags": [ - "site_test" - ] - }, "recovery": { "File": "unused.go", "Args": ["vitess.io/vitess/go/test/endtoend/recovery/unshardedrecovery"],
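Many of the failover tests above run with PreventCrossDataCenterPrimaryFailover enabled, which constrains promotion to the failed primary's own cell. Reduced to its core, that policy is a filter over promotion candidates; the sketch below illustrates the idea only (tabletInfo and sameCellCandidates are hypothetical names, not VTOrc's types, and real candidate selection also weighs promotion rules and replication lag):

package example

// tabletInfo carries the two fields the cell policy cares about.
type tabletInfo struct {
	Alias string
	Cell  string
}

// sameCellCandidates keeps only the candidates in the failed primary's cell,
// so a cross-cell replica can never be promoted while the policy is on.
func sameCellCandidates(failedPrimaryCell string, candidates []tabletInfo) []tabletInfo {
	var out []tabletInfo
	for _, c := range candidates {
		if c.Cell == failedPrimaryCell {
			out = append(out, c)
		}
	}
	return out
}

When the filtered list comes back empty, the recovery has no viable candidate and must error out, which is exactly the behavior TestCrossDataCenterFailureError asserts.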