diff --git a/go/test/endtoend/backup/vtbackup/backup_only_test.go b/go/test/endtoend/backup/vtbackup/backup_only_test.go
index ebb0767a6a7..c9436c80487 100644
--- a/go/test/endtoend/backup/vtbackup/backup_only_test.go
+++ b/go/test/endtoend/backup/vtbackup/backup_only_test.go
@@ -58,7 +58,6 @@ func TestTabletInitialBackup(t *testing.T) {
 	//    - Take a Second Backup
 	//    - Bring up a second replica, and restore from the second backup
 	//    - list the backups, remove them
-	defer cluster.PanicHandler(t)
 
 	waitForReplicationToCatchup([]cluster.Vttablet{*replica1, *replica2})
 
@@ -102,7 +101,6 @@ func TestTabletBackupOnly(t *testing.T) {
 	//    - Take a Second Backup
 	//    - Bring up a second replica, and restore from the second backup
 	//    - list the backups, remove them
-	defer cluster.PanicHandler(t)
 
 	// Reset the tablet object values in order on init tablet in the next step.
 	primary.VttabletProcess.ServingStatus = "NOT_SERVING"
diff --git a/go/test/endtoend/backup/vtbackup/main_test.go b/go/test/endtoend/backup/vtbackup/main_test.go
index 367956c9827..6e1840b2979 100644
--- a/go/test/endtoend/backup/vtbackup/main_test.go
+++ b/go/test/endtoend/backup/vtbackup/main_test.go
@@ -52,7 +52,6 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitCode, err := func() (int, error) {
diff --git a/go/test/endtoend/backup/vtctlbackup/backup_utils.go b/go/test/endtoend/backup/vtctlbackup/backup_utils.go
index a70d1804028..4463cd38eb0 100644
--- a/go/test/endtoend/backup/vtctlbackup/backup_utils.go
+++ b/go/test/endtoend/backup/vtctlbackup/backup_utils.go
@@ -388,7 +388,6 @@ func TestBackup(t *testing.T, setupType int, streamMode string, stripes int, cDe
 	},
 	// }
 
-	defer cluster.PanicHandler(t)
 	// setup cluster for the testing
 	code, err := LaunchCluster(setupType, streamMode, stripes, cDetails)
 	require.Nilf(t, err, "setup failed with status code %d", code)
@@ -1430,3 +1429,147 @@ func verifyTabletRestoreStats(t *testing.T, vars map[string]any) {
 	}
 	require.Contains(t, bd, "BackupStorage.File.File:Read")
 }
+
+func getDefaultCommonArgs() []string {
+	return []string{
+		"--vreplication_retry_delay", "1s",
+		"--degraded_threshold", "5s",
+		"--lock_tables_timeout", "5s",
+		"--watch_replication_stream",
+		"--enable_replication_reporter",
+		"--serving_state_grace_period", "1s",
+	}
+}
+
+func setDefaultCommonArgs() { commonTabletArg = getDefaultCommonArgs() }
+
+// fetch the backup engine used on the last backup triggered by the end-to-end tests.
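+// It reads the MANIFEST file of the most recent backup and returns its BackupMethod field.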
+func getBackupEngineOfLastBackup(t *testing.T) string {
+	lastBackup := getLastBackup(t)
+
+	manifest := readManifestFile(t, path.Join(localCluster.CurrentVTDATAROOT, "backups", keyspaceName, shardName, lastBackup))
+
+	return manifest.BackupMethod
+}
+
+func getLastBackup(t *testing.T) string {
+	backups, err := localCluster.ListBackups(shardKsName)
+	require.NoError(t, err)
+
+	return backups[len(backups)-1]
+}
+
+func TestBackupEngineSelector(t *testing.T) {
+	defer setDefaultCommonArgs()
+
+	// launch the cluster with xtrabackup as the default engine
+	code, err := LaunchCluster(XtraBackup, "xbstream", 0, &CompressionDetails{CompressorEngineName: "pgzip"})
+	require.Nilf(t, err, "setup failed with status code %d", code)
+
+	defer TearDownCluster()
+
+	localCluster.DisableVTOrcRecoveries(t)
+	defer func() {
+		localCluster.EnableVTOrcRecoveries(t)
+	}()
+	verifyInitialReplication(t)
+
+	t.Run("backup with backup-engine=builtin", func(t *testing.T) {
+		// first try to backup with an alternative engine (builtin)
+		err = localCluster.VtctldClientProcess.ExecuteCommand("Backup", "--allow-primary", "--backup-engine=builtin", primary.Alias)
+		require.NoError(t, err)
+		engineUsed := getBackupEngineOfLastBackup(t)
+		require.Equal(t, "builtin", engineUsed)
+	})
+
+	t.Run("backup with backup-engine=xtrabackup", func(t *testing.T) {
+		// then try to backup specifying the xtrabackup engine
+		err = localCluster.VtctldClientProcess.ExecuteCommand("Backup", "--allow-primary", "--backup-engine=xtrabackup", primary.Alias)
+		require.NoError(t, err)
+		engineUsed := getBackupEngineOfLastBackup(t)
+		require.Equal(t, "xtrabackup", engineUsed)
+	})
+
+	t.Run("backup without specifying backup-engine", func(t *testing.T) {
+		// check that by default we still use the xtrabackup engine if not specified
+		err = localCluster.VtctldClientProcess.ExecuteCommand("Backup", "--allow-primary", primary.Alias)
+		require.NoError(t, err)
+		engineUsed := getBackupEngineOfLastBackup(t)
+		require.Equal(t, "xtrabackup", engineUsed)
+	})
+}
+
+func TestRestoreAllowedBackupEngines(t *testing.T) {
+	defer setDefaultCommonArgs()
+
+	backupMsg := "right after xtrabackup backup"
+
+	cDetails := &CompressionDetails{CompressorEngineName: "pgzip"}
+
+	// launch the cluster with xtrabackup as the default engine
+	code, err := LaunchCluster(XtraBackup, "xbstream", 0, cDetails)
+	require.Nilf(t, err, "setup failed with status code %d", code)
+
+	defer TearDownCluster()
+
+	localCluster.DisableVTOrcRecoveries(t)
+	defer func() {
+		localCluster.EnableVTOrcRecoveries(t)
+	}()
+	verifyInitialReplication(t)
+
+	t.Run("generate backups", func(t *testing.T) {
+		// let's take two backups, each using a different backup engine
+		err = localCluster.VtctldClientProcess.ExecuteCommand("Backup", "--allow-primary", "--backup-engine=builtin", primary.Alias)
+		require.NoError(t, err)
+
+		err = localCluster.VtctldClientProcess.ExecuteCommand("Backup", "--allow-primary", "--backup-engine=xtrabackup", primary.Alias)
+		require.NoError(t, err)
+	})
+
+	// insert more data on the primary
+	_, err = primary.VttabletProcess.QueryTablet(fmt.Sprintf("insert into vt_insert_test (msg) values ('%s')", backupMsg), keyspaceName, true)
+	require.NoError(t, err)
+
+	t.Run("restore replica and verify data", func(t *testing.T) {
+		// now bring up another replica, letting it restore from backup.
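+		// restoreWaitForBackup starts the new tablet so that it waits for an existing backup and restores from it on startup.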
+		restoreWaitForBackup(t, "replica", cDetails, true)
+		err = replica2.VttabletProcess.WaitForTabletStatusesForTimeout([]string{"SERVING"}, timeout)
+		require.NoError(t, err)
+
+		// check the new replica has the data
+		cluster.VerifyRowsInTablet(t, replica2, keyspaceName, 2)
+		result, err := replica2.VttabletProcess.QueryTablet(
+			fmt.Sprintf("select msg from vt_insert_test where msg='%s'", backupMsg), replica2.VttabletProcess.Keyspace, true)
+		require.NoError(t, err)
+		require.Equal(t, backupMsg, result.Named().Row().AsString("msg", ""))
+	})
+
+	t.Run("test broken restore", func(t *testing.T) {
+		// now let's break the last backup in the shard
+		err = os.Remove(path.Join(localCluster.CurrentVTDATAROOT,
+			"backups", keyspaceName, shardName,
+			getLastBackup(t), "backup.xbstream.gz"))
+		require.NoError(t, err)
+
+		// and try to restore from it
+		err = localCluster.VtctldClientProcess.ExecuteCommand("RestoreFromBackup", replica2.Alias)
+		require.Error(t, err) // this should fail
+	})
+
+	t.Run("test older working backup", func(t *testing.T) {
+		// now we retry but with the first backup
+		err = localCluster.VtctldClientProcess.ExecuteCommand("RestoreFromBackup", "--allowed-backup-engines=builtin", replica2.Alias)
+		require.NoError(t, err) // this should succeed
+
+		// make sure we are replicating after the restore is done
+		err = replica2.VttabletProcess.WaitForTabletStatusesForTimeout([]string{"SERVING"}, timeout)
+		require.NoError(t, err)
+		cluster.VerifyRowsInTablet(t, replica2, keyspaceName, 2)
+
+		result, err := replica2.VttabletProcess.QueryTablet(
+			fmt.Sprintf("select msg from vt_insert_test where msg='%s'", backupMsg), replica2.VttabletProcess.Keyspace, true)
+		require.NoError(t, err)
+		require.Equal(t, backupMsg, result.Named().Row().AsString("msg", ""))
+	})
+}
diff --git a/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go b/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go
index 6270c023eab..23b055396bf 100644
--- a/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go
+++ b/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go
@@ -91,7 +91,6 @@ func waitForReplica(t *testing.T, replicaIndex int) {
 // in between, it makes writes to the database, and takes notes: what data was available in what backup.
 // It then restores each and every one of those backups, in random order, and expects to find the specific data associated with the backup.
 func ExecTestIncrementalBackupAndRestoreToPos(t *testing.T, tcase *PITRTestCase) {
-	defer cluster.PanicHandler(t)
 	t.Run(tcase.Name, func(t *testing.T) {
 		// setup cluster for the testing
 
@@ -320,7 +319,6 @@ func ExecTestIncrementalBackupAndRestoreToPos(t *testing.T, tcase *PITRTestCase)
 // ExecTestIncrementalBackupAndRestoreToPos
 func ExecTestIncrementalBackupAndRestoreToTimestamp(t *testing.T, tcase *PITRTestCase) {
-	defer cluster.PanicHandler(t)
 	var lastInsertedRowTimestamp time.Time
 	insertRowOnPrimary := func(t *testing.T, hint string) {
 
@@ -580,7 +578,6 @@ func ExecTestIncrementalBackupAndRestoreToTimestamp(t *testing.T, tcase *PITRTes
 // Specifically, it's designed to test how incremental backups are taken by interleaved replicas, so that they successfully build on
 // one another.
 func ExecTestIncrementalBackupOnTwoTablets(t *testing.T, tcase *PITRTestCase) {
-	defer cluster.PanicHandler(t)
 	t.Run(tcase.Name, func(t *testing.T) {
 		// setup cluster for the testing
diff --git a/go/test/endtoend/cellalias/cell_alias_test.go b/go/test/endtoend/cellalias/cell_alias_test.go
index d357331d8cd..8c4ec093c09 100644
--- a/go/test/endtoend/cellalias/cell_alias_test.go
+++ b/go/test/endtoend/cellalias/cell_alias_test.go
@@ -90,7 +90,6 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitcode, err := func() (int, error) {
@@ -232,7 +231,6 @@ func TestMain(m *testing.M) {
 }
 
 func TestAlias(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	insertInitialValues(t)
 	defer deleteInitialValues(t)
 
@@ -296,7 +294,6 @@ func TestAlias(t *testing.T) {
 }
 
 func TestAddAliasWhileVtgateUp(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	insertInitialValues(t)
 	defer deleteInitialValues(t)
 
diff --git a/go/test/endtoend/cluster/cluster_process.go b/go/test/endtoend/cluster/cluster_process.go
index a9cc482b9e3..16a20586333 100644
--- a/go/test/endtoend/cluster/cluster_process.go
+++ b/go/test/endtoend/cluster/cluster_process.go
@@ -992,7 +992,6 @@ func (cluster *LocalProcessCluster) VtctlclientChangeTabletType(tablet *Vttablet
 
 // Teardown brings down the cluster by invoking teardown for individual processes
 func (cluster *LocalProcessCluster) Teardown() {
-	PanicHandler(nil)
 	cluster.mx.Lock()
 	defer cluster.mx.Unlock()
 	if cluster.teardownCompleted {
diff --git a/go/test/endtoend/cluster/cluster_util.go b/go/test/endtoend/cluster/cluster_util.go
index a35c3bf3769..825305d182f 100644
--- a/go/test/endtoend/cluster/cluster_util.go
+++ b/go/test/endtoend/cluster/cluster_util.go
@@ -127,15 +127,6 @@ func VerifyRowsInTablet(t *testing.T, vttablet *Vttablet, ksName string, expecte
 	VerifyRowsInTabletForTable(t, vttablet, ksName, expectedRows, "vt_insert_test")
 }
 
-// PanicHandler handles the panic in the testcase.
-func PanicHandler(t testing.TB) {
-	err := recover()
-	if t == nil {
-		return
-	}
-	require.Nilf(t, err, "panic occured in testcase %v", t.Name())
-}
-
 // ListBackups Lists back preset in shard
 func (cluster LocalProcessCluster) ListBackups(shardKsName string) ([]string, error) {
 	output, err := cluster.VtctlclientProcess.ExecuteCommandWithOutput("ListBackups", shardKsName)
diff --git a/go/test/endtoend/clustertest/add_keyspace_test.go b/go/test/endtoend/clustertest/add_keyspace_test.go
index edee87d035e..b8422b52eb3 100644
--- a/go/test/endtoend/clustertest/add_keyspace_test.go
+++ b/go/test/endtoend/clustertest/add_keyspace_test.go
@@ -61,7 +61,6 @@ primary key (id)
 )
 
 func TestAddKeyspace(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	if err := clusterInstance.StartKeyspace(*testKeyspace, []string{"-80", "80-"}, 0, false); err != nil {
 		log.Errorf("failed to AddKeyspace %v: %v", *testKeyspace, err)
 		t.Fatal(err)
diff --git a/go/test/endtoend/clustertest/etcd_test.go b/go/test/endtoend/clustertest/etcd_test.go
index 5239d960c47..f47a002a3cf 100644
--- a/go/test/endtoend/clustertest/etcd_test.go
+++ b/go/test/endtoend/clustertest/etcd_test.go
@@ -24,12 +24,9 @@ import (
 
 	"github.com/stretchr/testify/require"
 	clientv3 "go.etcd.io/etcd/client/v3"
-
-	"vitess.io/vitess/go/test/endtoend/cluster"
 )
 
 func TestEtcdServer(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	// Confirm the basic etcd cluster health.
 	etcdHealthURL := fmt.Sprintf("http://%s:%d/health", clusterInstance.Hostname, clusterInstance.TopoPort)
diff --git a/go/test/endtoend/clustertest/main_test.go b/go/test/endtoend/clustertest/main_test.go
index 35da40a3edb..3fc2524208b 100644
--- a/go/test/endtoend/clustertest/main_test.go
+++ b/go/test/endtoend/clustertest/main_test.go
@@ -60,7 +60,6 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitCode := func() int {
diff --git a/go/test/endtoend/clustertest/vtctld_test.go b/go/test/endtoend/clustertest/vtctld_test.go
index c1b341ccd73..074abd142bc 100644
--- a/go/test/endtoend/clustertest/vtctld_test.go
+++ b/go/test/endtoend/clustertest/vtctld_test.go
@@ -30,8 +30,6 @@ import (
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-
-	"vitess.io/vitess/go/test/endtoend/cluster"
 )
 
 var (
@@ -44,7 +42,6 @@ var (
 )
 
 func TestVtctldProcess(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	url := fmt.Sprintf("http://%s:%d/api/keyspaces/", clusterInstance.Hostname, clusterInstance.VtctldHTTPPort)
 	testURL(t, url, "keyspace url")
diff --git a/go/test/endtoend/clustertest/vtgate_test.go b/go/test/endtoend/clustertest/vtgate_test.go
index 2f72682a391..264b292f482 100644
--- a/go/test/endtoend/clustertest/vtgate_test.go
+++ b/go/test/endtoend/clustertest/vtgate_test.go
@@ -32,11 +32,9 @@ import (
 	"github.com/stretchr/testify/require"
 
 	"vitess.io/vitess/go/mysql"
-	"vitess.io/vitess/go/test/endtoend/cluster"
 )
 
 func TestVtgateProcess(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	verifyVtgateVariables(t, clusterInstance.VtgateProcess.VerifyURL)
 	ctx := context.Background()
 	conn, err := mysql.Connect(ctx, &vtParams)
diff --git a/go/test/endtoend/clustertest/vttablet_test.go b/go/test/endtoend/clustertest/vttablet_test.go
index 369deb18cfd..35cad38503c 100644
--- a/go/test/endtoend/clustertest/vttablet_test.go
+++ b/go/test/endtoend/clustertest/vttablet_test.go
@@ -25,12 +25,9 @@ import (
 	"testing"
 
 	"github.com/stretchr/testify/require"
-
-	"vitess.io/vitess/go/test/endtoend/cluster"
 )
 
 func TestVttabletProcess(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	firstTabletPort := clusterInstance.Keyspaces[0].Shards[0].Vttablets[0].HTTPPort
 	testURL(t, fmt.Sprintf("http://localhost:%d/debug/vars/", firstTabletPort), "tablet debug var url")
 	resp, err := http.Get(fmt.Sprintf("http://localhost:%d/debug/vars", firstTabletPort))
@@ -48,7 +45,6 @@ func TestVttabletProcess(t *testing.T) {
 }
 
 func TestDeleteTablet(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	primary := clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet()
 	require.NotNil(t, primary)
 	_, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("DeleteTablet", "--", "--allow_primary", primary.Alias)
diff --git a/go/test/endtoend/encryption/encryptedreplication/encrypted_replication_test.go b/go/test/endtoend/encryption/encryptedreplication/encrypted_replication_test.go
index 725659a5ee1..26bc77a36d0 100644
--- a/go/test/endtoend/encryption/encryptedreplication/encrypted_replication_test.go
+++ b/go/test/endtoend/encryption/encryptedreplication/encrypted_replication_test.go
@@ -42,7 +42,6 @@ var (
 
 // This test makes sure that we can use SSL replication with Vitess
 func TestSecure(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	testReplicationBase(t, true)
 	testReplicationBase(t, false)
 }
diff --git a/go/test/endtoend/encryption/encryptedtransport/encrypted_transport_test.go b/go/test/endtoend/encryption/encryptedtransport/encrypted_transport_test.go
index b076006ec2c..6122c77071c 100644
--- a/go/test/endtoend/encryption/encryptedtransport/encrypted_transport_test.go
+++ b/go/test/endtoend/encryption/encryptedtransport/encrypted_transport_test.go
@@ -102,7 +102,6 @@ var (
 )
 
 func TestSecureTransport(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	flag.Parse()
 
 	// initialize cluster
diff --git a/go/test/endtoend/keyspace/keyspace_test.go b/go/test/endtoend/keyspace/keyspace_test.go
index 338ad5c8cd2..b302a2d65a1 100644
--- a/go/test/endtoend/keyspace/keyspace_test.go
+++ b/go/test/endtoend/keyspace/keyspace_test.go
@@ -80,7 +80,6 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitCode := func() int {
@@ -169,15 +168,13 @@ func checkDurabilityPolicy(t *testing.T, durabilityPolicy string) {
 }
 
 func TestGetSrvKeyspaceNames(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	output, err := clusterForKSTest.VtctlclientProcess.ExecuteCommandWithOutput("GetSrvKeyspaceNames", cell)
 	require.Nil(t, err)
 	assert.Contains(t, strings.Split(output, "\n"), keyspaceUnshardedName)
 	assert.Contains(t, strings.Split(output, "\n"), keyspaceShardedName)
 }
 
 func TestGetSrvKeyspacePartitions(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	shardedSrvKeyspace := getSrvKeyspace(t, cell, keyspaceShardedName)
 	otherShardRefFound := false
 	for _, partition := range shardedSrvKeyspace.Partitions {
@@ -206,6 +203,5 @@ func TestGetSrvKeyspacePartitions(t *testing.T) {
 }
 
 func TestShardNames(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	output, err := clusterForKSTest.VtctlclientProcess.ExecuteCommandWithOutput("GetSrvKeyspace", cell, keyspaceShardedName)
 	require.Nil(t, err)
@@ -223,14 +219,13 @@ func TestGetKeyspace(t *testing.T) {
 	var keyspace topodata.Keyspace
 	err = json.Unmarshal([]byte(output), &keyspace)
 	require.Nil(t, err)
 }
 
 func TestDeleteKeyspace(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	_ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("CreateKeyspace", "test_delete_keyspace")
 	_ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("CreateShard", "test_delete_keyspace/0")
 	_ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("InitTablet", "--", "--keyspace=test_delete_keyspace", "--shard=0", "zone1-0000000100", "primary")
 
 	// Can't delete keyspace if there are shards present.
 	err := clusterForKSTest.VtctlclientProcess.ExecuteCommand("DeleteKeyspace", "test_delete_keyspace")
@@ -346,7 +341,6 @@ func TestDeleteKeyspace(t *testing.T) {
 }
 */
 
 func TestShardCountForAllKeyspaces(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	testShardCountForKeyspace(t, keyspaceUnshardedName, 1)
 	testShardCountForKeyspace(t, keyspaceShardedName, 2)
 }
@@ -363,7 +357,6 @@ func testShardCountForKeyspace(t *testing.T, keyspace string, count int) {
 }
 
 func TestShardNameForAllKeyspaces(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	testShardNameForKeyspace(t, keyspaceUnshardedName, []string{"test_ks_unsharded"})
 	testShardNameForKeyspace(t, keyspaceShardedName, []string{"-80", "80-"})
 }
@@ -382,7 +375,6 @@ func testShardNameForKeyspace(t *testing.T, keyspace string, shardNames []string
 }
 
 func TestKeyspaceToShardName(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	var id []byte
 	srvKeyspace := getSrvKeyspace(t, cell, keyspaceShardedName)
 
diff --git a/go/test/endtoend/messaging/main_test.go b/go/test/endtoend/messaging/main_test.go
index 49477ebe631..c654869316b 100644
--- a/go/test/endtoend/messaging/main_test.go
+++ b/go/test/endtoend/messaging/main_test.go
@@ -104,7 +104,6 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitcode, err := func() (int, error) {
diff --git a/go/test/endtoend/messaging/message_test.go b/go/test/endtoend/messaging/message_test.go
index 3082f295055..b05478f55f5 100644
--- a/go/test/endtoend/messaging/message_test.go
+++ b/go/test/endtoend/messaging/message_test.go
@@ -375,7 +375,6 @@ func TestUnsharded(t *testing.T) {
 
 // TestReparenting checks the client connection count after reparenting.
 func TestReparenting(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	name := "sharded_message"
 
 	ctx := context.Background()
@@ -435,7 +434,6 @@ func TestReparenting(t *testing.T) {
 
 // TestConnection validate the connection count and message streaming.
 func TestConnection(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	name := "sharded_message"
 
@@ -494,7 +492,6 @@ func TestConnection(t *testing.T) {
 }
 
 func testMessaging(t *testing.T, name, ks string) {
-	defer cluster.PanicHandler(t)
 	ctx := context.Background()
 	stream, err := VtgateGrpcConn(ctx, clusterInstance)
 	require.Nil(t, err)
diff --git a/go/test/endtoend/mysqlctl/mysqlctl_test.go b/go/test/endtoend/mysqlctl/mysqlctl_test.go
index 3b28c5bcf30..52fac9b1185 100644
--- a/go/test/endtoend/mysqlctl/mysqlctl_test.go
+++ b/go/test/endtoend/mysqlctl/mysqlctl_test.go
@@ -40,7 +40,6 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitCode := func() int {
@@ -139,7 +138,6 @@ func initCluster(shardNames []string, totalTabletsRequired int) {
 }
 
 func TestRestart(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	err := primaryTablet.MysqlctlProcess.Stop()
 	require.Nil(t, err)
 	primaryTablet.MysqlctlProcess.CleanupFiles(primaryTablet.TabletUID)
@@ -148,7 +146,6 @@
 }
 
 func TestAutoDetect(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	err := clusterInstance.Keyspaces[0].Shards[0].Vttablets[0].VttabletProcess.Setup()
 	require.Nil(t, err, "error should be nil")
 
diff --git a/go/test/endtoend/mysqlctld/mysqlctld_test.go b/go/test/endtoend/mysqlctld/mysqlctld_test.go
index e1577acfc52..5e5508b680f 100644
--- a/go/test/endtoend/mysqlctld/mysqlctld_test.go
+++ b/go/test/endtoend/mysqlctld/mysqlctld_test.go
@@ -44,7 +44,6 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitCode := func() int {
@@ -141,7 +140,6 @@ func initCluster(shardNames []string, totalTabletsRequired int) error {
 }
 
 func TestRestart(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	err := primaryTablet.MysqlctldProcess.Stop()
 	require.Nil(t, err)
 	require.Truef(t, primaryTablet.MysqlctldProcess.WaitForMysqlCtldShutdown(), "Mysqlctld has not stopped...")
@@ -151,7 +149,6 @@
 }
 
 func TestAutoDetect(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	err := clusterInstance.Keyspaces[0].Shards[0].Vttablets[0].VttabletProcess.Setup()
 	require.Nil(t, err, "error should be nil")
 
diff --git a/go/test/endtoend/mysqlserver/main_test.go b/go/test/endtoend/mysqlserver/main_test.go
index 18b169e33d7..20da69e18e8 100644
--- a/go/test/endtoend/mysqlserver/main_test.go
+++ b/go/test/endtoend/mysqlserver/main_test.go
@@ -61,7 +61,6 @@ END;
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	// setting grpc max size
diff --git a/go/test/endtoend/mysqlserver/mysql_server_test.go b/go/test/endtoend/mysqlserver/mysql_server_test.go
index caed342688d..a495c8ff672 100644
--- a/go/test/endtoend/mysqlserver/mysql_server_test.go
+++ b/go/test/endtoend/mysqlserver/mysql_server_test.go
@@ -35,14 +35,12 @@ import (
 	"vitess.io/vitess/go/mysql/sqlerror"
 
 	"vitess.io/vitess/go/mysql"
-	"vitess.io/vitess/go/test/endtoend/cluster"
 
 	_ "github.com/go-sql-driver/mysql"
 )
 
 // TestMultiStmt checks that multiStatements=True and multiStatements=False work properly.
 func TestMultiStatement(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	ctx := context.Background()
 
 	// connect database with multiStatements=True
@@ -70,7 +68,6 @@ func TestMultiStatement(t *testing.T) {
 
 // TestLargeComment add large comment in insert stmt and validate the insert process.
 func TestLargeComment(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	ctx := context.Background()
 
 	conn, err := mysql.Connect(ctx, &vtParams)
@@ -89,7 +86,6 @@ func TestLargeComment(t *testing.T) {
 
 // TestInsertLargerThenGrpcLimit insert blob larger then grpc limit and verify the error.
 func TestInsertLargerThenGrpcLimit(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	ctx := context.Background()
 
@@ -109,7 +105,6 @@ func TestInsertLargerThenGrpcLimit(t *testing.T) {
 
 // TestTimeout executes sleep(5) with query_timeout of 1 second, and verifies the error.
 func TestTimeout(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	ctx := context.Background()
 
 	conn, err := mysql.Connect(ctx, &vtParams)
@@ -125,7 +120,6 @@ func TestTimeout(t *testing.T) {
 
 // TestInvalidField tries to fetch invalid column and verifies the error.
 func TestInvalidField(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	ctx := context.Background()
 
 	conn, err := mysql.Connect(ctx, &vtParams)
@@ -141,7 +135,6 @@ func TestInvalidField(t *testing.T) {
 
 // TestWarnings validates the behaviour of SHOW WARNINGS.
 func TestWarnings(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	ctx := context.Background()
 
 	conn, err := mysql.Connect(ctx, &vtParams)
@@ -183,7 +176,6 @@ func TestWarnings(t *testing.T) {
 // TestSelectWithUnauthorizedUser verifies that an unauthorized user
 // is not able to read from the table.
 func TestSelectWithUnauthorizedUser(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	ctx := context.Background()
 
 	tmpVtParam := vtParams
@@ -202,7 +194,6 @@ func TestSelectWithUnauthorizedUser(t *testing.T) {
 
 // TestPartitionedTable validates that partitioned tables are recognized by schema engine
 func TestPartitionedTable(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	tablet := clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet()
 
diff --git a/go/test/endtoend/onlineddl/flow/onlineddl_flow_test.go b/go/test/endtoend/onlineddl/flow/onlineddl_flow_test.go
new file mode 100644
index 00000000000..ee8141860f4
--- /dev/null
+++ b/go/test/endtoend/onlineddl/flow/onlineddl_flow_test.go
@@ -0,0 +1,662 @@
+/*
+Copyright 2024 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This test is designed to test the flow of a single online DDL migration, with tablet throttler
+// enabled. It tests the following:
+// - A primary + replica setup
+// - Creating and populating a table
+// - Enabling tablet (lag) throttler
+// - Running a workload that generates DMLs, and which checks the throttler
+// - Running an online DDL migration:
+//   - Using `online --postpone-completion` to use vreplication
+//   - vreplication configured (by default) to read from replica
+//   - vreplication by nature also checks the throttler
+//   - meanwhile, the workload generates DMLs, give migration some run time
+//   - proactively throttle and then unthrottle the migration
+//   - complete the migration
+//
+// - Validate sufficient DML has been applied
+// - Validate the migration completed, and validate new schema is instated
+//
+// The test is designed with upgrade/downgrade in mind. In particular, we wish to test
+// different vitess versions for `primary` and `replica` tablets. Thus, we validate:
+// - Cross tablet and cross version throttler communication
+// - Cross version vreplication
+
+package flow
+
+import (
+	"context"
+	"flag"
+	"fmt"
+	"io"
+	"math/rand/v2"
+	"net/http"
+	"os"
+	"path"
+	"runtime"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"vitess.io/vitess/go/mysql"
+	"vitess.io/vitess/go/test/endtoend/cluster"
+	"vitess.io/vitess/go/test/endtoend/onlineddl"
+	"vitess.io/vitess/go/test/endtoend/throttler"
+	"vitess.io/vitess/go/vt/log"
+	"vitess.io/vitess/go/vt/schema"
+	throttlebase "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/base"
+	"vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp"
+)
+
+var (
+	clusterInstance  *cluster.LocalProcessCluster
+	shards           []cluster.Shard
+	vtParams         mysql.ConnParams
+	primaryTablet    *cluster.Vttablet
+	replicaTablet    *cluster.Vttablet
+	tablets          []*cluster.Vttablet
+	httpClient       = throttlebase.SetupHTTPClient(time.Second)
+	throttleWorkload atomic.Bool
+	totalAppliedDML  atomic.Int64
+
+	hostname              = "localhost"
+	keyspaceName          = "ks"
+	cell                  = "zone1"
+	schemaChangeDirectory = ""
+	tableName             = `stress_test`
+	createStatement       = `
+		CREATE TABLE stress_test (
+			id bigint(20) not null,
+			rand_val varchar(32) null default '',
+			hint_col varchar(64) not null default '',
+			created_timestamp timestamp not null default current_timestamp,
+			updates int unsigned not null default 0,
+			PRIMARY KEY (id),
+			key created_idx(created_timestamp),
+			key updates_idx(updates)
+		) ENGINE=InnoDB
+	`
+	alterHintStatement = `
+		ALTER TABLE stress_test modify hint_col varchar(64) not null default '%s'
+	`
+	insertRowStatement = `
+		INSERT IGNORE INTO stress_test (id, rand_val) VALUES (%d, left(md5(rand()), 8))
+	`
+	updateRowStatement = `
+		UPDATE stress_test SET updates=updates+1 WHERE id=%d
+	`
+	deleteRowStatement = `
+		DELETE FROM stress_test WHERE id=%d AND updates=1
+	`
+)
+
+var (
+	countIterations = 5
+)
+
+const (
+	maxTableRows         = 4096
+	workloadDuration     = 5 * time.Second
+	migrationWaitTimeout = 60 * time.Second
+)
+
+func TestMain(m *testing.M) {
+	flag.Parse()
+
+	exitcode, err := func() (int, error) {
+		clusterInstance = cluster.NewCluster(cell, hostname)
+		schemaChangeDirectory = path.Join("/tmp", fmt.Sprintf("schema_change_dir_%d", clusterInstance.GetAndReserveTabletUID()))
+		defer os.RemoveAll(schemaChangeDirectory)
+		defer clusterInstance.Teardown()
+
+		if _, err := os.Stat(schemaChangeDirectory); os.IsNotExist(err) {
+			_ = os.Mkdir(schemaChangeDirectory, 0700)
+		}
+
+		clusterInstance.VtctldExtraArgs = []string{
+			"--schema_change_dir", schemaChangeDirectory,
+			"--schema_change_controller", "local",
+			"--schema_change_check_interval", "1s",
+		}
+
+		clusterInstance.VtTabletExtraArgs = []string{
+			"--heartbeat_interval", "250ms",
+			"--heartbeat_on_demand_duration", "5s",
+			"--migration_check_interval", "2s",
+			"--watch_replication_stream",
+		}
+		clusterInstance.VtGateExtraArgs = []string{
+			"--ddl_strategy", "online",
+		}
+
+		if err := clusterInstance.StartTopo(); err != nil {
+			return 1, err
+		}
+
+		// Start keyspace
+		keyspace := &cluster.Keyspace{
+			Name: keyspaceName,
+		}
+
+		// No need for replicas in this stress test
+		if err := clusterInstance.StartKeyspace(*keyspace, []string{"1"}, 1, false); err != nil {
+			return 1, err
+		}
+
+		// Collect table paths and ports
+		tablets = clusterInstance.Keyspaces[0].Shards[0].Vttablets
+		for _, tablet := range tablets {
+			if tablet.Type == "primary" {
+				primaryTablet = tablet
+			} else {
+				replicaTablet = tablet
+			}
+		}
+
+		vtgateInstance := clusterInstance.NewVtgateInstance()
+		// Start vtgate
+		if err := vtgateInstance.Setup(); err != nil {
+			return 1, err
+		}
+		// ensure it is torn down during cluster TearDown
+		clusterInstance.VtgateProcess = *vtgateInstance
+		vtParams = mysql.ConnParams{
+			Host: clusterInstance.Hostname,
+			Port: clusterInstance.VtgateMySQLPort,
+		}
+
+		return m.Run(), nil
+	}()
+	if err != nil {
+		fmt.Printf("%v\n", err)
+		os.Exit(1)
+	} else {
+		os.Exit(exitcode)
+	}
+
+}
+
+func TestOnlineDDLFlow(t *testing.T) {
+	ctx := context.Background()
+
+	require.NotNil(t, clusterInstance)
+	require.NotNil(t, primaryTablet)
+	require.NotNil(t, replicaTablet)
+	require.Equal(t, 2, len(tablets))
+
+	// This test is designed with upgrade/downgrade in mind. Do some logging to show what's
+	// the configuration for this test.
+	if binarySuffix := os.Getenv("PRIMARY_TABLET_BINARY_SUFFIX"); binarySuffix != "" {
+		t.Logf("Using PRIMARY_TABLET_BINARY_SUFFIX: %s", binarySuffix)
+	}
+	if binarySuffix := os.Getenv("REPLICA_TABLET_BINARY_SUFFIX"); binarySuffix != "" {
+		t.Logf("Using REPLICA_TABLET_BINARY_SUFFIX: %s", binarySuffix)
+	}
+
+	require.NotEmpty(t, clusterInstance.Keyspaces)
+	shards = clusterInstance.Keyspaces[0].Shards
+	require.Equal(t, 1, len(shards))
+
+	throttler.EnableLagThrottlerAndWaitForStatus(t, clusterInstance)
+
+	t.Run("flow", func(t *testing.T) {
+		t.Run("create schema", func(t *testing.T) {
+			testWithInitialSchema(t)
+		})
+		t.Run("init table", func(t *testing.T) {
+			// Populates table. Makes work for vcopier.
+			initTable(t)
+		})
+		t.Run("migrate", func(t *testing.T) {
+			ctx, cancel := context.WithCancel(ctx)
+			defer cancel()
+
+			workloadCtx, cancelWorkload := context.WithCancel(ctx)
+			defer cancelWorkload()
+
+			t.Run("routine throttler check", func(t *testing.T) {
+				go func() {
+					ticker := time.NewTicker(500 * time.Millisecond)
+					defer ticker.Stop()
+					for {
+						_, statusCode, err := throttlerCheck(primaryTablet.VttabletProcess, throttlerapp.OnlineDDLName)
+						assert.NoError(t, err)
+						throttleWorkload.Store(statusCode != http.StatusOK)
+						select {
+						case <-ticker.C:
+						case <-workloadCtx.Done():
+							t.Logf("Terminating routine throttler check")
+							return
+						}
+					}
+				}()
+			})
+
+			var wg sync.WaitGroup
+			t.Run("generate workload", func(t *testing.T) {
+				// Create work for vplayer.
+				// This workload will consider throttling state and avoid generating DMLs if throttled.
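+				// DML generation is gated on throttleWorkload, which the routine throttler check above refreshes every 500ms.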
+				wg.Add(1)
+				go func() {
+					defer cancel()
+					defer t.Logf("Terminating workload")
+					defer wg.Done()
+					runMultipleConnections(workloadCtx, t)
+				}()
+			})
+			appliedDMLStart := totalAppliedDML.Load()
+
+			hint := "post_completion_hint"
+			var uuid string
+			t.Run("submit migration", func(t *testing.T) {
+				uuid = testOnlineDDLStatement(t, fmt.Sprintf(alterHintStatement, hint), "online --postpone-completion", "", true)
+			})
+			t.Run("wait for ready_to_complete", func(t *testing.T) {
+				waitForReadyToComplete(t, uuid, true)
+			})
+			t.Run("validating running status", func(t *testing.T) {
+				onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusRunning)
+			})
+			t.Run("throttle online-ddl", func(t *testing.T) {
+				onlineddl.CheckThrottledApps(t, &vtParams, throttlerapp.OnlineDDLName, false)
+				onlineddl.ThrottleAllMigrations(t, &vtParams)
+				onlineddl.CheckThrottledApps(t, &vtParams, throttlerapp.OnlineDDLName, true)
+				waitForThrottleCheckStatus(t, throttlerapp.OnlineDDLName, primaryTablet, http.StatusExpectationFailed)
+			})
+			t.Run("unthrottle online-ddl", func(t *testing.T) {
+				onlineddl.UnthrottleAllMigrations(t, &vtParams)
+				if !onlineddl.CheckThrottledApps(t, &vtParams, throttlerapp.OnlineDDLName, false) {
+					status, err := throttler.GetThrottlerStatus(&clusterInstance.VtctldClientProcess, primaryTablet)
+					assert.NoError(t, err)
+
+					t.Logf("Throttler status: %+v", status)
+				}
+				waitForThrottleCheckStatus(t, throttlerapp.OnlineDDLName, primaryTablet, http.StatusOK)
+			})
+			t.Run("apply more DML", func(t *testing.T) {
+				// Looking to run a substantial amount of DML, giving vreplication
+				// more "opportunities" to throttle or to make progress.
+				ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
+				defer cancel()
+				ticker := time.NewTicker(time.Second)
+				defer ticker.Stop()
+
+				startDML := totalAppliedDML.Load()
+				for {
+					appliedDML := totalAppliedDML.Load()
+					if appliedDML-startDML >= int64(maxTableRows) {
+						// We have generated enough DMLs
+						return
+					}
+					select {
+					case <-ticker.C:
+					case <-ctx.Done():
+						require.Fail(t, "timeout waiting for applied DML")
+					}
+				}
+			})
+			t.Run("validate applied DML", func(t *testing.T) {
+				// Validate that during Online DDL, and even with throttling, we were
+				// able to produce meaningful traffic.
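+				// The workload increments totalAppliedDML only on successful statements, so the delta below measures traffic actually applied.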
+				appliedDMLEnd := totalAppliedDML.Load()
+				assert.Greater(t, appliedDMLEnd, appliedDMLStart)
+				assert.GreaterOrEqual(t, appliedDMLEnd-appliedDMLStart, int64(maxTableRows))
+				t.Logf("Applied DML: %d", appliedDMLEnd-appliedDMLStart)
+			})
+			t.Run("attempt to complete", func(t *testing.T) {
+				onlineddl.CheckCompleteMigration(t, &vtParams, shards, uuid, true)
+			})
+			isComplete := false
+			t.Run("optimistic wait for migration completion", func(t *testing.T) {
+				status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, migrationWaitTimeout, schema.OnlineDDLStatusComplete)
+				isComplete = (status == schema.OnlineDDLStatusComplete)
+				t.Logf("# Migration status (for debug purposes): <%s>", status)
+			})
+			if !isComplete {
+				t.Run("force complete cut-over", func(t *testing.T) {
+					onlineddl.CheckForceMigrationCutOver(t, &vtParams, shards, uuid, true)
+				})
+				t.Run("another optimistic wait for migration completion", func(t *testing.T) {
+					status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, migrationWaitTimeout, schema.OnlineDDLStatusComplete)
+					isComplete = (status == schema.OnlineDDLStatusComplete)
+					t.Logf("# Migration status (for debug purposes): <%s>", status)
+				})
+			}
+			if !isComplete {
+				t.Run("terminate workload", func(t *testing.T) {
+					// Seems like workload is too high and preventing migration from completing.
+					// We can't go on forever. It's nice to have normal completion under workload,
+					// but it's not strictly what this test is designed for. We terminate the
+					// workload so as to allow the migration to complete.
+					cancelWorkload()
+				})
+			}
+			t.Run("wait for migration completion", func(t *testing.T) {
+				status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, migrationWaitTimeout, schema.OnlineDDLStatusComplete)
+				t.Logf("# Migration status (for debug purposes): <%s>", status)
+				onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete)
+			})
+			t.Run("validate table schema", func(t *testing.T) {
+				checkMigratedTable(t, tableName, hint)
+			})
+
+			cancelWorkload() // Early break
+			cancel()         // Early break
+			wg.Wait()
+		})
+	})
+}
+
+func testWithInitialSchema(t *testing.T) {
+	// Create the stress table
+	err := clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, createStatement)
+	require.Nil(t, err)
+
+	// Check if table is created
+	checkTable(t, tableName)
+}
+
+// testOnlineDDLStatement runs an online DDL, ALTER statement
+func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy string, expectHint string, skipWait bool) (uuid string) {
+	row := onlineddl.VtgateExecDDL(t, &vtParams, ddlStrategy, alterStatement, "").Named().Row()
+	require.NotNil(t, row)
+	uuid = row.AsString("uuid", "")
+	uuid = strings.TrimSpace(uuid)
+	require.NotEmpty(t, uuid)
+	t.Logf("# Generated UUID (for debug purposes):")
+	t.Logf("<%s>", uuid)
+
+	strategySetting, err := schema.ParseDDLStrategy(ddlStrategy)
+	assert.NoError(t, err)
+
+	if !strategySetting.Strategy.IsDirect() && !skipWait && uuid != "" {
+		status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, migrationWaitTimeout, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed)
+		t.Logf("# Migration status (for debug purposes): <%s>", status)
+	}
+
+	if expectHint != "" {
+		checkMigratedTable(t, tableName, expectHint)
+	}
+	return uuid
+}
+
+// checkTable checks the number of tables in the first two shards.
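+// It polls each shard's first tablet until the table shows up, or gives up after a short timeout.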
+func checkTable(t *testing.T, showTableName string) {
+	for i := range clusterInstance.Keyspaces[0].Shards {
+		checkTablesCount(t, clusterInstance.Keyspaces[0].Shards[i].Vttablets[0], showTableName, 1)
+	}
+}
+
+// checkTablesCount checks the number of tables in the given tablet
+func checkTablesCount(t *testing.T, tablet *cluster.Vttablet, showTableName string, expectCount int) {
+	query := fmt.Sprintf(`show tables like '%%%s%%';`, showTableName)
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	ticker := time.NewTicker(time.Second)
+	defer ticker.Stop()
+
+	rowcount := 0
+
+	for {
+		queryResult, err := tablet.VttabletProcess.QueryTablet(query, keyspaceName, true)
+		require.Nil(t, err)
+		rowcount = len(queryResult.Rows)
+		if rowcount > 0 {
+			break
+		}
+
+		select {
+		case <-ticker.C:
+			continue // Keep looping
+		case <-ctx.Done():
+			// Break below to the assertion
+		}
+
+		break
+	}
+
+	assert.Equal(t, expectCount, rowcount)
+}
+
+// checkMigratedTables checks the CREATE STATEMENT of a table after migration
+func checkMigratedTable(t *testing.T, tableName, expectHint string) {
+	for i := range clusterInstance.Keyspaces[0].Shards {
+		createStatement := getCreateTableStatement(t, clusterInstance.Keyspaces[0].Shards[i].Vttablets[0], tableName)
+		assert.Contains(t, createStatement, expectHint)
+	}
+}
+
+// getCreateTableStatement returns the CREATE TABLE statement for a given table
+func getCreateTableStatement(t *testing.T, tablet *cluster.Vttablet, tableName string) (statement string) {
+	queryResult, err := tablet.VttabletProcess.QueryTablet(fmt.Sprintf("show create table %s;", tableName), keyspaceName, true)
+	require.Nil(t, err)
+
+	assert.Equal(t, len(queryResult.Rows), 1)
+	assert.Equal(t, len(queryResult.Rows[0]), 2) // table name, create statement
+	statement = queryResult.Rows[0][1].ToString()
+	return statement
+}
+
+func waitForReadyToComplete(t *testing.T, uuid string, expected bool) bool {
+	ctx, cancel := context.WithTimeout(context.Background(), migrationWaitTimeout)
+	defer cancel()
+
+	ticker := time.NewTicker(time.Second)
+	defer ticker.Stop()
+	for {
+		rs := onlineddl.ReadMigrations(t, &vtParams, uuid)
+		require.NotNil(t, rs)
+		for _, row := range rs.Named().Rows {
+			readyToComplete := row.AsInt64("ready_to_complete", 0)
+			if expected == (readyToComplete > 0) {
+				// all good. This is what we waited for
+				if expected {
+					// if migration is ready to complete, the timestamp should be non-null
+					assert.False(t, row["ready_to_complete_timestamp"].IsNull())
+				} else {
+					assert.True(t, row["ready_to_complete_timestamp"].IsNull())
+				}
+				return true
+			}
+		}
+		select {
+		case <-ticker.C:
+		case <-ctx.Done():
+			assert.NoError(t, ctx.Err(), "timeout waiting for ready_to_complete")
+			return false
+		}
+	}
+}
+
+func generateInsert(t *testing.T, conn *mysql.Conn) error {
+	id := rand.Int32N(int32(maxTableRows))
+	query := fmt.Sprintf(insertRowStatement, id)
+	_, err := conn.ExecuteFetch(query, 1, false)
+	if err == nil {
+		totalAppliedDML.Add(1)
+	}
+
+	return err
+}
+
+func generateUpdate(t *testing.T, conn *mysql.Conn) error {
+	id := rand.Int32N(int32(maxTableRows))
+	query := fmt.Sprintf(updateRowStatement, id)
+	_, err := conn.ExecuteFetch(query, 1, false)
+	if err == nil {
+		totalAppliedDML.Add(1)
+	}
+
+	return err
+}
+
+func generateDelete(t *testing.T, conn *mysql.Conn) error {
+	id := rand.Int32N(int32(maxTableRows))
+	query := fmt.Sprintf(deleteRowStatement, id)
+	_, err := conn.ExecuteFetch(query, 1, false)
+	if err == nil {
+		totalAppliedDML.Add(1)
+	}
+
+	return err
+}
+
+func runSingleConnection(ctx context.Context, t *testing.T, sleepInterval time.Duration) {
+	log.Infof("Running single connection")
+	conn, err := mysql.Connect(ctx, &vtParams)
+	require.Nil(t, err)
+	defer conn.Close()
+
+	_, err = conn.ExecuteFetch("set autocommit=1", 1000, true)
+	require.Nil(t, err)
+	_, err = conn.ExecuteFetch("set transaction isolation level read committed", 1000, true)
+	require.Nil(t, err)
+
+	ticker := time.NewTicker(sleepInterval)
+	defer ticker.Stop()
+
+	for {
+		if !throttleWorkload.Load() {
+			switch rand.Int32N(3) {
+			case 0:
+				err = generateInsert(t, conn)
+			case 1:
+				err = generateUpdate(t, conn)
+			case 2:
+				err = generateDelete(t, conn)
+			}
+		}
+		select {
+		case <-ctx.Done():
+			log.Infof("Terminating single connection")
+			return
+		case <-ticker.C:
+		}
+		assert.Nil(t, err)
+	}
+}
+
+func runMultipleConnections(ctx context.Context, t *testing.T) {
+	// The workload for a 16 vCPU machine is:
+	// - Concurrency of 16
+	// - 2ms interval between queries for each connection
+	// As the number of vCPUs decreases, so do we decrease concurrency, and increase intervals. For example, on a 8 vCPU machine
+	// we run concurrency of 8 and interval of 4ms. On a 4 vCPU machine we run concurrency of 4 and interval of 8ms.
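+	// Both the connection count and each connection's query rate thus scale with the number of vCPUs, adapting the generated load to the machine running the test.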
+	maxConcurrency := runtime.NumCPU()
+	sleepModifier := 16.0 / float64(maxConcurrency)
+	baseSleepInterval := 2 * time.Millisecond
+	singleConnectionSleepIntervalNanoseconds := float64(baseSleepInterval.Nanoseconds()) * sleepModifier
+	sleepInterval := time.Duration(int64(singleConnectionSleepIntervalNanoseconds))
+
+	log.Infof("Running multiple connections: maxConcurrency=%v, sleep interval=%v", maxConcurrency, sleepInterval)
+	var wg sync.WaitGroup
+	for i := 0; i < maxConcurrency; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			runSingleConnection(ctx, t, sleepInterval)
+		}()
+	}
+	wg.Wait()
+	log.Infof("Running multiple connections: done")
+}
+
+func initTable(t *testing.T) {
+	log.Infof("initTable begin")
+	defer log.Infof("initTable complete")
+
+	ctx := context.Background()
+	conn, err := mysql.Connect(ctx, &vtParams)
+	require.Nil(t, err)
+	defer conn.Close()
+
+	appliedDMLStart := totalAppliedDML.Load()
+
+	for i := 0; i < maxTableRows/2; i++ {
+		generateInsert(t, conn)
+	}
+	for i := 0; i < maxTableRows/4; i++ {
+		generateUpdate(t, conn)
+	}
+	for i := 0; i < maxTableRows/4; i++ {
+		generateDelete(t, conn)
+	}
+	appliedDMLEnd := totalAppliedDML.Load()
+	assert.Greater(t, appliedDMLEnd, appliedDMLStart)
+	assert.GreaterOrEqual(t, appliedDMLEnd-appliedDMLStart, int64(maxTableRows))
+}
+
+func throttleResponse(tablet *cluster.VttabletProcess, path string) (respBody string, err error) {
+	apiURL := fmt.Sprintf("http://%s:%d/%s", tablet.TabletHostname, tablet.Port, path)
+	resp, err := httpClient.Get(apiURL)
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+	b, err := io.ReadAll(resp.Body)
+	respBody = string(b)
+	return respBody, err
+}
+
+func throttleApp(tablet *cluster.VttabletProcess, throttlerApp throttlerapp.Name) (string, error) {
+	return throttleResponse(tablet, fmt.Sprintf("throttler/throttle-app?app=%s&duration=1h", throttlerApp.String()))
+}
+
+func unthrottleApp(tablet *cluster.VttabletProcess, throttlerApp throttlerapp.Name) (string, error) {
+	return throttleResponse(tablet, fmt.Sprintf("throttler/unthrottle-app?app=%s", throttlerApp.String()))
+}
+
+func throttlerCheck(tablet *cluster.VttabletProcess, throttlerApp throttlerapp.Name) (respBody string, statusCode int, err error) {
+	apiURL := fmt.Sprintf("http://%s:%d/throttler/check?app=%s", tablet.TabletHostname, tablet.Port, throttlerApp.String())
+	resp, err := httpClient.Get(apiURL)
+	if err != nil {
+		return "", 0, err
+	}
+	defer resp.Body.Close()
+	statusCode = resp.StatusCode
+	b, err := io.ReadAll(resp.Body)
+	respBody = string(b)
+	return respBody, statusCode, err
+}
+
+// waitForThrottleCheckStatus waits for the tablet to return the provided HTTP code in a throttle check
+func waitForThrottleCheckStatus(t *testing.T, throttlerApp throttlerapp.Name, tablet *cluster.Vttablet, wantCode int) {
+	ctx, cancel := context.WithTimeout(context.Background(), migrationWaitTimeout)
+	defer cancel()
+	ticker := time.NewTicker(time.Second)
+	defer ticker.Stop()
+
+	for {
+		respBody, statusCode, err := throttlerCheck(tablet.VttabletProcess, throttlerApp)
+		require.NoError(t, err)
+
+		if wantCode == statusCode {
+			return
+		}
+		select {
+		case <-ctx.Done():
+			assert.Equalf(t, wantCode, statusCode, "body: %s", respBody)
+			return
+		case <-ticker.C:
+		}
+	}
+}
diff --git a/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go b/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go
index d4517e67aff..e1a27dc0046 100644
--- a/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go
+++ b/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go
@@ -134,7 +134,6 @@ type revertibleTestCase struct {
 }
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitcode, err := func() (int, error) {
@@ -200,8 +199,7 @@ func TestMain(m *testing.M) {
 }
 
-func TestSchemaChange(t *testing.T) {
-	defer cluster.PanicHandler(t)
+func TestRevertSchemaChanges(t *testing.T) {
 	shards = clusterInstance.Keyspaces[0].Shards
 	require.Equal(t, 1, len(shards))
 
diff --git a/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go b/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go
index 3a502e64c23..c287794b595 100644
--- a/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go
+++ b/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go
@@ -226,7 +226,6 @@ func waitForMessage(t *testing.T, uuid string, messageSubstring string) {
 }
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitcode, err := func() (int, error) {
@@ -306,7 +305,6 @@ func TestSchemaChange(t *testing.T) {
 }
 
 func testScheduler(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	shards = clusterInstance.Keyspaces[0].Shards
 	require.Equal(t, 1, len(shards))
 
@@ -1259,7 +1257,6 @@ func testScheduler(t *testing.T) {
 }
 
 func testSingleton(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	shards = clusterInstance.Keyspaces[0].Shards
 	require.Equal(t, 1, len(shards))
 
@@ -1495,7 +1492,6 @@ DROP TABLE IF EXISTS stress_test
 	})
 }
 func testDeclarative(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	shards = clusterInstance.Keyspaces[0].Shards
 	require.Equal(t, 1, len(shards))
 
@@ -2167,7 +2163,6 @@ func testDeclarative(t *testing.T) {
 }
 
 func testForeignKeys(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	var (
 		createStatements = []string{
diff --git a/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go b/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go
index 49e72eda290..c2960cd75c0 100644
--- a/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go
+++ b/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go
@@ -156,7 +156,6 @@ const (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitcode, err := func() (int, error) {
@@ -220,8 +219,7 @@ func TestMain(m *testing.M) {
 }
 
-func TestSchemaChange(t *testing.T) {
-	defer cluster.PanicHandler(t)
+func TestVreplSchemaChanges(t *testing.T) {
 	shards = clusterInstance.Keyspaces[0].Shards
 	require.Equal(t, 2, len(shards))
 
diff --git a/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go b/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go
index 9f442a39c76..74c9d9bb63b 100644
--- a/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go
+++ b/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go
@@ -160,7 +160,6 @@ func nextOpOrder() int64 {
 }
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitcode, err := func() (int, error) {
@@ -229,8 +228,7 @@ func TestMain(m *testing.M) {
 }
 
-func TestSchemaChange(t *testing.T) {
-	defer cluster.PanicHandler(t)
+func TestVreplMiniStressSchemaChanges(t *testing.T) {
 	ctx := context.Background()
 
diff --git a/go/test/endtoend/onlineddl/vrepl_stress_suite/onlineddl_vrepl_stress_suite_test.go b/go/test/endtoend/onlineddl/vrepl_stress_suite/onlineddl_vrepl_stress_suite_test.go
index 2d9caaa6703..e7cc5d5d413 100644
--- a/go/test/endtoend/onlineddl/vrepl_stress_suite/onlineddl_vrepl_stress_suite_test.go
+++ b/go/test/endtoend/onlineddl/vrepl_stress_suite/onlineddl_vrepl_stress_suite_test.go
@@ -408,7 +408,6 @@ func mysqlParams() *mysql.ConnParams {
 }
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitcode, err := func() (int, error) {
@@ -481,8 +480,7 @@ func TestMain(m *testing.M) {
 }
 
-func TestSchemaChange(t *testing.T) {
-	defer cluster.PanicHandler(t)
+func TestVreplStressSchemaChanges(t *testing.T) {
 	shards = clusterInstance.Keyspaces[0].Shards
 	require.Equal(t, 1, len(shards))
 
diff --git a/go/test/endtoend/onlineddl/vrepl_suite/onlineddl_vrepl_suite_test.go b/go/test/endtoend/onlineddl/vrepl_suite/onlineddl_vrepl_suite_test.go
index 56818069e05..0c6ea595bc5 100644
--- a/go/test/endtoend/onlineddl/vrepl_suite/onlineddl_vrepl_suite_test.go
+++ b/go/test/endtoend/onlineddl/vrepl_suite/onlineddl_vrepl_suite_test.go
@@ -63,7 +63,6 @@ const (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitcode, err := func() (int, error) {
@@ -126,8 +125,7 @@ func TestMain(m *testing.M) {
 }
 
-func TestSchemaChange(t *testing.T) {
-	defer cluster.PanicHandler(t)
+func TestVreplSuiteSchemaChanges(t *testing.T) {
 	shards := clusterInstance.Keyspaces[0].Shards
 	require.Equal(t, 1, len(shards))
 
diff --git a/go/test/endtoend/preparestmt/main_test.go b/go/test/endtoend/preparestmt/main_test.go
index 018e9d266fd..0e067062c94 100644
--- a/go/test/endtoend/preparestmt/main_test.go
+++ b/go/test/endtoend/preparestmt/main_test.go
@@ -162,7 +162,6 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitcode, err := func() (int, error) {
diff --git a/go/test/endtoend/preparestmt/stmt_methods_test.go b/go/test/endtoend/preparestmt/stmt_methods_test.go
index 24fb58bff81..5768c6eec7a 100644
--- a/go/test/endtoend/preparestmt/stmt_methods_test.go
+++ b/go/test/endtoend/preparestmt/stmt_methods_test.go
@@ -27,20 +27,16 @@ import (
 	"github.com/icrowley/fake"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-
-	"vitess.io/vitess/go/test/endtoend/cluster"
 )
 
 // TestSelect simple select the data without any condition.
 func TestSelect(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	dbo := Connect(t)
 	defer dbo.Close()
 	selectWhere(t, dbo, "")
 }
 
 func TestSelectDatabase(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	dbo := Connect(t)
 	defer dbo.Close()
 	prepare, err := dbo.Prepare("select database()")
@@ -58,7 +54,6 @@ func TestSelectDatabase(t *testing.T) {
 
 // TestInsertUpdateDelete validates all insert, update and
 // delete method on prepared statements.
 func TestInsertUpdateDelete(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	dbo := Connect(t)
 	defer dbo.Close()
 	// prepare insert statement
@@ -134,7 +129,6 @@ func testReplica(t *testing.T) {
 
 // testcount validates inserted rows count with expected count.
 func testcount(t *testing.T, dbo *sql.DB, except int) {
-	defer cluster.PanicHandler(t)
 	r, err := dbo.Query("SELECT count(1) FROM " + tableName)
 	require.Nil(t, err)
 
@@ -148,7 +142,6 @@ func testcount(t *testing.T, dbo *sql.DB, except int) {
 
 // TestAutoIncColumns test insertion of row without passing
 // the value of auto increment columns (here it is id).
 func TestAutoIncColumns(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	dbo := Connect(t)
 	defer dbo.Close()
 	// insert a row without id
@@ -227,7 +220,6 @@ func reconnectAndTest(t *testing.T) {
 
 // TestColumnParameter query database using column
 // parameter.
 func TestColumnParameter(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	dbo := Connect(t)
 	defer dbo.Close()
 
@@ -267,7 +259,6 @@ func TestColumnParameter(t *testing.T) {
 
 // TestWrongTableName query database using invalid
 // tablename and validate error.
 func TestWrongTableName(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	dbo := Connect(t)
 	defer dbo.Close()
 	execWithError(t, dbo, []uint16{1146}, "select * from teseting_table;")
@@ -319,7 +310,6 @@ func getStringToString(x sql.NullString) string {
 }
 
 func TestSelectDBA(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	dbo := Connect(t)
 	defer dbo.Close()
 
@@ -381,7 +371,6 @@ func TestSelectDBA(t *testing.T) {
 }
 
 func TestSelectLock(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	dbo := Connect(t)
 	defer dbo.Close()
 
@@ -417,7 +406,6 @@ func TestSelectLock(t *testing.T) {
 }
 
 func TestShowColumns(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	dbo := Connect(t)
 	defer dbo.Close()
 
@@ -438,7 +426,6 @@ func TestShowColumns(t *testing.T) {
 }
 
 func TestBinaryColumn(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	dbo := Connect(t, "interpolateParams=false")
 	defer dbo.Close()
diff --git a/go/test/endtoend/recovery/pitr/shardedpitr_test.go b/go/test/endtoend/recovery/pitr/shardedpitr_test.go
index 7f70a926be3..953f4480964 100644
--- a/go/test/endtoend/recovery/pitr/shardedpitr_test.go
+++ b/go/test/endtoend/recovery/pitr/shardedpitr_test.go
@@ -123,7 +123,6 @@ var (
 // - asserting that restoring to restoreTime2 (going from 2 shards to 2 shards with past time) is working, it will assert for both shards
 // - asserting that restoring to restoreTime3 is working, we should get complete data after restoring, as we have in existing shards.
 func TestPITRRecovery(t *testing.T) {
-	defer cluster.PanicHandler(nil)
 	initializeCluster(t)
 	defer clusterInstance.Teardown()
 
@@ -522,7 +521,6 @@ func launchRecoveryTablet(t *testing.T, tablet *cluster.Vttablet, binlogServer *
 	tablet.MysqlctlProcess = *mysqlctlProcess
 	extraArgs := []string{"--db-credentials-file", dbCredentialFile}
 	tablet.MysqlctlProcess.InitDBFile = initDBFileWithPassword
-	tablet.VttabletProcess.DbPassword = mysqlPassword
 	tablet.MysqlctlProcess.ExtraArgs = extraArgs
 	err = tablet.MysqlctlProcess.Start()
 	require.NoError(t, err)
@@ -542,6 +540,7 @@ func launchRecoveryTablet(t *testing.T, tablet *cluster.Vttablet, binlogServer *
 		clusterInstance.VtTabletExtraArgs, clusterInstance.DefaultCharset)
 	tablet.Alias = tablet.VttabletProcess.TabletPath
+	tablet.VttabletProcess.DbPassword = mysqlPassword
 	tablet.VttabletProcess.SupportsBackup = true
 	tablet.VttabletProcess.Keyspace = restoreKeyspaceName
 	tablet.VttabletProcess.ExtraArgs = []string{
diff --git a/go/test/endtoend/recovery/unshardedrecovery/recovery.go b/go/test/endtoend/recovery/unshardedrecovery/recovery.go
index 8966e66ed47..d199b9803dd 100644
--- a/go/test/endtoend/recovery/unshardedrecovery/recovery.go
+++ b/go/test/endtoend/recovery/unshardedrecovery/recovery.go
@@ -72,7 +72,6 @@ var (
 
 // TestMainImpl creates cluster for unsharded recovery testing.
 func TestMainImpl(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitCode, err := func() (int, error) {
@@ -201,7 +200,6 @@ func TestMainImpl(m *testing.M) {
 //
 // 7. check that vtgate queries work correctly
 func TestRecoveryImpl(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	defer tabletsTeardown()
 	verifyInitialReplication(t)
 
diff --git a/go/test/endtoend/reparent/emergencyreparent/ers_test.go b/go/test/endtoend/reparent/emergencyreparent/ers_test.go
index 819113c9fd8..b320c35ea99 100644
--- a/go/test/endtoend/reparent/emergencyreparent/ers_test.go
+++ b/go/test/endtoend/reparent/emergencyreparent/ers_test.go
@@ -30,7 +30,6 @@ import (
 )
 
 func TestTrivialERS(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -55,7 +54,6 @@ func TestTrivialERS(t *testing.T) {
 }
 
 func TestReparentIgnoreReplicas(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -97,7 +95,6 @@ func TestReparentIgnoreReplicas(t *testing.T) {
 }
 
 func TestReparentDownPrimary(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -133,7 +130,6 @@ func TestReparentDownPrimary(t *testing.T) {
 }
 
 func TestReparentNoChoiceDownPrimary(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -169,7 +165,6 @@ func TestReparentNoChoiceDownPrimary(t *testing.T) {
 
 func TestSemiSyncSetupCorrectly(t *testing.T) {
 	t.Run("semi-sync enabled", func(t *testing.T) {
-		defer cluster.PanicHandler(t)
 		clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 		defer utils.TeardownCluster(clusterInstance)
 		tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -197,7 +192,6 @@ func TestSemiSyncSetupCorrectly(t *testing.T) {
 	})
 
 	t.Run("semi-sync disabled", func(t *testing.T) {
-		defer cluster.PanicHandler(t)
 		clusterInstance := utils.SetupReparentCluster(t, "none")
 		defer utils.TeardownCluster(clusterInstance)
 		tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -227,7 +221,6 @@ func TestSemiSyncSetupCorrectly(t *testing.T) {
 
 // TestERSPromoteRdonly tests that we never end up promoting a rdonly instance as the primary
 func TestERSPromoteRdonly(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -255,7 +248,6 @@ func TestERSPromoteRdonly(t *testing.T) {
 
 // TestERSPreventCrossCellPromotion tests that we promote a replica in the same cell as the previous primary if prevent cross cell promotion flag is set
 func TestERSPreventCrossCellPromotion(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -278,7 +270,6 @@ func TestERSPreventCrossCellPromotion(t *testing.T) {
 
 // TestPullFromRdonly tests that if a rdonly tablet is the most advanced, then our promoted primary should have
 // caught up to it by pulling transactions from it
 func TestPullFromRdonly(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -350,7 +341,6 @@ func TestPullFromRdonly(t *testing.T) {
 
 // TestNoReplicationStatusAndIOThreadStopped checks that ERS is able to fix
 // replicas which do not have any replication status and also succeeds if the io thread
 // is stopped on the primary elect.
 func TestNoReplicationStatusAndIOThreadStopped(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -450,7 +440,6 @@ func TestERSForInitialization(t *testing.T) {
 }
 
 func TestRecoverWithMultipleFailures(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -478,7 +467,6 @@ func TestRecoverWithMultipleFailures(t *testing.T) {
 
 // TestERSFailFast tests that ERS will fail fast if it cannot find any tablet which can be safely promoted instead of promoting
 // a tablet and hanging while inserting a row in the reparent journal on getting semi-sync ACKs
 func TestERSFailFast(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -518,7 +506,6 @@ func TestERSFailFast(t *testing.T) {
 
 // TestReplicationStopped checks that ERS ignores the tablets that have sql thread stopped.
 // If there are more than 1, we also fail.
func TestReplicationStopped(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
diff --git a/go/test/endtoend/reparent/newfeaturetest/reparent_test.go b/go/test/endtoend/reparent/newfeaturetest/reparent_test.go
index 92b1be7b3f7..37fa3281e24 100644
--- a/go/test/endtoend/reparent/newfeaturetest/reparent_test.go
+++ b/go/test/endtoend/reparent/newfeaturetest/reparent_test.go
@@ -36,7 +36,6 @@ import (
 // The test takes down the vttablets of the primary and a rdonly tablet and runs ERS with the
 // default values of remote_operation_timeout, lock-timeout flags and wait_replicas_timeout subflag.
 func TestRecoverWithMultipleVttabletFailures(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -68,7 +67,6 @@ func TestRecoverWithMultipleVttabletFailures(t *testing.T) {
 // and ERS succeeds.
 func TestSingleReplicaERS(t *testing.T) {
 	// Set up a cluster with none durability policy
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "none")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -104,7 +102,6 @@ func TestSingleReplicaERS(t *testing.T) {
 
 // TestTabletRestart tests that a running tablet can be restarted and everything is still fine
 func TestTabletRestart(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -117,7 +114,6 @@ func TestTabletRestart(t *testing.T) {
 
 // Tests ensures that ChangeTabletType works even when semi-sync plugins are not loaded.
 func TestChangeTypeWithoutSemiSync(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "none")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -160,9 +156,33 @@ func TestChangeTypeWithoutSemiSync(t *testing.T) {
 	require.NoError(t, err)
 }
 
+// TestERSWithWriteInPromoteReplica tests that ERS doesn't fail even if there is a
+// write that happens when PromoteReplica is called.
+func TestERSWithWriteInPromoteReplica(t *testing.T) {
+	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
+	defer utils.TeardownCluster(clusterInstance)
+	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
+	utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]})
+
+	// Drop a table so that when sidecardb changes are checked, we run a DML query.
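+	// The writes are kept out of the binlog (sql_log_bin=0) so the DROP is not
+	// replicated to the other tablets, and super_read_only is lifted first since
+	// tablets[3] is serving as a replica.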
+	utils.RunSQLs(context.Background(), t, []string{
+		"set sql_log_bin=0",
+		`SET @@global.super_read_only=0`,
+		`DROP TABLE _vt.heartbeat`,
+		"set sql_log_bin=1",
+	}, tablets[3])
+	_, err := utils.Ers(clusterInstance, tablets[3], "60s", "30s")
+	require.NoError(t, err, "ERS should not fail even if there is a sidecardb change")
+}
+
 func TestBufferingWithMultipleDisruptions(t *testing.T) {
-	defer cluster.PanicHandler(t)
-	clusterInstance := utils.SetupShardedReparentCluster(t)
+	clusterInstance := utils.SetupShardedReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 
 	// Stop all VTOrc instances, so that they don't interfere with the test.
diff --git a/go/test/endtoend/reparent/plannedreparent/reparent_range_based_test.go b/go/test/endtoend/reparent/plannedreparent/reparent_range_based_test.go
index 2d89893569d..91471b1cebb 100644
--- a/go/test/endtoend/reparent/plannedreparent/reparent_range_based_test.go
+++ b/go/test/endtoend/reparent/plannedreparent/reparent_range_based_test.go
@@ -28,7 +28,6 @@ import (
 )
 
 func TestReparentGracefulRangeBased(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	ctx := context.Background()
 
 	utils.ShardName = "0000000000000000-ffffffffffffffff"
diff --git a/go/test/endtoend/reparent/plannedreparent/reparent_test.go b/go/test/endtoend/reparent/plannedreparent/reparent_test.go
index 38e872f0f2b..cc49175e84e 100644
--- a/go/test/endtoend/reparent/plannedreparent/reparent_test.go
+++ b/go/test/endtoend/reparent/plannedreparent/reparent_test.go
@@ -36,7 +36,6 @@ import (
 )
 
 func TestPrimaryToSpareStateChangeImpossible(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -48,7 +47,6 @@ func TestPrimaryToSpareStateChangeImpossible(t *testing.T) {
 }
 
 func TestReparentCrossCell(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -62,7 +60,6 @@ func TestReparentCrossCell(t *testing.T) {
 }
 
 func TestReparentGraceful(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -85,7 +82,6 @@ func TestReparentGraceful(t *testing.T) {
 
 // TestPRSWithDrainedLaggingTablet tests that PRS succeeds even if we have a lagging drained tablet
 func TestPRSWithDrainedLaggingTablet(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -112,7 +108,6 @@ func TestPRSWithDrainedLaggingTablet(t *testing.T) {
 }
 
 func TestReparentReplicaOffline(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -136,7 +131,6 @@ func TestReparentReplicaOffline(t *testing.T) {
 }
 
 func TestReparentAvoid(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
 	defer utils.TeardownCluster(clusterInstance)
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -172,14 +166,12 @@ func TestReparentAvoid(t *testing.T) {
 }
 
 func TestReparentFromOutside(t *testing.T) {
-
defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) reparentFromOutside(t, clusterInstance, false) } func TestReparentFromOutsideWithNoPrimary(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -275,7 +267,6 @@ func reparentFromOutside(t *testing.T, clusterInstance *cluster.LocalProcessClus } func TestReparentWithDownReplica(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -325,7 +316,6 @@ func TestReparentWithDownReplica(t *testing.T) { } func TestChangeTypeSemiSync(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -392,7 +382,6 @@ func TestChangeTypeSemiSync(t *testing.T) { // 1. When PRS is run with the cross_cell durability policy setup, then the semi-sync settings on all the tablets are as expected // 2. Bringing up a new vttablet should have its replication and semi-sync setup correctly without any manual intervention func TestCrossCellDurability(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "cross_cell") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets @@ -432,7 +421,6 @@ func TestCrossCellDurability(t *testing.T) { // TestFullStatus tests that the RPC FullStatus works as intended. func TestFullStatus(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance := utils.SetupReparentCluster(t, "semi_sync") defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets diff --git a/go/test/endtoend/reparent/prscomplex/main_test.go b/go/test/endtoend/reparent/prscomplex/main_test.go index 88e3d6c09fa..c2dafb8589f 100644 --- a/go/test/endtoend/reparent/prscomplex/main_test.go +++ b/go/test/endtoend/reparent/prscomplex/main_test.go @@ -44,7 +44,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/reparent/prssettingspool/main_test.go b/go/test/endtoend/reparent/prssettingspool/main_test.go index a9f4312caea..b85925cd85e 100644 --- a/go/test/endtoend/reparent/prssettingspool/main_test.go +++ b/go/test/endtoend/reparent/prssettingspool/main_test.go @@ -43,7 +43,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/reparent/semisync/semi_sync_test.go b/go/test/endtoend/reparent/semisync/semi_sync_test.go new file mode 100644 index 00000000000..df9bf192e65 --- /dev/null +++ b/go/test/endtoend/reparent/semisync/semi_sync_test.go @@ -0,0 +1,101 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package semisync
+
+import (
+	"context"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"vitess.io/vitess/go/mysql"
+	"vitess.io/vitess/go/test/endtoend/cluster"
+	"vitess.io/vitess/go/test/endtoend/reparent/utils"
+)
+
+func TestSemiSyncUpgradeDowngrade(t *testing.T) {
+	ver, err := cluster.GetMajorVersion("vtgate")
+	require.NoError(t, err)
+	if ver != 21 {
+		t.Skip("We only want to run this test for v21 release")
+	}
+	clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
+	defer utils.TeardownCluster(clusterInstance)
+	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
+
+	// Verify that replication is running as intended.
+	utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]})
+
+	replica := tablets[1]
+	// Verify we are using the correct vttablet version.
+	verifyVttabletVersion(t, replica, 21)
+	// Check the plugin loaded in vttablet.
+	require.EqualValues(t, mysql.SemiSyncTypeSource, semiSyncExtensionLoaded(t, replica))
+
+	t.Run("Downgrade to previous release", func(t *testing.T) {
+		// change vttablet binary and downgrade it.
+		changeVttabletBinary(t, replica, "vttabletold")
+		// Verify we are using the older vttablet version.
+		verifyVttabletVersion(t, replica, 20)
+		// Verify that replication is running as intended.
+		utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]})
+		// Check the plugin loaded in vttablet.
+		require.EqualValues(t, mysql.SemiSyncTypeSource, semiSyncExtensionLoaded(t, replica))
+	})
+
+	t.Run("Upgrade to current release", func(t *testing.T) {
+		// change vttablet binary and upgrade it.
+		changeVttabletBinary(t, replica, "vttablet")
+		// Verify we are using the current vttablet version.
+		verifyVttabletVersion(t, replica, 21)
+		// Verify that replication is running as intended.
+		utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]})
+		// Check the plugin loaded in vttablet.
+		require.EqualValues(t, mysql.SemiSyncTypeSource, semiSyncExtensionLoaded(t, replica))
+	})
+}
+
+// semiSyncExtensionLoaded checks if the semisync extension has been loaded.
+// It should work for both MariaDB and MySQL.
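+// Newer MySQL versions expose the plugin through the rpl_semi_sync_source_*
+// variables, while older MySQL releases and MariaDB use rpl_semi_sync_master_*;
+// if neither variable is present, the extension is treated as not loaded.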
+func semiSyncExtensionLoaded(t *testing.T, replica *cluster.Vttablet) mysql.SemiSyncType {
+	qr := utils.RunSQL(context.Background(), t, `SHOW VARIABLES LIKE 'rpl_semi_sync_%_enabled'`, replica)
+	for _, row := range qr.Rows {
+		if row[0].ToString() == "rpl_semi_sync_source_enabled" {
+			return mysql.SemiSyncTypeSource
+		}
+		if row[0].ToString() == "rpl_semi_sync_master_enabled" {
+			return mysql.SemiSyncTypeMaster
+		}
+	}
+	return mysql.SemiSyncTypeOff
+}
+
+func changeVttabletBinary(t *testing.T, replica *cluster.Vttablet, binary string) {
+	t.Helper()
+	err := replica.VttabletProcess.TearDown()
+	require.NoError(t, err)
+	replica.VttabletProcess.Binary = binary
+	err = replica.VttabletProcess.Setup()
+	require.NoError(t, err)
+}
+
+func verifyVttabletVersion(t *testing.T, replica *cluster.Vttablet, version int) {
+	t.Helper()
+	verGot, err := cluster.GetMajorVersion(replica.VttabletProcess.Binary)
+	require.NoError(t, err)
+	require.EqualValues(t, version, verGot)
+}
diff --git a/go/test/endtoend/schemadiff/vrepl/schemadiff_vrepl_suite_test.go b/go/test/endtoend/schemadiff/vrepl/schemadiff_vrepl_suite_test.go
index 79c3b62ebea..6bb2baa4119 100644
--- a/go/test/endtoend/schemadiff/vrepl/schemadiff_vrepl_suite_test.go
+++ b/go/test/endtoend/schemadiff/vrepl/schemadiff_vrepl_suite_test.go
@@ -69,7 +69,6 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitcode, err := func() (int, error) {
@@ -132,8 +131,12 @@ func TestMain(m *testing.M) {
 }
 
-func TestSchemaChange(t *testing.T) {
-	defer cluster.PanicHandler(t)
+func TestSchemadiffSchemaChanges(t *testing.T) {
 	shards := clusterInstance.Keyspaces[0].Shards
 	require.Equal(t, 1, len(shards))
 
@@ -276,7 +279,6 @@ func testSingle(t *testing.T, testName string) {
 }
 
 // func TestRandomSchemaChanges(t *testing.T) {
-// 	defer cluster.PanicHandler(t)
 // 	hints := &schemadiff.DiffHints{AutoIncrementStrategy: schemadiff.AutoIncrementIgnore}
 //
 // 	count := 20
diff --git a/go/test/endtoend/sharded/sharded_keyspace_test.go b/go/test/endtoend/sharded/sharded_keyspace_test.go
index 857dc455206..ea50f7c80f0 100644
--- a/go/test/endtoend/sharded/sharded_keyspace_test.go
+++ b/go/test/endtoend/sharded/sharded_keyspace_test.go
@@ -73,7 +73,6 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitcode, err := func() (int, error) {
@@ -102,7 +101,6 @@ func TestMain(m *testing.M) {
 }
 
 func TestShardedKeyspace(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	shard1 := clusterInstance.Keyspaces[0].Shards[0]
 	shard2 := clusterInstance.Keyspaces[0].Shards[1]
diff --git a/go/test/endtoend/stress/stress_test.go b/go/test/endtoend/stress/stress_test.go
index 30a5ee69c1a..1bf716274d4 100644
--- a/go/test/endtoend/stress/stress_test.go
+++ b/go/test/endtoend/stress/stress_test.go
@@ -42,7 +42,6 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitCode := func() int {
@@ -84,7 +83,6 @@ func TestMain(m *testing.M) {
 
 // The stressor is started on its own goroutine while the end-to-end test
 // is executed on the same cluster.
func TestSimpleStressTest(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	cfg := stress.DefaultConfig
 	cfg.ConnParams = &vtParams
diff --git a/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go b/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go
index 979f33a11be..01dbe17b855 100644
--- a/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go
+++ b/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go
@@ -272,7 +272,6 @@ type BufferingTest struct {
 }
 
 func (bt *BufferingTest) Test(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	clusterInstance, exitCode := bt.createCluster()
 	if exitCode != 0 {
 		t.Fatal("failed to start cluster")
diff --git a/go/test/endtoend/tabletgateway/main_test.go b/go/test/endtoend/tabletgateway/main_test.go
index da4fe711f64..441e9d8f9bb 100644
--- a/go/test/endtoend/tabletgateway/main_test.go
+++ b/go/test/endtoend/tabletgateway/main_test.go
@@ -59,7 +59,6 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitCode := func() int {
diff --git a/go/test/endtoend/tabletgateway/vtgate_test.go b/go/test/endtoend/tabletgateway/vtgate_test.go
index c48aa6c2131..43648e49268 100644
--- a/go/test/endtoend/tabletgateway/vtgate_test.go
+++ b/go/test/endtoend/tabletgateway/vtgate_test.go
@@ -39,7 +39,6 @@ import (
 )
 
 func TestVtgateHealthCheck(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	// Healthcheck interval on tablet is set to 1s, so sleep for 2s
 	time.Sleep(2 * time.Second)
 	verifyVtgateVariables(t, clusterInstance.VtgateProcess.VerifyURL)
@@ -53,7 +52,6 @@ func TestVtgateHealthCheck(t *testing.T) {
 }
 
 func TestVtgateReplicationStatusCheck(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	// Healthcheck interval on tablet is set to 1s, so sleep for 2s
 	time.Sleep(2 * time.Second)
 	verifyVtgateVariables(t, clusterInstance.VtgateProcess.VerifyURL)
@@ -102,7 +100,6 @@ func TestVtgateReplicationStatusCheck(t *testing.T) {
 }
 
 func TestVtgateReplicationStatusCheckWithTabletTypeChange(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	// Healthcheck interval on tablet is set to 1s, so sleep for 2s
 	time.Sleep(2 * time.Second)
 	verifyVtgateVariables(t, clusterInstance.VtgateProcess.VerifyURL)
@@ -178,7 +175,6 @@ func retryNTimes(t *testing.T, maxRetries int, f func() bool) {
 
 func TestReplicaTransactions(t *testing.T) {
 	// TODO(deepthi): this test seems to depend on previous test. Fix tearDown so that tests are independent
-	defer cluster.PanicHandler(t)
 	// Healthcheck interval on tablet is set to 1s, so sleep for 2s
 	time.Sleep(2 * time.Second)
 	ctx := context.Background()
@@ -283,6 +279,46 @@ func TestReplicaTransactions(t *testing.T) {
 	assert.Equal(t, `[[INT64(1) VARCHAR("email1")] [INT64(2) VARCHAR("email2")]]`, fmt.Sprintf("%v", qr4.Rows), "we are not able to reconnect after restart")
 }
 
+// TestStreamingRPCStuck tests that StreamExecute calls don't get stuck on the vttablets if a client stops reading from a stream.
+func TestStreamingRPCStuck(t *testing.T) {
+	ctx := context.Background()
+	vtConn, err := mysql.Connect(ctx, &vtParams)
+	require.NoError(t, err)
+	defer vtConn.Close()
+
+	// We want the table to have enough rows such that a streaming call returns multiple packets.
+	// Therefore, we insert one row and keep doubling it.
+	utils.Exec(t, vtConn, "insert into customer(email) values('testemail')")
+	for i := 0; i < 15; i++ {
+		// Double the number of rows in customer table.
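+		// Starting from the single seeded row, 15 doublings leave 2^15 = 32768 rows,
+		// which is more than enough to spread the streaming result over several packets.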
+		utils.Exec(t, vtConn, "insert into customer (email) select email from customer")
+	}
+
+	// Connect to vtgate and run a streaming query.
+	vtgateConn, err := cluster.DialVTGate(ctx, t.Name(), vtgateGrpcAddress, "test_user", "")
+	require.NoError(t, err)
+	stream, err := vtgateConn.Session("", &querypb.ExecuteOptions{}).StreamExecute(ctx, "select * from customer", map[string]*querypb.BindVariable{})
+	require.NoError(t, err)
+
+	// We read packets until we see the first set of results. This ensures that the stream is working.
+	for {
+		res, err := stream.Recv()
+		require.NoError(t, err)
+		if res != nil && len(res.Rows) > 0 {
+			// breaking here stops reading from the stream.
+			break
+		}
+	}
+
+	// We simulate a misbehaving client that doesn't read from the stream anymore.
+	// This however shouldn't block PlannedReparentShard calls.
+	err = clusterInstance.VtctldClientProcess.PlannedReparentShard(keyspaceName, "0", clusterInstance.Keyspaces[0].Shards[0].Vttablets[1].Alias)
+	require.NoError(t, err)
+}
+
 func getMapFromJSON(JSON map[string]any, key string) map[string]any {
 	result := make(map[string]any)
 	object := reflect.ValueOf(JSON[key])
diff --git a/go/test/endtoend/tabletmanager/commands_test.go b/go/test/endtoend/tabletmanager/commands_test.go
index 1a2d2424cb4..24d04bdd5a0 100644
--- a/go/test/endtoend/tabletmanager/commands_test.go
+++ b/go/test/endtoend/tabletmanager/commands_test.go
@@ -32,7 +32,13 @@ import (
 	"github.com/stretchr/testify/assert"
 
 	"vitess.io/vitess/go/mysql"
-	"vitess.io/vitess/go/test/endtoend/cluster"
+	"vitess.io/vitess/go/test/endtoend/utils"
+
+	vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
 )
 
 var (
@@ -44,7 +50,6 @@ var (
 
 // TabletCommands tests the basic tablet commands
 func TestTabletCommands(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	ctx := context.Background()
 
 	conn, err := mysql.Connect(ctx, &primaryTabletParams)
@@ -155,7 +160,6 @@ func TestActionAndTimeout(t *testing.T) {
 
 func TestHook(t *testing.T) {
 	// test a regular program works
-	defer cluster.PanicHandler(t)
 	runHookAndAssert(t, []string{
 		"ExecuteHook", "--", primaryTablet.Alias, "test.sh", "--flag1", "--param1=hello"}, "0", false, "")
 
@@ -201,8 +205,12 @@ func runHookAndAssert(t *testing.T, params []string, expectedStatus string, expe
 
 func TestShardReplicationFix(t *testing.T) {
 	// make sure the replica is in the replication graph, 2 nodes: 1 primary, 1 replica
-	defer cluster.PanicHandler(t)
-	result, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetShardReplication", cell, keyspaceShard)
+	result, err := clusterInstance.VtctldClientProcess.GetShardReplication(keyspaceName, shardName, cell)
 	require.Nil(t, err, "error should be Nil")
 	assertNodeCount(t, result, int(3))
 
@@ -222,7 +230,6 @@ func TestShardReplicationFix(t *testing.T) {
 }
 
 func TestGetSchema(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	res, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetSchema", "--",
 		"--include-views", "--tables", "t1,v1",
diff --git a/go/test/endtoend/tabletmanager/custom_rule_topo_test.go b/go/test/endtoend/tabletmanager/custom_rule_topo_test.go
index aa09a99e0fe..c7de38a5fca 100644
--- a/go/test/endtoend/tabletmanager/custom_rule_topo_test.go
+++ b/go/test/endtoend/tabletmanager/custom_rule_topo_test.go
@@ -33,7 +33,6 @@ import (
 
 func
TestTopoCustomRule(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &primaryTabletParams) require.NoError(t, err) diff --git a/go/test/endtoend/tabletmanager/lock_unlock_test.go b/go/test/endtoend/tabletmanager/lock_unlock_test.go index 79286438698..f636f52c353 100644 --- a/go/test/endtoend/tabletmanager/lock_unlock_test.go +++ b/go/test/endtoend/tabletmanager/lock_unlock_test.go @@ -30,12 +30,10 @@ import ( "github.com/stretchr/testify/assert" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" ) // TestLockAndUnlock tests the lock ability by locking a replica and asserting it does not see changes func TestLockAndUnlock(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &primaryTabletParams) @@ -76,7 +74,6 @@ func TestLockAndUnlock(t *testing.T) { // TestStartReplicationUntilAfter tests by writing three rows, noting the gtid after each, and then replaying them one by one func TestStartReplicationUntilAfter(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &primaryTabletParams) @@ -130,7 +127,6 @@ func TestStartReplicationUntilAfter(t *testing.T) { // TestLockAndTimeout tests that the lock times out and updates can be seen after timeout func TestLockAndTimeout(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() primaryConn, err := mysql.Connect(ctx, &primaryTabletParams) diff --git a/go/test/endtoend/tabletmanager/main_test.go b/go/test/endtoend/tabletmanager/main_test.go index 1d5992bd839..9447eb0dab9 100644 --- a/go/test/endtoend/tabletmanager/main_test.go +++ b/go/test/endtoend/tabletmanager/main_test.go @@ -79,7 +79,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/tabletmanager/primary/tablet_test.go b/go/test/endtoend/tabletmanager/primary/tablet_test.go index f6255b1f71a..00ddac2259b 100644 --- a/go/test/endtoend/tabletmanager/primary/tablet_test.go +++ b/go/test/endtoend/tabletmanager/primary/tablet_test.go @@ -70,7 +70,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -117,7 +116,6 @@ func TestMain(m *testing.M) { } func TestRepeatedInitShardPrimary(t *testing.T) { - defer cluster.PanicHandler(t) // Test that using InitShardPrimary can go back and forth between 2 hosts. // Make replica tablet as primary @@ -156,7 +154,6 @@ func TestRepeatedInitShardPrimary(t *testing.T) { } func TestPrimaryRestartSetsPTSTimestamp(t *testing.T) { - defer cluster.PanicHandler(t) // Test that PTS timestamp is set when we restart the PRIMARY vttablet. // PTS = PrimaryTermStart. // See StreamHealthResponse.primary_term_start_timestamp for details. 
diff --git a/go/test/endtoend/tabletmanager/qps_test.go b/go/test/endtoend/tabletmanager/qps_test.go index 0611feada12..0ce41f04a63 100644 --- a/go/test/endtoend/tabletmanager/qps_test.go +++ b/go/test/endtoend/tabletmanager/qps_test.go @@ -24,12 +24,10 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) func TestQPS(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ diff --git a/go/test/endtoend/tabletmanager/replication_manager/tablet_test.go b/go/test/endtoend/tabletmanager/replication_manager/tablet_test.go index 86b02244762..6620657c264 100644 --- a/go/test/endtoend/tabletmanager/replication_manager/tablet_test.go +++ b/go/test/endtoend/tabletmanager/replication_manager/tablet_test.go @@ -73,7 +73,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/tabletmanager/tablegc/tablegc_test.go b/go/test/endtoend/tabletmanager/tablegc/tablegc_test.go index cfe961c4558..66a04fc3baf 100644 --- a/go/test/endtoend/tabletmanager/tablegc/tablegc_test.go +++ b/go/test/endtoend/tabletmanager/tablegc/tablegc_test.go @@ -83,7 +83,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/tabletmanager/tablet_health_test.go b/go/test/endtoend/tabletmanager/tablet_health_test.go index 7dc4bcd97d2..49f4df956a1 100644 --- a/go/test/endtoend/tabletmanager/tablet_health_test.go +++ b/go/test/endtoend/tabletmanager/tablet_health_test.go @@ -40,7 +40,6 @@ import ( // TabletReshuffle test if a vttablet can be pointed at an existing mysql func TestTabletReshuffle(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &primaryTabletParams) @@ -93,7 +92,6 @@ func TestTabletReshuffle(t *testing.T) { func TestHealthCheck(t *testing.T) { // Add one replica that starts not initialized - defer cluster.PanicHandler(t) ctx := context.Background() clusterInstance.DisableVTOrcRecoveries(t) defer clusterInstance.EnableVTOrcRecoveries(t) @@ -201,7 +199,6 @@ func TestHealthCheck(t *testing.T) { // TestHealthCheckSchemaChangeSignal tests the tables and views, which report their schemas have changed in the output of a StreamHealth. 
func TestHealthCheckSchemaChangeSignal(t *testing.T) { // Add one replica that starts not initialized - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := clusterInstance.GetVTParams(keyspaceName) @@ -386,7 +383,6 @@ func TestHealthCheckDrainedStateDoesNotShutdownQueryService(t *testing.T) { // - the query service won't be shutdown // Wait if tablet is not in service state - defer cluster.PanicHandler(t) clusterInstance.DisableVTOrcRecoveries(t) defer clusterInstance.EnableVTOrcRecoveries(t) err := rdonlyTablet.VttabletProcess.WaitForTabletStatus("SERVING") diff --git a/go/test/endtoend/tabletmanager/tablet_security_policy_test.go b/go/test/endtoend/tabletmanager/tablet_security_policy_test.go index b3b11405abb..90397a737ef 100644 --- a/go/test/endtoend/tabletmanager/tablet_security_policy_test.go +++ b/go/test/endtoend/tabletmanager/tablet_security_policy_test.go @@ -29,7 +29,6 @@ import ( ) func TestFallbackSecurityPolicy(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() mTablet := clusterInstance.NewVttabletInstance("replica", 0, "") @@ -84,7 +83,6 @@ func assertAllowedURLTest(t *testing.T, url string) { } func TestDenyAllSecurityPolicy(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() mTablet := clusterInstance.NewVttabletInstance("replica", 0, "") @@ -116,7 +114,6 @@ func TestDenyAllSecurityPolicy(t *testing.T) { } func TestReadOnlySecurityPolicy(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() mTablet := clusterInstance.NewVttabletInstance("replica", 0, "") diff --git a/go/test/endtoend/tabletmanager/tablet_test.go b/go/test/endtoend/tabletmanager/tablet_test.go index 4fe5a70d125..189d7e7aef8 100644 --- a/go/test/endtoend/tabletmanager/tablet_test.go +++ b/go/test/endtoend/tabletmanager/tablet_test.go @@ -30,7 +30,6 @@ import ( // TestEnsureDB tests that vttablet creates the db as needed func TestEnsureDB(t *testing.T) { - defer cluster.PanicHandler(t) // Create new tablet tablet := clusterInstance.NewVttabletInstance("replica", 0, "") @@ -66,7 +65,6 @@ func TestEnsureDB(t *testing.T) { // TestResetReplicationParameters tests that the RPC ResetReplicationParameters works as intended. func TestResetReplicationParameters(t *testing.T) { - defer cluster.PanicHandler(t) // Create new tablet tablet := clusterInstance.NewVttabletInstance("replica", 0, "") diff --git a/go/test/endtoend/tabletmanager/throttler_topo/throttler_test.go b/go/test/endtoend/tabletmanager/throttler_topo/throttler_test.go index 7c0f05bdcc2..6a66113ea8d 100644 --- a/go/test/endtoend/tabletmanager/throttler_topo/throttler_test.go +++ b/go/test/endtoend/tabletmanager/throttler_topo/throttler_test.go @@ -96,7 +96,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -241,7 +240,6 @@ func vtgateExec(t *testing.T, query string, expectError string) *sqltypes.Result } func TestInitialThrottler(t *testing.T) { - defer cluster.PanicHandler(t) t.Run("validating OK response from disabled throttler", func(t *testing.T) { waitForThrottleCheckStatus(t, primaryTablet, http.StatusOK) @@ -328,12 +326,57 @@ func TestInitialThrottler(t *testing.T) { }) t.Run("validating pushback response from throttler on low threshold once heartbeats go stale", func(t *testing.T) { time.Sleep(2 * onDemandHeartbeatDuration) // just... 
really wait long enough, make sure on-demand stops
-		waitForThrottleCheckStatus(t, primaryTablet, http.StatusTooManyRequests)
+		waitForThrottleCheckStatus(t, primaryTablet, tabletmanagerdatapb.CheckThrottlerResponseCode_THRESHOLD_EXCEEDED)
+	})
+}
+
+func TestThrottleViaApplySchema(t *testing.T) {
+	t.Run("throttling via ApplySchema", func(t *testing.T) {
+		vtctlParams := &cluster.ApplySchemaParams{DDLStrategy: "online"}
+		_, err := clusterInstance.VtctldClientProcess.ApplySchemaWithOutput(
+			keyspaceName, "alter vitess_migration throttle all", *vtctlParams,
+		)
+		assert.NoError(t, err)
+	})
+	t.Run("validate keyspace configuration after throttle", func(t *testing.T) {
+		keyspace, err := clusterInstance.VtctldClientProcess.GetKeyspace(keyspaceName)
+		require.NoError(t, err)
+		require.NotNil(t, keyspace)
+		require.NotNil(t, keyspace.Keyspace.ThrottlerConfig)
+		require.NotNil(t, keyspace.Keyspace.ThrottlerConfig.ThrottledApps)
+		require.NotEmpty(t, keyspace.Keyspace.ThrottlerConfig.ThrottledApps, "throttler config: %+v", keyspace.Keyspace.ThrottlerConfig)
+		appRule, ok := keyspace.Keyspace.ThrottlerConfig.ThrottledApps[throttlerapp.OnlineDDLName.String()]
+		require.True(t, ok, "throttled apps: %v", keyspace.Keyspace.ThrottlerConfig.ThrottledApps)
+		require.NotNil(t, appRule)
+		assert.Equal(t, throttlerapp.OnlineDDLName.String(), appRule.Name)
+		assert.EqualValues(t, 1.0, appRule.Ratio)
+		expireAt := time.Unix(appRule.ExpiresAt.Seconds, int64(appRule.ExpiresAt.Nanoseconds))
+		assert.True(t, expireAt.After(time.Now()), "expected rule to expire in the future: %v", expireAt)
+	})
+	t.Run("unthrottling via ApplySchema", func(t *testing.T) {
+		vtctlParams := &cluster.ApplySchemaParams{DDLStrategy: "online"}
+		_, err := clusterInstance.VtctldClientProcess.ApplySchemaWithOutput(
+			keyspaceName, "alter vitess_migration unthrottle all", *vtctlParams,
+		)
+		assert.NoError(t, err)
+	})
+	t.Run("validate keyspace configuration after unthrottle", func(t *testing.T) {
+		keyspace, err := clusterInstance.VtctldClientProcess.GetKeyspace(keyspaceName)
+		require.NoError(t, err)
+		require.NotNil(t, keyspace)
+		require.NotNil(t, keyspace.Keyspace.ThrottlerConfig)
+		require.NotNil(t, keyspace.Keyspace.ThrottlerConfig.ThrottledApps)
+		// ThrottledApps will actually be empty at this point, but more specifically we want to see that "online-ddl" is not there.
+		appRule, ok := keyspace.Keyspace.ThrottlerConfig.ThrottledApps[throttlerapp.OnlineDDLName.String()]
+		assert.False(t, ok, "app rule: %v", appRule)
 	})
 }
 
 func TestThrottlerAfterMetricsCollected(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	// By this time metrics will have been collected. We expect no lag, and something like:
 	// {"StatusCode":200,"Value":0.282278,"Threshold":1,"Message":""}
@@ -362,7 +405,6 @@ func TestThrottlerAfterMetricsCollected(t *testing.T) {
 }
 
 func TestLag(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	// Temporarily disable VTOrc recoveries because we want to
 	// STOP replication specifically in order to increase the
 	// lag and we DO NOT want VTOrc to try and fix this.
@@ -437,7 +479,6 @@ func TestLag(t *testing.T) { } func TestNoReplicas(t *testing.T) { - defer cluster.PanicHandler(t) t.Run("changing replica to RDONLY", func(t *testing.T) { err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", replicaTablet.Alias, "RDONLY") assert.NoError(t, err) @@ -455,7 +496,6 @@ func TestNoReplicas(t *testing.T) { } func TestCustomQuery(t *testing.T) { - defer cluster.PanicHandler(t) t.Run("enabling throttler with custom query and threshold", func(t *testing.T) { _, err := throttler.UpdateThrottlerTopoConfig(clusterInstance, true, false, customThreshold, customQuery, nil) @@ -522,7 +562,6 @@ func TestCustomQuery(t *testing.T) { } func TestRestoreDefaultQuery(t *testing.T) { - defer cluster.PanicHandler(t) // Validate going back from custom-query to default-query (replication lag) still works. t.Run("enabling throttler with default query and threshold", func(t *testing.T) { diff --git a/go/test/endtoend/topoconncache/main_test.go b/go/test/endtoend/topoconncache/main_test.go index 4c17481ec84..6fa23537216 100644 --- a/go/test/endtoend/topoconncache/main_test.go +++ b/go/test/endtoend/topoconncache/main_test.go @@ -97,7 +97,6 @@ Topology: We create a keyspace with two shards , having 3 tablets each. Primarie to 'zone1' and replicas/rdonly belongs to cell2. */ func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode, err := func() (int, error) { diff --git a/go/test/endtoend/topoconncache/topo_conn_cache_test.go b/go/test/endtoend/topoconncache/topo_conn_cache_test.go index 504ca218047..d8d9f56255f 100644 --- a/go/test/endtoend/topoconncache/topo_conn_cache_test.go +++ b/go/test/endtoend/topoconncache/topo_conn_cache_test.go @@ -37,7 +37,6 @@ import ( 4. 'ListAllTablets' should return all the new tablets. 
*/ func TestVtctldListAllTablets(t *testing.T) { - defer cluster.PanicHandler(t) url := fmt.Sprintf("http://%s:%d/api/keyspaces/", clusterInstance.Hostname, clusterInstance.VtctldHTTPPort) testURL(t, url, "keyspace url") diff --git a/go/test/endtoend/topotest/consul/main_test.go b/go/test/endtoend/topotest/consul/main_test.go index 1c278864ced..69ecf3d0f8a 100644 --- a/go/test/endtoend/topotest/consul/main_test.go +++ b/go/test/endtoend/topotest/consul/main_test.go @@ -61,7 +61,6 @@ CREATE TABLE t1 ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -97,7 +96,6 @@ func TestMain(m *testing.M) { } func TestTopoRestart(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", diff --git a/go/test/endtoend/topotest/etcd2/main_test.go b/go/test/endtoend/topotest/etcd2/main_test.go index db34bd2ee86..fa580cb2749 100644 --- a/go/test/endtoend/topotest/etcd2/main_test.go +++ b/go/test/endtoend/topotest/etcd2/main_test.go @@ -61,7 +61,6 @@ CREATE TABLE t1 ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -96,7 +95,6 @@ func TestMain(m *testing.M) { } func TestTopoDownServingQuery(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", diff --git a/go/test/endtoend/topotest/zk2/main_test.go b/go/test/endtoend/topotest/zk2/main_test.go index 816bbc72d72..1fc3374c888 100644 --- a/go/test/endtoend/topotest/zk2/main_test.go +++ b/go/test/endtoend/topotest/zk2/main_test.go @@ -61,7 +61,6 @@ CREATE TABLE t1 ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -97,7 +96,6 @@ func TestMain(m *testing.M) { } func TestTopoDownServingQuery(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", diff --git a/go/test/endtoend/transaction/twopc/fuzz/main_test.go b/go/test/endtoend/transaction/twopc/fuzz/main_test.go new file mode 100644 index 00000000000..4d168fbdde0 --- /dev/null +++ b/go/test/endtoend/transaction/twopc/fuzz/main_test.go @@ -0,0 +1,128 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fuzz + +import ( + "context" + _ "embed" + "flag" + "fmt" + "os" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/transaction/twopc/utils" +) + +var ( + clusterInstance *cluster.LocalProcessCluster + vtParams mysql.ConnParams + vtgateGrpcAddress string + keyspaceName = "ks" + unshardedKeyspaceName = "uks" + cell = "zone1" + hostname = "localhost" + + //go:embed schema.sql + SchemaSQL string + + //go:embed vschema.json + VSchema string +) + +func TestMain(m *testing.M) { + flag.Parse() + + exitcode := func() int { + clusterInstance = cluster.NewCluster(cell, hostname) + defer clusterInstance.Teardown() + + // Start topo server + if err := clusterInstance.StartTopo(); err != nil { + return 1 + } + + // Reserve vtGate port in order to pass it to vtTablet + clusterInstance.VtgateGrpcPort = clusterInstance.GetAndReservePort() + + // Set extra args for twopc + clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, + "--transaction_mode", "TWOPC", + "--grpc_use_effective_callerid", + "--tablet_refresh_interval", "2s", + ) + clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, + "--twopc_abandon_age", "1", + "--migration_check_interval", "2s", + ) + + // Start keyspace + keyspace := &cluster.Keyspace{ + Name: keyspaceName, + SchemaSQL: SchemaSQL, + VSchema: VSchema, + DurabilityPolicy: "semi_sync", + } + if err := clusterInstance.StartKeyspace(*keyspace, []string{"-40", "40-80", "80-"}, 2, false); err != nil { + return 1 + } + + // Start an unsharded keyspace + unshardedKeyspace := &cluster.Keyspace{ + Name: unshardedKeyspaceName, + SchemaSQL: "", + VSchema: "{}", + DurabilityPolicy: "semi_sync", + } + if err := clusterInstance.StartUnshardedKeyspace(*unshardedKeyspace, 2, false); err != nil { + return 1 + } + + // Start Vtgate + if err := clusterInstance.StartVtgate(); err != nil { + return 1 + } + vtParams = clusterInstance.GetVTParams("") + vtgateGrpcAddress = fmt.Sprintf("%s:%d", clusterInstance.Hostname, clusterInstance.VtgateGrpcPort) + + return m.Run() + }() + os.Exit(exitcode) +} + +func start(t *testing.T) (*mysql.Conn, func()) { + ctx := context.Background() + conn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + cleanup(t) + + return conn, func() { + conn.Close() + cleanup(t) + } +} + +func cleanup(t *testing.T) { + + utils.ClearOutTable(t, vtParams, "twopc_fuzzer_insert") + utils.ClearOutTable(t, vtParams, "twopc_fuzzer_update") + utils.ClearOutTable(t, vtParams, "twopc_fuzzer_multi") + utils.ClearOutTable(t, vtParams, "twopc_t1") +} diff --git a/go/test/endtoend/transaction/twopc/main_test.go b/go/test/endtoend/transaction/twopc/main_test.go new file mode 100644 index 00000000000..6d09c174a4d --- /dev/null +++ b/go/test/endtoend/transaction/twopc/main_test.go @@ -0,0 +1,347 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package transaction + +import ( + "context" + _ "embed" + "encoding/json" + "flag" + "fmt" + "io" + "os" + "strings" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/test/endtoend/utils" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/endtoend/cluster" + twopcutil "vitess.io/vitess/go/test/endtoend/transaction/twopc/utils" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/vtgate/vtgateconn" +) + +var ( + clusterInstance *cluster.LocalProcessCluster + mysqlParams mysql.ConnParams + vtParams mysql.ConnParams + vtgateGrpcAddress string + keyspaceName = "ks" + cell = "zone1" + hostname = "localhost" + sidecarDBName = "vt_ks" + + //go:embed schema.sql + SchemaSQL string + + //go:embed vschema.json + VSchema string +) + +func TestMain(m *testing.M) { + flag.Parse() + + exitcode := func() int { + clusterInstance = cluster.NewCluster(cell, hostname) + defer clusterInstance.Teardown() + + // Start topo server + if err := clusterInstance.StartTopo(); err != nil { + return 1 + } + + // Reserve vtGate port in order to pass it to vtTablet + clusterInstance.VtgateGrpcPort = clusterInstance.GetAndReservePort() + + // Set extra args for twopc + clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, + "--transaction_mode", "TWOPC", + "--grpc_use_effective_callerid", + ) + clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, + "--twopc_abandon_age", "1", + "--queryserver-config-transaction-cap", "3", + "--queryserver-config-transaction-timeout", "400s", + "--queryserver-config-query-timeout", "9000s", + ) + + // Start keyspace + keyspace := &cluster.Keyspace{ + Name: keyspaceName, + SchemaSQL: SchemaSQL, + VSchema: VSchema, + SidecarDBName: sidecarDBName, + DurabilityPolicy: "semi_sync", + } + if err := clusterInstance.StartKeyspace(*keyspace, []string{"-40", "40-80", "80-"}, 2, false); err != nil { + return 1 + } + + // Start Vtgate + if err := clusterInstance.StartVtgate(); err != nil { + return 1 + } + vtParams = clusterInstance.GetVTParams(keyspaceName) + vtgateGrpcAddress = fmt.Sprintf("%s:%d", clusterInstance.Hostname, clusterInstance.VtgateGrpcPort) + + clusterInstance.NewVTAdminProcess() + if err := clusterInstance.VtadminProcess.Setup(); err != nil { + return 1 + } + + // create mysql instance and connection parameters + conn, closer, err := utils.NewMySQL(clusterInstance, keyspaceName, SchemaSQL) + if err != nil { + fmt.Println(err) + return 1 + } + defer closer() + mysqlParams = conn + + return m.Run() + }() + os.Exit(exitcode) +} + +func start(t *testing.T) (*mysql.Conn, func()) { + ctx := context.Background() + conn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + cleanup(t) + + return conn, func() { + conn.Close() + cleanup(t) + } +} + +func cleanup(t *testing.T) { + twopcutil.ClearOutTable(t, vtParams, "twopc_user") + twopcutil.ClearOutTable(t, vtParams, "twopc_t1") + twopcutil.ClearOutTable(t, vtParams, "twopc_lookup") + twopcutil.ClearOutTable(t, vtParams, "lookup_unique") + twopcutil.ClearOutTable(t, vtParams, "lookup") + sm.reset() +} + +func startWithMySQL(t *testing.T) (utils.MySQLCompare, func()) { + mcmp, err := utils.NewMySQLCompare(t, vtParams, mysqlParams) + require.NoError(t, err) + + deleteAll := func() { + tables := []string{"twopc_user"} + for _, table := range tables { + _, _ = 
mcmp.ExecAndIgnore("delete from " + table) + } + } + + deleteAll() + + return mcmp, func() { + deleteAll() + mcmp.Close() + } +} + +type extractInterestingValues func(dtidMap map[string]string, vals []sqltypes.Value) []sqltypes.Value + +var tables = map[string]extractInterestingValues{ + "ks.dt_state": func(dtidMap map[string]string, vals []sqltypes.Value) (out []sqltypes.Value) { + dtid := getDTID(dtidMap, vals[0].ToString()) + dtState := getDTState(vals[1]) + out = append(out, sqltypes.NewVarChar(dtid), sqltypes.NewVarChar(dtState.String())) + return + }, + "ks.dt_participant": func(dtidMap map[string]string, vals []sqltypes.Value) (out []sqltypes.Value) { + dtid := getDTID(dtidMap, vals[0].ToString()) + out = append([]sqltypes.Value{sqltypes.NewVarChar(dtid)}, vals[1:]...) + return + }, + "ks.redo_state": func(dtidMap map[string]string, vals []sqltypes.Value) (out []sqltypes.Value) { + dtid := getDTID(dtidMap, vals[0].ToString()) + dtState := getDTState(vals[1]) + out = append(out, sqltypes.NewVarChar(dtid), sqltypes.NewVarChar(dtState.String())) + return + }, + "ks.redo_statement": func(dtidMap map[string]string, vals []sqltypes.Value) (out []sqltypes.Value) { + dtid := getDTID(dtidMap, vals[0].ToString()) + stmt := getStatement(vals[2].ToString()) + out = append([]sqltypes.Value{sqltypes.NewVarChar(dtid)}, vals[1], sqltypes.TestValue(sqltypes.Blob, stmt)) + return + }, + "ks.twopc_user": func(_ map[string]string, vals []sqltypes.Value) []sqltypes.Value { return vals }, +} + +func getDTState(val sqltypes.Value) querypb.TransactionState { + s, _ := val.ToInt() + return querypb.TransactionState(s) +} + +func getDTID(dtidMap map[string]string, dtKey string) string { + dtid, exists := dtidMap[dtKey] + if !exists { + dtid = fmt.Sprintf("dtid-%d", len(dtidMap)+1) + dtidMap[dtKey] = dtid + } + return dtid +} + +func getStatement(stmt string) string { + var sKey string + var prefix string + switch { + case strings.HasPrefix(stmt, "savepoint"): + prefix = "savepoint-" + sKey = stmt[9:] + case strings.HasPrefix(stmt, "rollback to"): + prefix = "rollback-" + sKey = stmt[11:] + default: + return stmt + } + + sid, exists := sm.stmt[sKey] + if !exists { + sid = fmt.Sprintf("%d", len(sm.stmt)+1) + sm.stmt[sKey] = sid + } + return prefix + sid +} + +func runVStream(t *testing.T, ctx context.Context, ch chan *binlogdatapb.VEvent, vtgateConn *vtgateconn.VTGateConn) { + shards := []string{"-40", "40-80", "80-"} + shardGtids := make([]*binlogdatapb.ShardGtid, 0, len(shards)) + var seen = make(map[string]bool, len(shards)) + var wg sync.WaitGroup + for _, shard := range shards { + shardGtids = append(shardGtids, &binlogdatapb.ShardGtid{Keyspace: keyspaceName, Shard: shard, Gtid: "current"}) + seen[shard] = false + wg.Add(1) + } + vgtid := &binlogdatapb.VGtid{ShardGtids: shardGtids} + filter := &binlogdatapb.Filter{Rules: []*binlogdatapb.Rule{{Match: "/.*/"}}} + + vReader, err := vtgateConn.VStream(ctx, topodatapb.TabletType_PRIMARY, vgtid, filter, nil) + require.NoError(t, err) + + go func() { + for { + evs, err := vReader.Recv() + if err == io.EOF || ctx.Err() != nil { + return + } + require.NoError(t, err) + + for _, ev := range evs { + // Mark VGTID event from each shard seen. 
+ if ev.Type == binlogdatapb.VEventType_VGTID { + if !seen[ev.Shard] { + seen[ev.Shard] = true + wg.Done() + } + } + if ev.Type == binlogdatapb.VEventType_ROW || ev.Type == binlogdatapb.VEventType_FIELD { + ch <- ev + } + } + } + }() + + // Wait for VGTID event from all shards + wg.Wait() +} + +func retrieveTransitions(t *testing.T, ch chan *binlogdatapb.VEvent, tableMap map[string][]*querypb.Field, dtMap map[string]string) map[string][]string { + return retrieveTransitionsWithTimeout(t, ch, tableMap, dtMap, 1*time.Second) +} + +func retrieveTransitionsWithTimeout(t *testing.T, ch chan *binlogdatapb.VEvent, tableMap map[string][]*querypb.Field, dtMap map[string]string, timeout time.Duration) map[string][]string { + logTable := make(map[string][]string) + + keepWaiting := true + for keepWaiting { + select { + case re := <-ch: + if re.RowEvent != nil { + shard := re.RowEvent.Shard + tableName := re.RowEvent.TableName + fields, ok := tableMap[tableName] + require.Truef(t, ok, "table %s not found in fields map", tableName) + for _, rc := range re.RowEvent.RowChanges { + logEvent(logTable, dtMap, shard, tableName, fields, rc) + } + } + if re.FieldEvent != nil { + tableMap[re.FieldEvent.TableName] = re.FieldEvent.Fields + } + case <-time.After(timeout): + keepWaiting = false + } + } + return logTable +} + +func logEvent(logTable map[string][]string, dtMap map[string]string, shard string, tbl string, fields []*querypb.Field, rc *binlogdatapb.RowChange) { + key := fmt.Sprintf("%s:%s", tbl, shard) + + var eventType string + var vals []sqltypes.Value + switch { + case rc.Before == nil && rc.After == nil: + panic("do not expect row event with both before and after nil") + case rc.Before == nil: + eventType = "insert" + vals = sqltypes.MakeRowTrusted(fields, rc.After) + case rc.After == nil: + eventType = "delete" + vals = sqltypes.MakeRowTrusted(fields, rc.Before) + default: + eventType = "update" + vals = sqltypes.MakeRowTrusted(fields, rc.After) + } + execFunc, exists := tables[tbl] + if exists { + vals = execFunc(dtMap, vals) + } + logTable[key] = append(logTable[key], fmt.Sprintf("%s:%v", eventType, vals)) +} + +func prettyPrint(v interface{}) string { + b, err := json.MarshalIndent(v, "", " ") + if err != nil { + return fmt.Sprintf("got error marshalling: %v", err) + } + return string(b) +} + +type stmtMapper struct { + stmt map[string]string +} + +var sm = &stmtMapper{stmt: make(map[string]string)} + +func (sm *stmtMapper) reset() { + sm.stmt = make(map[string]string) +} diff --git a/go/test/endtoend/transaction/twopc/metric/main_test.go b/go/test/endtoend/transaction/twopc/metric/main_test.go new file mode 100644 index 00000000000..61a43017ef9 --- /dev/null +++ b/go/test/endtoend/transaction/twopc/metric/main_test.go @@ -0,0 +1,114 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package transaction + +import ( + "context" + _ "embed" + "flag" + "fmt" + "os" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/test/endtoend/cluster" + twopcutil "vitess.io/vitess/go/test/endtoend/transaction/twopc/utils" +) + +var ( + clusterInstance *cluster.LocalProcessCluster + vtParams mysql.ConnParams + vtgateGrpcAddress string + keyspaceName = "ks" + cell = "zone1" + hostname = "localhost" + sidecarDBName = "vt_ks" + + //go:embed schema.sql + SchemaSQL string + + //go:embed vschema.json + VSchema string +) + +func TestMain(m *testing.M) { + flag.Parse() + + exitcode := func() int { + clusterInstance = cluster.NewCluster(cell, hostname) + defer clusterInstance.Teardown() + + // Start topo server + if err := clusterInstance.StartTopo(); err != nil { + return 1 + } + + // Reserve vtGate port in order to pass it to vtTablet + clusterInstance.VtgateGrpcPort = clusterInstance.GetAndReservePort() + + // Set extra args for twopc + clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, + "--transaction_mode", "TWOPC", + "--grpc_use_effective_callerid", + ) + clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, + "--twopc_abandon_age", "1", + "--queryserver-config-transaction-cap", "100", + ) + + // Start keyspace + keyspace := &cluster.Keyspace{ + Name: keyspaceName, + SchemaSQL: SchemaSQL, + VSchema: VSchema, + SidecarDBName: sidecarDBName, + DurabilityPolicy: "semi_sync", + } + if err := clusterInstance.StartKeyspace(*keyspace, []string{"-40", "40-80", "80-"}, 2, false); err != nil { + return 1 + } + + // Start Vtgate + if err := clusterInstance.StartVtgate(); err != nil { + return 1 + } + vtParams = clusterInstance.GetVTParams(keyspaceName) + vtgateGrpcAddress = fmt.Sprintf("%s:%d", clusterInstance.Hostname, clusterInstance.VtgateGrpcPort) + + return m.Run() + }() + os.Exit(exitcode) +} + +func start(t *testing.T) (*mysql.Conn, func()) { + ctx := context.Background() + conn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + cleanup(t) + + return conn, func() { + conn.Close() + cleanup(t) + } +} + +func cleanup(t *testing.T) { + twopcutil.ClearOutTable(t, vtParams, "twopc_user") + twopcutil.ClearOutTable(t, vtParams, "twopc_t1") +} diff --git a/go/test/endtoend/transaction/twopc/stress/main_test.go b/go/test/endtoend/transaction/twopc/stress/main_test.go new file mode 100644 index 00000000000..4da4f86bdff --- /dev/null +++ b/go/test/endtoend/transaction/twopc/stress/main_test.go @@ -0,0 +1,127 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package stress + +import ( + "context" + _ "embed" + "flag" + "fmt" + "os" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/transaction/twopc/utils" +) + +var ( + clusterInstance *cluster.LocalProcessCluster + vtParams mysql.ConnParams + vtgateGrpcAddress string + keyspaceName = "ks" + unshardedKeyspaceName = "uks" + cell = "zone1" + hostname = "localhost" + + //go:embed schema.sql + SchemaSQL string + + //go:embed vschema.json + VSchema string +) + +func TestMain(m *testing.M) { + flag.Parse() + + exitcode := func() int { + clusterInstance = cluster.NewCluster(cell, hostname) + defer clusterInstance.Teardown() + + // Start topo server + if err := clusterInstance.StartTopo(); err != nil { + return 1 + } + + // Reserve vtGate port in order to pass it to vtTablet + clusterInstance.VtgateGrpcPort = clusterInstance.GetAndReservePort() + + // Set extra args for twopc + clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, + "--transaction_mode", "TWOPC", + "--grpc_use_effective_callerid", + "--tablet_refresh_interval", "2s", + ) + clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, + "--twopc_abandon_age", "1", + "--migration_check_interval", "2s", + "--onterm_timeout", "1s", + "--onclose_timeout", "1s", + ) + + // Start keyspace + keyspace := &cluster.Keyspace{ + Name: keyspaceName, + SchemaSQL: SchemaSQL, + VSchema: VSchema, + DurabilityPolicy: "semi_sync", + } + if err := clusterInstance.StartKeyspace(*keyspace, []string{"-40", "40-80", "80-"}, 2, false); err != nil { + return 1 + } + + // Start an unsharded keyspace + unshardedKeyspace := &cluster.Keyspace{ + Name: unshardedKeyspaceName, + SchemaSQL: "", + VSchema: "{}", + DurabilityPolicy: "semi_sync", + } + if err := clusterInstance.StartUnshardedKeyspace(*unshardedKeyspace, 2, false); err != nil { + return 1 + } + + // Start Vtgate + if err := clusterInstance.StartVtgate(); err != nil { + return 1 + } + vtParams = clusterInstance.GetVTParams("") + vtgateGrpcAddress = fmt.Sprintf("%s:%d", clusterInstance.Hostname, clusterInstance.VtgateGrpcPort) + + return m.Run() + }() + os.Exit(exitcode) +} + +func start(t *testing.T) (*mysql.Conn, func()) { + ctx := context.Background() + conn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + cleanup(t) + + return conn, func() { + conn.Close() + cleanup(t) + } +} + +func cleanup(t *testing.T) { + utils.ClearOutTable(t, vtParams, "twopc_t1") + utils.ClearOutTable(t, vtParams, "twopc_settings") +} diff --git a/go/test/endtoend/utils/mysql_test.go b/go/test/endtoend/utils/mysql_test.go index de9db23dab1..9b8eac52278 100644 --- a/go/test/endtoend/utils/mysql_test.go +++ b/go/test/endtoend/utils/mysql_test.go @@ -45,7 +45,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) exitCode := func() int { clusterInstance = cluster.NewCluster(cell, "localhost") diff --git a/go/test/endtoend/utils/mysqlvsvitess/main_test.go b/go/test/endtoend/utils/mysqlvsvitess/main_test.go index 8f162fae41d..f064afb895d 100644 --- a/go/test/endtoend/utils/mysqlvsvitess/main_test.go +++ b/go/test/endtoend/utils/mysqlvsvitess/main_test.go @@ -64,7 +64,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) exitCode := func() int { clusterInstance = cluster.NewCluster(cell, "localhost") diff --git a/go/test/endtoend/vault/vault_test.go b/go/test/endtoend/vault/vault_test.go index 684a374707d..666429db629 
100644 --- a/go/test/endtoend/vault/vault_test.go +++ b/go/test/endtoend/vault/vault_test.go @@ -99,7 +99,6 @@ var ( ) func TestVaultAuth(t *testing.T) { - defer cluster.PanicHandler(nil) // Instantiate Vitess Cluster objects and start topo initializeClusterEarly(t) diff --git a/go/test/endtoend/versionupgrade/upgrade_test.go b/go/test/endtoend/versionupgrade/upgrade_test.go index 87f7f9e8675..c9e28c5603d 100644 --- a/go/test/endtoend/versionupgrade/upgrade_test.go +++ b/go/test/endtoend/versionupgrade/upgrade_test.go @@ -72,7 +72,6 @@ var ( // TestMain is the main entry point func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode, err := func() (int, error) { @@ -131,12 +130,10 @@ func TestMain(m *testing.M) { } func TestShards(t *testing.T) { - defer cluster.PanicHandler(t) assert.Equal(t, 2, len(clusterInstance.Keyspaces[0].Shards)) } func TestDeploySchema(t *testing.T) { - defer cluster.PanicHandler(t) if clusterInstance.ReusingVTDATAROOT { // we assume data is already deployed @@ -163,7 +160,6 @@ func TestDeploySchema(t *testing.T) { } func TestTablesExist(t *testing.T) { - defer cluster.PanicHandler(t) checkTables(t, "", totalTableCount) } diff --git a/go/test/endtoend/vtadmin/main_test.go b/go/test/endtoend/vtadmin/main_test.go new file mode 100644 index 00000000000..9233cd5b0aa --- /dev/null +++ b/go/test/endtoend/vtadmin/main_test.go @@ -0,0 +1,100 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vtadmin + +import ( + _ "embed" + "flag" + "os" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/test/endtoend/cluster" +) + +var ( + clusterInstance *cluster.LocalProcessCluster + uks = "uks" + cell = "test_misc" + + uschemaSQL = ` +create table u_a +( + id bigint, + a bigint, + primary key (id) +) Engine = InnoDB; + +create table u_b +( + id bigint, + b varchar(50), + primary key (id) +) Engine = InnoDB; + +` +) + +func TestMain(m *testing.M) { + flag.Parse() + + exitCode := func() int { + clusterInstance = cluster.NewCluster(cell, "localhost") + defer clusterInstance.Teardown() + + // Start topo server + err := clusterInstance.StartTopo() + if err != nil { + return 1 + } + + // Start Unsharded keyspace + ukeyspace := &cluster.Keyspace{ + Name: uks, + SchemaSQL: uschemaSQL, + } + err = clusterInstance.StartUnshardedKeyspace(*ukeyspace, 0, false) + if err != nil { + return 1 + } + + err = clusterInstance.StartVtgate() + if err != nil { + return 1 + } + + clusterInstance.NewVTAdminProcess() + err = clusterInstance.VtadminProcess.Setup() + if err != nil { + return 1 + } + + return m.Run() + }() + os.Exit(exitCode) +} + +// TestVtadminAPIs tests the vtadmin APIs. 
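+// It assumes vtadmin was brought up in TestMain via NewVTAdminProcess and
+// Setup; MakeAPICallRetry retries the request, so the check tolerates vtadmin
+// still becoming ready.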
+func TestVtadminAPIs(t *testing.T) { + + // Test the vtadmin APIs + t.Run("keyspaces api", func(t *testing.T) { + resp := clusterInstance.VtadminProcess.MakeAPICallRetry(t, "api/keyspaces") + require.Contains(t, resp, uks) + }) +} diff --git a/go/test/endtoend/vtgate/concurrentdml/main_test.go b/go/test/endtoend/vtgate/concurrentdml/main_test.go index 6ee5619b742..734962b0d33 100644 --- a/go/test/endtoend/vtgate/concurrentdml/main_test.go +++ b/go/test/endtoend/vtgate/concurrentdml/main_test.go @@ -66,7 +66,6 @@ INSERT INTO t1_seq (id, next_id, cache) values(0, 1, 1000); ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -108,7 +107,6 @@ func TestMain(m *testing.M) { } func TestInsertIgnoreOnLookupUniqueVindex(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -137,7 +135,6 @@ func TestInsertIgnoreOnLookupUniqueVindex(t *testing.T) { func TestOpenTxBlocksInSerial(t *testing.T) { t.Skip("Update and Insert in same transaction does not work with the unique consistent lookup having same value.") - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -169,7 +166,6 @@ func TestOpenTxBlocksInSerial(t *testing.T) { func TestOpenTxBlocksInConcurrent(t *testing.T) { t.Skip("Update and Insert in same transaction does not work with the unique consistent lookup having same value.") - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -207,7 +203,6 @@ func TestOpenTxBlocksInConcurrent(t *testing.T) { } func TestUpdateLookupUniqueVindex(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", diff --git a/go/test/endtoend/vtgate/connectiondrain/main_test.go b/go/test/endtoend/vtgate/connectiondrain/main_test.go new file mode 100644 index 00000000000..6257baf8e40 --- /dev/null +++ b/go/test/endtoend/vtgate/connectiondrain/main_test.go @@ -0,0 +1,185 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package connectiondrain
+
+import (
+	"context"
+	_ "embed"
+	"flag"
+	"os"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+
+	"vitess.io/vitess/go/mysql"
+	"vitess.io/vitess/go/test/endtoend/cluster"
+	"vitess.io/vitess/go/test/endtoend/utils"
+)
+
+var (
+	keyspaceName = "ks"
+	cell         = "zone-1"
+
+	//go:embed schema.sql
+	schemaSQL string
+)
+
+func TestMain(m *testing.M) {
+	flag.Parse()
+	os.Exit(m.Run())
+}
+
+func setupCluster(t *testing.T) (*cluster.LocalProcessCluster, mysql.ConnParams) {
+	clusterInstance := cluster.NewCluster(cell, "localhost")
+
+	// Start topo server
+	err := clusterInstance.StartTopo()
+	require.NoError(t, err)
+
+	// Start keyspace
+	keyspace := &cluster.Keyspace{
+		Name:      keyspaceName,
+		SchemaSQL: schemaSQL,
+	}
+	err = clusterInstance.StartUnshardedKeyspace(*keyspace, 0, false)
+	require.NoError(t, err)
+
+	// Start vtgate
+	clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, "--mysql-server-drain-onterm", "--onterm_timeout", "30s")
+	err = clusterInstance.StartVtgate()
+	require.NoError(t, err)
+
+	vtParams := clusterInstance.GetVTParams(keyspaceName)
+	return clusterInstance, vtParams
+}
+
+func start(t *testing.T, vtParams mysql.ConnParams) (*mysql.Conn, func()) {
+	vtConn, err := mysql.Connect(context.Background(), &vtParams)
+	require.NoError(t, err)
+
+	deleteAll := func() {
+		_, _ = utils.ExecAllowError(t, vtConn, "set workload = oltp")
+
+		tables := []string{"t1"}
+		for _, table := range tables {
+			_, _ = utils.ExecAllowError(t, vtConn, "delete from "+table)
+		}
+	}
+
+	deleteAll()
+
+	return vtConn, func() {
+		deleteAll()
+		vtConn.Close()
+	}
+}
+
+func TestConnectionDrainCloseConnections(t *testing.T) {
+	clusterInstance, vtParams := setupCluster(t)
+	defer clusterInstance.Teardown()
+
+	vtConn, closer := start(t, vtParams)
+	defer closer()
+
+	// Create a second connection; it will be used to create a transaction.
+	vtConn2, err := mysql.Connect(context.Background(), &vtParams)
+	require.NoError(t, err)
+
+	// Start the transaction with the second connection
+	_, err = vtConn2.ExecuteFetch("BEGIN", 1, false)
+	require.NoError(t, err)
+	_, err = vtConn2.ExecuteFetch("select id1 from t1", 1, false)
+	require.NoError(t, err)
+
+	_, err = vtConn.ExecuteFetch("select id1 from t1", 1, false)
+	require.NoError(t, err)
+
+	// Tear down vtgate here; from then on, vtConn should still be able to conclude its in-flight transaction and
+	// execute queries on idle connections. However, no new connections are allowed.
+	err = clusterInstance.VtgateProcess.Terminate()
+	require.NoError(t, err)
+
+	// Give vtgate enough time to receive and start processing the SIGTERM signal
+	time.Sleep(2 * time.Second)
+
+	// Create a third connection; this connection should not be allowed.
+	// Set a connection timeout of 1s, otherwise the connection attempt will block
+	// until vtgate eventually reaches the --onterm_timeout.
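+	// Note: ConnectTimeoutMs is in milliseconds; the deferred reset to 0 below
+	// restores the default behavior for connections opened later in the test.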
+	vtParams.ConnectTimeoutMs = 1000
+	defer func() {
+		vtParams.ConnectTimeoutMs = 0
+	}()
+	_, err = mysql.Connect(context.Background(), &vtParams)
+	require.Error(t, err)
+
+	// Idle connections should be allowed to execute queries until they are drained
+	_, err = vtConn.ExecuteFetch("select id1 from t1", 1, false)
+	require.NoError(t, err)
+
+	// Finish the transaction
+	_, err = vtConn2.ExecuteFetch("select id1 from t1", 1, false)
+	require.NoError(t, err)
+	_, err = vtConn2.ExecuteFetch("COMMIT", 1, false)
+	require.NoError(t, err)
+	vtConn2.Close()
+
+	// vtgate should still be running
+	require.False(t, clusterInstance.VtgateProcess.IsShutdown())
+
+	// This connection should still be allowed
+	_, err = vtConn.ExecuteFetch("select id1 from t1", 1, false)
+	require.NoError(t, err)
+	vtConn.Close()
+
+	// Give vtgate enough time to finish all the onterm hooks without reaching the 30s --onterm_timeout
+	time.Sleep(10 * time.Second)
+
+	// By now vtgate should have shut down on its own, without reaching --onterm_timeout
+	require.True(t, clusterInstance.VtgateProcess.IsShutdown())
+}
+
+func TestConnectionDrainOnTermTimeout(t *testing.T) {
+	clusterInstance, vtParams := setupCluster(t)
+	defer clusterInstance.Teardown()
+
+	// Connect to vtgate; these connections should succeed
+	vtConn, err := mysql.Connect(context.Background(), &vtParams)
+	require.NoError(t, err)
+	vtConn2, err := mysql.Connect(context.Background(), &vtParams)
+	require.NoError(t, err)
+
+	defer func() {
+		vtConn.Close()
+		vtConn2.Close()
+	}()
+
+	// Tear down vtgate here; we want to reach the onterm_timeout of 30s
+	err = clusterInstance.VtgateProcess.Terminate()
+	require.NoError(t, err)
+
+	// Run a busy query that returns only after the onterm_timeout is reached; it should fail once the timeout is hit
+	_, err = vtConn.ExecuteFetch("select sleep(40)", 1, false)
+	require.Error(t, err)
+
+	// Running a query after we have reached the onterm_timeout should fail
+	_, err = vtConn2.ExecuteFetch("select id from t1", 1, false)
+	require.Error(t, err)
+
+	// By now vtgate should have shut down because it reached its onterm_timeout, despite idle connections still being open
+	require.True(t, clusterInstance.VtgateProcess.IsShutdown())
+}
diff --git a/go/test/endtoend/vtgate/consolidator/main_test.go b/go/test/endtoend/vtgate/consolidator/main_test.go
index 021db7e513e..0d5eae3eca9 100644
--- a/go/test/endtoend/vtgate/consolidator/main_test.go
+++ b/go/test/endtoend/vtgate/consolidator/main_test.go
@@ -65,7 +65,6 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitCode := func() int {
diff --git a/go/test/endtoend/vtgate/createdb_plugin/main_test.go b/go/test/endtoend/vtgate/createdb_plugin/main_test.go
index e712fee7b36..5d23cfb6127 100644
--- a/go/test/endtoend/vtgate/createdb_plugin/main_test.go
+++ b/go/test/endtoend/vtgate/createdb_plugin/main_test.go
@@ -43,7 +43,6 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitCode := func() int {
@@ -81,7 +80,6 @@ func TestMain(m *testing.M) {
 }
 
 func TestDBDDLPlugin(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	ctx := context.Background()
 	vtParams := mysql.ConnParams{
 		Host: "localhost",
diff --git a/go/test/endtoend/vtgate/errors_as_warnings/main_test.go b/go/test/endtoend/vtgate/errors_as_warnings/main_test.go
index 24cade5b550..385e8a2ac82 100644
--- a/go/test/endtoend/vtgate/errors_as_warnings/main_test.go
+++ b/go/test/endtoend/vtgate/errors_as_warnings/main_test.go
@@ -66,7 +66,6
@@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/foreignkey/main_test.go b/go/test/endtoend/vtgate/foreignkey/main_test.go index 483c1d05e80..0c621b1927a 100644 --- a/go/test/endtoend/vtgate/foreignkey/main_test.go +++ b/go/test/endtoend/vtgate/foreignkey/main_test.go @@ -94,7 +94,6 @@ type fkReference struct { } func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -181,7 +180,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go b/go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go index 596381894fa..7db58db1039 100644 --- a/go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go +++ b/go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go @@ -309,7 +309,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode, err := func() (int, error) { @@ -627,7 +626,6 @@ func ExecuteFKTest(t *testing.T, tcase *testCase) { } func TestStressFK(t *testing.T) { - defer cluster.PanicHandler(t) t.Run("validate replication health", func(t *testing.T) { validateReplicationIsHealthy(t, replicaNoFK) diff --git a/go/test/endtoend/vtgate/gen4/column_name_test.go b/go/test/endtoend/vtgate/gen4/column_name_test.go index d23c03c9f6b..0f5a83a5092 100644 --- a/go/test/endtoend/vtgate/gen4/column_name_test.go +++ b/go/test/endtoend/vtgate/gen4/column_name_test.go @@ -26,11 +26,9 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" ) func TestColumnNames(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) diff --git a/go/test/endtoend/vtgate/gen4/main_test.go b/go/test/endtoend/vtgate/gen4/main_test.go index 378b2d2969e..4ac9d0747a5 100644 --- a/go/test/endtoend/vtgate/gen4/main_test.go +++ b/go/test/endtoend/vtgate/gen4/main_test.go @@ -64,7 +64,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -152,6 +151,5 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/gen4/system_schema_test.go b/go/test/endtoend/vtgate/gen4/system_schema_test.go index fba953d51ae..84602e75e73 100644 --- a/go/test/endtoend/vtgate/gen4/system_schema_test.go +++ b/go/test/endtoend/vtgate/gen4/system_schema_test.go @@ -28,11 +28,9 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" ) func TestDbNameOverride(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.Nil(t, err) @@ -55,7 +53,6 @@ func TestDbNameOverride(t *testing.T) { } func TestInformationSchemaQuery(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) @@ -90,7 +87,6 @@ func assertSingleRowIsReturned(t *testing.T, conn *mysql.Conn, predicate string, } func TestInformationSchemaWithSubquery(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) @@ -101,7 +97,6 @@ func 
TestInformationSchemaWithSubquery(t *testing.T) { } func TestInformationSchemaQueryGetsRoutedToTheRightTableAndKeyspace(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) @@ -113,7 +108,6 @@ func TestInformationSchemaQueryGetsRoutedToTheRightTableAndKeyspace(t *testing.T } func TestFKConstraintUsingInformationSchema(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) @@ -131,7 +125,6 @@ func TestFKConstraintUsingInformationSchema(t *testing.T) { } func TestConnectWithSystemSchema(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() for _, dbname := range []string{"information_schema", "mysql", "performance_schema", "sys"} { connParams := vtParams @@ -144,7 +137,6 @@ func TestConnectWithSystemSchema(t *testing.T) { } func TestUseSystemSchema(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) @@ -156,7 +148,6 @@ func TestUseSystemSchema(t *testing.T) { } func TestSystemSchemaQueryWithoutQualifier(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) @@ -191,7 +182,6 @@ func TestSystemSchemaQueryWithoutQualifier(t *testing.T) { } func TestMultipleSchemaPredicates(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) @@ -217,7 +207,6 @@ func TestMultipleSchemaPredicates(t *testing.T) { } func TestQuerySystemTables(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) diff --git a/go/test/endtoend/vtgate/godriver/main_test.go b/go/test/endtoend/vtgate/godriver/main_test.go index 587c189d2ea..91605394cf1 100644 --- a/go/test/endtoend/vtgate/godriver/main_test.go +++ b/go/test/endtoend/vtgate/godriver/main_test.go @@ -86,7 +86,6 @@ create table my_message( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -125,7 +124,6 @@ func TestMain(m *testing.M) { } func TestStreamMessaging(t *testing.T) { - defer cluster.PanicHandler(t) cnf := vitessdriver.Configuration{ Protocol: "grpc", diff --git a/go/test/endtoend/vtgate/grpc_api/main_test.go b/go/test/endtoend/vtgate/grpc_api/main_test.go index 3c8605f79a0..87d30f4ce26 100644 --- a/go/test/endtoend/vtgate/grpc_api/main_test.go +++ b/go/test/endtoend/vtgate/grpc_api/main_test.go @@ -75,7 +75,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode := func() int { diff --git a/go/test/endtoend/vtgate/keyspace_watches/keyspace_watch_test.go b/go/test/endtoend/vtgate/keyspace_watches/keyspace_watch_test.go index 4971d03060b..1eb31663577 100644 --- a/go/test/endtoend/vtgate/keyspace_watches/keyspace_watch_test.go +++ b/go/test/endtoend/vtgate/keyspace_watches/keyspace_watch_test.go @@ -117,7 +117,6 @@ func createCluster(extraVTGateArgs []string) (*cluster.LocalProcessCluster, int) } func TestRoutingWithKeyspacesToWatch(t *testing.T) { - defer cluster.PanicHandler(t) clusterInstance, exitCode := createCluster(nil) defer clusterInstance.Teardown() @@ -141,7 +140,6 @@ func TestRoutingWithKeyspacesToWatch(t *testing.T) { } func TestVSchemaDDLWithKeyspacesToWatch(t *testing.T) { - defer cluster.PanicHandler(t) 
extraVTGateArgs := []string{ "--vschema_ddl_authorized_users", "%", diff --git a/go/test/endtoend/vtgate/main_test.go b/go/test/endtoend/vtgate/main_test.go index 12abcf4dd01..2339c4973a6 100644 --- a/go/test/endtoend/vtgate/main_test.go +++ b/go/test/endtoend/vtgate/main_test.go @@ -54,7 +54,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { clusterInstance = cluster.NewCluster(Cell, "localhost") @@ -122,6 +121,5 @@ func start(t *testing.T) (*mysql.Conn, func()) { return conn, func() { deleteAll() conn.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/misc_test.go b/go/test/endtoend/vtgate/misc_test.go index 128d930718c..56415e15d57 100644 --- a/go/test/endtoend/vtgate/misc_test.go +++ b/go/test/endtoend/vtgate/misc_test.go @@ -28,7 +28,6 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/sqlerror" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) @@ -780,7 +779,6 @@ func TestJoinWithMergedRouteWithPredicate(t *testing.T) { func TestRowCountExceed(t *testing.T) { conn, _ := start(t) defer func() { - cluster.PanicHandler(t) // needs special delete logic as it exceeds row count. for i := 50; i <= 300; i += 50 { utils.Exec(t, conn, fmt.Sprintf("delete from t1 where id1 < %d", i)) diff --git a/go/test/endtoend/vtgate/mysql80/main_test.go b/go/test/endtoend/vtgate/mysql80/main_test.go index 4f5897d1f59..b970fb66b12 100644 --- a/go/test/endtoend/vtgate/mysql80/main_test.go +++ b/go/test/endtoend/vtgate/mysql80/main_test.go @@ -35,7 +35,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/mysql80/misc_test.go b/go/test/endtoend/vtgate/mysql80/misc_test.go index b29eb13ecdc..5132bf87aba 100644 --- a/go/test/endtoend/vtgate/mysql80/misc_test.go +++ b/go/test/endtoend/vtgate/mysql80/misc_test.go @@ -23,15 +23,12 @@ import ( "vitess.io/vitess/go/test/endtoend/utils" - "vitess.io/vitess/go/test/endtoend/cluster" - "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" ) func TestFunctionInDefault(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) @@ -223,7 +220,6 @@ func BenchmarkReservedConnWhenSettingSysVar(b *testing.B) { } func TestJsonFunctions(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) diff --git a/go/test/endtoend/vtgate/partialfailure/main_test.go b/go/test/endtoend/vtgate/partialfailure/main_test.go index 9e39e7b5dd5..d5b6a639c68 100644 --- a/go/test/endtoend/vtgate/partialfailure/main_test.go +++ b/go/test/endtoend/vtgate/partialfailure/main_test.go @@ -45,7 +45,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/plan_tests/main_test.go b/go/test/endtoend/vtgate/plan_tests/main_test.go new file mode 100644 index 00000000000..d3915af0c8d --- /dev/null +++ b/go/test/endtoend/vtgate/plan_tests/main_test.go @@ -0,0 +1,230 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package plan_tests + +import ( + "encoding/json" + "fmt" + "os" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/utils" + "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/planbuilder" +) + +var ( + clusterInstance *cluster.LocalProcessCluster + vtParams mysql.ConnParams + mysqlParams mysql.ConnParams + uks = "main" + sks = "user" + cell = "plantests" +) + +func TestMain(m *testing.M) { + vschema := readFile("vschemas/schema.json") + userVs := extractUserKS(vschema) + mainVs := extractMainKS(vschema) + sSQL := readFile("schemas/user.sql") + uSQL := readFile("schemas/main.sql") + + exitCode := func() int { + clusterInstance = cluster.NewCluster(cell, "localhost") + defer clusterInstance.Teardown() + + // Start topo server + err := clusterInstance.StartTopo() + if err != nil { + fmt.Println(err.Error()) + return 1 + } + + // Start unsharded keyspace + uKeyspace := &cluster.Keyspace{ + Name: uks, + SchemaSQL: uSQL, + VSchema: mainVs, + } + err = clusterInstance.StartUnshardedKeyspace(*uKeyspace, 0, false) + if err != nil { + fmt.Println(err.Error()) + return 1 + } + + // Start sharded keyspace + skeyspace := &cluster.Keyspace{ + Name: sks, + SchemaSQL: sSQL, + VSchema: userVs, + } + err = clusterInstance.StartKeyspace(*skeyspace, []string{"-80", "80-"}, 0, false) + if err != nil { + fmt.Println(err.Error()) + return 1 + } + + // TODO: (@GuptaManan100/@systay): Also run the tests with normalizer on. + clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, + "--normalize_queries=false", + "--schema_change_signal=false", + ) + + // Start vtgate + err = clusterInstance.StartVtgate() + if err != nil { + fmt.Println(err.Error()) + return 1 + } + + vtParams = clusterInstance.GetVTParams(sks) + + // create mysql instance and connection parameters + conn, closer, err := utils.NewMySQL(clusterInstance, sks, sSQL, uSQL) + if err != nil { + fmt.Println(err.Error()) + return 1 + } + defer closer() + mysqlParams = conn + + return m.Run() + }() + os.Exit(exitCode) +} + +func readFile(filename string) string { + schema, err := os.ReadFile(locateFile(filename)) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + return string(schema) +} + +func start(t *testing.T) (utils.MySQLCompare, func()) { + mcmp, err := utils.NewMySQLCompare(t, vtParams, mysqlParams) + require.NoError(t, err) + return mcmp, func() { + mcmp.Close() + } +} + +func readJSONTests(filename string) []planbuilder.PlanTest { + var output []planbuilder.PlanTest + file, err := os.Open(locateFile(filename)) + if err != nil { + panic(err) + } + defer file.Close() + dec := json.NewDecoder(file) + err = dec.Decode(&output) + if err != nil { + panic(err) + } + return output +} + +func locateFile(name string) string { + return "../../../../vt/vtgate/planbuilder/testdata/" + name +} + +// verifyTestExpectations verifies the expectations of the test. +func verifyTestExpectations(t *testing.T, pd engine.PrimitiveDescription, test planbuilder.PlanTest) { + // 1. 
Verify that the Join primitive sees at least 1 row on the left side.
+	engine.WalkPrimitiveDescription(pd, func(description engine.PrimitiveDescription) {
+		if description.OperatorType == "Join" {
+			require.NotZero(t, description.Inputs[0].RowsReceived[0])
+		}
+	})
+
+	// 2. Verify that the plan description matches the expected plan description.
+	planBytes, err := test.Plan.MarshalJSON()
+	require.NoError(t, err)
+	mp := make(map[string]any)
+	err = json.Unmarshal(planBytes, &mp)
+	require.NoError(t, err)
+	pdExpected, err := engine.PrimitiveDescriptionFromMap(mp["Instructions"].(map[string]any))
+	require.NoError(t, err)
+	require.Empty(t, pdExpected.Equals(pd), "Expected: %v\nGot: %v", string(planBytes), pd)
+}
+
+func extractUserKS(jsonString string) string {
+	var result map[string]any
+	if err := json.Unmarshal([]byte(jsonString), &result); err != nil {
+		panic(err.Error())
+	}
+
+	keyspaces, ok := result["keyspaces"].(map[string]any)
+	if !ok {
+		panic("Keyspaces not found")
+	}
+
+	user, ok := keyspaces["user"].(map[string]any)
+	if !ok {
+		panic("User keyspace not found")
+	}
+
+	tables, ok := user["tables"].(map[string]any)
+	if !ok {
+		panic("Tables not found")
+	}
+
+	userTbl, ok := tables["user"].(map[string]any)
+	if !ok {
+		panic("User table not found")
+	}
+
+	delete(userTbl, "auto_increment") // TODO: we should have an unsharded keyspace where this could live
+
+	// Marshal the inner part back to JSON string
+	userJson, err := json.Marshal(user)
+	if err != nil {
+		panic(err.Error())
+	}
+
+	return string(userJson)
+}
+
+func extractMainKS(jsonString string) string {
+	var result map[string]any
+	if err := json.Unmarshal([]byte(jsonString), &result); err != nil {
+		panic(err.Error())
+	}
+
+	keyspaces, ok := result["keyspaces"].(map[string]any)
+	if !ok {
+		panic("Keyspaces not found")
+	}
+
+	main, ok := keyspaces["main"].(map[string]any)
+	if !ok {
+		panic("main keyspace not found")
+	}
+
+	// Marshal the inner part back to JSON string
+	mainJson, err := json.Marshal(main)
+	if err != nil {
+		panic(err.Error())
+	}
+
+	return string(mainJson)
+}
diff --git a/go/test/endtoend/vtgate/prefixfanout/main_test.go b/go/test/endtoend/vtgate/prefixfanout/main_test.go
index 928808fd48e..a96ee4ce7f5 100644
--- a/go/test/endtoend/vtgate/prefixfanout/main_test.go
+++ b/go/test/endtoend/vtgate/prefixfanout/main_test.go
@@ -109,7 +109,6 @@ PRIMARY KEY (c1)
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitCode := func() int {
@@ -158,7 +157,6 @@ func TestMain(m *testing.M) {
 }
 
 func TestCFCPrefixQueryNoHash(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	ctx := context.Background()
 	vtParams := clusterInstance.GetVTParams(sKs)
 	conn, err := mysql.Connect(ctx, &vtParams)
@@ -196,7 +194,6 @@ func TestCFCPrefixQueryNoHash(t *testing.T) {
 }
 
 func TestCFCPrefixQueryWithHash(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	ctx := context.Background()
 	vtParams := clusterInstance.GetVTParams(sKsMD5)
@@ -239,7 +236,6 @@ func TestCFCPrefixQueryWithHash(t *testing.T) {
 }
 
 func TestCFCInsert(t *testing.T) {
-	defer cluster.PanicHandler(t)
 	ctx := context.Background()
 	vtParams := clusterInstance.GetVTParams(sKs)
diff --git a/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go b/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go
index d4035ebe5d6..4743261378c 100644
--- a/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go
+++ b/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go
@@ -27,7 +27,6 @@ import (
"github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) @@ -66,7 +65,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/queries/aggregation/main_test.go b/go/test/endtoend/vtgate/queries/aggregation/main_test.go index 02013a9b0e2..bd1c1aa3b7d 100644 --- a/go/test/endtoend/vtgate/queries/aggregation/main_test.go +++ b/go/test/endtoend/vtgate/queries/aggregation/main_test.go @@ -44,7 +44,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/benchmark/main_test.go b/go/test/endtoend/vtgate/queries/benchmark/main_test.go index 6978d0b9428..a4e4f4400a3 100644 --- a/go/test/endtoend/vtgate/queries/benchmark/main_test.go +++ b/go/test/endtoend/vtgate/queries/benchmark/main_test.go @@ -65,7 +65,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -119,6 +118,5 @@ func start(b *testing.B) (*mysql.Conn, func()) { return conn, func() { deleteAll() conn.Close() - cluster.PanicHandler(b) } } diff --git a/go/test/endtoend/vtgate/queries/derived/derived_test.go b/go/test/endtoend/vtgate/queries/derived/derived_test.go index cb106564b2f..fe467f31c20 100644 --- a/go/test/endtoend/vtgate/queries/derived/derived_test.go +++ b/go/test/endtoend/vtgate/queries/derived/derived_test.go @@ -21,7 +21,6 @@ import ( "github.com/stretchr/testify/require" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) @@ -44,7 +43,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/queries/derived/main_test.go b/go/test/endtoend/vtgate/queries/derived/main_test.go index 3b44811f95c..0bab24a966a 100644 --- a/go/test/endtoend/vtgate/queries/derived/main_test.go +++ b/go/test/endtoend/vtgate/queries/derived/main_test.go @@ -44,7 +44,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/dml/main_test.go b/go/test/endtoend/vtgate/queries/dml/main_test.go index 0c4d58aa614..bc72acc1159 100644 --- a/go/test/endtoend/vtgate/queries/dml/main_test.go +++ b/go/test/endtoend/vtgate/queries/dml/main_test.go @@ -66,7 +66,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -148,6 +147,5 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/queries/foundrows/found_rows_test.go b/go/test/endtoend/vtgate/queries/foundrows/found_rows_test.go index f52e2eff532..e50e9210c55 100644 --- a/go/test/endtoend/vtgate/queries/foundrows/found_rows_test.go +++ b/go/test/endtoend/vtgate/queries/foundrows/found_rows_test.go @@ -21,12 +21,10 @@ import ( "github.com/stretchr/testify/require" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) func TestFoundRows(t *testing.T) { - defer cluster.PanicHandler(t) mcmp, err := utils.NewMySQLCompare(t, vtParams, mysqlParams) require.NoError(t, err) defer mcmp.Close() diff --git a/go/test/endtoend/vtgate/queries/foundrows/main_test.go 
b/go/test/endtoend/vtgate/queries/foundrows/main_test.go index 8f992863008..248b6cd9434 100644 --- a/go/test/endtoend/vtgate/queries/foundrows/main_test.go +++ b/go/test/endtoend/vtgate/queries/foundrows/main_test.go @@ -46,7 +46,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go b/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go index a1ef2711499..3eaac39e6fb 100644 --- a/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go +++ b/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go @@ -25,7 +25,6 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) @@ -47,7 +46,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } @@ -114,7 +112,6 @@ func TestFKConstraintUsingInformationSchema(t *testing.T) { } func TestConnectWithSystemSchema(t *testing.T) { - defer cluster.PanicHandler(t) for _, dbname := range []string{"information_schema", "mysql", "performance_schema", "sys"} { vtConnParams := vtParams vtConnParams.DbName = dbname diff --git a/go/test/endtoend/vtgate/queries/informationschema/main_test.go b/go/test/endtoend/vtgate/queries/informationschema/main_test.go index 06c5b188d18..28280aeef9e 100644 --- a/go/test/endtoend/vtgate/queries/informationschema/main_test.go +++ b/go/test/endtoend/vtgate/queries/informationschema/main_test.go @@ -52,7 +52,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/kill/main_test.go b/go/test/endtoend/vtgate/queries/kill/main_test.go index 836603c91ee..f28df30f61f 100644 --- a/go/test/endtoend/vtgate/queries/kill/main_test.go +++ b/go/test/endtoend/vtgate/queries/kill/main_test.go @@ -50,7 +50,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/lookup_queries/main_test.go b/go/test/endtoend/vtgate/queries/lookup_queries/main_test.go index 25bf78437da..212b1651e63 100644 --- a/go/test/endtoend/vtgate/queries/lookup_queries/main_test.go +++ b/go/test/endtoend/vtgate/queries/lookup_queries/main_test.go @@ -49,7 +49,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -119,7 +118,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/queries/misc/main_test.go b/go/test/endtoend/vtgate/queries/misc/main_test.go index f20072031a8..ee9be542634 100644 --- a/go/test/endtoend/vtgate/queries/misc/main_test.go +++ b/go/test/endtoend/vtgate/queries/misc/main_test.go @@ -48,7 +48,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/misc/misc_test.go b/go/test/endtoend/vtgate/queries/misc/misc_test.go index 408b32a7969..b8509769690 100644 --- a/go/test/endtoend/vtgate/queries/misc/misc_test.go +++ b/go/test/endtoend/vtgate/queries/misc/misc_test.go @@ -28,7 +28,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - 
"vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) @@ -48,7 +47,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/queries/no_scatter/main_test.go b/go/test/endtoend/vtgate/queries/no_scatter/main_test.go index c4b0974c24b..a1478dcd2ac 100644 --- a/go/test/endtoend/vtgate/queries/no_scatter/main_test.go +++ b/go/test/endtoend/vtgate/queries/no_scatter/main_test.go @@ -40,7 +40,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/no_scatter/queries_test.go b/go/test/endtoend/vtgate/queries/no_scatter/queries_test.go index 7bf702afc15..b302a0f4dc7 100644 --- a/go/test/endtoend/vtgate/queries/no_scatter/queries_test.go +++ b/go/test/endtoend/vtgate/queries/no_scatter/queries_test.go @@ -23,7 +23,6 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) @@ -43,7 +42,6 @@ func start(t *testing.T) (*mysql.Conn, func()) { return vtConn, func() { deleteAll() vtConn.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/queries/normalize/main_test.go b/go/test/endtoend/vtgate/queries/normalize/main_test.go index 8f4d97209dd..8c75d38284d 100644 --- a/go/test/endtoend/vtgate/queries/normalize/main_test.go +++ b/go/test/endtoend/vtgate/queries/normalize/main_test.go @@ -39,7 +39,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/orderby/main_test.go b/go/test/endtoend/vtgate/queries/orderby/main_test.go index 9f18377ee3f..353745722b7 100644 --- a/go/test/endtoend/vtgate/queries/orderby/main_test.go +++ b/go/test/endtoend/vtgate/queries/orderby/main_test.go @@ -44,7 +44,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/orderby/orderby_test.go b/go/test/endtoend/vtgate/queries/orderby/orderby_test.go index f6c52cab2ac..004dfd37be7 100644 --- a/go/test/endtoend/vtgate/queries/orderby/orderby_test.go +++ b/go/test/endtoend/vtgate/queries/orderby/orderby_test.go @@ -22,8 +22,6 @@ import ( "vitess.io/vitess/go/test/endtoend/utils" "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/test/endtoend/cluster" ) func start(t *testing.T) (utils.MySQLCompare, func()) { @@ -44,7 +42,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { return mcmp, func() { deleteAll() mcmp.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/queries/orderby/without_schematracker/main_test.go b/go/test/endtoend/vtgate/queries/orderby/without_schematracker/main_test.go index 00221e9c9f3..373c1327074 100644 --- a/go/test/endtoend/vtgate/queries/orderby/without_schematracker/main_test.go +++ b/go/test/endtoend/vtgate/queries/orderby/without_schematracker/main_test.go @@ -44,7 +44,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/orderby/without_schematracker/orderby_test.go b/go/test/endtoend/vtgate/queries/orderby/without_schematracker/orderby_test.go index a20c7ad54c6..956815d2a0d 100644 --- a/go/test/endtoend/vtgate/queries/orderby/without_schematracker/orderby_test.go +++ 
b/go/test/endtoend/vtgate/queries/orderby/without_schematracker/orderby_test.go
@@ -22,8 +22,6 @@ import (
 	"vitess.io/vitess/go/test/endtoend/utils"
 
 	"github.com/stretchr/testify/require"
-
-	"vitess.io/vitess/go/test/endtoend/cluster"
 )
 
 func start(t *testing.T) (utils.MySQLCompare, func()) {
@@ -44,7 +42,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) {
 	return mcmp, func() {
 		deleteAll()
 		mcmp.Close()
-		cluster.PanicHandler(t)
 	}
 }
diff --git a/go/test/endtoend/vtgate/queries/random/main_test.go b/go/test/endtoend/vtgate/queries/random/main_test.go
index e3256f60796..85c8840924d 100644
--- a/go/test/endtoend/vtgate/queries/random/main_test.go
+++ b/go/test/endtoend/vtgate/queries/random/main_test.go
@@ -44,7 +44,6 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitCode := func() int {
diff --git a/go/test/endtoend/vtgate/queries/random/random_test.go b/go/test/endtoend/vtgate/queries/random/random_test.go
index aea43c2f929..e805d409c0d 100644
--- a/go/test/endtoend/vtgate/queries/random/random_test.go
+++ b/go/test/endtoend/vtgate/queries/random/random_test.go
@@ -28,7 +28,6 @@ import (
 
 	"github.com/stretchr/testify/require"
 
-	"vitess.io/vitess/go/test/endtoend/cluster"
 	"vitess.io/vitess/go/test/endtoend/utils"
 )
 
@@ -62,7 +61,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) {
 	return mcmp, func() {
 		deleteAll()
 		mcmp.Close()
-		cluster.PanicHandler(t)
 	}
 }
diff --git a/go/test/endtoend/vtgate/queries/reference/main_test.go b/go/test/endtoend/vtgate/queries/reference/main_test.go
index c350038bf6e..03ee429e4c0 100644
--- a/go/test/endtoend/vtgate/queries/reference/main_test.go
+++ b/go/test/endtoend/vtgate/queries/reference/main_test.go
@@ -53,7 +53,6 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitCode := func() int {
diff --git a/go/test/endtoend/vtgate/queries/reference/reference_test.go b/go/test/endtoend/vtgate/queries/reference/reference_test.go
index 08e9cbe13b1..9b567a69cfd 100644
--- a/go/test/endtoend/vtgate/queries/reference/reference_test.go
+++ b/go/test/endtoend/vtgate/queries/reference/reference_test.go
@@ -24,8 +24,6 @@ import (
 	"vitess.io/vitess/go/mysql"
 	"vitess.io/vitess/go/test/endtoend/utils"
-
-	"vitess.io/vitess/go/test/endtoend/cluster"
 )
 
 func start(t *testing.T) (*mysql.Conn, func()) {
@@ -35,7 +33,6 @@ func start(t *testing.T) (*mysql.Conn, func()) {
 
 	return vtConn, func() {
 		vtConn.Close()
-		cluster.PanicHandler(t)
 	}
 }
diff --git a/go/test/endtoend/vtgate/queries/subquery/main_test.go b/go/test/endtoend/vtgate/queries/subquery/main_test.go
index 9eaf3b4caa0..bc8580bd38d 100644
--- a/go/test/endtoend/vtgate/queries/subquery/main_test.go
+++ b/go/test/endtoend/vtgate/queries/subquery/main_test.go
@@ -44,7 +44,6 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitCode := func() int {
diff --git a/go/test/endtoend/vtgate/queries/subquery/subquery_test.go b/go/test/endtoend/vtgate/queries/subquery/subquery_test.go
index 29bd256ea51..e68ced57f9c 100644
--- a/go/test/endtoend/vtgate/queries/subquery/subquery_test.go
+++ b/go/test/endtoend/vtgate/queries/subquery/subquery_test.go
@@ -23,7 +23,8 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
-	"vitess.io/vitess/go/test/endtoend/cluster"
+	"vitess.io/vitess/go/sqltypes"
+
 	"vitess.io/vitess/go/test/endtoend/utils"
 )
 
@@ -45,7 +46,6 @@ func start(t *testing.T)
(utils.MySQLCompare, func()) {
 	return mcmp, func() {
 		deleteAll()
 		mcmp.Close()
-		cluster.PanicHandler(t)
 	}
 }
diff --git a/go/test/endtoend/vtgate/queries/timeout/main_test.go b/go/test/endtoend/vtgate/queries/timeout/main_test.go
index c265e824e88..c9d7e2487bb 100644
--- a/go/test/endtoend/vtgate/queries/timeout/main_test.go
+++ b/go/test/endtoend/vtgate/queries/timeout/main_test.go
@@ -48,7 +48,6 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitCode := func() int {
diff --git a/go/test/endtoend/vtgate/queries/timeout/timeout_test.go b/go/test/endtoend/vtgate/queries/timeout/timeout_test.go
index 25a7f57b3bc..63dae071a55 100644
--- a/go/test/endtoend/vtgate/queries/timeout/timeout_test.go
+++ b/go/test/endtoend/vtgate/queries/timeout/timeout_test.go
@@ -23,7 +23,7 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
-	"vitess.io/vitess/go/test/endtoend/cluster"
+	"vitess.io/vitess/go/mysql"
 	"vitess.io/vitess/go/test/endtoend/utils"
 )
 
@@ -43,7 +43,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) {
 	return mcmp, func() {
 		deleteAll()
 		mcmp.Close()
-		cluster.PanicHandler(t)
 	}
 }
diff --git a/go/test/endtoend/vtgate/queries/tpch/main_test.go b/go/test/endtoend/vtgate/queries/tpch/main_test.go
index 103adb336ab..403ddd510ce 100644
--- a/go/test/endtoend/vtgate/queries/tpch/main_test.go
+++ b/go/test/endtoend/vtgate/queries/tpch/main_test.go
@@ -43,7 +43,6 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitCode := func() int {
diff --git a/go/test/endtoend/vtgate/queries/tpch/tpch_test.go b/go/test/endtoend/vtgate/queries/tpch/tpch_test.go
index ec33bd0ae9d..a916bd9f4ae 100644
--- a/go/test/endtoend/vtgate/queries/tpch/tpch_test.go
+++ b/go/test/endtoend/vtgate/queries/tpch/tpch_test.go
@@ -19,7 +19,6 @@ package union
 import (
 	"testing"
 
-	"vitess.io/vitess/go/test/endtoend/cluster"
 	"vitess.io/vitess/go/test/endtoend/utils"
 
 	"github.com/stretchr/testify/require"
@@ -43,7 +42,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) {
 	return mcmp, func() {
 		deleteAll()
 		mcmp.Close()
-		cluster.PanicHandler(t)
 	}
 }
diff --git a/go/test/endtoend/vtgate/queries/union/main_test.go b/go/test/endtoend/vtgate/queries/union/main_test.go
index 06ec07a6c2f..a5f45f84156 100644
--- a/go/test/endtoend/vtgate/queries/union/main_test.go
+++ b/go/test/endtoend/vtgate/queries/union/main_test.go
@@ -43,7 +43,6 @@ var (
 )
 
 func TestMain(m *testing.M) {
-	defer cluster.PanicHandler(nil)
 	flag.Parse()
 
 	exitCode := func() int {
diff --git a/go/test/endtoend/vtgate/queries/union/union_test.go b/go/test/endtoend/vtgate/queries/union/union_test.go
index 312574c0120..50de6461efa 100644
--- a/go/test/endtoend/vtgate/queries/union/union_test.go
+++ b/go/test/endtoend/vtgate/queries/union/union_test.go
@@ -19,7 +19,6 @@ package union
 import (
 	"testing"
 
-	"vitess.io/vitess/go/test/endtoend/cluster"
 	"vitess.io/vitess/go/test/endtoend/utils"
 
 	"github.com/stretchr/testify/assert"
@@ -44,7 +43,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) {
 	return mcmp, func() {
 		deleteAll()
 		mcmp.Close()
-		cluster.PanicHandler(t)
 	}
 }
diff --git a/go/test/endtoend/vtgate/queries/vexplain/main_test.go b/go/test/endtoend/vtgate/queries/vexplain/main_test.go
index c1c401bc573..96b6a1c41d1 100644
---
a/go/test/endtoend/vtgate/queries/vexplain/main_test.go +++ b/go/test/endtoend/vtgate/queries/vexplain/main_test.go @@ -43,7 +43,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/queries/vexplain/vexplain_test.go b/go/test/endtoend/vtgate/queries/vexplain/vexplain_test.go index ed43d57b578..f95cb200934 100644 --- a/go/test/endtoend/vtgate/queries/vexplain/vexplain_test.go +++ b/go/test/endtoend/vtgate/queries/vexplain/vexplain_test.go @@ -23,7 +23,6 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/utils" @@ -48,7 +47,6 @@ func start(t *testing.T) (*mysql.Conn, func()) { return vtConn, func() { deleteAll() vtConn.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/readafterwrite/raw_test.go b/go/test/endtoend/vtgate/readafterwrite/raw_test.go index 0549a9b06b0..ce6db45d24e 100644 --- a/go/test/endtoend/vtgate/readafterwrite/raw_test.go +++ b/go/test/endtoend/vtgate/readafterwrite/raw_test.go @@ -100,7 +100,6 @@ CREATE TABLE test_vdx ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -143,7 +142,6 @@ func TestMain(m *testing.M) { } func TestRAWSettings(t *testing.T) { - defer cluster.PanicHandler(t) conn, err := mysql.Connect(context.Background(), &vtParams) require.NoError(t, err) defer conn.Close() diff --git a/go/test/endtoend/vtgate/reservedconn/main_test.go b/go/test/endtoend/vtgate/reservedconn/main_test.go index 528182a82e2..ce91d270c4b 100644 --- a/go/test/endtoend/vtgate/reservedconn/main_test.go +++ b/go/test/endtoend/vtgate/reservedconn/main_test.go @@ -103,7 +103,6 @@ CREATE TABLE test_vdx ( var enableSettingsPool bool func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() code := runAllTests(m) diff --git a/go/test/endtoend/vtgate/reservedconn/reconnect1/main_test.go b/go/test/endtoend/vtgate/reservedconn/reconnect1/main_test.go index 11325a0f2f8..d525fc1aa79 100644 --- a/go/test/endtoend/vtgate/reservedconn/reconnect1/main_test.go +++ b/go/test/endtoend/vtgate/reservedconn/reconnect1/main_test.go @@ -65,7 +65,6 @@ var ( var enableSettingsPool bool func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() code := runAllTests(m) diff --git a/go/test/endtoend/vtgate/reservedconn/reconnect2/main_test.go b/go/test/endtoend/vtgate/reservedconn/reconnect2/main_test.go index b66bb15dbd5..7a59da41fe4 100644 --- a/go/test/endtoend/vtgate/reservedconn/reconnect2/main_test.go +++ b/go/test/endtoend/vtgate/reservedconn/reconnect2/main_test.go @@ -66,7 +66,6 @@ var ( var enableSettingsPool bool func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() code := runAllTests(m) diff --git a/go/test/endtoend/vtgate/reservedconn/reconnect3/main_test.go b/go/test/endtoend/vtgate/reservedconn/reconnect3/main_test.go index 25af85acc00..943b6fe0861 100644 --- a/go/test/endtoend/vtgate/reservedconn/reconnect3/main_test.go +++ b/go/test/endtoend/vtgate/reservedconn/reconnect3/main_test.go @@ -42,7 +42,6 @@ var ( var enableSettingsPool bool func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() code := runAllTests(m) diff --git a/go/test/endtoend/vtgate/reservedconn/reconnect4/main_test.go b/go/test/endtoend/vtgate/reservedconn/reconnect4/main_test.go index 28367cd597a..7ac7ff54240 100644 --- 
a/go/test/endtoend/vtgate/reservedconn/reconnect4/main_test.go +++ b/go/test/endtoend/vtgate/reservedconn/reconnect4/main_test.go @@ -42,7 +42,6 @@ var ( var enableSettingsPool bool func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() code := runAllTests(m) diff --git a/go/test/endtoend/vtgate/reservedconn/sysvar_test.go b/go/test/endtoend/vtgate/reservedconn/sysvar_test.go index 564cc671d5f..6ea97f84e9a 100644 --- a/go/test/endtoend/vtgate/reservedconn/sysvar_test.go +++ b/go/test/endtoend/vtgate/reservedconn/sysvar_test.go @@ -29,11 +29,9 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" ) func TestSetSysVarSingle(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() type queriesWithExpectations struct { name, expr string diff --git a/go/test/endtoend/vtgate/reservedconn/udv_test.go b/go/test/endtoend/vtgate/reservedconn/udv_test.go index 55f4c54c612..14b65dbcd35 100644 --- a/go/test/endtoend/vtgate/reservedconn/udv_test.go +++ b/go/test/endtoend/vtgate/reservedconn/udv_test.go @@ -31,11 +31,9 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" ) func TestSetUDV(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() type queriesWithExpectations struct { @@ -123,7 +121,6 @@ func TestSetUDV(t *testing.T) { } func TestMysqlDumpInitialLog(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) diff --git a/go/test/endtoend/vtgate/schema/schema_test.go b/go/test/endtoend/vtgate/schema/schema_test.go index 04d91d8d978..e9fcaece128 100644 --- a/go/test/endtoend/vtgate/schema/schema_test.go +++ b/go/test/endtoend/vtgate/schema/schema_test.go @@ -55,7 +55,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode, err := func() (int, error) { @@ -101,7 +100,6 @@ func TestMain(m *testing.M) { } func TestSchemaChange(t *testing.T) { - defer cluster.PanicHandler(t) testWithInitialSchema(t) testWithAlterSchema(t) testWithAlterDatabase(t) diff --git a/go/test/endtoend/vtgate/schematracker/loadkeyspace/schema_load_keyspace_test.go b/go/test/endtoend/vtgate/schematracker/loadkeyspace/schema_load_keyspace_test.go index 9586206221e..ec201487887 100644 --- a/go/test/endtoend/vtgate/schematracker/loadkeyspace/schema_load_keyspace_test.go +++ b/go/test/endtoend/vtgate/schematracker/loadkeyspace/schema_load_keyspace_test.go @@ -57,7 +57,6 @@ var ( ) func TestLoadKeyspaceWithNoTablet(t *testing.T) { - defer cluster.PanicHandler(t) var err error clusterInstance = cluster.NewCluster(cell, hostname) @@ -100,7 +99,6 @@ func TestLoadKeyspaceWithNoTablet(t *testing.T) { } func TestNoInitialKeyspace(t *testing.T) { - defer cluster.PanicHandler(t) var err error clusterInstance = cluster.NewCluster(cell, hostname) diff --git a/go/test/endtoend/vtgate/schematracker/restarttablet/schema_restart_test.go b/go/test/endtoend/vtgate/schematracker/restarttablet/schema_restart_test.go index 3bb4f6dfd9f..1943fefa9d7 100644 --- a/go/test/endtoend/vtgate/schematracker/restarttablet/schema_restart_test.go +++ b/go/test/endtoend/vtgate/schematracker/restarttablet/schema_restart_test.go @@ -64,7 +64,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode := func() int { @@ -116,7 +115,6 @@ func TestMain(m *testing.M) { } func TestVSchemaTrackerInit(t *testing.T) { - defer 
cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) @@ -137,7 +135,6 @@ func TestVSchemaTrackerInit(t *testing.T) { // properly handles primary tablet restarts -- meaning that we maintain // the exact same vschema state as before the restart. func TestVSchemaTrackerKeyspaceReInit(t *testing.T) { - defer cluster.PanicHandler(t) primaryTablet := clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet() diff --git a/go/test/endtoend/vtgate/schematracker/sharded/st_sharded_test.go b/go/test/endtoend/vtgate/schematracker/sharded/st_sharded_test.go index c6f3d8469cc..049858d766c 100644 --- a/go/test/endtoend/vtgate/schematracker/sharded/st_sharded_test.go +++ b/go/test/endtoend/vtgate/schematracker/sharded/st_sharded_test.go @@ -48,7 +48,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -161,7 +160,6 @@ func TestNewTable(t *testing.T) { } func TestAmbiguousColumnJoin(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) diff --git a/go/test/endtoend/vtgate/schematracker/sharded_prs/st_sharded_test.go b/go/test/endtoend/vtgate/schematracker/sharded_prs/st_sharded_test.go index 6ff8e69bb52..a00b9733673 100644 --- a/go/test/endtoend/vtgate/schematracker/sharded_prs/st_sharded_test.go +++ b/go/test/endtoend/vtgate/schematracker/sharded_prs/st_sharded_test.go @@ -122,7 +122,6 @@ create table t8( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -208,7 +207,6 @@ func TestMain(m *testing.M) { } func TestAddColumn(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) diff --git a/go/test/endtoend/vtgate/schematracker/unsharded/st_unsharded_test.go b/go/test/endtoend/vtgate/schematracker/unsharded/st_unsharded_test.go index 257dd7238f3..233eeb2da79 100644 --- a/go/test/endtoend/vtgate/schematracker/unsharded/st_unsharded_test.go +++ b/go/test/endtoend/vtgate/schematracker/unsharded/st_unsharded_test.go @@ -49,7 +49,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -112,7 +111,6 @@ func TestMain(m *testing.M) { } func TestNewUnshardedTable(t *testing.T) { - defer cluster.PanicHandler(t) // create a sql connection ctx := context.Background() @@ -182,8 +180,11 @@ func TestNewUnshardedTable(t *testing.T) { // creating two tables having the same name differing only in casing, but other operating systems don't. // More information at https://dev.mysql.com/doc/refman/8.0/en/identifier-case-sensitivity.html#:~:text=Table%20names%20are%20stored%20in,lowercase%20on%20storage%20and%20lookup. 
func TestCaseSensitiveSchemaTracking(t *testing.T) { +<<<<<<< HEAD utils.SkipIfBinaryIsBelowVersion(t, 19, "vttablet") defer cluster.PanicHandler(t) +======= +>>>>>>> bad431deed (Remove broken panic handler (#17354)) // create a sql connection ctx := context.Background() diff --git a/go/test/endtoend/vtgate/sec_vind/main_test.go b/go/test/endtoend/vtgate/sec_vind/main_test.go index 7aa5df76a83..7ec0d5c0682 100644 --- a/go/test/endtoend/vtgate/sec_vind/main_test.go +++ b/go/test/endtoend/vtgate/sec_vind/main_test.go @@ -44,7 +44,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -101,7 +100,6 @@ func start(t *testing.T) (*mysql.Conn, func()) { return conn, func() { deleteAll() conn.Close() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/sequence/seq_test.go b/go/test/endtoend/vtgate/sequence/seq_test.go index dd7542becc5..7daeff4fc7c 100644 --- a/go/test/endtoend/vtgate/sequence/seq_test.go +++ b/go/test/endtoend/vtgate/sequence/seq_test.go @@ -174,7 +174,6 @@ CREATE TABLE allDefaults ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -216,7 +215,6 @@ func TestMain(m *testing.M) { } func TestSeq(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -274,7 +272,6 @@ func TestSeq(t *testing.T) { } func TestDotTableSeq(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -297,7 +294,6 @@ func TestDotTableSeq(t *testing.T) { } func TestInsertAllDefaults(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", diff --git a/go/test/endtoend/vtgate/tablet_healthcheck/reparent_test.go b/go/test/endtoend/vtgate/tablet_healthcheck/reparent_test.go index dbc46bdda77..949b9b59012 100644 --- a/go/test/endtoend/vtgate/tablet_healthcheck/reparent_test.go +++ b/go/test/endtoend/vtgate/tablet_healthcheck/reparent_test.go @@ -89,7 +89,6 @@ create table corder( // TestMain sets up the vitess cluster for any subsequent tests func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/tablet_healthcheck_cache/correctness_test.go b/go/test/endtoend/vtgate/tablet_healthcheck_cache/correctness_test.go index 9386c307a12..5d9724bb2a2 100644 --- a/go/test/endtoend/vtgate/tablet_healthcheck_cache/correctness_test.go +++ b/go/test/endtoend/vtgate/tablet_healthcheck_cache/correctness_test.go @@ -86,7 +86,6 @@ create table corder( // TestMain sets up the vitess cluster for any subsequent tests func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/transaction/restart/main_test.go b/go/test/endtoend/vtgate/transaction/restart/main_test.go index 3c7ac710e9d..5435c6fa028 100644 --- a/go/test/endtoend/vtgate/transaction/restart/main_test.go +++ b/go/test/endtoend/vtgate/transaction/restart/main_test.go @@ -42,7 +42,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/transaction/rollback/txn_rollback_shutdown_test.go b/go/test/endtoend/vtgate/transaction/rollback/txn_rollback_shutdown_test.go index 2dff9f7b95f..c7bef098c05 100644 --- a/go/test/endtoend/vtgate/transaction/rollback/txn_rollback_shutdown_test.go +++ 
b/go/test/endtoend/vtgate/transaction/rollback/txn_rollback_shutdown_test.go @@ -48,7 +48,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -87,7 +86,6 @@ func TestMain(m *testing.M) { } func TestTransactionRollBackWhenShutDown(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) @@ -122,7 +120,6 @@ func TestTransactionRollBackWhenShutDown(t *testing.T) { } func TestErrorInAutocommitSession(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) diff --git a/go/test/endtoend/vtgate/transaction/single/main_test.go b/go/test/endtoend/vtgate/transaction/single/main_test.go index ec2dbd6378a..1eab3cea276 100644 --- a/go/test/endtoend/vtgate/transaction/single/main_test.go +++ b/go/test/endtoend/vtgate/transaction/single/main_test.go @@ -46,7 +46,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { diff --git a/go/test/endtoend/vtgate/transaction/tx_test.go b/go/test/endtoend/vtgate/transaction/tx_test.go index 8a004277b89..86b1ebdf486 100644 --- a/go/test/endtoend/vtgate/transaction/tx_test.go +++ b/go/test/endtoend/vtgate/transaction/tx_test.go @@ -47,7 +47,6 @@ var ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitcode, err := func() (int, error) { @@ -96,7 +95,6 @@ func TestMain(m *testing.M) { // TestTransactionModes tests transactions using twopc mode func TestTransactionModes(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) @@ -142,7 +140,6 @@ func TestTransactionModes(t *testing.T) { // TestTransactionIsolation tests transaction isolation level. func TestTransactionIsolation(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) @@ -249,6 +246,5 @@ func start(t *testing.T) func() { return func() { deleteAll() - cluster.PanicHandler(t) } } diff --git a/go/test/endtoend/vtgate/unsharded/main_test.go b/go/test/endtoend/vtgate/unsharded/main_test.go index 7405a7dd87f..c8f2b0afc9a 100644 --- a/go/test/endtoend/vtgate/unsharded/main_test.go +++ b/go/test/endtoend/vtgate/unsharded/main_test.go @@ -149,7 +149,6 @@ END; var enableSettingsPool bool func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() code := runAllTests(m) @@ -207,7 +206,6 @@ func runAllTests(m *testing.M) int { func TestSelectIntoAndLoadFrom(t *testing.T) { // Test is skipped because it requires secure-file-priv variable to be set to not NULL or empty. 
t.Skip() - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -242,7 +240,6 @@ func TestSelectIntoAndLoadFrom(t *testing.T) { } func TestEmptyStatement(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -259,7 +256,6 @@ func TestEmptyStatement(t *testing.T) { } func TestTopoDownServingQuery(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -279,7 +275,6 @@ func TestTopoDownServingQuery(t *testing.T) { } func TestInsertAllDefaults(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -294,7 +289,6 @@ func TestInsertAllDefaults(t *testing.T) { } func TestDDLUnsharded(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -315,7 +309,6 @@ func TestDDLUnsharded(t *testing.T) { } func TestCallProcedure(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -364,7 +357,6 @@ func TestCallProcedure(t *testing.T) { } func TestTempTable(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -389,7 +381,6 @@ func TestTempTable(t *testing.T) { } func TestReservedConnDML(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", @@ -412,7 +403,6 @@ func TestReservedConnDML(t *testing.T) { } func TestNumericPrecisionScale(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ Host: "localhost", diff --git a/go/test/endtoend/vtgate/vindex_bindvars/main_test.go b/go/test/endtoend/vtgate/vindex_bindvars/main_test.go index 3251668e155..84c2c825784 100644 --- a/go/test/endtoend/vtgate/vindex_bindvars/main_test.go +++ b/go/test/endtoend/vtgate/vindex_bindvars/main_test.go @@ -265,7 +265,6 @@ CREATE TABLE thex ( ) func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) flag.Parse() exitCode := func() int { @@ -304,7 +303,6 @@ func TestMain(m *testing.M) { } func TestVindexHexTypes(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.Nil(t, err) @@ -326,7 +324,6 @@ func TestVindexHexTypes(t *testing.T) { } func TestVindexBindVarOverlap(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.Nil(t, err) diff --git a/go/test/endtoend/vtgate/vschema/vschema_test.go b/go/test/endtoend/vtgate/vschema/vschema_test.go index eec54f8f47f..82d08fa3056 100644 --- a/go/test/endtoend/vtgate/vschema/vschema_test.go +++ b/go/test/endtoend/vtgate/vschema/vschema_test.go @@ -97,7 +97,6 @@ func TestMain(m *testing.M) { } func TestVSchema(t *testing.T) { - defer cluster.PanicHandler(t) ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) diff --git a/go/test/endtoend/vtorc/api/api_test.go b/go/test/endtoend/vtorc/api/api_test.go index 7dd5c50eefa..008fb75b9dc 100644 --- a/go/test/endtoend/vtorc/api/api_test.go +++ b/go/test/endtoend/vtorc/api/api_test.go @@ -33,7 +33,6 @@ import ( // TestAPIEndpoints tests the various API endpoints that VTOrc offers. 
func TestAPIEndpoints(t *testing.T) { - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{ PreventCrossDataCenterPrimaryFailover: true, RecoveryPeriodBlockSeconds: 5, diff --git a/go/test/endtoend/vtorc/api/config_test.go b/go/test/endtoend/vtorc/api/config_test.go new file mode 100644 index 00000000000..821b0f8071e --- /dev/null +++ b/go/test/endtoend/vtorc/api/config_test.go @@ -0,0 +1,203 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +*/ + +package api + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/vtorc/utils" +) + +// TestDynamicConfigs tests the dynamic configurations that VTOrc offers. +func TestDynamicConfigs(t *testing.T) { + utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{}, 1, "") + vtorc := clusterInfo.ClusterInstance.VTOrcProcesses[0] + + // Restart VTOrc without any flag overrides so that all the configurations can be tested. + err := vtorc.TearDown() + require.NoError(t, err) + vtorc.Config = cluster.VTOrcConfiguration{} + vtorc.NoOverride = true + err = vtorc.Setup() + require.NoError(t, err) + + // Call API with retry to ensure VTOrc is up + status, resp := utils.MakeAPICallRetry(t, vtorc, "/debug/health", func(code int, response string) bool { + return code != 200 + }) + // Verify when VTOrc is healthy, it has also run the first discovery. + assert.Equal(t, 200, status) + assert.Contains(t, resp, `"Healthy": true,`) + + t.Run("InstancePollTime", func(t *testing.T) { + // Get configuration and verify the output. + waitForConfig(t, vtorc, `"instance-poll-time": 5000000000`) + // Update configuration and verify the output. + vtorc.Config.InstancePollTime = "10h" + err := vtorc.RewriteConfiguration() + assert.NoError(t, err) + // Wait until the config has been updated and seen. + waitForConfig(t, vtorc, `"instance-poll-time": "10h"`) + }) + + t.Run("PreventCrossCellFailover", func(t *testing.T) { + // Get configuration and verify the output. + waitForConfig(t, vtorc, `"prevent-cross-cell-failover": false`) + // Update configuration and verify the output. + vtorc.Config.PreventCrossCellFailover = true + err := vtorc.RewriteConfiguration() + assert.NoError(t, err) + // Wait until the config has been updated and seen. + waitForConfig(t, vtorc, `"prevent-cross-cell-failover": true`) + }) + + t.Run("SnapshotTopologyInterval", func(t *testing.T) { + // Get configuration and verify the output. + waitForConfig(t, vtorc, `"snapshot-topology-interval": 0`) + // Update configuration and verify the output. + vtorc.Config.SnapshotTopologyInterval = "10h" + err := vtorc.RewriteConfiguration() + assert.NoError(t, err) + // Wait until the config has been updated and seen.
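+ // Note: as the assertions in these subtests show, defaults surface in the config output as Go-native values (durations as integer nanoseconds), while values overridden through the config file appear to be echoed back as the raw string that was set, hence the `"10h"` matches below.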
+ waitForConfig(t, vtorc, `"snapshot-topology-interval": "10h"`) + }) + + t.Run("ReasonableReplicationLag", func(t *testing.T) { + // Get configuration and verify the output. + waitForConfig(t, vtorc, `"reasonable-replication-lag": 10000000000`) + // Update configuration and verify the output. + vtorc.Config.ReasonableReplicationLag = "10h" + err := vtorc.RewriteConfiguration() + assert.NoError(t, err) + // Wait until the config has been updated and seen. + waitForConfig(t, vtorc, `"reasonable-replication-lag": "10h"`) + }) + + t.Run("AuditToBackend", func(t *testing.T) { + // Get configuration and verify the output. + waitForConfig(t, vtorc, `"audit-to-backend": false`) + // Update configuration and verify the output. + vtorc.Config.AuditToBackend = true + err := vtorc.RewriteConfiguration() + assert.NoError(t, err) + // Wait until the config has been updated and seen. + waitForConfig(t, vtorc, `"audit-to-backend": true`) + }) + + t.Run("AuditToSyslog", func(t *testing.T) { + // Get configuration and verify the output. + waitForConfig(t, vtorc, `"audit-to-syslog": false`) + // Update configuration and verify the output. + vtorc.Config.AuditToSyslog = true + err := vtorc.RewriteConfiguration() + assert.NoError(t, err) + // Wait until the config has been updated and seen. + waitForConfig(t, vtorc, `"audit-to-syslog": true`) + }) + + t.Run("AuditPurgeDuration", func(t *testing.T) { + // Get configuration and verify the output. + waitForConfig(t, vtorc, `"audit-purge-duration": 604800000000000`) + // Update configuration and verify the output. + vtorc.Config.AuditPurgeDuration = "10h" + err := vtorc.RewriteConfiguration() + assert.NoError(t, err) + // Wait until the config has been updated and seen. + waitForConfig(t, vtorc, `"audit-purge-duration": "10h"`) + }) + + t.Run("WaitReplicasTimeout", func(t *testing.T) { + // Get configuration and verify the output. + waitForConfig(t, vtorc, `"wait-replicas-timeout": 30000000000`) + // Update configuration and verify the output. + vtorc.Config.WaitReplicasTimeout = "10h" + err := vtorc.RewriteConfiguration() + assert.NoError(t, err) + // Wait until the config has been updated and seen. + waitForConfig(t, vtorc, `"wait-replicas-timeout": "10h"`) + }) + + t.Run("TolerableReplicationLag", func(t *testing.T) { + // Get configuration and verify the output. + waitForConfig(t, vtorc, `"tolerable-replication-lag": 0`) + // Update configuration and verify the output. + vtorc.Config.TolerableReplicationLag = "10h" + err := vtorc.RewriteConfiguration() + assert.NoError(t, err) + // Wait until the config has been updated and seen. + waitForConfig(t, vtorc, `"tolerable-replication-lag": "10h"`) + }) + + t.Run("TopoInformationRefreshDuration", func(t *testing.T) { + // Get configuration and verify the output. + waitForConfig(t, vtorc, `"topo-information-refresh-duration": 15000000000`) + // Update configuration and verify the output. + vtorc.Config.TopoInformationRefreshDuration = "10h" + err := vtorc.RewriteConfiguration() + assert.NoError(t, err) + // Wait until the config has been updated and seen. + waitForConfig(t, vtorc, `"topo-information-refresh-duration": "10h"`) + }) + + t.Run("RecoveryPollDuration", func(t *testing.T) { + // Get configuration and verify the output. + waitForConfig(t, vtorc, `"recovery-poll-duration": 1000000000`) + // Update configuration and verify the output. + vtorc.Config.RecoveryPollDuration = "10h" + err := vtorc.RewriteConfiguration() + assert.NoError(t, err) + // Wait until the config has been updated and seen. 
+ waitForConfig(t, vtorc, `"recovery-poll-duration": "10h"`) + }) + + t.Run("AllowEmergencyReparent", func(t *testing.T) { + // Get configuration and verify the output. + waitForConfig(t, vtorc, `"allow-emergency-reparent": true`) + // Update configuration and verify the output. + vtorc.Config.AllowEmergencyReparent = "false" + err := vtorc.RewriteConfiguration() + assert.NoError(t, err) + // Wait until the config has been updated and seen. + waitForConfig(t, vtorc, `"allow-emergency-reparent": "false"`) + }) + + t.Run("ChangeTabletsWithErrantGtidToDrained", func(t *testing.T) { + // Get configuration and verify the output. + waitForConfig(t, vtorc, `"change-tablets-with-errant-gtid-to-drained": false`) + // Update configuration and verify the output. + vtorc.Config.ChangeTabletsWithErrantGtidToDrained = true + err := vtorc.RewriteConfiguration() + assert.NoError(t, err) + // Wait until the config has been updated and seen. + waitForConfig(t, vtorc, `"change-tablets-with-errant-gtid-to-drained": true`) + }) +} + +// waitForConfig waits for the expectedConfig to be present in the VTOrc configuration. +func waitForConfig(t *testing.T, vtorc *cluster.VTOrcProcess, expectedConfig string) { + t.Helper() + status, _ := utils.MakeAPICallRetry(t, vtorc, "/api/config", func(_ int, response string) bool { + return !strings.Contains(response, expectedConfig) + }) + require.EqualValues(t, 200, status) +} diff --git a/go/test/endtoend/vtorc/api/main_test.go b/go/test/endtoend/vtorc/api/main_test.go index f89326bc856..cc3e796b293 100644 --- a/go/test/endtoend/vtorc/api/main_test.go +++ b/go/test/endtoend/vtorc/api/main_test.go @@ -21,7 +21,6 @@ import ( "os" "testing" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/vtorc/utils" ) @@ -53,8 +52,6 @@ func TestMain(m *testing.M) { return m.Run(), nil }() - cluster.PanicHandler(nil) - if clusterInfo != nil { // stop vtorc first otherwise its logs get polluted // with instances being unreachable triggering unnecessary operations diff --git a/go/test/endtoend/vtorc/general/main_test.go b/go/test/endtoend/vtorc/general/main_test.go index 6db0792de3a..0cd88cd378c 100644 --- a/go/test/endtoend/vtorc/general/main_test.go +++ b/go/test/endtoend/vtorc/general/main_test.go @@ -21,7 +21,6 @@ import ( "os" "testing" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/vtorc/utils" ) @@ -47,8 +46,6 @@ func TestMain(m *testing.M) { return m.Run(), nil }() - cluster.PanicHandler(nil) - if clusterInfo != nil { // stop vtorc first otherwise its logs get polluted // with instances being unreachable triggering unnecessary operations diff --git a/go/test/endtoend/vtorc/general/vtorc_test.go b/go/test/endtoend/vtorc/general/vtorc_test.go index 60b1c7eb3dc..52e84021644 100644 --- a/go/test/endtoend/vtorc/general/vtorc_test.go +++ b/go/test/endtoend/vtorc/general/vtorc_test.go @@ -39,7 +39,6 @@ import ( // verify that with multiple vtorc instances, we still only have 1 PlannedReparentShard call func TestPrimaryElection(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{ PreventCrossDataCenterPrimaryFailover: true, }, 2, "") @@ -61,13 +60,72 @@ func TestPrimaryElection(t *testing.T) { require.Len(t, res.Rows, 1, "There should only be 1 primary tablet which was elected") } +<<<<<<< HEAD +======= +// TestErrantGTIDOnPreviousPrimary tests that VTOrc is able to detect errant 
GTIDs on a previously demoted primary +// and change its tablet type to DRAINED. +func TestErrantGTIDOnPreviousPrimary(t *testing.T) { + defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) + utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 3, 0, []string{"--change-tablets-with-errant-gtid-to-drained"}, cluster.VTOrcConfiguration{}, 1, "") + keyspace := &clusterInfo.ClusterInstance.Keyspaces[0] + shard0 := &keyspace.Shards[0] + + // find primary from topo + curPrimary := utils.ShardPrimaryTablet(t, clusterInfo, keyspace, shard0) + assert.NotNil(t, curPrimary, "should have elected a primary") + vtOrcProcess := clusterInfo.ClusterInstance.VTOrcProcesses[0] + utils.WaitForSuccessfulRecoveryCount(t, vtOrcProcess, logic.ElectNewPrimaryRecoveryName, 1) + utils.WaitForSuccessfulPRSCount(t, vtOrcProcess, keyspace.Name, shard0.Name, 1) + + var replica, otherReplica *cluster.Vttablet + for _, tablet := range shard0.Vttablets { + // the two tablets that are not the current primary must be the replicas + if tablet.Alias != curPrimary.Alias { + if replica == nil { + replica = tablet + } else { + otherReplica = tablet + } + } + } + require.NotNil(t, replica, "should be able to find a replica") + require.NotNil(t, otherReplica, "should be able to find 2nd replica") + utils.CheckReplication(t, clusterInfo, curPrimary, []*cluster.Vttablet{replica, otherReplica}, 15*time.Second) + + // Disable global recoveries for the cluster. + vtOrcProcess.DisableGlobalRecoveries(t) + + // Run PRS to promote a different replica. + output, err := clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommandWithOutput( + "PlannedReparentShard", + fmt.Sprintf("%s/%s", keyspace.Name, shard0.Name), + "--new-primary", replica.Alias) + require.NoError(t, err, "error in PlannedReparentShard output - %s", output) + + // Stop replication on the previous primary to simulate it not reparenting properly. + // Also insert an errant GTID on the previous primary. + err = utils.RunSQLs(t, []string{ + "STOP REPLICA", + "RESET REPLICA ALL", + "set global read_only=OFF", + "insert into vt_ks.vt_insert_test(id, msg) values (10173, 'test 178342')", + }, curPrimary, "") + require.NoError(t, err) + + // Re-enable global recoveries so that VTOrc can detect the errant GTID and change the tablet to a drained type. + vtOrcProcess.EnableGlobalRecoveries(t) + + // Wait for the tablet to be drained. + utils.WaitForTabletType(t, curPrimary, "drained") +} + +>>>>>>> bad431deed (Remove broken panic handler (#17354)) // Cases to test: // 1. create cluster with 1 replica and 1 rdonly, let orc choose primary // verify rdonly is not elected, only replica // verify replication is setup func TestSingleKeyspace(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 1, 1, []string{"--clusters_to_watch", "ks"}, cluster.VTOrcConfiguration{ PreventCrossDataCenterPrimaryFailover: true, }, 1, "") @@ -86,7 +144,6 @@ // verify replication is setup func TestKeyspaceShard(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 1, 1, []string{"--clusters_to_watch", "ks/0"}, cluster.VTOrcConfiguration{ PreventCrossDataCenterPrimaryFailover: true, }, 1, "") @@ -108,7 +165,6 @@ // 6. disable recoveries and make sure the detected problems are set correctly.
func TestVTOrcRepairs(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 3, 0, []string{"--change-tablets-with-errant-gtid-to-drained"}, cluster.VTOrcConfiguration{ PreventCrossDataCenterPrimaryFailover: true, }, 1, "") @@ -275,7 +331,6 @@ func TestRepairAfterTER(t *testing.T) { // test fails intermittently on CI, skip until it can be fixed. t.SkipNow() defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 0, nil, cluster.VTOrcConfiguration{ PreventCrossDataCenterPrimaryFailover: true, }, 1, "") @@ -377,7 +432,6 @@ func TestSemiSync(t *testing.T) { // TestVTOrcWithPrs tests that VTOrc works fine even when PRS is called from vtctld func TestVTOrcWithPrs(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 4, 0, nil, cluster.VTOrcConfiguration{ PreventCrossDataCenterPrimaryFailover: true, }, 1, "") @@ -429,7 +483,6 @@ func TestVTOrcWithPrs(t *testing.T) { // TestMultipleDurabilities tests that VTOrc works with 2 keyspaces having 2 different durability policies func TestMultipleDurabilities(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) // Setup a normal cluster and start vtorc utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 1, 1, nil, cluster.VTOrcConfiguration{}, 1, "") // Setup a semi-sync cluster @@ -447,6 +500,48 @@ func TestMultipleDurabilities(t *testing.T) { assert.NotNil(t, primary, "should have elected a primary") } +<<<<<<< HEAD +======= +// TestDrainedTablet tests that we don't forget drained tablets and they still show up in the vtorc output. +func TestDrainedTablet(t *testing.T) { + defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) + + // Setup a normal cluster and start vtorc + utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 0, nil, cluster.VTOrcConfiguration{}, 1, "") + keyspace := &clusterInfo.ClusterInstance.Keyspaces[0] + shard0 := &keyspace.Shards[0] + + // find primary from topo + curPrimary := utils.ShardPrimaryTablet(t, clusterInfo, keyspace, shard0) + assert.NotNil(t, curPrimary, "should have elected a primary") + vtOrcProcess := clusterInfo.ClusterInstance.VTOrcProcesses[0] + + // find any replica tablet other than the current primary + var replica *cluster.Vttablet + for _, tablet := range shard0.Vttablets { + if tablet.Alias != curPrimary.Alias { + replica = tablet + break + } + } + require.NotNil(t, replica, "could not find any replica tablet") + + output, err := clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommandWithOutput( + "ChangeTabletType", replica.Alias, "DRAINED") + require.NoError(t, err, "error in changing tablet type output - %s", output) + + // Make sure VTOrc sees the drained tablets and doesn't forget them. + utils.WaitForDrainedTabletInVTOrc(t, vtOrcProcess, 1) + + output, err = clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommandWithOutput( + "ChangeTabletType", replica.Alias, "REPLICA") + require.NoError(t, err, "error in changing tablet type output - %s", output) + + // We have no drained tablets anymore. Wait for VTOrc to have processed that. 
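+ // (A drained-tablet count of 0 confirms VTOrc has refreshed its view after the tablet went back to REPLICA.)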
+ utils.WaitForDrainedTabletInVTOrc(t, vtOrcProcess, 0) +} + +>>>>>>> bad431deed (Remove broken panic handler (#17354)) // TestDurabilityPolicySetLater tests that VTOrc works even if the durability policy of the keyspace is // set after VTOrc has been started. func TestDurabilityPolicySetLater(t *testing.T) { @@ -498,7 +593,6 @@ func TestDurabilityPolicySetLater(t *testing.T) { func TestFullStatusConnectionPooling(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 4, 0, []string{ "--tablet_manager_grpc_concurrency=1", }, cluster.VTOrcConfiguration{ diff --git a/go/test/endtoend/vtorc/primaryfailure/main_test.go b/go/test/endtoend/vtorc/primaryfailure/main_test.go index a3e50bd0cc9..cd03df01bd6 100644 --- a/go/test/endtoend/vtorc/primaryfailure/main_test.go +++ b/go/test/endtoend/vtorc/primaryfailure/main_test.go @@ -21,7 +21,6 @@ import ( "os" "testing" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/vtorc/utils" ) @@ -53,8 +52,6 @@ func TestMain(m *testing.M) { return m.Run(), nil }() - cluster.PanicHandler(nil) - if clusterInfo != nil { // stop vtorc first otherwise its logs get polluted // with instances being unreachable triggering unnecessary operations diff --git a/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go b/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go index 180f367d7fb..4403724a808 100644 --- a/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go +++ b/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go @@ -39,7 +39,6 @@ import ( // Also tests that VTOrc can handle multiple failures, if the durability policies allow it func TestDownPrimary(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) // We specify the --wait-replicas-timeout to a small value because we spawn a cross-cell replica later in the test. // If that replica is more advanced than the same-cell-replica, then we try to promote the cross-cell replica as an intermediate source. // If we don't specify a small value of --wait-replicas-timeout, then we would end up waiting for 30 seconds for the dead-primary to respond, failing this test. @@ -106,7 +105,6 @@ func TestDownPrimary(t *testing.T) { // bring down primary before VTOrc has started, let vtorc repair. func TestDownPrimaryBeforeVTOrc(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{}, 0, "none") keyspace := &clusterInfo.ClusterInstance.Keyspaces[0] shard0 := &keyspace.Shards[0] @@ -162,7 +160,6 @@ func TestDownPrimaryBeforeVTOrc(t *testing.T) { // delete the primary record and let vtorc repair. func TestDeletedPrimaryTablet(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, []string{"--remote_operation_timeout=10s"}, cluster.VTOrcConfiguration{}, 1, "none") keyspace := &clusterInfo.ClusterInstance.Keyspaces[0] shard0 := &keyspace.Shards[0] @@ -229,7 +226,6 @@ func TestDeletedPrimaryTablet(t *testing.T) { // that primary is unreachable. This help us save few seconds depending on value of `RemoteOperationTimeout` flag. 
func TestDeadPrimaryRecoversImmediately(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) // We specify the --wait-replicas-timeout to a small value because we spawn a cross-cell replica later in the test. // If that replica is more advanced than the same-cell-replica, then we try to promote the cross-cell replica as an intermediate source. // If we don't specify a small value of --wait-replicas-timeout, then we would end up waiting for 30 seconds for the dead-primary to respond, failing this test. @@ -312,7 +308,6 @@ func TestDeadPrimaryRecoversImmediately(t *testing.T) { // covers part of the test case master-failover-lost-replicas from orchestrator func TestCrossDataCenterFailure(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{ PreventCrossDataCenterPrimaryFailover: true, }, 1, "") @@ -358,7 +353,6 @@ func TestCrossDataCenterFailure(t *testing.T) { // In case of no viable candidates, we should error out func TestCrossDataCenterFailureError(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 1, 1, nil, cluster.VTOrcConfiguration{ PreventCrossDataCenterPrimaryFailover: true, }, 1, "") @@ -405,7 +399,6 @@ func TestLostRdonlyOnPrimaryFailure(t *testing.T) { // were detected by vtorc and could be configured to have their sources detached t.Skip() defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 2, nil, cluster.VTOrcConfiguration{ PreventCrossDataCenterPrimaryFailover: true, }, 1, "") @@ -487,7 +480,6 @@ func TestLostRdonlyOnPrimaryFailure(t *testing.T) { // covers the test case master-failover-fail-promotion-lag-minutes-success from orchestrator func TestPromotionLagSuccess(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{ ReplicationLagQuery: "select 59", FailPrimaryPromotionOnLagMinutes: 1, @@ -537,7 +529,6 @@ func TestPromotionLagFailure(t *testing.T) { // was smaller than the configured value, otherwise it would fail the promotion t.Skip() defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 3, 1, nil, cluster.VTOrcConfiguration{ ReplicationLagQuery: "select 61", FailPrimaryPromotionOnLagMinutes: 1, @@ -590,7 +581,6 @@ func TestPromotionLagFailure(t *testing.T) { // That is the replica which should be promoted in case of primary failure func TestDownPrimaryPromotionRule(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{ LockShardTimeoutSeconds: 5, }, 1, "test") @@ -638,7 +628,6 @@ func TestDownPrimaryPromotionRule(t *testing.T) { // It should also be caught up when it is promoted func TestDownPrimaryPromotionRuleWithLag(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{ LockShardTimeoutSeconds: 5, }, 1, "test") @@ -718,7 +707,6 @@ 
func TestDownPrimaryPromotionRuleWithLag(t *testing.T) { // It should also be caught up when it is promoted func TestDownPrimaryPromotionRuleWithLagCrossCenter(t *testing.T) { defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) - defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{ LockShardTimeoutSeconds: 5, PreventCrossDataCenterPrimaryFailover: true, diff --git a/test/config.json b/test/config.json index d21994ddca0..ec5c7258900 100644 --- a/test/config.json +++ b/test/config.json @@ -331,17 +331,6 @@ "RetryMax": 1, "Tags": [] }, - "pitr": { - "File": "unused.go", - "Args": ["vitess.io/vitess/go/test/endtoend/recovery/pitr"], - "Command": [], - "Manual": false, - "Shard": "10", - "RetryMax": 1, - "Tags": [ - "site_test" - ] - }, "recovery": { "File": "unused.go", "Args": ["vitess.io/vitess/go/test/endtoend/recovery/unshardedrecovery"],