Skip to content

Commit

Permalink
Remove custom panic handler entirely
Browse files Browse the repository at this point in the history
When a test panics, it's way more useful to see the actual backtrace for
the panic and not try to recover anything that hides that information.

Signed-off-by: Dirkjan Bussink <[email protected]>
  • Loading branch information
dbussink committed Dec 10, 2024
1 parent abb23c4 commit 826c780
Show file tree
Hide file tree
Showing 111 changed files with 0 additions and 310 deletions.
2 changes: 0 additions & 2 deletions go/test/endtoend/backup/vtbackup/backup_only_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,6 @@ func TestTabletInitialBackup(t *testing.T) {
// - Take a Second Backup
// - Bring up a second replica, and restore from the second backup
// - list the backups, remove them
defer cluster.PanicHandler(t)

waitForReplicationToCatchup([]cluster.Vttablet{*replica1, *replica2})

Expand Down Expand Up @@ -102,7 +101,6 @@ func TestTabletBackupOnly(t *testing.T) {
// - Take a Second Backup
// - Bring up a second replica, and restore from the second backup
// - list the backups, remove them
defer cluster.PanicHandler(t)

// Reset the tablet object values in order on init tablet in the next step.
primary.VttabletProcess.ServingStatus = "NOT_SERVING"
Expand Down
3 changes: 0 additions & 3 deletions go/test/endtoend/backup/vtctlbackup/backup_utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -405,7 +405,6 @@ func TestBackup(t *testing.T, setupType int, streamMode string, stripes int, cDe
}, //
}

defer cluster.PanicHandler(t)
// setup cluster for the testing
code, err := LaunchCluster(setupType, streamMode, stripes, cDetails)
require.Nilf(t, err, "setup failed with status code %d", code)
Expand Down Expand Up @@ -1507,7 +1506,6 @@ func getLastBackup(t *testing.T) string {

func TestBackupEngineSelector(t *testing.T) {
defer setDefaultCommonArgs()
defer cluster.PanicHandler(t)

// launch the cluster with xtrabackup as the default engine
code, err := LaunchCluster(XtraBackup, "xbstream", 0, &CompressionDetails{CompressorEngineName: "pgzip"})
Expand Down Expand Up @@ -1548,7 +1546,6 @@ func TestBackupEngineSelector(t *testing.T) {

func TestRestoreAllowedBackupEngines(t *testing.T) {
defer setDefaultCommonArgs()
defer cluster.PanicHandler(t)

backupMsg := "right after xtrabackup backup"

Expand Down
3 changes: 0 additions & 3 deletions go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go
Original file line number Diff line number Diff line change
Expand Up @@ -95,7 +95,6 @@ func waitForReplica(t *testing.T, replicaIndex int) int {
// in between, it makes writes to the database, and takes notes: what data was available in what backup.
// It then restores each and every one of those backups, in random order, and expects to find the specific data associated with the backup.
func ExecTestIncrementalBackupAndRestoreToPos(t *testing.T, tcase *PITRTestCase) {
defer cluster.PanicHandler(t)

t.Run(tcase.Name, func(t *testing.T) {
// setup cluster for the testing
Expand Down Expand Up @@ -339,7 +338,6 @@ func ExecTestIncrementalBackupAndRestoreToPos(t *testing.T, tcase *PITRTestCase)

// ExecTestIncrementalBackupAndRestoreToPos
func ExecTestIncrementalBackupAndRestoreToTimestamp(t *testing.T, tcase *PITRTestCase) {
defer cluster.PanicHandler(t)

var lastInsertedRowTimestamp time.Time
insertRowOnPrimary := func(t *testing.T, hint string) {
Expand Down Expand Up @@ -605,7 +603,6 @@ func ExecTestIncrementalBackupAndRestoreToTimestamp(t *testing.T, tcase *PITRTes
// Specifically, it's designed to test how incremental backups are taken by interleaved replicas, so that they successfully build on
// one another.
func ExecTestIncrementalBackupOnTwoTablets(t *testing.T, tcase *PITRTestCase) {
defer cluster.PanicHandler(t)

t.Run(tcase.Name, func(t *testing.T) {
// setup cluster for the testing
Expand Down
2 changes: 0 additions & 2 deletions go/test/endtoend/cellalias/cell_alias_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -231,7 +231,6 @@ func TestMain(m *testing.M) {
}

func TestAlias(t *testing.T) {
defer cluster.PanicHandler(t)

insertInitialValues(t)
defer deleteInitialValues(t)
Expand Down Expand Up @@ -295,7 +294,6 @@ func TestAlias(t *testing.T) {
}

func TestAddAliasWhileVtgateUp(t *testing.T) {
defer cluster.PanicHandler(t)

insertInitialValues(t)
defer deleteInitialValues(t)
Expand Down
1 change: 0 additions & 1 deletion go/test/endtoend/cluster/cluster_process.go
Original file line number Diff line number Diff line change
Expand Up @@ -1043,7 +1043,6 @@ func (cluster *LocalProcessCluster) StreamTabletHealthUntil(ctx context.Context,

// Teardown brings down the cluster by invoking teardown for individual processes
func (cluster *LocalProcessCluster) Teardown() {
PanicHandler(nil)
cluster.mx.Lock()
defer cluster.mx.Unlock()
if cluster.teardownCompleted {
Expand Down
9 changes: 0 additions & 9 deletions go/test/endtoend/cluster/cluster_util.go
Original file line number Diff line number Diff line change
Expand Up @@ -126,15 +126,6 @@ func VerifyRowsInTablet(t *testing.T, vttablet *Vttablet, ksName string, expecte
VerifyRowsInTabletForTable(t, vttablet, ksName, expectedRows, "vt_insert_test")
}

// PanicHandler handles the panic in the testcase.
func PanicHandler(t testing.TB) {
err := recover()
if t == nil {
return
}
require.Nilf(t, err, "panic occured in testcase %v", t.Name())
}

// ListBackups lists the backups present in the shard
func (cluster LocalProcessCluster) ListBackups(shardKsName string) ([]string, error) {
output, err := cluster.VtctldClientProcess.ExecuteCommandWithOutput("GetBackups", shardKsName)
Expand Down
1 change: 0 additions & 1 deletion go/test/endtoend/clustertest/add_keyspace_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,6 @@ primary key (id)
)

func TestAddKeyspace(t *testing.T) {
defer cluster.PanicHandler(t)
if err := clusterInstance.StartKeyspace(*testKeyspace, []string{"-80", "80-"}, 0, false); err != nil {
log.Errorf("failed to AddKeyspace %v: %v", *testKeyspace, err)
t.Fatal(err)
Expand Down
3 changes: 0 additions & 3 deletions go/test/endtoend/clustertest/etcd_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,12 +24,9 @@ import (

"github.com/stretchr/testify/require"
clientv3 "go.etcd.io/etcd/client/v3"

"vitess.io/vitess/go/test/endtoend/cluster"
)

func TestEtcdServer(t *testing.T) {
defer cluster.PanicHandler(t)

// Confirm the basic etcd cluster health.
etcdHealthURL := fmt.Sprintf("http://%s:%d/health", clusterInstance.Hostname, clusterInstance.TopoPort)
Expand Down
3 changes: 0 additions & 3 deletions go/test/endtoend/clustertest/vtctld_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -30,8 +30,6 @@ import (

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

"vitess.io/vitess/go/test/endtoend/cluster"
)

var (
Expand All @@ -44,7 +42,6 @@ var (
)

func TestVtctldProcess(t *testing.T) {
defer cluster.PanicHandler(t)
url := fmt.Sprintf("http://%s:%d/api/keyspaces/", clusterInstance.Hostname, clusterInstance.VtctldHTTPPort)
testURL(t, url, "keyspace url")

Expand Down
2 changes: 0 additions & 2 deletions go/test/endtoend/clustertest/vtgate_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -32,11 +32,9 @@ import (
"github.com/stretchr/testify/require"

"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/test/endtoend/cluster"
)

func TestVtgateProcess(t *testing.T) {
defer cluster.PanicHandler(t)
verifyVtgateVariables(t, clusterInstance.VtgateProcess.VerifyURL)
ctx := context.Background()
conn, err := mysql.Connect(ctx, &vtParams)
Expand Down
4 changes: 0 additions & 4 deletions go/test/endtoend/clustertest/vttablet_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,12 +25,9 @@ import (
"testing"

"github.com/stretchr/testify/require"

"vitess.io/vitess/go/test/endtoend/cluster"
)

func TestVttabletProcess(t *testing.T) {
defer cluster.PanicHandler(t)
firstTabletPort := clusterInstance.Keyspaces[0].Shards[0].Vttablets[0].HTTPPort
testURL(t, fmt.Sprintf("http://localhost:%d/debug/vars/", firstTabletPort), "tablet debug var url")
resp, err := http.Get(fmt.Sprintf("http://localhost:%d/debug/vars", firstTabletPort))
Expand All @@ -48,7 +45,6 @@ func TestVttabletProcess(t *testing.T) {
}

func TestDeleteTablet(t *testing.T) {
defer cluster.PanicHandler(t)
primary := clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet()
require.NotNil(t, primary)
_, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("DeleteTablets", "--allow-primary", primary.Alias)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,6 @@ var (

// This test makes sure that we can use SSL replication with Vitess
func TestSecure(t *testing.T) {
defer cluster.PanicHandler(t)
testReplicationBase(t, true)
testReplicationBase(t, false)
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -102,7 +102,6 @@ var (
)

func TestSecureTransport(t *testing.T) {
defer cluster.PanicHandler(t)
flag.Parse()

// initialize cluster
Expand Down
8 changes: 0 additions & 8 deletions go/test/endtoend/keyspace/keyspace_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -166,7 +166,6 @@ func checkDurabilityPolicy(t *testing.T, durabilityPolicy string) {
}

func TestGetSrvKeyspaceNames(t *testing.T) {
defer cluster.PanicHandler(t)
data, err := clusterForKSTest.VtctldClientProcess.ExecuteCommandWithOutput("GetSrvKeyspaceNames", cell)
require.Nil(t, err)

Expand All @@ -179,7 +178,6 @@ func TestGetSrvKeyspaceNames(t *testing.T) {
}

func TestGetSrvKeyspacePartitions(t *testing.T) {
defer cluster.PanicHandler(t)
shardedSrvKeyspace := getSrvKeyspace(t, cell, keyspaceShardedName)
otherShardRefFound := false
for _, partition := range shardedSrvKeyspace.Partitions {
Expand Down Expand Up @@ -208,20 +206,17 @@ func TestGetSrvKeyspacePartitions(t *testing.T) {
}

func TestShardNames(t *testing.T) {
defer cluster.PanicHandler(t)
output, err := clusterForKSTest.VtctldClientProcess.GetSrvKeyspaces(keyspaceShardedName, cell)
require.NoError(t, err)
require.NotNil(t, output[cell], "no srvkeyspace for cell %s", cell)
}

func TestGetKeyspace(t *testing.T) {
defer cluster.PanicHandler(t)
_, err := clusterForKSTest.VtctldClientProcess.GetKeyspace(keyspaceUnshardedName)
require.Nil(t, err)
}

func TestDeleteKeyspace(t *testing.T) {
defer cluster.PanicHandler(t)
_ = clusterForKSTest.VtctldClientProcess.CreateKeyspace("test_delete_keyspace", sidecar.DefaultName)
_ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("CreateShard", "test_delete_keyspace/0")
_ = clusterForKSTest.InitTablet(&cluster.Vttablet{
Expand Down Expand Up @@ -352,7 +347,6 @@ func TestDeleteKeyspace(t *testing.T) {
} */

func TestShardCountForAllKeyspaces(t *testing.T) {
defer cluster.PanicHandler(t)
testShardCountForKeyspace(t, keyspaceUnshardedName, 1)
testShardCountForKeyspace(t, keyspaceShardedName, 2)
}
Expand All @@ -369,7 +363,6 @@ func testShardCountForKeyspace(t *testing.T, keyspace string, count int) {
}

func TestShardNameForAllKeyspaces(t *testing.T) {
defer cluster.PanicHandler(t)
testShardNameForKeyspace(t, keyspaceUnshardedName, []string{"test_ks_unsharded"})
testShardNameForKeyspace(t, keyspaceShardedName, []string{"-80", "80-"})
}
Expand All @@ -388,7 +381,6 @@ func testShardNameForKeyspace(t *testing.T, keyspace string, shardNames []string
}

func TestKeyspaceToShardName(t *testing.T) {
defer cluster.PanicHandler(t)
var id []byte
srvKeyspace := getSrvKeyspace(t, cell, keyspaceShardedName)

Expand Down
3 changes: 0 additions & 3 deletions go/test/endtoend/messaging/message_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -375,7 +375,6 @@ func TestUnsharded(t *testing.T) {

// TestReparenting checks the client connection count after reparenting.
func TestReparenting(t *testing.T) {
defer cluster.PanicHandler(t)
name := "sharded_message"

ctx := context.Background()
Expand Down Expand Up @@ -435,7 +434,6 @@ func TestReparenting(t *testing.T) {

// TestConnection validate the connection count and message streaming.
func TestConnection(t *testing.T) {
defer cluster.PanicHandler(t)

name := "sharded_message"

Expand Down Expand Up @@ -494,7 +492,6 @@ func TestConnection(t *testing.T) {
}

func testMessaging(t *testing.T, name, ks string) {
defer cluster.PanicHandler(t)
ctx := context.Background()
stream, err := VtgateGrpcConn(ctx, clusterInstance)
require.Nil(t, err)
Expand Down
2 changes: 0 additions & 2 deletions go/test/endtoend/mysqlctl/mysqlctl_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -138,7 +138,6 @@ func initCluster(shardNames []string, totalTabletsRequired int) {
}

func TestRestart(t *testing.T) {
defer cluster.PanicHandler(t)
err := primaryTablet.MysqlctlProcess.Stop()
require.NoError(t, err)
primaryTablet.MysqlctlProcess.CleanupFiles(primaryTablet.TabletUID)
Expand All @@ -147,7 +146,6 @@ func TestRestart(t *testing.T) {
}

func TestAutoDetect(t *testing.T) {
defer cluster.PanicHandler(t)

err := clusterInstance.Keyspaces[0].Shards[0].Vttablets[0].VttabletProcess.Setup()
require.NoError(t, err)
Expand Down
2 changes: 0 additions & 2 deletions go/test/endtoend/mysqlctld/mysqlctld_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -140,7 +140,6 @@ func initCluster(shardNames []string, totalTabletsRequired int) error {
}

func TestRestart(t *testing.T) {
defer cluster.PanicHandler(t)
err := primaryTablet.MysqlctldProcess.Stop()
require.Nil(t, err)
require.Truef(t, primaryTablet.MysqlctldProcess.WaitForMysqlCtldShutdown(), "Mysqlctld has not stopped...")
Expand All @@ -150,7 +149,6 @@ func TestRestart(t *testing.T) {
}

func TestAutoDetect(t *testing.T) {
defer cluster.PanicHandler(t)

err := clusterInstance.Keyspaces[0].Shards[0].Vttablets[0].VttabletProcess.Setup()
require.Nil(t, err, "error should be nil")
Expand Down
9 changes: 0 additions & 9 deletions go/test/endtoend/mysqlserver/mysql_server_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -35,14 +35,12 @@ import (
"vitess.io/vitess/go/mysql/sqlerror"

"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/test/endtoend/cluster"

_ "github.com/go-sql-driver/mysql"
)

// TestMultiStmt checks that multiStatements=True and multiStatements=False work properly.
func TestMultiStatement(t *testing.T) {
defer cluster.PanicHandler(t)
ctx := context.Background()

// connect database with multiStatements=True
Expand Down Expand Up @@ -70,7 +68,6 @@ func TestMultiStatement(t *testing.T) {

// TestLargeComment add large comment in insert stmt and validate the insert process.
func TestLargeComment(t *testing.T) {
defer cluster.PanicHandler(t)
ctx := context.Background()

conn, err := mysql.Connect(ctx, &vtParams)
Expand All @@ -89,7 +86,6 @@ func TestLargeComment(t *testing.T) {

// TestInsertLargerThenGrpcLimit insert blob larger then grpc limit and verify the error.
func TestInsertLargerThenGrpcLimit(t *testing.T) {
defer cluster.PanicHandler(t)

ctx := context.Background()

Expand All @@ -109,7 +105,6 @@ func TestInsertLargerThenGrpcLimit(t *testing.T) {

// TestTimeout executes sleep(5) with query_timeout of 1 second, and verifies the error.
func TestTimeout(t *testing.T) {
defer cluster.PanicHandler(t)
ctx := context.Background()

conn, err := mysql.Connect(ctx, &vtParams)
Expand All @@ -125,7 +120,6 @@ func TestTimeout(t *testing.T) {

// TestInvalidField tries to fetch invalid column and verifies the error.
func TestInvalidField(t *testing.T) {
defer cluster.PanicHandler(t)
ctx := context.Background()

conn, err := mysql.Connect(ctx, &vtParams)
Expand All @@ -141,7 +135,6 @@ func TestInvalidField(t *testing.T) {

// TestWarnings validates the behaviour of SHOW WARNINGS.
func TestWarnings(t *testing.T) {
defer cluster.PanicHandler(t)
ctx := context.Background()

conn, err := mysql.Connect(ctx, &vtParams)
Expand Down Expand Up @@ -183,7 +176,6 @@ func TestWarnings(t *testing.T) {
// TestSelectWithUnauthorizedUser verifies that an unauthorized user
// is not able to read from the table.
func TestSelectWithUnauthorizedUser(t *testing.T) {
defer cluster.PanicHandler(t)
ctx := context.Background()

tmpVtParam := vtParams
Expand All @@ -202,7 +194,6 @@ func TestSelectWithUnauthorizedUser(t *testing.T) {

// TestPartitionedTable validates that partitioned tables are recognized by schema engine
func TestPartitionedTable(t *testing.T) {
defer cluster.PanicHandler(t)

tablet := clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet()

Expand Down
1 change: 0 additions & 1 deletion go/test/endtoend/onlineddl/flow/onlineddl_flow_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -196,7 +196,6 @@ func TestMain(m *testing.M) {
}

func TestOnlineDDLFlow(t *testing.T) {
defer cluster.PanicHandler(t)
ctx := context.Background()

require.NotNil(t, clusterInstance)
Expand Down
1 change: 0 additions & 1 deletion go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -203,7 +203,6 @@ func TestMain(m *testing.M) {
}

func TestRevertSchemaChanges(t *testing.T) {
defer cluster.PanicHandler(t)
shards = clusterInstance.Keyspaces[0].Shards
require.Equal(t, 1, len(shards))

Expand Down
Loading

0 comments on commit 826c780

Please sign in to comment.