Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[release-21.0] Remove broken panic handler (#17354) #17360

Merged
merged 2 commits into from
Dec 12, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 0 additions & 2 deletions go/test/endtoend/backup/vtbackup/backup_only_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,6 @@ func TestTabletInitialBackup(t *testing.T) {
// - Take a Second Backup
// - Bring up a second replica, and restore from the second backup
// - list the backups, remove them
defer cluster.PanicHandler(t)

waitForReplicationToCatchup([]cluster.Vttablet{*replica1, *replica2})

Expand Down Expand Up @@ -102,7 +101,6 @@ func TestTabletBackupOnly(t *testing.T) {
// - Take a Second Backup
// - Bring up a second replica, and restore from the second backup
// - list the backups, remove them
defer cluster.PanicHandler(t)

// Reset the tablet object values in order on init tablet in the next step.
primary.VttabletProcess.ServingStatus = "NOT_SERVING"
Expand Down
1 change: 0 additions & 1 deletion go/test/endtoend/backup/vtbackup/main_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,6 @@ var (
)

func TestMain(m *testing.M) {
defer cluster.PanicHandler(nil)
flag.Parse()

exitCode, err := func() (int, error) {
Expand Down
3 changes: 0 additions & 3 deletions go/test/endtoend/backup/vtctlbackup/backup_utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -405,7 +405,6 @@ func TestBackup(t *testing.T, setupType int, streamMode string, stripes int, cDe
}, //
}

defer cluster.PanicHandler(t)
// setup cluster for the testing
code, err := LaunchCluster(setupType, streamMode, stripes, cDetails)
require.Nilf(t, err, "setup failed with status code %d", code)
Expand Down Expand Up @@ -1507,7 +1506,6 @@ func getLastBackup(t *testing.T) string {

func TestBackupEngineSelector(t *testing.T) {
defer setDefaultCommonArgs()
defer cluster.PanicHandler(t)

	// launch the cluster with xtrabackup as the default engine
code, err := LaunchCluster(XtraBackup, "xbstream", 0, &CompressionDetails{CompressorEngineName: "pgzip"})
Expand Down Expand Up @@ -1548,7 +1546,6 @@ func TestBackupEngineSelector(t *testing.T) {

func TestRestoreAllowedBackupEngines(t *testing.T) {
defer setDefaultCommonArgs()
defer cluster.PanicHandler(t)

backupMsg := "right after xtrabackup backup"

Expand Down
3 changes: 0 additions & 3 deletions go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go
Original file line number Diff line number Diff line change
Expand Up @@ -95,7 +95,6 @@ func waitForReplica(t *testing.T, replicaIndex int) int {
// in between, it makes writes to the database, and takes notes: what data was available in what backup.
// It then restores each and every one of those backups, in random order, and expects to find the specific data associated with the backup.
func ExecTestIncrementalBackupAndRestoreToPos(t *testing.T, tcase *PITRTestCase) {
defer cluster.PanicHandler(t)

t.Run(tcase.Name, func(t *testing.T) {
// setup cluster for the testing
Expand Down Expand Up @@ -339,7 +338,6 @@ func ExecTestIncrementalBackupAndRestoreToPos(t *testing.T, tcase *PITRTestCase)

// ExecTestIncrementalBackupAndRestoreToPos
func ExecTestIncrementalBackupAndRestoreToTimestamp(t *testing.T, tcase *PITRTestCase) {
defer cluster.PanicHandler(t)

var lastInsertedRowTimestamp time.Time
insertRowOnPrimary := func(t *testing.T, hint string) {
Expand Down Expand Up @@ -605,7 +603,6 @@ func ExecTestIncrementalBackupAndRestoreToTimestamp(t *testing.T, tcase *PITRTes
// Specifically, it's designed to test how incremental backups are taken by interleaved replicas, so that they successfully build on
// one another.
func ExecTestIncrementalBackupOnTwoTablets(t *testing.T, tcase *PITRTestCase) {
defer cluster.PanicHandler(t)

t.Run(tcase.Name, func(t *testing.T) {
// setup cluster for the testing
Expand Down
3 changes: 0 additions & 3 deletions go/test/endtoend/cellalias/cell_alias_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -90,7 +90,6 @@ var (
)

func TestMain(m *testing.M) {
defer cluster.PanicHandler(nil)
flag.Parse()

exitcode, err := func() (int, error) {
Expand Down Expand Up @@ -232,7 +231,6 @@ func TestMain(m *testing.M) {
}

func TestAlias(t *testing.T) {
defer cluster.PanicHandler(t)

insertInitialValues(t)
defer deleteInitialValues(t)
Expand Down Expand Up @@ -296,7 +294,6 @@ func TestAlias(t *testing.T) {
}

func TestAddAliasWhileVtgateUp(t *testing.T) {
defer cluster.PanicHandler(t)

insertInitialValues(t)
defer deleteInitialValues(t)
Expand Down
1 change: 0 additions & 1 deletion go/test/endtoend/cluster/cluster_process.go
Original file line number Diff line number Diff line change
Expand Up @@ -1031,7 +1031,6 @@ func (cluster *LocalProcessCluster) StreamTabletHealthUntil(ctx context.Context,

// Teardown brings down the cluster by invoking teardown for individual processes
func (cluster *LocalProcessCluster) Teardown() {
PanicHandler(nil)
cluster.mx.Lock()
defer cluster.mx.Unlock()
if cluster.teardownCompleted {
Expand Down
9 changes: 0 additions & 9 deletions go/test/endtoend/cluster/cluster_util.go
Original file line number Diff line number Diff line change
Expand Up @@ -126,15 +126,6 @@ func VerifyRowsInTablet(t *testing.T, vttablet *Vttablet, ksName string, expecte
VerifyRowsInTabletForTable(t, vttablet, ksName, expectedRows, "vt_insert_test")
}

// PanicHandler handles the panic in the testcase.
func PanicHandler(t testing.TB) {
err := recover()
if t == nil {
return
}
require.Nilf(t, err, "panic occured in testcase %v", t.Name())
}

// ListBackups lists backups present in the shard
func (cluster LocalProcessCluster) ListBackups(shardKsName string) ([]string, error) {
output, err := cluster.VtctldClientProcess.ExecuteCommandWithOutput("GetBackups", shardKsName)
Expand Down
1 change: 0 additions & 1 deletion go/test/endtoend/clustertest/add_keyspace_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,6 @@ primary key (id)
)

func TestAddKeyspace(t *testing.T) {
defer cluster.PanicHandler(t)
if err := clusterInstance.StartKeyspace(*testKeyspace, []string{"-80", "80-"}, 0, false); err != nil {
log.Errorf("failed to AddKeyspace %v: %v", *testKeyspace, err)
t.Fatal(err)
Expand Down
3 changes: 0 additions & 3 deletions go/test/endtoend/clustertest/etcd_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,12 +24,9 @@ import (

"github.com/stretchr/testify/require"
clientv3 "go.etcd.io/etcd/client/v3"

"vitess.io/vitess/go/test/endtoend/cluster"
)

func TestEtcdServer(t *testing.T) {
defer cluster.PanicHandler(t)

// Confirm the basic etcd cluster health.
etcdHealthURL := fmt.Sprintf("http://%s:%d/health", clusterInstance.Hostname, clusterInstance.TopoPort)
Expand Down
1 change: 0 additions & 1 deletion go/test/endtoend/clustertest/main_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,6 @@ var (
)

func TestMain(m *testing.M) {
defer cluster.PanicHandler(nil)
flag.Parse()

exitCode := func() int {
Expand Down
3 changes: 0 additions & 3 deletions go/test/endtoend/clustertest/vtctld_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -30,8 +30,6 @@ import (

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

"vitess.io/vitess/go/test/endtoend/cluster"
)

var (
Expand All @@ -44,7 +42,6 @@ var (
)

func TestVtctldProcess(t *testing.T) {
defer cluster.PanicHandler(t)
url := fmt.Sprintf("http://%s:%d/api/keyspaces/", clusterInstance.Hostname, clusterInstance.VtctldHTTPPort)
testURL(t, url, "keyspace url")

Expand Down
2 changes: 0 additions & 2 deletions go/test/endtoend/clustertest/vtgate_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -32,11 +32,9 @@ import (
"github.com/stretchr/testify/require"

"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/test/endtoend/cluster"
)

func TestVtgateProcess(t *testing.T) {
defer cluster.PanicHandler(t)
verifyVtgateVariables(t, clusterInstance.VtgateProcess.VerifyURL)
ctx := context.Background()
conn, err := mysql.Connect(ctx, &vtParams)
Expand Down
4 changes: 0 additions & 4 deletions go/test/endtoend/clustertest/vttablet_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,12 +25,9 @@ import (
"testing"

"github.com/stretchr/testify/require"

"vitess.io/vitess/go/test/endtoend/cluster"
)

func TestVttabletProcess(t *testing.T) {
defer cluster.PanicHandler(t)
firstTabletPort := clusterInstance.Keyspaces[0].Shards[0].Vttablets[0].HTTPPort
testURL(t, fmt.Sprintf("http://localhost:%d/debug/vars/", firstTabletPort), "tablet debug var url")
resp, err := http.Get(fmt.Sprintf("http://localhost:%d/debug/vars", firstTabletPort))
Expand All @@ -48,7 +45,6 @@ func TestVttabletProcess(t *testing.T) {
}

func TestDeleteTablet(t *testing.T) {
defer cluster.PanicHandler(t)
primary := clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet()
require.NotNil(t, primary)
_, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("DeleteTablets", "--allow-primary", primary.Alias)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,6 @@ var (

// This test makes sure that we can use SSL replication with Vitess
func TestSecure(t *testing.T) {
defer cluster.PanicHandler(t)
testReplicationBase(t, true)
testReplicationBase(t, false)
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -102,7 +102,6 @@ var (
)

func TestSecureTransport(t *testing.T) {
defer cluster.PanicHandler(t)
flag.Parse()

// initialize cluster
Expand Down
9 changes: 0 additions & 9 deletions go/test/endtoend/keyspace/keyspace_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -81,7 +81,6 @@ var (
)

func TestMain(m *testing.M) {
defer cluster.PanicHandler(nil)
flag.Parse()

exitCode := func() int {
Expand Down Expand Up @@ -167,7 +166,6 @@ func checkDurabilityPolicy(t *testing.T, durabilityPolicy string) {
}

func TestGetSrvKeyspaceNames(t *testing.T) {
defer cluster.PanicHandler(t)
data, err := clusterForKSTest.VtctldClientProcess.ExecuteCommandWithOutput("GetSrvKeyspaceNames", cell)
require.Nil(t, err)

Expand All @@ -180,7 +178,6 @@ func TestGetSrvKeyspaceNames(t *testing.T) {
}

func TestGetSrvKeyspacePartitions(t *testing.T) {
defer cluster.PanicHandler(t)
shardedSrvKeyspace := getSrvKeyspace(t, cell, keyspaceShardedName)
otherShardRefFound := false
for _, partition := range shardedSrvKeyspace.Partitions {
Expand Down Expand Up @@ -209,20 +206,17 @@ func TestGetSrvKeyspacePartitions(t *testing.T) {
}

func TestShardNames(t *testing.T) {
defer cluster.PanicHandler(t)
output, err := clusterForKSTest.VtctldClientProcess.GetSrvKeyspaces(keyspaceShardedName, cell)
require.NoError(t, err)
require.NotNil(t, output[cell], "no srvkeyspace for cell %s", cell)
}

func TestGetKeyspace(t *testing.T) {
defer cluster.PanicHandler(t)
_, err := clusterForKSTest.VtctldClientProcess.GetKeyspace(keyspaceUnshardedName)
require.Nil(t, err)
}

func TestDeleteKeyspace(t *testing.T) {
defer cluster.PanicHandler(t)
_ = clusterForKSTest.VtctldClientProcess.CreateKeyspace("test_delete_keyspace", sidecar.DefaultName)
_ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("CreateShard", "test_delete_keyspace/0")
_ = clusterForKSTest.InitTablet(&cluster.Vttablet{
Expand Down Expand Up @@ -353,7 +347,6 @@ func TestDeleteKeyspace(t *testing.T) {
} */

func TestShardCountForAllKeyspaces(t *testing.T) {
defer cluster.PanicHandler(t)
testShardCountForKeyspace(t, keyspaceUnshardedName, 1)
testShardCountForKeyspace(t, keyspaceShardedName, 2)
}
Expand All @@ -370,7 +363,6 @@ func testShardCountForKeyspace(t *testing.T, keyspace string, count int) {
}

func TestShardNameForAllKeyspaces(t *testing.T) {
defer cluster.PanicHandler(t)
testShardNameForKeyspace(t, keyspaceUnshardedName, []string{"test_ks_unsharded"})
testShardNameForKeyspace(t, keyspaceShardedName, []string{"-80", "80-"})
}
Expand All @@ -389,7 +381,6 @@ func testShardNameForKeyspace(t *testing.T, keyspace string, shardNames []string
}

func TestKeyspaceToShardName(t *testing.T) {
defer cluster.PanicHandler(t)
var id []byte
srvKeyspace := getSrvKeyspace(t, cell, keyspaceShardedName)

Expand Down
1 change: 0 additions & 1 deletion go/test/endtoend/messaging/main_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -104,7 +104,6 @@ var (
)

func TestMain(m *testing.M) {
defer cluster.PanicHandler(nil)
flag.Parse()

exitcode, err := func() (int, error) {
Expand Down
3 changes: 0 additions & 3 deletions go/test/endtoend/messaging/message_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -375,7 +375,6 @@ func TestUnsharded(t *testing.T) {

// TestReparenting checks the client connection count after reparenting.
func TestReparenting(t *testing.T) {
defer cluster.PanicHandler(t)
name := "sharded_message"

ctx := context.Background()
Expand Down Expand Up @@ -435,7 +434,6 @@ func TestReparenting(t *testing.T) {

// TestConnection validate the connection count and message streaming.
func TestConnection(t *testing.T) {
defer cluster.PanicHandler(t)

name := "sharded_message"

Expand Down Expand Up @@ -494,7 +492,6 @@ func TestConnection(t *testing.T) {
}

func testMessaging(t *testing.T, name, ks string) {
defer cluster.PanicHandler(t)
ctx := context.Background()
stream, err := VtgateGrpcConn(ctx, clusterInstance)
require.Nil(t, err)
Expand Down
3 changes: 0 additions & 3 deletions go/test/endtoend/mysqlctl/mysqlctl_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,6 @@ var (
)

func TestMain(m *testing.M) {
defer cluster.PanicHandler(nil)
flag.Parse()

exitCode := func() int {
Expand Down Expand Up @@ -139,7 +138,6 @@ func initCluster(shardNames []string, totalTabletsRequired int) {
}

func TestRestart(t *testing.T) {
defer cluster.PanicHandler(t)
err := primaryTablet.MysqlctlProcess.Stop()
require.NoError(t, err)
primaryTablet.MysqlctlProcess.CleanupFiles(primaryTablet.TabletUID)
Expand All @@ -148,7 +146,6 @@ func TestRestart(t *testing.T) {
}

func TestAutoDetect(t *testing.T) {
defer cluster.PanicHandler(t)

err := clusterInstance.Keyspaces[0].Shards[0].Vttablets[0].VttabletProcess.Setup()
require.NoError(t, err)
Expand Down
3 changes: 0 additions & 3 deletions go/test/endtoend/mysqlctld/mysqlctld_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,6 @@ var (
)

func TestMain(m *testing.M) {
defer cluster.PanicHandler(nil)
flag.Parse()

exitCode := func() int {
Expand Down Expand Up @@ -141,7 +140,6 @@ func initCluster(shardNames []string, totalTabletsRequired int) error {
}

func TestRestart(t *testing.T) {
defer cluster.PanicHandler(t)
err := primaryTablet.MysqlctldProcess.Stop()
require.Nil(t, err)
require.Truef(t, primaryTablet.MysqlctldProcess.WaitForMysqlCtldShutdown(), "Mysqlctld has not stopped...")
Expand All @@ -151,7 +149,6 @@ func TestRestart(t *testing.T) {
}

func TestAutoDetect(t *testing.T) {
defer cluster.PanicHandler(t)

err := clusterInstance.Keyspaces[0].Shards[0].Vttablets[0].VttabletProcess.Setup()
require.Nil(t, err, "error should be nil")
Expand Down
1 change: 0 additions & 1 deletion go/test/endtoend/mysqlserver/main_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,6 @@ END;
)

func TestMain(m *testing.M) {
defer cluster.PanicHandler(nil)
flag.Parse()

// setting grpc max size
Expand Down
Loading
Loading