From 4808c2ac923ccde7076345c5e52acef970f4e3a8 Mon Sep 17 00:00:00 2001 From: Andrew Mason Date: Fri, 16 Feb 2024 14:37:13 -0500 Subject: [PATCH 01/17] Rewrite _many_ tests to use vtctldclient invocations, mostly non-output related stuff Signed-off-by: Andrew Mason --- .../backup/vtbackup/backup_only_test.go | 8 +- .../backup/vtctlbackup/backup_utils.go | 99 +++++++++---------- go/test/endtoend/cellalias/cell_alias_test.go | 24 ++--- go/test/endtoend/cluster/cluster_process.go | 12 +-- go/test/endtoend/cluster/cluster_util.go | 4 +- .../endtoend/cluster/vtctldclient_process.go | 76 ++++++++++++++ go/test/endtoend/clustertest/vttablet_test.go | 2 +- .../encrypted_replication_test.go | 2 +- go/test/endtoend/keyspace/keyspace_test.go | 39 ++++---- go/test/endtoend/messaging/message_test.go | 20 ++-- go/test/endtoend/migration/migration_test.go | 4 +- go/test/endtoend/mysqlctl/mysqlctl_test.go | 2 +- go/test/endtoend/mysqlctld/mysqlctld_test.go | 2 +- .../onlineddl/ghost/onlineddl_ghost_test.go | 4 +- .../onlineddl/revert/onlineddl_revert_test.go | 2 +- .../scheduler/onlineddl_scheduler_test.go | 14 +-- .../onlineddl/vrepl/onlineddl_vrepl_test.go | 14 +-- .../onlineddl_vrepl_mini_stress_test.go | 6 +- .../onlineddl_vrepl_stress_suite_test.go | 6 +- go/test/endtoend/onlineddl/vtctlutil.go | 4 +- .../recovery/pitr/shardedpitr_test.go | 36 +++---- .../recovery/unshardedrecovery/recovery.go | 16 +-- .../reparent/emergencyreparent/ers_test.go | 32 +++--- 23 files changed, 250 insertions(+), 178 deletions(-) diff --git a/go/test/endtoend/backup/vtbackup/backup_only_test.go b/go/test/endtoend/backup/vtbackup/backup_only_test.go index 69c12d4376c..f5d96580bb2 100644 --- a/go/test/endtoend/backup/vtbackup/backup_only_test.go +++ b/go/test/endtoend/backup/vtbackup/backup_only_test.go @@ -83,10 +83,10 @@ func TestTabletInitialBackup(t *testing.T) { restore(t, primary, "replica", "NOT_SERVING") // Vitess expects that the user has set the database into ReadWrite mode before calling 
// TabletExternallyReparented - err = localCluster.VtctlclientProcess.ExecuteCommand( + err = localCluster.VtctldClientProcess.ExecuteCommand( "SetReadWrite", primary.Alias) require.Nil(t, err) - err = localCluster.VtctlclientProcess.ExecuteCommand( + err = localCluster.VtctldClientProcess.ExecuteCommand( "TabletExternallyReparented", primary.Alias) require.Nil(t, err) restore(t, replica1, "replica", "SERVING") @@ -277,7 +277,7 @@ func initTablets(t *testing.T, startTablet bool, initShardPrimary bool) { if initShardPrimary { // choose primary and start replication - err := localCluster.VtctlclientProcess.InitShardPrimary(keyspaceName, shardName, cell, primary.TabletUID) + err := localCluster.VtctldClientProcess.InitShardPrimary(keyspaceName, shardName, cell, primary.TabletUID) require.Nil(t, err) } } @@ -339,7 +339,7 @@ func tearDown(t *testing.T, initMysql bool) { for _, tablet := range []cluster.Vttablet{*primary, *replica1, *replica2} { resetTabletDirectory(t, tablet, initMysql) // DeleteTablet on a primary will cause tablet to shutdown, so should only call it after tablet is already shut down - err := localCluster.VtctlclientProcess.ExecuteCommand("DeleteTablet", "--", "--allow_primary", tablet.Alias) + err := localCluster.VtctldClientProcess.ExecuteCommand("DeleteTablets", "--allow-primary", tablet.Alias) require.Nil(t, err) } } diff --git a/go/test/endtoend/backup/vtctlbackup/backup_utils.go b/go/test/endtoend/backup/vtctlbackup/backup_utils.go index a70d1804028..f03a5937b37 100644 --- a/go/test/endtoend/backup/vtctlbackup/backup_utils.go +++ b/go/test/endtoend/backup/vtctlbackup/backup_utils.go @@ -257,7 +257,7 @@ func LaunchCluster(setupType int, streamMode string, stripes int, cDetails *Comp return replica3, nil } - if err := localCluster.VtctlclientProcess.InitShardPrimary(keyspaceName, shard.Name, cell, primary.TabletUID); err != nil { + if err := localCluster.VtctldClientProcess.InitShardPrimary(keyspaceName, shard.Name, cell, primary.TabletUID); err != 
nil { return 1, err } @@ -455,7 +455,7 @@ func primaryBackup(t *testing.T) { localCluster.VerifyBackupCount(t, shardKsName, 0) - err = localCluster.VtctlclientProcess.ExecuteCommand("Backup", "--", "--allow_primary=true", primary.Alias) + err = localCluster.VtctldClientProcess.ExecuteCommand("Backup", "--allow-primary", primary.Alias) require.Nil(t, err) // We'll restore this on the primary later to test restores using a backup timestamp @@ -475,7 +475,7 @@ func primaryBackup(t *testing.T) { // And only 1 record after we restore using the first backup timestamp cluster.VerifyRowsInTablet(t, replica2, keyspaceName, 2) - err = localCluster.VtctlclientProcess.ExecuteCommand("Backup", "--", "--allow_primary=true", primary.Alias) + err = localCluster.VtctldClientProcess.ExecuteCommand("Backup", "--allow-primary", primary.Alias) require.Nil(t, err) backups = localCluster.VerifyBackupCount(t, shardKsName, 2) @@ -485,26 +485,25 @@ func primaryBackup(t *testing.T) { // Perform PRS to demote the primary tablet (primary) so that we can do a restore there and verify we don't have the // data from after the older/first backup - err = localCluster.VtctlclientProcess.ExecuteCommand("PlannedReparentShard", "--", - "--keyspace_shard", shardKsName, - "--new_primary", replica2.Alias) + err = localCluster.VtctldClientProcess.ExecuteCommand("PlannedReparentShard", + "--new-primary", replica2.Alias, shardKsName) require.Nil(t, err) // Delete the current primary tablet (replica2) so that the original primary tablet (primary) can be restored from the // older/first backup w/o it replicating the subsequent insert done after the first backup was taken - err = localCluster.VtctlclientProcess.ExecuteCommand("DeleteTablet", "--", "--allow_primary=true", replica2.Alias) + err = localCluster.VtctldClientProcess.ExecuteCommand("DeleteTablets", "--allow-primary", replica2.Alias) require.Nil(t, err) err = replica2.VttabletProcess.TearDown() require.Nil(t, err) // Restore the older/first backup -- 
using the timestamp we saved -- on the original primary tablet (primary) - err = localCluster.VtctlclientProcess.ExecuteCommand("RestoreFromBackup", "--", "--backup_timestamp", firstBackupTimestamp, primary.Alias) + err = localCluster.VtctldClientProcess.ExecuteCommand("RestoreFromBackup", "--backup-timestamp", firstBackupTimestamp, primary.Alias) require.Nil(t, err) verifyTabletRestoreStats(t, primary.VttabletProcess.GetVars()) // Re-init the shard -- making the original primary tablet (primary) primary again -- for subsequent tests - err = localCluster.VtctlclientProcess.InitShardPrimary(keyspaceName, shardName, cell, primary.TabletUID) + err = localCluster.VtctldClientProcess.InitShardPrimary(keyspaceName, shardName, cell, primary.TabletUID) require.Nil(t, err) // Verify that we don't have the record created after the older/first backup @@ -526,7 +525,7 @@ func primaryReplicaSameBackup(t *testing.T) { verifyInitialReplication(t) // backup the replica - err := localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias) + err := localCluster.VtctldClientProcess.ExecuteCommand("Backup", replica1.Alias) require.Nil(t, err) verifyTabletBackupStats(t, replica1.VttabletProcess.GetVars()) @@ -544,9 +543,8 @@ func primaryReplicaSameBackup(t *testing.T) { cluster.VerifyRowsInTablet(t, replica2, keyspaceName, 2) // Promote replica2 to primary - err = localCluster.VtctlclientProcess.ExecuteCommand("PlannedReparentShard", "--", - "--keyspace_shard", shardKsName, - "--new_primary", replica2.Alias) + err = localCluster.VtctldClientProcess.ExecuteCommand("PlannedReparentShard", + "--new-primary", replica2.Alias, shardKsName) require.Nil(t, err) // insert more data on replica2 (current primary) @@ -564,7 +562,7 @@ func primaryReplicaSameBackup(t *testing.T) { // It is written into the MANIFEST and read back from the MANIFEST. // // Take another backup on the replica. 
- err = localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias) + err = localCluster.VtctldClientProcess.ExecuteCommand("Backup", replica1.Alias) require.Nil(t, err) // Insert more data on replica2 (current primary). @@ -594,7 +592,7 @@ func primaryReplicaSameBackupModifiedCompressionEngine(t *testing.T) { time.Sleep(5 * time.Second) // backup the replica - err := localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias) + err := localCluster.VtctldClientProcess.ExecuteCommand("Backup", replica1.Alias) require.Nil(t, err) verifyTabletBackupStats(t, replica1.VttabletProcess.GetVars()) @@ -619,9 +617,8 @@ func primaryReplicaSameBackupModifiedCompressionEngine(t *testing.T) { cluster.VerifyRowsInTablet(t, replica2, keyspaceName, 2) // Promote replica2 to primary - err = localCluster.VtctlclientProcess.ExecuteCommand("PlannedReparentShard", "--", - "--keyspace_shard", shardKsName, - "--new_primary", replica2.Alias) + err = localCluster.VtctldClientProcess.ExecuteCommand("PlannedReparentShard", + "--new-primary", replica2.Alias, shardKsName) require.Nil(t, err) // insert more data on replica2 (current primary) @@ -635,9 +632,8 @@ func primaryReplicaSameBackupModifiedCompressionEngine(t *testing.T) { cluster.VerifyRowsInTablet(t, replica1, keyspaceName, 3) // Promote replica1 to primary - err = localCluster.VtctlclientProcess.ExecuteCommand("PlannedReparentShard", "--", - "--keyspace_shard", shardKsName, - "--new_primary", replica1.Alias) + err = localCluster.VtctldClientProcess.ExecuteCommand("PlannedReparentShard", + "--new-primary", replica1.Alias, shardKsName) require.Nil(t, err) // Insert more data on replica1 (current primary). 
@@ -648,7 +644,7 @@ func primaryReplicaSameBackupModifiedCompressionEngine(t *testing.T) { cluster.VerifyRowsInTablet(t, replica2, keyspaceName, 4) // Now take replica2 backup with gzip (new compressor) - err = localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica2.Alias) + err = localCluster.VtctldClientProcess.ExecuteCommand("Backup", replica2.Alias) require.Nil(t, err) verifyTabletBackupStats(t, replica2.VttabletProcess.GetVars()) @@ -688,7 +684,7 @@ func testRestoreOldPrimary(t *testing.T, method restoreMethod) { time.Sleep(5 * time.Second) // backup the replica - err := localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias) + err := localCluster.VtctldClientProcess.ExecuteCommand("Backup", replica1.Alias) require.Nil(t, err) verifyTabletBackupStats(t, replica1.VttabletProcess.GetVars()) @@ -698,9 +694,8 @@ func testRestoreOldPrimary(t *testing.T, method restoreMethod) { require.Nil(t, err) // reparent to replica1 - err = localCluster.VtctlclientProcess.ExecuteCommand("PlannedReparentShard", "--", - "--keyspace_shard", shardKsName, - "--new_primary", replica1.Alias) + err = localCluster.VtctldClientProcess.ExecuteCommand("PlannedReparentShard", + "--new-primary", replica1.Alias, shardKsName) require.Nil(t, err) // insert more data to new primary @@ -726,7 +721,7 @@ func restoreUsingRestart(t *testing.T, tablet *cluster.Vttablet) { } func restoreInPlace(t *testing.T, tablet *cluster.Vttablet) { - err := localCluster.VtctlclientProcess.ExecuteCommand("RestoreFromBackup", tablet.Alias) + err := localCluster.VtctldClientProcess.ExecuteCommand("RestoreFromBackup", tablet.Alias) require.Nil(t, err) } @@ -756,7 +751,7 @@ func restartPrimaryAndReplica(t *testing.T) { err = tablet.VttabletProcess.Setup() require.Nil(t, err) } - err := localCluster.VtctlclientProcess.InitShardPrimary(keyspaceName, shardName, cell, primary.TabletUID) + err := localCluster.VtctldClientProcess.InitShardPrimary(keyspaceName, shardName, cell, primary.TabletUID) 
require.Nil(t, err) } @@ -766,12 +761,12 @@ func stopAllTablets() { tablet.VttabletProcess.TearDown() if tablet.MysqlctldProcess.TabletUID > 0 { tablet.MysqlctldProcess.Stop() - localCluster.VtctlclientProcess.ExecuteCommand("DeleteTablet", "--", "--allow_primary", tablet.Alias) + localCluster.VtctldClientProcess.ExecuteCommand("DeleteTablets", "--allow-primary", tablet.Alias) continue } proc, _ := tablet.MysqlctlProcess.StopProcess() mysqlProcs = append(mysqlProcs, proc) - localCluster.VtctlclientProcess.ExecuteCommand("DeleteTablet", "--", "--allow_primary", tablet.Alias) + localCluster.VtctldClientProcess.ExecuteCommand("DeleteTablets", "--allow-primary", tablet.Alias) } for _, proc := range mysqlProcs { proc.Wait() @@ -796,7 +791,7 @@ func terminatedRestore(t *testing.T) { checkTabletType(t, replica1.Alias, topodata.TabletType_REPLICA) // backup the replica - err := localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias) + err := localCluster.VtctldClientProcess.ExecuteCommand("Backup", replica1.Alias) require.Nil(t, err) checkTabletType(t, replica1.Alias, topodata.TabletType_REPLICA) @@ -807,9 +802,8 @@ func terminatedRestore(t *testing.T) { require.Nil(t, err) // reparent to replica1 - err = localCluster.VtctlclientProcess.ExecuteCommand("PlannedReparentShard", "--", - "--keyspace_shard", shardKsName, - "--new_primary", replica1.Alias) + err = localCluster.VtctldClientProcess.ExecuteCommand("PlannedReparentShard", + "--new-primary", replica1.Alias, shardKsName) require.Nil(t, err) // insert more data to new primary @@ -821,7 +815,7 @@ func terminatedRestore(t *testing.T) { // If restore fails then the tablet type goes back to original type. 
checkTabletType(t, primary.Alias, topodata.TabletType_REPLICA) - err = localCluster.VtctlclientProcess.ExecuteCommand("RestoreFromBackup", primary.Alias) + err = localCluster.VtctldClientProcess.ExecuteCommand("RestoreFromBackup", primary.Alias) require.Nil(t, err) checkTabletType(t, primary.Alias, topodata.TabletType_REPLICA) @@ -863,7 +857,7 @@ func doNotDemoteNewlyPromotedPrimaryIfReparentingDuringBackup(t *testing.T) { checkTabletType(t, primary.Alias, topodata.TabletType_PRIMARY) // now backup - err := localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias) + err := localCluster.VtctldClientProcess.ExecuteCommand("Backup", replica1.Alias) require.Nil(t, err) }() @@ -874,10 +868,11 @@ func doNotDemoteNewlyPromotedPrimaryIfReparentingDuringBackup(t *testing.T) { checkTabletType(t, primary.Alias, topodata.TabletType_PRIMARY) // now reparent - _, err := localCluster.VtctlclientProcess.ExecuteCommandWithOutput( - "PlannedReparentShard", "--", - "--keyspace_shard", fmt.Sprintf("%s/%s", keyspaceName, shardName), - "--new_primary", replica1.Alias) + _, err := localCluster.VtctldClientProcess.ExecuteCommandWithOutput( + "PlannedReparentShard", + "--new-primary", replica1.Alias, + fmt.Sprintf("%s/%s", keyspaceName, shardName), + ) require.Nil(t, err) // check that we reparented @@ -906,13 +901,13 @@ func doNotDemoteNewlyPromotedPrimaryIfReparentingDuringBackup(t *testing.T) { func vtctlBackup(t *testing.T, tabletType string) { // StopReplication on replica1. We verify that the replication works fine later in // verifyInitialReplication. So this will also check that VTOrc is running. 
- err := localCluster.VtctlclientProcess.ExecuteCommand("StopReplication", replica1.Alias) + err := localCluster.VtctldClientProcess.ExecuteCommand("StopReplication", replica1.Alias) require.Nil(t, err) verifyInitialReplication(t) restoreWaitForBackup(t, tabletType, nil, true) - err = localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias) + err = localCluster.VtctldClientProcess.ExecuteCommand("Backup", replica1.Alias) require.Nil(t, err) backups := localCluster.VerifyBackupCount(t, shardKsName, 1) @@ -930,7 +925,7 @@ func vtctlBackup(t *testing.T, tabletType string) { err = replica2.VttabletProcess.TearDown() require.Nil(t, err) - err = localCluster.VtctlclientProcess.ExecuteCommand("DeleteTablet", replica2.Alias) + err = localCluster.VtctldClientProcess.ExecuteCommand("DeleteTablets", replica2.Alias) require.Nil(t, err) _, err = primary.VttabletProcess.QueryTablet("DROP TABLE vt_insert_test", keyspaceName, true) require.Nil(t, err) @@ -976,7 +971,7 @@ func restoreWaitForBackup(t *testing.T, tabletType string, cDetails *Compression } func RemoveBackup(t *testing.T, backupName string) { - err := localCluster.VtctlclientProcess.ExecuteCommand("RemoveBackup", shardKsName, backupName) + err := localCluster.VtctldClientProcess.ExecuteCommand("RemoveBackup", shardKsName, backupName) require.Nil(t, err) } @@ -1002,9 +997,9 @@ func verifyRestoreTablet(t *testing.T, tablet *cluster.Vttablet, status string) // We restart replication here because semi-sync will not be set correctly on tablet startup since // we deprecated enable_semi_sync. StartReplication RPC fixes the semi-sync settings by consulting the // durability policies set. 
- err = localCluster.VtctlclientProcess.ExecuteCommand("StopReplication", tablet.Alias) + err = localCluster.VtctldClientProcess.ExecuteCommand("StopReplication", tablet.Alias) require.NoError(t, err) - err = localCluster.VtctlclientProcess.ExecuteCommand("StartReplication", tablet.Alias) + err = localCluster.VtctldClientProcess.ExecuteCommand("StartReplication", tablet.Alias) require.NoError(t, err) if tablet.Type == "replica" { @@ -1033,9 +1028,9 @@ func terminateBackup(t *testing.T, alias string) { }() } - args := append([]string{"--server", localCluster.VtctlclientProcess.Server, "--alsologtostderr"}, "Backup", "--", alias) + args := append([]string{"--server", localCluster.VtctldClientProcess.Server, "--alsologtostderr"}, "Backup", alias) tmpProcess := exec.Command( - "vtctlclient", + "vtctldclient", args..., ) @@ -1067,9 +1062,9 @@ func terminateRestore(t *testing.T) { }() } - args := append([]string{"--server", localCluster.VtctlclientProcess.Server, "--alsologtostderr"}, "RestoreFromBackup", "--", primary.Alias) + args := append([]string{"--server", localCluster.VtctldClientProcess.Server, "--alsologtostderr"}, "RestoreFromBackup", primary.Alias) tmpProcess := exec.Command( - "vtctlclient", + "vtctldclient", args..., ) @@ -1289,7 +1284,7 @@ func TestReplicaIncrementalBackup(t *testing.T, replicaIndex int, incrementalFro func TestReplicaFullRestore(t *testing.T, replicaIndex int, expectError string) { replica := getReplica(t, replicaIndex) - output, err := localCluster.VtctlclientProcess.ExecuteCommandWithOutput("RestoreFromBackup", replica.Alias) + output, err := localCluster.VtctldClientProcess.ExecuteCommandWithOutput("RestoreFromBackup", replica.Alias) if expectError != "" { require.Errorf(t, err, "expected: %v", expectError) require.Contains(t, output, expectError) @@ -1304,7 +1299,7 @@ func TestReplicaRestoreToPos(t *testing.T, replicaIndex int, restoreToPos replic require.False(t, restoreToPos.IsZero()) restoreToPosArg := 
replication.EncodePosition(restoreToPos) - output, err := localCluster.VtctlclientProcess.ExecuteCommandWithOutput("RestoreFromBackup", "--", "--restore_to_pos", restoreToPosArg, replica.Alias) + output, err := localCluster.VtctldClientProcess.ExecuteCommandWithOutput("RestoreFromBackup", "--restore-to-pos", restoreToPosArg, replica.Alias) if expectError != "" { require.Errorf(t, err, "expected: %v", expectError) require.Contains(t, output, expectError) diff --git a/go/test/endtoend/cellalias/cell_alias_test.go b/go/test/endtoend/cellalias/cell_alias_test.go index d357331d8cd..07e8d687f4e 100644 --- a/go/test/endtoend/cellalias/cell_alias_test.go +++ b/go/test/endtoend/cellalias/cell_alias_test.go @@ -186,14 +186,14 @@ func TestMain(m *testing.M) { return 1, err } } - if err := localCluster.VtctlclientProcess.InitializeShard(keyspaceName, shard1.Name, shard1Primary.Cell, shard1Primary.TabletUID); err != nil { + if err := localCluster.VtctldClientProcess.InitializeShard(keyspaceName, shard1.Name, shard1Primary.Cell, shard1Primary.TabletUID); err != nil { return 1, err } // run a health check on source replica so it responds to discovery // (for binlog players) and on the source rdonlys (for workers) for _, tablet := range []string{shard1Replica.Alias, shard1Rdonly.Alias} { - if err := localCluster.VtctlclientProcess.ExecuteCommand("RunHealthCheck", tablet); err != nil { + if err := localCluster.VtctldClientProcess.ExecuteCommand("RunHealthCheck", tablet); err != nil { return 1, err } } @@ -204,7 +204,7 @@ func TestMain(m *testing.M) { } } - if err := localCluster.VtctlclientProcess.InitializeShard(keyspaceName, shard2.Name, shard2Primary.Cell, shard2Primary.TabletUID); err != nil { + if err := localCluster.VtctldClientProcess.InitializeShard(keyspaceName, shard2.Name, shard2Primary.Cell, shard2Primary.TabletUID); err != nil { return 1, err } @@ -212,14 +212,14 @@ func TestMain(m *testing.M) { return 1, err } - if err := 
localCluster.VtctlclientProcess.ApplySchema(keyspaceName, fmt.Sprintf(sqlSchema, tableName)); err != nil { + if err := localCluster.VtctldClientProcess.ApplySchema(keyspaceName, fmt.Sprintf(sqlSchema, tableName)); err != nil { return 1, err } - if err := localCluster.VtctlclientProcess.ApplyVSchema(keyspaceName, fmt.Sprintf(vSchema, tableName)); err != nil { + if err := localCluster.VtctldClientProcess.ApplyVSchema(keyspaceName, fmt.Sprintf(vSchema, tableName)); err != nil { return 1, err } - _ = localCluster.VtctlclientProcess.ExecuteCommand("RebuildKeyspaceGraph", keyspaceName) + _ = localCluster.VtctldClientProcess.ExecuteCommand("RebuildKeyspaceGraph", keyspaceName) return m.Run(), nil }() @@ -237,7 +237,7 @@ func TestAlias(t *testing.T) { insertInitialValues(t) defer deleteInitialValues(t) - err := localCluster.VtctlclientProcess.ExecuteCommand("RebuildKeyspaceGraph", keyspaceName) + err := localCluster.VtctldClientProcess.ExecuteCommand("RebuildKeyspaceGraph", keyspaceName) require.NoError(t, err) shard1 := localCluster.Keyspaces[0].Shards[0] shard2 := localCluster.Keyspaces[0].Shards[1] @@ -251,11 +251,11 @@ func TestAlias(t *testing.T) { cluster.CheckSrvKeyspace(t, cell2, keyspaceName, expectedPartitions, *localCluster) // Adds alias so vtgate can route to replica/rdonly tablets that are not in the same cell, but same alias - err = localCluster.VtctlclientProcess.ExecuteCommand("AddCellsAlias", "--", + err = localCluster.VtctldClientProcess.ExecuteCommand("AddCellsAlias", "--cells", allCells, "region_east_coast") require.NoError(t, err) - err = localCluster.VtctlclientProcess.ExecuteCommand("UpdateCellsAlias", "--", + err = localCluster.VtctldClientProcess.ExecuteCommand("UpdateCellsAlias", "--cells", allCells, "region_east_coast") require.NoError(t, err) @@ -277,7 +277,7 @@ func TestAlias(t *testing.T) { testQueriesOnTabletType(t, "rdonly", vtgateInstance.GrpcPort, false) // now, delete the alias, so that if we run above assertions again, it will fail for 
replica,rdonly target type - err = localCluster.VtctlclientProcess.ExecuteCommand("DeleteCellsAlias", + err = localCluster.VtctldClientProcess.ExecuteCommand("DeleteCellsAlias", "region_east_coast") require.NoError(t, err) @@ -301,7 +301,7 @@ func TestAddAliasWhileVtgateUp(t *testing.T) { insertInitialValues(t) defer deleteInitialValues(t) - err := localCluster.VtctlclientProcess.ExecuteCommand("RebuildKeyspaceGraph", keyspaceName) + err := localCluster.VtctldClientProcess.ExecuteCommand("RebuildKeyspaceGraph", keyspaceName) require.NoError(t, err) shard1 := localCluster.Keyspaces[0].Shards[0] shard2 := localCluster.Keyspaces[0].Shards[1] @@ -328,7 +328,7 @@ func TestAddAliasWhileVtgateUp(t *testing.T) { testQueriesOnTabletType(t, "rdonly", vtgateInstance.GrpcPort, true) // Adds alias so vtgate can route to replica/rdonly tablets that are not in the same cell, but same alias - err = localCluster.VtctlclientProcess.ExecuteCommand("AddCellsAlias", "--", + err = localCluster.VtctldClientProcess.ExecuteCommand("AddCellsAlias", "--cells", allCells, "region_east_coast") require.NoError(t, err) diff --git a/go/test/endtoend/cluster/cluster_process.go b/go/test/endtoend/cluster/cluster_process.go index a9cc482b9e3..98218bcf3fb 100644 --- a/go/test/endtoend/cluster/cluster_process.go +++ b/go/test/endtoend/cluster/cluster_process.go @@ -421,7 +421,7 @@ func (cluster *LocalProcessCluster) startKeyspace(keyspace Keyspace, shardNames } // Make first tablet as primary - if err = cluster.VtctlclientProcess.InitializeShard(keyspace.Name, shardName, cluster.Cell, shard.Vttablets[0].TabletUID); err != nil { + if err = cluster.VtctldClientProcess.InitializeShard(keyspace.Name, shardName, cluster.Cell, shard.Vttablets[0].TabletUID); err != nil { log.Errorf("error running InitializeShard on keyspace %v, shard %v: %v", keyspace.Name, shardName, err) return } @@ -441,7 +441,7 @@ func (cluster *LocalProcessCluster) startKeyspace(keyspace Keyspace, shardNames // Apply Schema SQL if 
keyspace.SchemaSQL != "" { - if err = cluster.VtctlclientProcess.ApplySchema(keyspace.Name, keyspace.SchemaSQL); err != nil { + if err = cluster.VtctldClientProcess.ApplySchema(keyspace.Name, keyspace.SchemaSQL); err != nil { log.Errorf("error applying schema: %v, %v", keyspace.SchemaSQL, err) return } @@ -449,7 +449,7 @@ func (cluster *LocalProcessCluster) startKeyspace(keyspace Keyspace, shardNames // Apply VSchema if keyspace.VSchema != "" { - if err = cluster.VtctlclientProcess.ApplyVSchema(keyspace.Name, keyspace.VSchema); err != nil { + if err = cluster.VtctldClientProcess.ApplyVSchema(keyspace.Name, keyspace.VSchema); err != nil { log.Errorf("error applying vschema: %v, %v", keyspace.VSchema, err) return } @@ -581,7 +581,7 @@ func (cluster *LocalProcessCluster) StartKeyspaceLegacy(keyspace Keyspace, shard } // Make first tablet as primary - if err = cluster.VtctlclientProcess.InitShardPrimary(keyspace.Name, shardName, cluster.Cell, shard.Vttablets[0].TabletUID); err != nil { + if err = cluster.VtctldClientProcess.InitShardPrimary(keyspace.Name, shardName, cluster.Cell, shard.Vttablets[0].TabletUID); err != nil { log.Errorf("error running ISM on keyspace %v, shard %v: %v", keyspace.Name, shardName, err) return } @@ -601,7 +601,7 @@ func (cluster *LocalProcessCluster) StartKeyspaceLegacy(keyspace Keyspace, shard // Apply Schema SQL if keyspace.SchemaSQL != "" { - if err = cluster.VtctlclientProcess.ApplySchema(keyspace.Name, keyspace.SchemaSQL); err != nil { + if err = cluster.VtctldClientProcess.ApplySchema(keyspace.Name, keyspace.SchemaSQL); err != nil { log.Errorf("error applying schema: %v, %v", keyspace.SchemaSQL, err) return } @@ -609,7 +609,7 @@ func (cluster *LocalProcessCluster) StartKeyspaceLegacy(keyspace Keyspace, shard // Apply VSchema if keyspace.VSchema != "" { - if err = cluster.VtctlclientProcess.ApplyVSchema(keyspace.Name, keyspace.VSchema); err != nil { + if err = cluster.VtctldClientProcess.ApplyVSchema(keyspace.Name, keyspace.VSchema); err 
!= nil { log.Errorf("error applying vschema: %v, %v", keyspace.VSchema, err) return } diff --git a/go/test/endtoend/cluster/cluster_util.go b/go/test/endtoend/cluster/cluster_util.go index a35c3bf3769..9fcefba3892 100644 --- a/go/test/endtoend/cluster/cluster_util.go +++ b/go/test/endtoend/cluster/cluster_util.go @@ -138,7 +138,7 @@ func PanicHandler(t testing.TB) { // ListBackups Lists back preset in shard func (cluster LocalProcessCluster) ListBackups(shardKsName string) ([]string, error) { - output, err := cluster.VtctlclientProcess.ExecuteCommandWithOutput("ListBackups", shardKsName) + output, err := cluster.VtctldClientProcess.ExecuteCommandWithOutput("GetBackups", shardKsName) if err != nil { return nil, err } @@ -165,7 +165,7 @@ func (cluster LocalProcessCluster) RemoveAllBackups(t *testing.T, shardKsName st backups, err := cluster.ListBackups(shardKsName) require.Nil(t, err) for _, backup := range backups { - cluster.VtctlclientProcess.ExecuteCommand("RemoveBackup", shardKsName, backup) + cluster.VtctldClientProcess.ExecuteCommand("RemoveBackup", shardKsName, backup) } } diff --git a/go/test/endtoend/cluster/vtctldclient_process.go b/go/test/endtoend/cluster/vtctldclient_process.go index 52e0f985680..29b0568aa06 100644 --- a/go/test/endtoend/cluster/vtctldclient_process.go +++ b/go/test/endtoend/cluster/vtctldclient_process.go @@ -23,6 +23,7 @@ import ( "time" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/vterrors" ) // VtctldClientProcess is a generic handle for a running vtctldclient command . 
@@ -93,6 +94,55 @@ func VtctldClientProcessInstance(hostname string, grpcPort int, tmpDirectory str return vtctldclient } +type ApplySchemaParams struct { + DDLStrategy string + MigrationContext string + UUIDList string + CallerID string + BatchSize int +} + +// ApplySchemaWithOutput applies SQL schema to the keyspace +func (vtctldclient *VtctldClientProcess) ApplySchemaWithOutput(keyspace string, sql string, params ApplySchemaParams) (result string, err error) { + args := []string{ + "ApplySchema", + "--sql", sql, + } + if params.MigrationContext != "" { + args = append(args, "--migration-context", params.MigrationContext) + } + if params.DDLStrategy != "" { + args = append(args, "--ddl-strategy", params.DDLStrategy) + } + if params.UUIDList != "" { + args = append(args, "--uuid-list", params.UUIDList) + } + if params.BatchSize > 0 { + args = append(args, "--batch-size", fmt.Sprintf("%d", params.BatchSize)) + } + if params.CallerID != "" { + args = append(args, "--caller-id", params.CallerID) + } + args = append(args, keyspace) + return vtctldclient.ExecuteCommandWithOutput(args...) +} + +// ApplySchema applies SQL schema to the keyspace +func (vtctldclient *VtctldClientProcess) ApplySchema(keyspace string, sql string) error { + message, err := vtctldclient.ApplySchemaWithOutput(keyspace, sql, ApplySchemaParams{DDLStrategy: "direct -allow-zero-in-date"}) + + return vterrors.Wrap(err, message) +} + +// ApplyVSchema applies vitess schema (JSON format) to the keyspace +func (vtctldclient *VtctldClientProcess) ApplyVSchema(keyspace string, json string) (err error) { + return vtctldclient.ExecuteCommand( + "ApplyVSchema", + "--vschema", json, + keyspace, + ) +} + // PlannedReparentShard executes vtctlclient command to make specified tablet the primary for the shard. 
func (vtctldclient *VtctldClientProcess) PlannedReparentShard(Keyspace string, Shard string, alias string) (err error) { output, err := vtctldclient.ExecuteCommandWithOutput( @@ -105,6 +155,32 @@ func (vtctldclient *VtctldClientProcess) PlannedReparentShard(Keyspace string, S return err } +// InitializeShard executes vtctldclient command to make specified tablet the primary for the shard. +func (vtctldclient *VtctldClientProcess) InitializeShard(keyspace string, shard string, cell string, uid int) error { + output, err := vtctldclient.ExecuteCommandWithOutput( + "PlannedReparentShard", + fmt.Sprintf("%s/%s", keyspace, shard), + "--wait-replicas-timeout", "31s", + "--new-primary", fmt.Sprintf("%s-%d", cell, uid)) + if err != nil { + log.Errorf("error in PlannedReparentShard output %s, err %s", output, err.Error()) + } + return err +} + +// InitShardPrimary executes vtctldclient command to make specified tablet the primary for the shard. +func (vtctldclient *VtctldClientProcess) InitShardPrimary(keyspace string, shard string, cell string, uid int) error { + output, err := vtctldclient.ExecuteCommandWithOutput( + "InitShardPrimary", + "--force", "--wait-replicas-timeout", "31s", + fmt.Sprintf("%s/%s", keyspace, shard), + fmt.Sprintf("%s-%d", cell, uid)) + if err != nil { + log.Errorf("error in InitShardPrimary output %s, err %s", output, err.Error()) + } + return err +} + // CreateKeyspace executes the vtctl command to create a keyspace func (vtctldclient *VtctldClientProcess) CreateKeyspace(keyspaceName string, sidecarDBName string) (err error) { var output string diff --git a/go/test/endtoend/clustertest/vttablet_test.go b/go/test/endtoend/clustertest/vttablet_test.go index 369deb18cfd..5e7d5e27182 100644 --- a/go/test/endtoend/clustertest/vttablet_test.go +++ b/go/test/endtoend/clustertest/vttablet_test.go @@ -51,6 +51,6 @@ func TestDeleteTablet(t *testing.T) { defer cluster.PanicHandler(t) primary := clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet() 
require.NotNil(t, primary) - _, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("DeleteTablet", "--", "--allow_primary", primary.Alias) + _, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("DeleteTablets", "--allow-primary", primary.Alias) require.NoError(t, err) } diff --git a/go/test/endtoend/encryption/encryptedreplication/encrypted_replication_test.go b/go/test/endtoend/encryption/encryptedreplication/encrypted_replication_test.go index 725659a5ee1..4c759ff577a 100644 --- a/go/test/endtoend/encryption/encryptedreplication/encrypted_replication_test.go +++ b/go/test/endtoend/encryption/encryptedreplication/encrypted_replication_test.go @@ -74,7 +74,7 @@ func testReplicationBase(t *testing.T, isClientCertPassed bool) { } // Reparent using SSL (this will also check replication works) - err = clusterInstance.VtctlclientProcess.InitializeShard(keyspace, shardName, clusterInstance.Cell, primaryTablet.TabletUID) + err = clusterInstance.VtctldClientProcess.InitializeShard(keyspace, shardName, clusterInstance.Cell, primaryTablet.TabletUID) if isClientCertPassed { require.NoError(t, err) } else { diff --git a/go/test/endtoend/keyspace/keyspace_test.go b/go/test/endtoend/keyspace/keyspace_test.go index 338ad5c8cd2..8ac7759feb7 100644 --- a/go/test/endtoend/keyspace/keyspace_test.go +++ b/go/test/endtoend/keyspace/keyspace_test.go @@ -24,6 +24,7 @@ import ( "strings" "testing" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/vt/key" "github.com/stretchr/testify/assert" @@ -109,7 +110,7 @@ func TestMain(m *testing.M) { if err := clusterForKSTest.StartKeyspace(*keyspaceSharded, []string{"-80", "80-"}, 1, false); err != nil { return 1 } - if err := clusterForKSTest.VtctlclientProcess.ExecuteCommand("RebuildKeyspaceGraph", keyspaceShardedName); err != nil { + if err := clusterForKSTest.VtctldClientProcess.ExecuteCommand("RebuildKeyspaceGraph", keyspaceShardedName); err != nil { return 1 } @@ -121,7 +122,7 @@ func TestMain(m 
*testing.M) { if err := clusterForKSTest.StartKeyspace(*keyspaceUnsharded, []string{keyspaceUnshardedName}, 1, false); err != nil { return 1 } - if err := clusterForKSTest.VtctlclientProcess.ExecuteCommand("RebuildKeyspaceGraph", keyspaceUnshardedName); err != nil { + if err := clusterForKSTest.VtctldClientProcess.ExecuteCommand("RebuildKeyspaceGraph", keyspaceUnshardedName); err != nil { return 1 } @@ -228,47 +229,47 @@ func TestGetKeyspace(t *testing.T) { func TestDeleteKeyspace(t *testing.T) { defer cluster.PanicHandler(t) - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("CreateKeyspace", "test_delete_keyspace") - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("CreateShard", "test_delete_keyspace/0") + _ = clusterForKSTest.VtctldClientProcess.CreateKeyspace("test_delete_keyspace", sidecar.DefaultName) + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("CreateShard", "test_delete_keyspace/0") _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("InitTablet", "--", "--keyspace=test_delete_keyspace", "--shard=0", "zone1-0000000100", "primary") // Can't delete keyspace if there are shards present. - err := clusterForKSTest.VtctlclientProcess.ExecuteCommand("DeleteKeyspace", "test_delete_keyspace") + err := clusterForKSTest.VtctldClientProcess.ExecuteCommand("DeleteKeyspace", "test_delete_keyspace") require.Error(t, err) // Can't delete shard if there are tablets present. - err = clusterForKSTest.VtctlclientProcess.ExecuteCommand("DeleteShard", "--", "--even_if_serving", "test_delete_keyspace/0") + err = clusterForKSTest.VtctldClientProcess.ExecuteCommand("DeleteShards", "--even-if-serving", "test_delete_keyspace/0") require.Error(t, err) // Use recursive DeleteShard to remove tablets. 
- _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("DeleteShard", "--", "--even_if_serving", "--recursive", "test_delete_keyspace/0") + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("DeleteShards", "--even-if-serving", "--recursive", "test_delete_keyspace/0") // Now non-recursive DeleteKeyspace should work. - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("DeleteKeyspace", "test_delete_keyspace") + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("DeleteKeyspace", "test_delete_keyspace") // Start over and this time use recursive DeleteKeyspace to do everything. - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("CreateKeyspace", "test_delete_keyspace") - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("CreateShard", "test_delete_keyspace/0") + _ = clusterForKSTest.VtctldClientProcess.CreateKeyspace("test_delete_keyspace", sidecar.DefaultName) + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("CreateShard", "test_delete_keyspace/0") _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("InitTablet", "--", "--port=1234", "--bind-address=127.0.0.1", "--keyspace=test_delete_keyspace", "--shard=0", "zone1-0000000100", "primary") // Create the serving/replication entries and check that they exist, // so we can later check they're deleted. 
- _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("RebuildKeyspaceGraph", "test_delete_keyspace") - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("GetShardReplication", cell, "test_delete_keyspace/0") - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("GetSrvKeyspace", cell, "test_delete_keyspace") + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("RebuildKeyspaceGraph", "test_delete_keyspace") + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("GetShardReplication", "test_delete_keyspace/0", cell) + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("GetSrvKeyspaces", "test_delete_keyspace", cell) // Recursive DeleteKeyspace - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("DeleteKeyspace", "--", "--recursive", "test_delete_keyspace") + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("DeleteKeyspace", "--recursive", "test_delete_keyspace") // Check that everything is gone. - err = clusterForKSTest.VtctlclientProcess.ExecuteCommand("GetKeyspace", "test_delete_keyspace") + err = clusterForKSTest.VtctldClientProcess.ExecuteCommand("GetKeyspace", "test_delete_keyspace") require.Error(t, err) - err = clusterForKSTest.VtctlclientProcess.ExecuteCommand("GetShard", "test_delete_keyspace/0") + err = clusterForKSTest.VtctldClientProcess.ExecuteCommand("GetShard", "test_delete_keyspace/0") require.Error(t, err) - err = clusterForKSTest.VtctlclientProcess.ExecuteCommand("GetTablet", "zone1-0000000100") + err = clusterForKSTest.VtctldClientProcess.ExecuteCommand("GetTablet", "zone1-0000000100") require.Error(t, err) - err = clusterForKSTest.VtctlclientProcess.ExecuteCommand("GetShardReplication", cell, "test_delete_keyspace/0") + err = clusterForKSTest.VtctldClientProcess.ExecuteCommand("GetShardReplication", "test_delete_keyspace/0", cell) require.Error(t, err) - err = clusterForKSTest.VtctlclientProcess.ExecuteCommand("GetSrvKeyspace", cell, "test_delete_keyspace") + err =
clusterForKSTest.VtctldClientProcess.ExecuteCommand("GetSrvKeyspaces", "test_delete_keyspace", cell) require.Error(t, err) } diff --git a/go/test/endtoend/messaging/message_test.go b/go/test/endtoend/messaging/message_test.go index 3082f295055..7e1190c16bb 100644 --- a/go/test/endtoend/messaging/message_test.go +++ b/go/test/endtoend/messaging/message_test.go @@ -393,12 +393,12 @@ func TestReparenting(t *testing.T) { // do planned reparenting, make one replica as primary // and validate client connection count in correspond tablets - clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput( - "PlannedReparentShard", "--", - "--keyspace_shard", userKeyspace+"/-80", - "--new_primary", shard0Replica.Alias) + clusterInstance.VtctldClientProcess.ExecuteCommand( + "PlannedReparentShard", + userKeyspace+"/-80", + "--new-primary", shard0Replica.Alias) // validate topology - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Validate") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Validate") require.Nil(t, err) // Verify connection has migrated.
@@ -417,12 +417,12 @@ func TestReparenting(t *testing.T) { stream.Next() // make old primary again as new primary - clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput( - "PlannedReparentShard", "--", - "--keyspace_shard", userKeyspace+"/-80", - "--new_primary", shard0Primary.Alias) + clusterInstance.VtctldClientProcess.ExecuteCommand( + "PlannedReparentShard", + userKeyspace+"/-80", + "--new-primary", shard0Primary.Alias) // validate topology - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Validate") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Validate") require.Nil(t, err) time.Sleep(10 * time.Second) assertClientCount(t, 1, shard0Primary) diff --git a/go/test/endtoend/migration/migration_test.go b/go/test/endtoend/migration/migration_test.go index f0b91e2d6df..eca112e388d 100644 --- a/go/test/endtoend/migration/migration_test.go +++ b/go/test/endtoend/migration/migration_test.go @@ -145,7 +145,7 @@ func TestMigration(t *testing.T) { vt.ExtraArgs = append(vt.ExtraArgs, "--tablet_config", yamlFile) } createKeyspace(t, commerce, []string{"0"}, tabletConfig) - err := clusterInstance.VtctlclientProcess.ExecuteCommand("RebuildKeyspaceGraph", "commerce") + err := clusterInstance.VtctldClientProcess.ExecuteCommand("RebuildKeyspaceGraph", "commerce") require.NoError(t, err) err = clusterInstance.StartVtgate() @@ -221,7 +221,7 @@ func migrate(t *testing.T, fromdb, toks string, tables []string) { "('%s', '%s', %s, '', 9999, 9999, 'primary', 0, 0, 'Running')", tables[0], "vt_"+toks, sqlEscaped.String()) fmt.Printf("VReplicationExec: %s\n", query) vttablet := keyspaces[toks].Shards[0].Vttablets[0].VttabletProcess - err := clusterInstance.VtctlclientProcess.ExecuteCommand("VReplicationExec", vttablet.TabletPath, query) + err := clusterInstance.VtctldClientProcess.ExecuteCommand("VReplicationExec", vttablet.TabletPath, query) require.NoError(t, err) } diff --git a/go/test/endtoend/mysqlctl/mysqlctl_test.go 
b/go/test/endtoend/mysqlctl/mysqlctl_test.go index 3b28c5bcf30..bdea4d3988c 100644 --- a/go/test/endtoend/mysqlctl/mysqlctl_test.go +++ b/go/test/endtoend/mysqlctl/mysqlctl_test.go @@ -156,6 +156,6 @@ func TestAutoDetect(t *testing.T) { require.Nil(t, err, "error should be nil") // Reparent tablets, which requires flavor detection - err = clusterInstance.VtctlclientProcess.InitializeShard(keyspaceName, shardName, cell, primaryTablet.TabletUID) + err = clusterInstance.VtctldClientProcess.InitializeShard(keyspaceName, shardName, cell, primaryTablet.TabletUID) require.Nil(t, err, "error should be nil") } diff --git a/go/test/endtoend/mysqlctld/mysqlctld_test.go b/go/test/endtoend/mysqlctld/mysqlctld_test.go index e1577acfc52..52be2fa4323 100644 --- a/go/test/endtoend/mysqlctld/mysqlctld_test.go +++ b/go/test/endtoend/mysqlctld/mysqlctld_test.go @@ -159,7 +159,7 @@ func TestAutoDetect(t *testing.T) { require.Nil(t, err, "error should be nil") // Reparent tablets, which requires flavor detection - err = clusterInstance.VtctlclientProcess.InitializeShard(keyspaceName, shardName, cell, primaryTablet.TabletUID) + err = clusterInstance.VtctldClientProcess.InitializeShard(keyspaceName, shardName, cell, primaryTablet.TabletUID) require.Nil(t, err, "error should be nil") } diff --git a/go/test/endtoend/onlineddl/ghost/onlineddl_ghost_test.go b/go/test/endtoend/onlineddl/ghost/onlineddl_ghost_test.go index 3dc635c8870..41a9a80086b 100644 --- a/go/test/endtoend/onlineddl/ghost/onlineddl_ghost_test.go +++ b/go/test/endtoend/onlineddl/ghost/onlineddl_ghost_test.go @@ -370,7 +370,7 @@ func testWithInitialSchema(t *testing.T) { for i := 0; i < totalTableCount; i++ { tableName := fmt.Sprintf("vt_onlineddl_test_%02d", i) sqlQuery = fmt.Sprintf(createTable, tableName) - err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, sqlQuery) + err := clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, sqlQuery) require.Nil(t, err) for _, insert := range insertStatements 
{ @@ -395,7 +395,7 @@ func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy str } } else { var err error - uuid, err = clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, sqlQuery, cluster.VtctlClientParams{DDLStrategy: ddlStrategy, CallerID: callerID}) + uuid, err = clusterInstance.VtctldClientProcess.ApplySchemaWithOutput(keyspaceName, sqlQuery, cluster.ApplySchemaParams{DDLStrategy: ddlStrategy, CallerID: callerID}) assert.NoError(t, err) } uuid = strings.TrimSpace(uuid) diff --git a/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go b/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go index d4517e67aff..672e79c0985 100644 --- a/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go +++ b/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go @@ -1194,7 +1194,7 @@ func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy str } } else { var err error - uuid, err = clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, alterStatement, cluster.VtctlClientParams{DDLStrategy: ddlStrategy}) + uuid, err = clusterInstance.VtctldClientProcess.ApplySchemaWithOutput(keyspaceName, alterStatement, cluster.ApplySchemaParams{DDLStrategy: ddlStrategy}) assert.NoError(t, err) } uuid = strings.TrimSpace(uuid) diff --git a/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go b/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go index 3c2c25ec8b5..341787552a0 100644 --- a/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go +++ b/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go @@ -82,7 +82,7 @@ var ( keyspaceName = "ks" cell = "zone1" schemaChangeDirectory = "" - overrideVtctlParams *cluster.VtctlClientParams + overrideVtctlParams *cluster.ApplySchemaParams ) type WriteMetrics struct { @@ -949,7 +949,7 @@ func testScheduler(t *testing.T) { t.Run("Idempotent submission, retry failed migration", func(t *testing.T) { uuid := 
"00000000_1111_2222_3333_444444444444" - overrideVtctlParams = &cluster.VtctlClientParams{DDLStrategy: ddlStrategy, UUIDList: uuid, MigrationContext: "idempotent:1111-2222-3333"} + overrideVtctlParams = &cluster.ApplySchemaParams{DDLStrategy: ddlStrategy, UUIDList: uuid, MigrationContext: "idempotent:1111-2222-3333"} defer func() { overrideVtctlParams = nil }() // create a migration and cancel it. We don't let it complete. We want it in "failed" state t.Run("start and fail migration", func(t *testing.T) { @@ -985,7 +985,7 @@ func testScheduler(t *testing.T) { t.Run("Idempotent submission, retry failed migration in singleton context", func(t *testing.T) { uuid := "00000000_1111_3333_3333_444444444444" ddlStrategy := ddlStrategy + " --singleton-context" - overrideVtctlParams = &cluster.VtctlClientParams{DDLStrategy: ddlStrategy, UUIDList: uuid, MigrationContext: "idempotent:1111-3333-3333"} + overrideVtctlParams = &cluster.ApplySchemaParams{DDLStrategy: ddlStrategy, UUIDList: uuid, MigrationContext: "idempotent:1111-3333-3333"} defer func() { overrideVtctlParams = nil }() // create a migration and cancel it. We don't let it complete. 
We want it in "failed" state t.Run("start and fail migration", func(t *testing.T) { @@ -2391,7 +2391,7 @@ func testForeignKeys(t *testing.T) { continue } statement := fmt.Sprintf("DROP TABLE IF EXISTS %s", artifact) - _, err := clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, statement, cluster.VtctlClientParams{DDLStrategy: "direct"}) + _, err := clusterInstance.VtctldClientProcess.ApplySchemaWithOutput(keyspaceName, statement, cluster.ApplySchemaParams{DDLStrategy: "direct"}) if err == nil { droppedTables[artifact] = true } @@ -2423,11 +2423,11 @@ func testOnlineDDLStatement(t *testing.T, params *testOnlineDDLStatementParams) } } } else { - vtctlParams := &cluster.VtctlClientParams{DDLStrategy: params.ddlStrategy, MigrationContext: params.migrationContext} + vtctlParams := &cluster.ApplySchemaParams{DDLStrategy: params.ddlStrategy, MigrationContext: params.migrationContext} if overrideVtctlParams != nil { vtctlParams = overrideVtctlParams } - output, err := clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, params.ddlStatement, *vtctlParams) + output, err := clusterInstance.VtctldClientProcess.ApplySchemaWithOutput(keyspaceName, params.ddlStatement, *vtctlParams) switch params.expectError { case anyErrorIndicator: if err != nil { @@ -2472,7 +2472,7 @@ func testRevertMigration(t *testing.T, params *testRevertMigrationParams) (uuid } } } else { - output, err := clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, revertQuery, cluster.VtctlClientParams{DDLStrategy: params.ddlStrategy, MigrationContext: params.migrationContext}) + output, err := clusterInstance.VtctldClientProcess.ApplySchemaWithOutput(keyspaceName, revertQuery, cluster.ApplySchemaParams{DDLStrategy: params.ddlStrategy, MigrationContext: params.migrationContext}) if params.expectError == "" { assert.NoError(t, err) uuid = output diff --git a/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go 
b/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go index 49e72eda290..5fefdec8f70 100644 --- a/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go +++ b/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go @@ -458,7 +458,7 @@ func TestSchemaChange(t *testing.T) { time.Sleep(10 * time.Second) onlineddl.CheckCancelAllMigrations(t, &vtParams, 0) // Validate that invoking CANCEL ALL via vtctl works - onlineddl.CheckCancelAllMigrationsViaVtctl(t, &clusterInstance.VtctlclientProcess, keyspaceName) + onlineddl.CheckCancelAllMigrationsViaVtctld(t, &clusterInstance.VtctldClientProcess, keyspaceName) }) t.Run("cancel all migrations: some migrations to cancel", func(t *testing.T) { // Use VTGate for throttling, issue a `ALTER VITESS_MIGRATION THROTTLE ALL ...` @@ -497,7 +497,7 @@ func TestSchemaChange(t *testing.T) { } wg.Wait() // cancelling via vtctl does not return values. We CANCEL ALL via vtctl, then validate via VTGate that nothing remains to be cancelled. - onlineddl.CheckCancelAllMigrationsViaVtctl(t, &clusterInstance.VtctlclientProcess, keyspaceName) + onlineddl.CheckCancelAllMigrationsViaVtctld(t, &clusterInstance.VtctldClientProcess, keyspaceName) onlineddl.CheckCancelAllMigrations(t, &vtParams, 0) }) @@ -555,7 +555,7 @@ func TestSchemaChange(t *testing.T) { }) t.Run("PRS shard -80", func(t *testing.T) { // migration has started and is throttled. 
We now run PRS - err := clusterInstance.VtctlclientProcess.ExecuteCommand("PlannedReparentShard", "--", "--keyspace_shard", keyspaceName+"/-80", "--new_primary", reparentTablet.Alias) + err := clusterInstance.VtctldClientProcess.ExecuteCommand("PlannedReparentShard", keyspaceName+"/-80", "--new-primary", reparentTablet.Alias) require.NoError(t, err, "failed PRS: %v", err) rs := onlineddl.VtgateExecQuery(t, &vtParams, "show vitess_tablets", "") onlineddl.PrintQueryResult(os.Stdout, rs) @@ -650,7 +650,7 @@ func TestSchemaChange(t *testing.T) { }) t.Run("PRS shard -80", func(t *testing.T) { // migration has started and completion is postponed. We now PRS - err := clusterInstance.VtctlclientProcess.ExecuteCommand("PlannedReparentShard", "--", "--keyspace_shard", keyspaceName+"/-80", "--new_primary", reparentTablet.Alias) + err := clusterInstance.VtctldClientProcess.ExecuteCommand("PlannedReparentShard", keyspaceName+"/-80", "--new-primary", reparentTablet.Alias) require.NoError(t, err, "failed PRS: %v", err) rs := onlineddl.VtgateExecQuery(t, &vtParams, "show vitess_tablets", "") onlineddl.PrintQueryResult(os.Stdout, rs) @@ -905,7 +905,7 @@ func testWithInitialSchema(t *testing.T) { var sqlQuery = "" //nolint for i := 0; i < totalTableCount; i++ { sqlQuery = fmt.Sprintf(createTable, fmt.Sprintf("vt_onlineddl_test_%02d", i)) - err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, sqlQuery) + err := clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, sqlQuery) require.Nil(t, err) } @@ -923,8 +923,8 @@ func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy str uuid = row.AsString("uuid", "") } } else { - params := cluster.VtctlClientParams{DDLStrategy: ddlStrategy, UUIDList: providedUUIDList, MigrationContext: providedMigrationContext} - output, err := clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, sqlQuery, params) + params := cluster.ApplySchemaParams{DDLStrategy: ddlStrategy, UUIDList: 
providedUUIDList, MigrationContext: providedMigrationContext} + output, err := clusterInstance.VtctldClientProcess.ApplySchemaWithOutput(keyspaceName, sqlQuery, params) if expectError == "" { assert.NoError(t, err) uuid = output diff --git a/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go b/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go index 9f442a39c76..84c1ea7165c 100644 --- a/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go +++ b/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go @@ -329,11 +329,11 @@ func TestSchemaChange(t *testing.T) { func testWithInitialSchema(t *testing.T) { for _, statement := range cleanupStatements { - err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, statement) + err := clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, statement) require.Nil(t, err) } // Create the stress table - err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, createStatement) + err := clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, createStatement) require.Nil(t, err) // Check if table is created @@ -349,7 +349,7 @@ func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy str } } else { var err error - uuid, err = clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, alterStatement, cluster.VtctlClientParams{DDLStrategy: ddlStrategy}) + uuid, err = clusterInstance.VtctldClientProcess.ApplySchemaWithOutput(keyspaceName, alterStatement, cluster.ApplySchemaParams{DDLStrategy: ddlStrategy}) assert.NoError(t, err) } uuid = strings.TrimSpace(uuid) diff --git a/go/test/endtoend/onlineddl/vrepl_stress_suite/onlineddl_vrepl_stress_suite_test.go b/go/test/endtoend/onlineddl/vrepl_stress_suite/onlineddl_vrepl_stress_suite_test.go index 2d9caaa6703..0db25088bd0 100644 --- a/go/test/endtoend/onlineddl/vrepl_stress_suite/onlineddl_vrepl_stress_suite_test.go +++ 
b/go/test/endtoend/onlineddl/vrepl_stress_suite/onlineddl_vrepl_stress_suite_test.go @@ -553,10 +553,10 @@ func TestSchemaChange(t *testing.T) { func testWithInitialSchema(t *testing.T) { // Create the stress table for _, statement := range cleanupStatements { - err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, statement) + err := clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, statement) require.Nil(t, err) } - err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, createStatement) + err := clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, createStatement) require.Nil(t, err) // Check if table is created @@ -572,7 +572,7 @@ func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy str } } else { var err error - uuid, err = clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, alterStatement, cluster.VtctlClientParams{DDLStrategy: ddlStrategy}) + uuid, err = clusterInstance.VtctldClientProcess.ApplySchemaWithOutput(keyspaceName, alterStatement, cluster.ApplySchemaParams{DDLStrategy: ddlStrategy}) assert.NoError(t, err) } uuid = strings.TrimSpace(uuid) diff --git a/go/test/endtoend/onlineddl/vtctlutil.go b/go/test/endtoend/onlineddl/vtctlutil.go index 19a6ff79604..52a832f0e1f 100644 --- a/go/test/endtoend/onlineddl/vtctlutil.go +++ b/go/test/endtoend/onlineddl/vtctlutil.go @@ -25,9 +25,9 @@ import ( ) // CheckCancelAllMigrations cancels all pending migrations. There is no validation for affected migrations. 
-func CheckCancelAllMigrationsViaVtctl(t *testing.T, vtctlclient *cluster.VtctlClientProcess, keyspace string) { +func CheckCancelAllMigrationsViaVtctld(t *testing.T, vtctldclient *cluster.VtctldClientProcess, keyspace string) { cancelQuery := "alter vitess_migration cancel all" - _, err := vtctlclient.ApplySchemaWithOutput(keyspace, cancelQuery, cluster.VtctlClientParams{}) + _, err := vtctldclient.ApplySchemaWithOutput(keyspace, cancelQuery, cluster.ApplySchemaParams{}) assert.NoError(t, err) } diff --git a/go/test/endtoend/recovery/pitr/shardedpitr_test.go b/go/test/endtoend/recovery/pitr/shardedpitr_test.go index 7f70a926be3..0d9ee4bd5c2 100644 --- a/go/test/endtoend/recovery/pitr/shardedpitr_test.go +++ b/go/test/endtoend/recovery/pitr/shardedpitr_test.go @@ -141,7 +141,7 @@ func TestPITRRecovery(t *testing.T) { cluster.VerifyRowsInTabletForTable(t, replica1, keyspaceName, 2, "product") // backup the replica - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Backup", replica1.Alias) require.NoError(t, err) // check that the backup shows up in the listing @@ -181,10 +181,10 @@ func TestPITRRecovery(t *testing.T) { cluster.VerifyRowsInTabletForTable(t, shard1Replica1, keyspaceName, 4, "product") // take the backup (to simulate the regular backup) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Backup", shard0Replica1.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Backup", shard0Replica1.Alias) require.NoError(t, err) // take the backup (to simulate the regular backup) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Backup", shard1Replica1.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Backup", shard1Replica1.Alias) require.NoError(t, err) backups, err := clusterInstance.ListBackups(keyspaceName + "/-80") @@ -295,10 +295,10 @@ func TestPITRRecovery(t *testing.T) { } func performResharding(t *testing.T) { - err 
:= clusterInstance.VtctlclientProcess.ApplyVSchema(keyspaceName, vSchema) + err := clusterInstance.VtctldClientProcess.ApplyVSchema(keyspaceName, vSchema) require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Reshard", "--", "--source_shards=0", "--target_shards=-80,80-", "Create", "ks.reshardWorkflow") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Reshard", "--source-shards=0", "--target-shards=-80,80-", "create", "--target-keyspace", "ks", "--workflow", "reshardWorkflow") require.NoError(t, err) waitTimeout := 30 * time.Second @@ -307,32 +307,32 @@ func performResharding(t *testing.T) { waitForNoWorkflowLag(t, clusterInstance, "ks.reshardWorkflow") - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Reshard", "--", "--tablet_types=rdonly", "SwitchTraffic", "ks.reshardWorkflow") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Reshard", "--tablet-types=rdonly", "SwitchTraffic", "--target-keyspace", "ks", "--workflow", "reshardWorkflow") require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Reshard", "--", "--tablet_types=replica", "SwitchTraffic", "ks.reshardWorkflow") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Reshard", "--tablet-types=replica", "SwitchTraffic", "--target-keyspace", "ks", "--workflow", "reshardWorkflow") require.NoError(t, err) // then serve primary from the split shards - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Reshard", "--", "--tablet_types=primary", "SwitchTraffic", "ks.reshardWorkflow") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Reshard", "--tablet-types=primary", "SwitchTraffic", "--target-keyspace", "ks", "--workflow", "reshardWorkflow") require.NoError(t, err) // remove the original tablets in the original shard removeTablets(t, []*cluster.Vttablet{primary, replica1, replica2}) for _, tablet := range []*cluster.Vttablet{replica1, replica2} { - err = clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet", tablet.Alias) + err =
clusterInstance.VtctldClientProcess.ExecuteCommand("DeleteTablets", tablet.Alias) require.NoError(t, err) } - err = clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet", "--", "--allow_primary", primary.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("DeleteTablets", "--allow-primary", primary.Alias) require.NoError(t, err) // rebuild the serving graph, all mentions of the old shards should be gone - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RebuildKeyspaceGraph", "ks") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RebuildKeyspaceGraph", "ks") require.NoError(t, err) // delete the original shard - err = clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteShard", "ks/0") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("DeleteShards", "ks/0") require.NoError(t, err) // Restart vtgate process @@ -460,13 +460,13 @@ func initializeCluster(t *testing.T) { } } - err = clusterInstance.VtctlclientProcess.InitShardPrimary(keyspaceName, shard.Name, cell, primary.TabletUID) + err = clusterInstance.VtctldClientProcess.InitShardPrimary(keyspaceName, shard.Name, cell, primary.TabletUID) require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.InitShardPrimary(keyspaceName, shard0.Name, cell, shard0Primary.TabletUID) + err = clusterInstance.VtctldClientProcess.InitShardPrimary(keyspaceName, shard0.Name, cell, shard0Primary.TabletUID) require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.InitShardPrimary(keyspaceName, shard1.Name, cell, shard1Primary.TabletUID) + err = clusterInstance.VtctldClientProcess.InitShardPrimary(keyspaceName, shard1.Name, cell, shard1Primary.TabletUID) require.NoError(t, err) err = clusterInstance.StartVTOrc(keyspaceName) @@ -497,9 +497,9 @@ func insertRow(t *testing.T, id int, productName string, isSlow bool) { } func createRestoreKeyspace(t *testing.T, timeToRecover, restoreKeyspaceName string) { - output, err := 
clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("CreateKeyspace", "--", - "--keyspace_type=SNAPSHOT", "--base_keyspace="+keyspaceName, - "--snapshot_time", timeToRecover, restoreKeyspaceName) + output, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("CreateKeyspace", + "--type=SNAPSHOT", "--base-keyspace="+keyspaceName, + "--snapshot-timestamp", timeToRecover, restoreKeyspaceName) log.Info(output) require.NoError(t, err) } diff --git a/go/test/endtoend/recovery/unshardedrecovery/recovery.go b/go/test/endtoend/recovery/unshardedrecovery/recovery.go index 8966e66ed47..1ebb7c2647f 100644 --- a/go/test/endtoend/recovery/unshardedrecovery/recovery.go +++ b/go/test/endtoend/recovery/unshardedrecovery/recovery.go @@ -164,7 +164,7 @@ func TestMainImpl(m *testing.M) { if err != nil { return 1, err } - if err := localCluster.VtctlclientProcess.InitializeShard(keyspaceName, shard.Name, cell, primary.TabletUID); err != nil { + if err := localCluster.VtctldClientProcess.InitializeShard(keyspaceName, shard.Name, cell, primary.TabletUID); err != nil { return 1, err } if err := localCluster.StartVTOrc(keyspaceName); err != nil { @@ -206,17 +206,17 @@ func TestRecoveryImpl(t *testing.T) { verifyInitialReplication(t) // take first backup of value = test1 - err := localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias) + err := localCluster.VtctldClientProcess.ExecuteCommand("Backup", replica1.Alias) assert.NoError(t, err) backups := listBackups(t) require.Equal(t, len(backups), 1) assert.Contains(t, backups[0], replica1.Alias) - err = localCluster.VtctlclientProcess.ApplyVSchema(keyspaceName, vSchema) + err = localCluster.VtctldClientProcess.ApplyVSchema(keyspaceName, vSchema) assert.NoError(t, err) - output, err := localCluster.VtctlclientProcess.ExecuteCommandWithOutput("GetVSchema", keyspaceName) + output, err := localCluster.VtctldClientProcess.ExecuteCommandWithOutput("GetVSchema", keyspaceName) assert.NoError(t, err)
assert.Contains(t, output, "vt_insert_test") @@ -224,12 +224,12 @@ func TestRecoveryImpl(t *testing.T) { restoreTime := time.Now().UTC() recovery.RestoreTablet(t, localCluster, replica2, recoveryKS1, "0", keyspaceName, commonTabletArg, restoreTime) - output, err = localCluster.VtctlclientProcess.ExecuteCommandWithOutput("GetSrvVSchema", cell) + output, err = localCluster.VtctldClientProcess.ExecuteCommandWithOutput("GetSrvVSchema", cell) assert.NoError(t, err) assert.Contains(t, output, keyspaceName) assert.Contains(t, output, recoveryKS1) - output, err = localCluster.VtctlclientProcess.ExecuteCommandWithOutput("GetVSchema", recoveryKS1) + output, err = localCluster.VtctldClientProcess.ExecuteCommandWithOutput("GetVSchema", recoveryKS1) assert.NoError(t, err) assert.Contains(t, output, "vt_insert_test") @@ -277,13 +277,13 @@ func TestRecoveryImpl(t *testing.T) { } // take second backup of value = msgx1 - err = localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias) + err = localCluster.VtctldClientProcess.ExecuteCommand("Backup", replica1.Alias) assert.NoError(t, err) // restore to first backup recovery.RestoreTablet(t, localCluster, replica3, recoveryKS2, "0", keyspaceName, commonTabletArg, restoreTime) - output, err = localCluster.VtctlclientProcess.ExecuteCommandWithOutput("GetVSchema", recoveryKS2) + output, err = localCluster.VtctldClientProcess.ExecuteCommandWithOutput("GetVSchema", recoveryKS2) assert.NoError(t, err) assert.Contains(t, output, "vt_insert_test") diff --git a/go/test/endtoend/reparent/emergencyreparent/ers_test.go b/go/test/endtoend/reparent/emergencyreparent/ers_test.go index fbd4770e15e..f0cf4f2cd6a 100644 --- a/go/test/endtoend/reparent/emergencyreparent/ers_test.go +++ b/go/test/endtoend/reparent/emergencyreparent/ers_test.go @@ -233,10 +233,10 @@ func TestERSPromoteRdonly(t *testing.T) { tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets var err error - err = 
clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", tablets[1].Alias, "rdonly") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", tablets[1].Alias, "rdonly") require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", tablets[2].Alias, "rdonly") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", tablets[2].Alias, "rdonly") require.NoError(t, err) utils.ConfirmReplication(t, tablets[0], tablets[1:]) @@ -248,7 +248,7 @@ func TestERSPromoteRdonly(t *testing.T) { out, err := utils.ErsIgnoreTablet(clusterInstance, nil, "30s", "30s", []*cluster.Vttablet{tablets[3]}, false) require.NotNil(t, err, out) - out, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetShard", utils.KeyspaceShard) + out, err = clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("GetShard", utils.KeyspaceShard) require.NoError(t, err) require.Contains(t, out, `"uid": 101`, "the primary should still be 101 in the shard info") } @@ -288,16 +288,16 @@ func TestPullFromRdonly(t *testing.T) { // make tablets[1] a rdonly tablet. 
// rename tablet so that the test is not confusing rdonly := tablets[1] - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", rdonly.Alias, "rdonly") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", rdonly.Alias, "rdonly") require.NoError(t, err) // confirm that all the tablets can replicate successfully right now utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{rdonly, tablets[2], tablets[3]}) // stop replication on the other two tablets - err = clusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", tablets[2].Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("StopReplication", tablets[2].Alias) require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", tablets[3].Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("StopReplication", tablets[3].Alias) require.NoError(t, err) // stop semi-sync on the primary so that any transaction now added does not require an ack @@ -311,9 +311,9 @@ func TestPullFromRdonly(t *testing.T) { utils.StopTablet(t, tablets[0], true) // start the replication back on the two tablets - err = clusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", tablets[2].Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("StartReplication", tablets[2].Alias) require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", tablets[3].Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("StartReplication", tablets[3].Alias) require.NoError(t, err) // check that tablets[2] and tablets[3] still only has 1 value @@ -349,12 +349,12 @@ func TestNoReplicationStatusAndIOThreadStopped(t *testing.T) { tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]}) - err := clusterInstance.VtctlclientProcess.ExecuteCommand("ExecuteFetchAsDba", 
tablets[1].Alias, `STOP SLAVE`) + err := clusterInstance.VtctldClientProcess.ExecuteCommand("ExecuteFetchAsDBA", tablets[1].Alias, `STOP SLAVE`) require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ExecuteFetchAsDba", tablets[1].Alias, `RESET SLAVE ALL`) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ExecuteFetchAsDBA", tablets[1].Alias, `RESET SLAVE ALL`) require.NoError(t, err) // - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ExecuteFetchAsDba", tablets[3].Alias, `STOP SLAVE IO_THREAD;`) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ExecuteFetchAsDBA", tablets[3].Alias, `STOP SLAVE IO_THREAD;`) require.NoError(t, err) // Run an additional command in the current primary which will only be acked by tablets[2] and be in its relay log. insertedVal := utils.ConfirmReplication(t, tablets[0], nil) @@ -450,7 +450,7 @@ func TestRecoverWithMultipleFailures(t *testing.T) { utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]}) // make tablets[1] a rdonly tablet. - err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", tablets[1].Alias, "rdonly") + err := clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", tablets[1].Alias, "rdonly") require.NoError(t, err) // Confirm that replication is still working as intended @@ -478,7 +478,7 @@ func TestERSFailFast(t *testing.T) { utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]}) // make tablets[1] a rdonly tablet. 
- err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", tablets[1].Alias, "rdonly") + err := clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", tablets[1].Alias, "rdonly") require.NoError(t, err) // Confirm that replication is still working as intended @@ -517,9 +517,9 @@ func TestReplicationStopped(t *testing.T) { tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]}) - err := clusterInstance.VtctlclientProcess.ExecuteCommand("ExecuteFetchAsDba", tablets[1].Alias, `STOP SLAVE SQL_THREAD;`) + err := clusterInstance.VtctldClientProcess.ExecuteCommand("ExecuteFetchAsDBA", tablets[1].Alias, `STOP SLAVE SQL_THREAD;`) require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ExecuteFetchAsDba", tablets[2].Alias, `STOP SLAVE;`) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ExecuteFetchAsDBA", tablets[2].Alias, `STOP SLAVE;`) require.NoError(t, err) // Run an additional command in the current primary which will only be acked by tablets[3] and be in its relay log. insertedVal := utils.ConfirmReplication(t, tablets[0], nil) @@ -528,7 +528,7 @@ func TestReplicationStopped(t *testing.T) { require.Error(t, err, "ERS should fail with 2 replicas having replication stopped") // Start replication back on tablet[1] - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ExecuteFetchAsDba", tablets[1].Alias, `START SLAVE;`) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ExecuteFetchAsDBA", tablets[1].Alias, `START SLAVE;`) require.NoError(t, err) // Failover to tablets[3] again. 
This time it should succeed out, err := utils.Ers(clusterInstance, tablets[3], "60s", "30s") From ced5167f0f01a90fccafdae9b1dd238f76c9c1c7 Mon Sep 17 00:00:00 2001 From: Andrew Mason Date: Fri, 16 Feb 2024 15:25:00 -0500 Subject: [PATCH 02/17] fix flags for Reshard create Signed-off-by: Andrew Mason --- go/test/endtoend/recovery/pitr/shardedpitr_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go/test/endtoend/recovery/pitr/shardedpitr_test.go b/go/test/endtoend/recovery/pitr/shardedpitr_test.go index 0d9ee4bd5c2..7dba67cecfc 100644 --- a/go/test/endtoend/recovery/pitr/shardedpitr_test.go +++ b/go/test/endtoend/recovery/pitr/shardedpitr_test.go @@ -298,7 +298,7 @@ func performResharding(t *testing.T) { err := clusterInstance.VtctldClientProcess.ApplyVSchema(keyspaceName, vSchema) require.NoError(t, err) - err = clusterInstance.VtctldClientProcess.ExecuteCommand("Reshard", "--source-shards=0", "--target-shards=-80,80-", "create", "--worflow", "ks.reshardWorkflow") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Reshard", "create", "--source-shards=0", "--target-shards=-80,80-", "--worflow", "ks.reshardWorkflow") require.NoError(t, err) waitTimeout := 30 * time.Second From 7b6720fd4adc19349434b729de6c408b39da0155 Mon Sep 17 00:00:00 2001 From: Andrew Mason Date: Fri, 16 Feb 2024 15:45:38 -0500 Subject: [PATCH 03/17] vtctldclient uses stdout not stderr Signed-off-by: Andrew Mason --- go/test/endtoend/backup/vtctlbackup/backup_utils.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/go/test/endtoend/backup/vtctlbackup/backup_utils.go b/go/test/endtoend/backup/vtctlbackup/backup_utils.go index f03a5937b37..c20ab70e652 100644 --- a/go/test/endtoend/backup/vtctlbackup/backup_utils.go +++ b/go/test/endtoend/backup/vtctlbackup/backup_utils.go @@ -1034,7 +1034,7 @@ func terminateBackup(t *testing.T, alias string) { args..., ) - reader, _ := tmpProcess.StderrPipe() + reader, _ := tmpProcess.StdoutPipe() err := 
tmpProcess.Start() require.Nil(t, err) found := false @@ -1068,7 +1068,7 @@ func terminateRestore(t *testing.T) { args..., ) - reader, _ := tmpProcess.StderrPipe() + reader, _ := tmpProcess.StdoutPipe() err := tmpProcess.Start() require.Nil(t, err) found := false From a41769298d7498619c0abad15e72cf3723b55ce9 Mon Sep 17 00:00:00 2001 From: Andrew Mason Date: Fri, 16 Feb 2024 17:14:38 -0500 Subject: [PATCH 04/17] dumbass Signed-off-by: Andrew Mason --- go/test/endtoend/recovery/pitr/shardedpitr_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go/test/endtoend/recovery/pitr/shardedpitr_test.go b/go/test/endtoend/recovery/pitr/shardedpitr_test.go index 7dba67cecfc..a9edbe00a04 100644 --- a/go/test/endtoend/recovery/pitr/shardedpitr_test.go +++ b/go/test/endtoend/recovery/pitr/shardedpitr_test.go @@ -298,7 +298,7 @@ func performResharding(t *testing.T) { err := clusterInstance.VtctldClientProcess.ApplyVSchema(keyspaceName, vSchema) require.NoError(t, err) - err = clusterInstance.VtctldClientProcess.ExecuteCommand("Reshard", "create", "--source-shards=0", "--target-shards=-80,80-", "--worflow", "ks.reshardWorkflow") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Reshard", "create", "--source-shards=0", "--target-shards=-80,80-", "--workflow", "ks.reshardWorkflow") require.NoError(t, err) waitTimeout := 30 * time.Second From f3a9739b433d11864dd5bc0c438cf7ec6c85eb74 Mon Sep 17 00:00:00 2001 From: Andrew Mason Date: Fri, 16 Feb 2024 17:25:04 -0500 Subject: [PATCH 05/17] fix param name for applyschema Signed-off-by: Andrew Mason --- go/test/endtoend/cluster/vtctldclient_process.go | 6 +++--- .../onlineddl/scheduler/onlineddl_scheduler_test.go | 4 ++-- go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go/test/endtoend/cluster/vtctldclient_process.go b/go/test/endtoend/cluster/vtctldclient_process.go index 29b0568aa06..e2127160c14 100644 --- 
a/go/test/endtoend/cluster/vtctldclient_process.go +++ b/go/test/endtoend/cluster/vtctldclient_process.go @@ -97,7 +97,7 @@ func VtctldClientProcessInstance(hostname string, grpcPort int, tmpDirectory str type ApplySchemaParams struct { DDLStrategy string MigrationContext string - UUIDList string + UUIDs string CallerID string BatchSize int } @@ -114,8 +114,8 @@ func (vtctldclient *VtctldClientProcess) ApplySchemaWithOutput(keyspace string, if params.DDLStrategy != "" { args = append(args, "--ddl-strategy", params.DDLStrategy) } - if params.UUIDList != "" { - args = append(args, "--uuid-list", params.UUIDList) + if params.UUIDs != "" { + args = append(args, "--uuid", params.UUIDs) } if params.BatchSize > 0 { args = append(args, "--batch-size", fmt.Sprintf("%d", params.BatchSize)) diff --git a/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go b/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go index 341787552a0..01f7024f59c 100644 --- a/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go +++ b/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go @@ -949,7 +949,7 @@ func testScheduler(t *testing.T) { t.Run("Idempotent submission, retry failed migration", func(t *testing.T) { uuid := "00000000_1111_2222_3333_444444444444" - overrideVtctlParams = &cluster.ApplySchemaParams{DDLStrategy: ddlStrategy, UUIDList: uuid, MigrationContext: "idempotent:1111-2222-3333"} + overrideVtctlParams = &cluster.ApplySchemaParams{DDLStrategy: ddlStrategy, UUIDs: uuid, MigrationContext: "idempotent:1111-2222-3333"} defer func() { overrideVtctlParams = nil }() // create a migration and cancel it. We don't let it complete. 
We want it in "failed" state t.Run("start and fail migration", func(t *testing.T) { @@ -985,7 +985,7 @@ func testScheduler(t *testing.T) { t.Run("Idempotent submission, retry failed migration in singleton context", func(t *testing.T) { uuid := "00000000_1111_3333_3333_444444444444" ddlStrategy := ddlStrategy + " --singleton-context" - overrideVtctlParams = &cluster.ApplySchemaParams{DDLStrategy: ddlStrategy, UUIDList: uuid, MigrationContext: "idempotent:1111-3333-3333"} + overrideVtctlParams = &cluster.ApplySchemaParams{DDLStrategy: ddlStrategy, UUIDs: uuid, MigrationContext: "idempotent:1111-3333-3333"} defer func() { overrideVtctlParams = nil }() // create a migration and cancel it. We don't let it complete. We want it in "failed" state t.Run("start and fail migration", func(t *testing.T) { diff --git a/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go b/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go index 5fefdec8f70..e5df3051612 100644 --- a/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go +++ b/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go @@ -923,7 +923,7 @@ func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy str uuid = row.AsString("uuid", "") } } else { - params := cluster.ApplySchemaParams{DDLStrategy: ddlStrategy, UUIDList: providedUUIDList, MigrationContext: providedMigrationContext} + params := cluster.ApplySchemaParams{DDLStrategy: ddlStrategy, UUIDs: providedUUIDList, MigrationContext: providedMigrationContext} output, err := clusterInstance.VtctldClientProcess.ApplySchemaWithOutput(keyspaceName, sqlQuery, params) if expectError == "" { assert.NoError(t, err) From 25a3d2d10d9f7af8ffc7f05d9863b1aaa83a4e8f Mon Sep 17 00:00:00 2001 From: Andrew Mason Date: Fri, 16 Feb 2024 17:58:45 -0500 Subject: [PATCH 06/17] correct usages for vtctldclient Reshard create Signed-off-by: Andrew Mason --- go/test/endtoend/recovery/pitr/shardedpitr_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 
deletions(-) diff --git a/go/test/endtoend/recovery/pitr/shardedpitr_test.go b/go/test/endtoend/recovery/pitr/shardedpitr_test.go index a9edbe00a04..c45fe3f96cd 100644 --- a/go/test/endtoend/recovery/pitr/shardedpitr_test.go +++ b/go/test/endtoend/recovery/pitr/shardedpitr_test.go @@ -298,7 +298,7 @@ func performResharding(t *testing.T) { err := clusterInstance.VtctldClientProcess.ApplyVSchema(keyspaceName, vSchema) require.NoError(t, err) - err = clusterInstance.VtctldClientProcess.ExecuteCommand("Reshard", "create", "--source-shards=0", "--target-shards=-80,80-", "--workflow", "ks.reshardWorkflow") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Reshard", "create", "--source-shards=0", "--target-shards=-80,80-", "--target-keyspace", "ks", "--workflow", "reshardWorkflow") require.NoError(t, err) waitTimeout := 30 * time.Second @@ -307,14 +307,14 @@ func performResharding(t *testing.T) { waitForNoWorkflowLag(t, clusterInstance, "ks.reshardWorkflow") - err = clusterInstance.VtctldClientProcess.ExecuteCommand("Reshard", "--tablet-types=rdonly", "SwitchTraffic", "--workflow", "ks.reshardWorkflow") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Reshard", "--tablet-types=rdonly", "SwitchTraffic", "--target-keyspace", "ks", "--workflow", "reshardWorkflow") require.NoError(t, err) - err = clusterInstance.VtctldClientProcess.ExecuteCommand("Reshard", "--tablet-types=replica", "SwitchTraffic", "--workflow", "ks.reshardWorkflow") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Reshard", "--tablet-types=replica", "SwitchTraffic", "--target-keyspace", "ks", "--workflow", "reshardWorkflow") require.NoError(t, err) // then serve primary from the split shards - err = clusterInstance.VtctldClientProcess.ExecuteCommand("Reshard", "--tablet-types=primary", "SwitchTraffic", "--workflow", "ks.reshardWorkflow") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Reshard", "--tablet-types=primary", "SwitchTraffic", "--target-keyspace", "ks", 
"--workflow", "reshardWorkflow") require.NoError(t, err) // remove the original tablets in the original shard From 6739d4e232ae146506de4f521aaf0b4b29243b67 Mon Sep 17 00:00:00 2001 From: Andrew Mason Date: Fri, 16 Feb 2024 18:01:10 -0500 Subject: [PATCH 07/17] another command name change Signed-off-by: Andrew Mason --- go/test/endtoend/backup/vtbackup/backup_only_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go/test/endtoend/backup/vtbackup/backup_only_test.go b/go/test/endtoend/backup/vtbackup/backup_only_test.go index f5d96580bb2..3f5389d2726 100644 --- a/go/test/endtoend/backup/vtbackup/backup_only_test.go +++ b/go/test/endtoend/backup/vtbackup/backup_only_test.go @@ -84,7 +84,7 @@ func TestTabletInitialBackup(t *testing.T) { // Vitess expects that the user has set the database into ReadWrite mode before calling // TabletExternallyReparented err = localCluster.VtctldClientProcess.ExecuteCommand( - "SetReadWrite", primary.Alias) + "SetWritable", primary.Alias, "true") require.Nil(t, err) err = localCluster.VtctldClientProcess.ExecuteCommand( "TabletExternallyReparented", primary.Alias) From 2ddc3e6a28820065b203ad1a4b2f5d77eb16b566 Mon Sep 17 00:00:00 2001 From: Andrew Mason Date: Fri, 16 Feb 2024 18:48:40 -0500 Subject: [PATCH 08/17] got em now Signed-off-by: Andrew Mason --- go/test/endtoend/recovery/pitr/shardedpitr_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/go/test/endtoend/recovery/pitr/shardedpitr_test.go b/go/test/endtoend/recovery/pitr/shardedpitr_test.go index c45fe3f96cd..4e0e0573764 100644 --- a/go/test/endtoend/recovery/pitr/shardedpitr_test.go +++ b/go/test/endtoend/recovery/pitr/shardedpitr_test.go @@ -307,14 +307,14 @@ func performResharding(t *testing.T) { waitForNoWorkflowLag(t, clusterInstance, "ks.reshardWorkflow") - err = clusterInstance.VtctldClientProcess.ExecuteCommand("Reshard", "--tablet-types=rdonly", "SwitchTraffic", "--target-keyspace", "ks", "--workflow", 
"reshardWorkflow") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Reshard", "SwitchTraffic", "--tablet-types=rdonly", "--target-keyspace", "ks", "--workflow", "reshardWorkflow") require.NoError(t, err) - err = clusterInstance.VtctldClientProcess.ExecuteCommand("Reshard", "--tablet-types=replica", "SwitchTraffic", "--target-keyspace", "ks", "--workflow", "reshardWorkflow") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Reshard", "SwitchTraffic", "--tablet-types=replica", "--target-keyspace", "ks", "--workflow", "reshardWorkflow") require.NoError(t, err) // then serve primary from the split shards - err = clusterInstance.VtctldClientProcess.ExecuteCommand("Reshard", "--tablet-types=primary", "SwitchTraffic", "--target-keyspace", "ks", "--workflow", "reshardWorkflow") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Reshard", "SwitchTraffic", "--tablet-types=primary", "--target-keyspace", "ks", "--workflow", "reshardWorkflow") require.NoError(t, err) // remove the original tablets in the original shard From edb232873aa4b9c7c7c8caedf4cb4c04017f5f75 Mon Sep 17 00:00:00 2001 From: Andrew Mason Date: Fri, 16 Feb 2024 19:37:09 -0500 Subject: [PATCH 09/17] ugh Signed-off-by: Andrew Mason --- go/test/endtoend/recovery/pitr/shardedpitr_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go/test/endtoend/recovery/pitr/shardedpitr_test.go b/go/test/endtoend/recovery/pitr/shardedpitr_test.go index 4e0e0573764..0aed6573337 100644 --- a/go/test/endtoend/recovery/pitr/shardedpitr_test.go +++ b/go/test/endtoend/recovery/pitr/shardedpitr_test.go @@ -499,7 +499,7 @@ func insertRow(t *testing.T, id int, productName string, isSlow bool) { func createRestoreKeyspace(t *testing.T, timeToRecover, restoreKeyspaceName string) { output, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("CreateKeyspace", "--type=SNAPSHOT", "--base-keyspace="+keyspaceName, - "--snapshot-time", timeToRecover, restoreKeyspaceName) + 
"--snapshot-timestamp", timeToRecover, restoreKeyspaceName) log.Info(output) require.NoError(t, err) } From b34995953a877210fc1698f83be5404053e9b639 Mon Sep 17 00:00:00 2001 From: Andrew Mason Date: Sat, 17 Feb 2024 11:18:52 -0500 Subject: [PATCH 10/17] Migrate many more e2e tests to use vtctldclient commands Signed-off-by: Andrew Mason --- .../endtoend/cluster/vtctldclient_process.go | 18 +++++++ .../reparent/newfeaturetest/reparent_test.go | 6 +-- .../reparent/plannedreparent/reparent_test.go | 26 ++++----- go/test/endtoend/reparent/utils/utils.go | 53 ++++++++----------- .../endtoend/sharded/sharded_keyspace_test.go | 18 +++---- .../buffer/reparent/failover_buffer_test.go | 6 +-- .../buffer/reshard/sharded_buffer_test.go | 9 ++-- .../endtoend/tabletmanager/commands_test.go | 37 +++++++------ .../tabletmanager/custom_rule_topo_test.go | 2 +- .../tabletmanager/primary/tablet_test.go | 23 ++++---- 10 files changed, 101 insertions(+), 97 deletions(-) diff --git a/go/test/endtoend/cluster/vtctldclient_process.go b/go/test/endtoend/cluster/vtctldclient_process.go index e2127160c14..09c08eab4d5 100644 --- a/go/test/endtoend/cluster/vtctldclient_process.go +++ b/go/test/endtoend/cluster/vtctldclient_process.go @@ -22,8 +22,11 @@ import ( "strings" "time" + "vitess.io/vitess/go/json2" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/vterrors" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) // VtctldClientProcess is a generic handle for a running vtctldclient command . @@ -197,6 +200,21 @@ func (vtctldclient *VtctldClientProcess) CreateKeyspace(keyspaceName string, sid return err } +// GetTablet executes vtctldclient command to get a tablet, and parses the response. 
+func (vtctldclient *VtctldClientProcess) GetTablet(alias string) (*topodatapb.Tablet, error) { + data, err := vtctldclient.ExecuteCommandWithOutput("GetTablet", alias) + if err != nil { + return nil, err + } + + var tablet topodatapb.Tablet + err = json2.Unmarshal([]byte(data), &tablet) + if err != nil { + return nil, vterrors.Wrapf(err, "failed to parse tablet output: %s", data) + } + return &tablet, nil +} + // OnlineDDLShowRecent responds with recent schema migration list func (vtctldclient *VtctldClientProcess) OnlineDDLShowRecent(Keyspace string) (result string, err error) { return vtctldclient.ExecuteCommandWithOutput( diff --git a/go/test/endtoend/reparent/newfeaturetest/reparent_test.go b/go/test/endtoend/reparent/newfeaturetest/reparent_test.go index b570509f1a7..44cd89a306a 100644 --- a/go/test/endtoend/reparent/newfeaturetest/reparent_test.go +++ b/go/test/endtoend/reparent/newfeaturetest/reparent_test.go @@ -40,7 +40,7 @@ func TestRecoverWithMultipleVttabletFailures(t *testing.T) { utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]}) // make tablets[1] a rdonly tablet. 
- err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", tablets[1].Alias, "rdonly") + err := clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", tablets[1].Alias, "rdonly") require.NoError(t, err) // Confirm that replication is still working as intended @@ -139,10 +139,10 @@ func TestChangeTypeWithoutSemiSync(t *testing.T) { utils.CheckPrimaryTablet(t, clusterInstance, primary) // Change replica's type to rdonly - err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", replica.Alias, "rdonly") + err := clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", replica.Alias, "rdonly") require.NoError(t, err) // Change tablets type from rdonly back to replica - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", replica.Alias, "replica") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", replica.Alias, "replica") require.NoError(t, err) } diff --git a/go/test/endtoend/reparent/plannedreparent/reparent_test.go b/go/test/endtoend/reparent/plannedreparent/reparent_test.go index 6aa5972b928..07e488b52d0 100644 --- a/go/test/endtoend/reparent/plannedreparent/reparent_test.go +++ b/go/test/endtoend/reparent/plannedreparent/reparent_test.go @@ -44,7 +44,7 @@ func TestPrimaryToSpareStateChangeImpossible(t *testing.T) { tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets // We cannot change a primary to spare - out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ChangeTabletType", tablets[0].Alias, "spare") + out, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("ChangeTabletType", tablets[0].Alias, "spare") require.Error(t, err, out) require.Contains(t, out, "type change PRIMARY -> SPARE is not an allowed transition for ChangeTabletType") } @@ -92,7 +92,7 @@ func TestPRSWithDrainedLaggingTablet(t *testing.T) { defer utils.TeardownCluster(clusterInstance) tablets := 
clusterInstance.Keyspaces[0].Shards[0].Vttablets - err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", tablets[1].Alias, "drained") + err := clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", tablets[1].Alias, "drained") require.NoError(t, err) utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]}) @@ -258,13 +258,13 @@ func reparentFromOutside(t *testing.T, clusterInstance *cluster.LocalProcessClus if downPrimary { err := tablets[0].VttabletProcess.TearDownWithTimeout(30 * time.Second) require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet", "--", - "--allow_primary", tablets[0].Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("DeleteTablets", + "--allow-primary", tablets[0].Alias) require.NoError(t, err) } // update topology with the new server - err := clusterInstance.VtctlclientProcess.ExecuteCommand("TabletExternallyReparented", + err := clusterInstance.VtctldClientProcess.ExecuteCommand("TabletExternallyReparented", tablets[1].Alias) require.NoError(t, err) @@ -318,7 +318,7 @@ func TestReparentWithDownReplica(t *testing.T) { // We have to StartReplication on tablets[2] since the MySQL instance is restarted and does not have replication running // We earlier used to rely on replicationManager to fix this but we have disabled it in our testing environment for latest versions of vttablet and vtctl. 
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", tablets[2].Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("StartReplication", tablets[2].Alias) require.NoError(t, err) // wait until it gets the data @@ -338,9 +338,9 @@ func TestChangeTypeSemiSync(t *testing.T) { primary, replica, rdonly1, rdonly2 := tablets[0], tablets[1], tablets[2], tablets[3] // Updated rdonly tablet and set tablet type to rdonly - err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", rdonly1.Alias, "rdonly") + err := clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", rdonly1.Alias, "rdonly") require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", rdonly2.Alias, "rdonly") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", rdonly2.Alias, "rdonly") require.NoError(t, err) utils.ValidateTopology(t, clusterInstance, true) @@ -349,7 +349,7 @@ func TestChangeTypeSemiSync(t *testing.T) { // Stop replication on rdonly1, to make sure when we make it replica it doesn't start again. // Note we do a similar test for replica -> rdonly below. - err = clusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", rdonly1.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("StopReplication", rdonly1.Alias) require.NoError(t, err) // Check semi-sync on replicas. @@ -364,27 +364,27 @@ func TestChangeTypeSemiSync(t *testing.T) { utils.CheckDBstatus(ctx, t, rdonly2, "Rpl_semi_sync_slave_status", "OFF") // Change replica to rdonly while replicating, should turn off semi-sync, and restart replication. 
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", replica.Alias, "rdonly") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", replica.Alias, "rdonly") require.NoError(t, err) utils.CheckDBvar(ctx, t, replica, "rpl_semi_sync_slave_enabled", "OFF") utils.CheckDBstatus(ctx, t, replica, "Rpl_semi_sync_slave_status", "OFF") // Change rdonly1 to replica, should turn on semi-sync, and not start replication. - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", rdonly1.Alias, "replica") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", rdonly1.Alias, "replica") require.NoError(t, err) utils.CheckDBvar(ctx, t, rdonly1, "rpl_semi_sync_slave_enabled", "ON") utils.CheckDBstatus(ctx, t, rdonly1, "Rpl_semi_sync_slave_status", "OFF") utils.CheckReplicaStatus(ctx, t, rdonly1) // Now change from replica back to rdonly, make sure replication is still not enabled. - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", rdonly1.Alias, "rdonly") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", rdonly1.Alias, "rdonly") require.NoError(t, err) utils.CheckDBvar(ctx, t, rdonly1, "rpl_semi_sync_slave_enabled", "OFF") utils.CheckDBstatus(ctx, t, rdonly1, "Rpl_semi_sync_slave_status", "OFF") utils.CheckReplicaStatus(ctx, t, rdonly1) // Change rdonly2 to replica, should turn on semi-sync, and restart replication. 
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", rdonly2.Alias, "replica") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", rdonly2.Alias, "replica") require.NoError(t, err) utils.CheckDBvar(ctx, t, rdonly2, "rpl_semi_sync_slave_enabled", "ON") utils.CheckDBstatus(ctx, t, rdonly2, "Rpl_semi_sync_slave_status", "ON") diff --git a/go/test/endtoend/reparent/utils/utils.go b/go/test/endtoend/reparent/utils/utils.go index 675648dcf37..a648b0174d3 100644 --- a/go/test/endtoend/reparent/utils/utils.go +++ b/go/test/endtoend/reparent/utils/utils.go @@ -34,7 +34,6 @@ import ( querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/vttablet/tabletconn" - "vitess.io/vitess/go/json2" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/endtoend/cluster" @@ -185,7 +184,7 @@ func setupShard(ctx context.Context, t *testing.T, clusterInstance *cluster.Loca } // Initialize shard - err := clusterInstance.VtctlclientProcess.InitializeShard(KeyspaceName, shardName, tablets[0].Cell, tablets[0].TabletUID) + err := clusterInstance.VtctldClientProcess.InitializeShard(KeyspaceName, shardName, tablets[0].Cell, tablets[0].TabletUID) require.NoError(t, err) ValidateTopology(t, clusterInstance, true) @@ -306,21 +305,21 @@ func PrsAvoid(t *testing.T, clusterInstance *cluster.LocalProcessCluster, tab *c // PrsWithTimeout runs PRS func PrsWithTimeout(t *testing.T, clusterInstance *cluster.LocalProcessCluster, tab *cluster.Vttablet, avoid bool, actionTimeout, waitTimeout string) (string, error) { args := []string{ - "PlannedReparentShard", "--", - "--keyspace_shard", fmt.Sprintf("%s/%s", KeyspaceName, ShardName)} + "PlannedReparentShard", + fmt.Sprintf("%s/%s", KeyspaceName, ShardName)} if actionTimeout != "" { args = append(args, "--action_timeout", actionTimeout) } if waitTimeout != "" { - args = append(args, "--wait_replicas_timeout", waitTimeout) + args = append(args, 
"--wait-replicas-timeout", waitTimeout) } if avoid { - args = append(args, "--avoid_tablet") + args = append(args, "--avoid-primary") } else { - args = append(args, "--new_primary") + args = append(args, "--new-primary") } args = append(args, tab.Alias) - out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(args...) + out, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput(args...) return out, err } @@ -333,17 +332,17 @@ func Ers(clusterInstance *cluster.LocalProcessCluster, tab *cluster.Vttablet, to func ErsIgnoreTablet(clusterInstance *cluster.LocalProcessCluster, tab *cluster.Vttablet, timeout, waitReplicasTimeout string, tabletsToIgnore []*cluster.Vttablet, preventCrossCellPromotion bool) (string, error) { var args []string if timeout != "" { - args = append(args, "--action_timeout", timeout) + args = append(args, "--action-timeout", timeout) } - args = append(args, "EmergencyReparentShard", "--", "--keyspace_shard", fmt.Sprintf("%s/%s", KeyspaceName, ShardName)) + args = append(args, "EmergencyReparentShard", fmt.Sprintf("%s/%s", KeyspaceName, ShardName)) if tab != nil { - args = append(args, "--new_primary", tab.Alias) + args = append(args, "--new-primary", tab.Alias) } if waitReplicasTimeout != "" { - args = append(args, "--wait_replicas_timeout", waitReplicasTimeout) + args = append(args, "--wait-replicas-timeout", waitReplicasTimeout) } if preventCrossCellPromotion { - args = append(args, "--prevent_cross_cell_promotion=true") + args = append(args, "--prevent-cross-cell-promotion") } if len(tabletsToIgnore) != 0 { tabsString := "" @@ -354,9 +353,9 @@ func ErsIgnoreTablet(clusterInstance *cluster.LocalProcessCluster, tab *cluster. tabsString = tabsString + "," + vttablet.Alias } } - args = append(args, "--ignore_replicas", tabsString) + args = append(args, "--ignore-replicas", tabsString) } - return clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(args...) 
+ return clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput(args...) } // ErsWithVtctl runs ERS via vtctl binary @@ -374,9 +373,9 @@ func ValidateTopology(t *testing.T, clusterInstance *cluster.LocalProcessCluster args := []string{"Validate"} if pingTablets { - args = append(args, "--", "--ping-tablets=true") + args = append(args, "--ping-tablets") } - out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(args...) + out, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput(args...) require.Empty(t, out) require.NoError(t, err) } @@ -398,17 +397,14 @@ func ConfirmReplication(t *testing.T, primary *cluster.Vttablet, replicas []*clu // ConfirmOldPrimaryIsHangingAround confirms that the old primary is hanging around func ConfirmOldPrimaryIsHangingAround(t *testing.T, clusterInstance *cluster.LocalProcessCluster) { - out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("Validate") + out, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("Validate") require.Error(t, err) require.Contains(t, out, "already has primary") } // CheckPrimaryTablet makes sure the tablet type is primary, and its health check agrees. func CheckPrimaryTablet(t *testing.T, clusterInstance *cluster.LocalProcessCluster, tablet *cluster.Vttablet) { - result, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetTablet", tablet.Alias) - require.NoError(t, err) - var tabletInfo topodatapb.Tablet - err = json2.Unmarshal([]byte(result), &tabletInfo) + tabletInfo, err := clusterInstance.VtctldClientProcess.GetTablet(tablet.Alias) require.NoError(t, err) assert.Equal(t, topodatapb.TabletType_PRIMARY, tabletInfo.GetType()) @@ -424,10 +420,7 @@ func CheckPrimaryTablet(t *testing.T, clusterInstance *cluster.LocalProcessClust // isHealthyPrimaryTablet will return if tablet is primary AND healthy. 
func isHealthyPrimaryTablet(t *testing.T, clusterInstance *cluster.LocalProcessCluster, tablet *cluster.Vttablet) bool { - result, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetTablet", tablet.Alias) - require.Nil(t, err) - var tabletInfo topodatapb.Tablet - err = json2.Unmarshal([]byte(result), &tabletInfo) + tabletInfo, err := clusterInstance.VtctldClientProcess.GetTablet(tablet.Alias) require.Nil(t, err) if tabletInfo.GetType() != topodatapb.TabletType_PRIMARY { return false @@ -541,9 +534,9 @@ func ResurrectTablet(ctx context.Context, t *testing.T, clusterInstance *cluster // DeleteTablet is used to delete the given tablet func DeleteTablet(t *testing.T, clusterInstance *cluster.LocalProcessCluster, tab *cluster.Vttablet) { - err := clusterInstance.VtctlclientProcess.ExecuteCommand( - "DeleteTablet", "--", - "--allow_primary", + err := clusterInstance.VtctldClientProcess.ExecuteCommand( + "DeleteTablets", + "--allow-primary", tab.Alias) require.NoError(t, err) } @@ -629,7 +622,7 @@ func CheckReparentFromOutside(t *testing.T, clusterInstance *cluster.LocalProces // make sure the primary health stream says it's the primary too // (health check is disabled on these servers, force it first) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", tablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RunHealthCheck", tablet.Alias) require.NoError(t, err) shrs, err := clusterInstance.StreamTabletHealth(context.Background(), tablet, 1) diff --git a/go/test/endtoend/sharded/sharded_keyspace_test.go b/go/test/endtoend/sharded/sharded_keyspace_test.go index 857dc455206..f311404ad7e 100644 --- a/go/test/endtoend/sharded/sharded_keyspace_test.go +++ b/go/test/endtoend/sharded/sharded_keyspace_test.go @@ -108,9 +108,9 @@ func TestShardedKeyspace(t *testing.T) { shard1Primary := shard1.Vttablets[0] shard2Primary := shard2.Vttablets[0] - err := clusterInstance.VtctlclientProcess.InitializeShard(keyspaceName, 
shard1.Name, cell, shard1Primary.TabletUID) + err := clusterInstance.VtctldClientProcess.InitializeShard(keyspaceName, shard1.Name, cell, shard1Primary.TabletUID) require.Nil(t, err) - err = clusterInstance.VtctlclientProcess.InitializeShard(keyspaceName, shard2.Name, cell, shard2Primary.TabletUID) + err = clusterInstance.VtctldClientProcess.InitializeShard(keyspaceName, shard2.Name, cell, shard2Primary.TabletUID) require.Nil(t, err) err = clusterInstance.StartVTOrc(keyspaceName) @@ -125,7 +125,7 @@ func TestShardedKeyspace(t *testing.T) { _, err = shard2Primary.VttabletProcess.QueryTablet(sqlSchemaReverse, keyspaceName, true) require.Nil(t, err) - if err = clusterInstance.VtctlclientProcess.ApplyVSchema(keyspaceName, vSchema); err != nil { + if err = clusterInstance.VtctldClientProcess.ApplyVSchema(keyspaceName, vSchema); err != nil { log.Error(err.Error()) return } @@ -136,13 +136,13 @@ func TestShardedKeyspace(t *testing.T) { shard2Primary.Alias, shard2.Vttablets[1].Alias) - _ = clusterInstance.VtctlclientProcess.ExecuteCommand("SetReadWrite", shard1Primary.Alias) - _ = clusterInstance.VtctlclientProcess.ExecuteCommand("SetReadWrite", shard2Primary.Alias) + _ = clusterInstance.VtctldClientProcess.ExecuteCommand("SetWritable", shard1Primary.Alias, "true") + _ = clusterInstance.VtctldClientProcess.ExecuteCommand("SetWritable", shard2Primary.Alias, "true") _, _ = shard1Primary.VttabletProcess.QueryTablet("insert into vt_select_test (id, msg) values (1, 'test 1')", keyspaceName, true) _, _ = shard2Primary.VttabletProcess.QueryTablet("insert into vt_select_test (id, msg) values (10, 'test 10')", keyspaceName, true) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Validate", "--", "--ping-tablets") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Validate", "--ping-tablets") require.Nil(t, err) rows, err := shard1Primary.VttabletProcess.QueryTablet("select id, msg from vt_select_test order by id", keyspaceName, true) @@ -164,9 +164,9 @@ func 
TestShardedKeyspace(t *testing.T) { assert.Contains(t, output, shard1Primary.Alias+": CREATE TABLE") assert.Contains(t, output, shard2Primary.Alias+": CREATE TABLE") - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ValidateVersionShard", fmt.Sprintf("%s/%s", keyspaceName, shard1.Name)) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ValidateVersionShard", fmt.Sprintf("%s/%s", keyspaceName, shard1.Name)) require.Nil(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("GetPermissions", shard1.Vttablets[1].Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("GetPermissions", shard1.Vttablets[1].Alias) require.Nil(t, err) err = clusterInstance.VtctlclientProcess.ExecuteCommand("ValidatePermissionsShard", fmt.Sprintf("%s/%s", keyspaceName, shard1.Name)) require.Nil(t, err) @@ -184,7 +184,7 @@ func TestShardedKeyspace(t *testing.T) { func reloadSchemas(t *testing.T, aliases ...string) { for _, alias := range aliases { - if err := clusterInstance.VtctlclientProcess.ExecuteCommand("ReloadSchema", alias); err != nil { + if err := clusterInstance.VtctldClientProcess.ExecuteCommand("ReloadSchema", alias); err != nil { assert.Fail(t, "Unable to reload schema") } diff --git a/go/test/endtoend/tabletgateway/buffer/reparent/failover_buffer_test.go b/go/test/endtoend/tabletgateway/buffer/reparent/failover_buffer_test.go index 2be57120050..486dc3ef9e5 100644 --- a/go/test/endtoend/tabletgateway/buffer/reparent/failover_buffer_test.go +++ b/go/test/endtoend/tabletgateway/buffer/reparent/failover_buffer_test.go @@ -83,7 +83,7 @@ func failoverExternalReparenting(t *testing.T, clusterInstance *cluster.LocalPro require.NoError(t, err) // Notify the new vttablet primary about the reparent. 
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("TabletExternallyReparented", newPrimary.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("TabletExternallyReparented", newPrimary.Alias) require.NoError(t, err) } @@ -92,9 +92,9 @@ func failoverPlannedReparenting(t *testing.T, clusterInstance *cluster.LocalProc reads.ExpectQueries(10) writes.ExpectQueries(10) - err := clusterInstance.VtctlclientProcess.ExecuteCommand("PlannedReparentShard", "--", "--keyspace_shard", + err := clusterInstance.VtctldClientProcess.ExecuteCommand("PlannedReparentShard", fmt.Sprintf("%s/%s", keyspaceUnshardedName, "0"), - "--new_primary", clusterInstance.Keyspaces[0].Shards[0].Vttablets[1].Alias) + "--new-primary", clusterInstance.Keyspaces[0].Shards[0].Vttablets[1].Alias) require.NoError(t, err) } diff --git a/go/test/endtoend/tabletgateway/buffer/reshard/sharded_buffer_test.go b/go/test/endtoend/tabletgateway/buffer/reshard/sharded_buffer_test.go index ae922108012..d58d8901165 100644 --- a/go/test/endtoend/tabletgateway/buffer/reshard/sharded_buffer_test.go +++ b/go/test/endtoend/tabletgateway/buffer/reshard/sharded_buffer_test.go @@ -68,9 +68,8 @@ func reshard02(t *testing.T, clusterInstance *cluster.LocalProcessCluster, keysp err := clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 1, false) require.NoError(t, err) workflowName := "buf2buf" - workflow := fmt.Sprintf("%s.%s", keyspaceName, "buf2buf") - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Reshard", "--", "--source_shards", "0", "--target_shards", "-80,80-", "Create", workflow) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Reshard", "Create", "--target-keyspace", keyspaceName, "--workflow", workflowName, "--source-shards", "0", "--target-shards", "-80,80-") require.NoError(t, err) // Execute the resharding operation @@ -78,13 +77,13 @@ func reshard02(t *testing.T, clusterInstance *cluster.LocalProcessCluster, keysp writes.ExpectQueries(25) waitForLowLag(t, 
clusterInstance, keyspaceName, workflowName) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Reshard", "--", "--tablet_types=rdonly,replica", "SwitchTraffic", workflow) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Reshard", "SwitchTraffic", "--target-keyspace", keyspaceName, "--workflow", workflowName, "--tablet-types=rdonly,replica") require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Reshard", "--", "--tablet_types=primary", "SwitchTraffic", workflow) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Reshard", "SwitchTraffic", "--target-keyspace", keyspaceName, "--workflow", workflowName, "--tablet-types=primary") require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Reshard", "Complete", workflow) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Reshard", "--target-keyspace", keyspaceName, "--workflow", workflowName, "Complete") require.NoError(t, err) } diff --git a/go/test/endtoend/tabletmanager/commands_test.go b/go/test/endtoend/tabletmanager/commands_test.go index 1a2d2424cb4..3967b36a1d3 100644 --- a/go/test/endtoend/tabletmanager/commands_test.go +++ b/go/test/endtoend/tabletmanager/commands_test.go @@ -62,52 +62,52 @@ func TestTabletCommands(t *testing.T) { // make sure direct dba queries work sql := "select * from t1" - result, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ExecuteFetchAsDba", "--", "--json", primaryTablet.Alias, sql) + result, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("ExecuteFetchAsDBA", "--json", primaryTablet.Alias, sql) require.Nil(t, err) assertExecuteFetch(t, result) // check Ping / RefreshState / RefreshStateByShard - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Ping", primaryTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("PingTablet", primaryTablet.Alias) require.Nil(t, err, "error should be Nil") - err = 
clusterInstance.VtctlclientProcess.ExecuteCommand("RefreshState", primaryTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RefreshState", primaryTablet.Alias) require.Nil(t, err, "error should be Nil") - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RefreshStateByShard", keyspaceShard) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RefreshStateByShard", keyspaceShard) require.Nil(t, err, "error should be Nil") - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RefreshStateByShard", "--", "--cells="+cell, keyspaceShard) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RefreshStateByShard", "--cells", cell, keyspaceShard) require.Nil(t, err, "error should be Nil") // Check basic actions. - err = clusterInstance.VtctlclientProcess.ExecuteCommand("SetReadOnly", primaryTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("SetWritable", primaryTablet.Alias, "false") require.Nil(t, err, "error should be Nil") qr := utils.Exec(t, conn, "show variables like 'read_only'") got := fmt.Sprintf("%v", qr.Rows) want := "[[VARCHAR(\"read_only\") VARCHAR(\"ON\")]]" assert.Equal(t, want, got) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("SetReadWrite", primaryTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("SetWritable", primaryTablet.Alias, "true") require.Nil(t, err, "error should be Nil") qr = utils.Exec(t, conn, "show variables like 'read_only'") got = fmt.Sprintf("%v", qr.Rows) want = "[[VARCHAR(\"read_only\") VARCHAR(\"OFF\")]]" assert.Equal(t, want, got) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Validate") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Validate") require.Nil(t, err, "error should be Nil") - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Validate", "--", "--ping-tablets=true") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Validate", "--ping-tablets") require.Nil(t, err, "error should 
be Nil") - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ValidateKeyspace", keyspaceName) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ValidateKeyspace", keyspaceName) require.Nil(t, err, "error should be Nil") - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ValidateKeyspace", "--", "--ping-tablets=true", keyspaceName) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ValidateKeyspace", "--ping-tablets", keyspaceName) require.Nil(t, err, "error should be Nil") - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ValidateShard", "--", "--ping-tablets=false", keyspaceShard) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ValidateShard", "--ping-tablets", keyspaceShard) require.Nil(t, err, "error should be Nil") - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ValidateShard", "--", "--ping-tablets=true", keyspaceShard) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ValidateShard", "--ping-tablets", keyspaceShard) require.Nil(t, err, "error should be Nil") } @@ -142,14 +142,13 @@ func assertExecuteFetch(t *testing.T, qr string) { // ActionAndTimeout test func TestActionAndTimeout(t *testing.T) { - defer cluster.PanicHandler(t) - err := clusterInstance.VtctlclientProcess.ExecuteCommand("Sleep", primaryTablet.Alias, "5s") + err := clusterInstance.VtctldClientProcess.ExecuteCommand("SleepTablet", primaryTablet.Alias, "5s") require.Nil(t, err) time.Sleep(1 * time.Second) // try a frontend RefreshState that should timeout as the tablet is busy running the other one - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RefreshState", "--", primaryTablet.Alias, "--wait-time", "2s") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RefreshState", primaryTablet.Alias, "--wait-timeout", "2s") assert.Error(t, err, "timeout as tablet is in Sleep") } @@ -207,14 +206,14 @@ func TestShardReplicationFix(t *testing.T) { assertNodeCount(t, result, int(3)) // Manually add a bogus entry 
to the replication graph, and check it is removed by ShardReplicationFix - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ShardReplicationAdd", keyspaceShard, fmt.Sprintf("%s-9000", cell)) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ShardReplicationAdd", keyspaceShard, fmt.Sprintf("%s-9000", cell)) require.Nil(t, err, "error should be Nil") result, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetShardReplication", cell, keyspaceShard) require.Nil(t, err, "error should be Nil") assertNodeCount(t, result, int(4)) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ShardReplicationFix", cell, keyspaceShard) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ShardReplicationFix", cell, keyspaceShard) require.Nil(t, err, "error should be Nil") result, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetShardReplication", cell, keyspaceShard) require.Nil(t, err, "error should be Nil") @@ -224,7 +223,7 @@ func TestShardReplicationFix(t *testing.T) { func TestGetSchema(t *testing.T) { defer cluster.PanicHandler(t) - res, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetSchema", "--", + res, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("GetSchema", "--include-views", "--tables", "t1,v1", fmt.Sprintf("%s-%d", clusterInstance.Cell, primaryTablet.TabletUID)) require.Nil(t, err) diff --git a/go/test/endtoend/tabletmanager/custom_rule_topo_test.go b/go/test/endtoend/tabletmanager/custom_rule_topo_test.go index aa09a99e0fe..0c6e056af36 100644 --- a/go/test/endtoend/tabletmanager/custom_rule_topo_test.go +++ b/go/test/endtoend/tabletmanager/custom_rule_topo_test.go @@ -74,7 +74,7 @@ func TestTopoCustomRule(t *testing.T) { err = clusterInstance.StartVttablet(rTablet, false, "SERVING", false, cell, keyspaceName, hostname, shardName) require.Nil(t, err, "error should be Nil") - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Validate") + err = 
clusterInstance.VtctldClientProcess.ExecuteCommand("Validate") require.Nil(t, err, "error should be Nil") // And wait until the query is working. diff --git a/go/test/endtoend/tabletmanager/primary/tablet_test.go b/go/test/endtoend/tabletmanager/primary/tablet_test.go index f6255b1f71a..297e5540fac 100644 --- a/go/test/endtoend/tabletmanager/primary/tablet_test.go +++ b/go/test/endtoend/tabletmanager/primary/tablet_test.go @@ -26,7 +26,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/json2" "vitess.io/vitess/go/test/endtoend/cluster" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -121,16 +120,16 @@ func TestRepeatedInitShardPrimary(t *testing.T) { // Test that using InitShardPrimary can go back and forth between 2 hosts. // Make replica tablet as primary - err := clusterInstance.VtctlclientProcess.InitShardPrimary(keyspaceName, shardName, cell, replicaTablet.TabletUID) + err := clusterInstance.VtctldClientProcess.InitShardPrimary(keyspaceName, shardName, cell, replicaTablet.TabletUID) require.NoError(t, err) // Run health check on both, make sure they are both healthy. // Also make sure the types are correct. - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", primaryTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RunHealthCheck", primaryTablet.Alias) require.NoError(t, err) checkHealth(t, primaryTablet.HTTPPort, false) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", replicaTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RunHealthCheck", replicaTablet.Alias) require.NoError(t, err) checkHealth(t, replicaTablet.HTTPPort, false) @@ -138,16 +137,16 @@ func TestRepeatedInitShardPrimary(t *testing.T) { checkTabletType(t, replicaTablet.Alias, "PRIMARY") // Come back to the original tablet. 
- err = clusterInstance.VtctlclientProcess.InitShardPrimary(keyspaceName, shardName, cell, primaryTablet.TabletUID) + err = clusterInstance.VtctldClientProcess.InitShardPrimary(keyspaceName, shardName, cell, primaryTablet.TabletUID) require.NoError(t, err) // Run health check on both, make sure they are both healthy. // Also make sure the types are correct. - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", primaryTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RunHealthCheck", primaryTablet.Alias) require.NoError(t, err) checkHealth(t, primaryTablet.HTTPPort, false) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", replicaTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RunHealthCheck", replicaTablet.Alias) require.NoError(t, err) checkHealth(t, replicaTablet.HTTPPort, false) @@ -162,7 +161,7 @@ func TestPrimaryRestartSetsPTSTimestamp(t *testing.T) { // See StreamHealthResponse.primary_term_start_timestamp for details. 
// Make replica as primary - err := clusterInstance.VtctlclientProcess.InitShardPrimary(keyspaceName, shardName, cell, replicaTablet.TabletUID) + err := clusterInstance.VtctldClientProcess.InitShardPrimary(keyspaceName, shardName, cell, replicaTablet.TabletUID) require.NoError(t, err) err = replicaTablet.VttabletProcess.WaitForTabletStatus("SERVING") @@ -212,7 +211,7 @@ func TestPrimaryRestartSetsPTSTimestamp(t *testing.T) { streamHealthRes2.GetPrimaryTermStartTimestamp())) // Reset primary - err = clusterInstance.VtctlclientProcess.InitShardPrimary(keyspaceName, shardName, cell, primaryTablet.TabletUID) + err = clusterInstance.VtctldClientProcess.InitShardPrimary(keyspaceName, shardName, cell, primaryTablet.TabletUID) require.NoError(t, err) err = primaryTablet.VttabletProcess.WaitForTabletStatus("SERVING") require.NoError(t, err) @@ -232,11 +231,7 @@ func checkHealth(t *testing.T, port int, shouldError bool) { } func checkTabletType(t *testing.T, tabletAlias string, typeWant string) { - result, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetTablet", tabletAlias) - require.NoError(t, err) - - var tablet topodatapb.Tablet - err = json2.Unmarshal([]byte(result), &tablet) + tablet, err := clusterInstance.VtctldClientProcess.GetTablet(tabletAlias) require.NoError(t, err) actualType := tablet.GetType() From fa1c48422dab41b79ecf96c790ff9959dc5fbec9 Mon Sep 17 00:00:00 2001 From: Andrew Mason Date: Sat, 17 Feb 2024 11:56:34 -0500 Subject: [PATCH 11/17] more e2e vtctldclient migrations Signed-off-by: Andrew Mason --- .../endtoend/cluster/vtctldclient_process.go | 21 +++++++++ .../tabletmanager/tablet_health_test.go | 43 ++++++++----------- go/test/endtoend/tabletmanager/tablet_test.go | 6 +-- .../throttler_topo/throttler_test.go | 8 ++-- go/test/endtoend/topoconncache/main_test.go | 12 +++--- .../topoconncache/topo_conn_cache_test.go | 4 +- go/test/endtoend/vault/vault_test.go | 2 +- .../endtoend/versionupgrade/upgrade_test.go | 2 +- 
.../vtgate/createdb_plugin/main_test.go | 4 +- .../endtoend/vtgate/foreignkey/main_test.go | 2 +- .../foreignkey/stress/fk_stress_test.go | 8 ++-- go/test/endtoend/vtgate/gen4/main_test.go | 4 +- go/test/endtoend/vtgate/main_test.go | 4 +- .../queries/informationschema/main_test.go | 4 +- .../queries/lookup_queries/main_test.go | 2 +- .../reservedconn/reconnect1/main_test.go | 12 +++--- .../reservedconn/reconnect2/main_test.go | 4 +- .../reservedconn/reconnect3/main_test.go | 2 +- .../reservedconn/reconnect4/main_test.go | 2 +- go/test/endtoend/vtgate/schema/schema_test.go | 30 ++++++------- .../sharded_prs/st_sharded_test.go | 2 +- .../tablet_healthcheck/reparent_test.go | 6 +-- .../correctness_test.go | 2 +- go/test/endtoend/vtorc/api/api_test.go | 2 +- go/test/endtoend/vtorc/general/vtorc_test.go | 14 +++--- .../primaryfailure/primary_failure_test.go | 14 +++--- go/test/endtoend/vtorc/utils/utils.go | 24 ++++------- 27 files changed, 124 insertions(+), 116 deletions(-) diff --git a/go/test/endtoend/cluster/vtctldclient_process.go b/go/test/endtoend/cluster/vtctldclient_process.go index 09c08eab4d5..88f7490d89c 100644 --- a/go/test/endtoend/cluster/vtctldclient_process.go +++ b/go/test/endtoend/cluster/vtctldclient_process.go @@ -27,6 +27,7 @@ import ( "vitess.io/vitess/go/vt/vterrors" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" ) // VtctldClientProcess is a generic handle for a running vtctldclient command . @@ -97,6 +98,11 @@ func VtctldClientProcessInstance(hostname string, grpcPort int, tmpDirectory str return vtctldclient } +// ApplyRoutingRules applies the given routing rules. 
+func (vtctldclient *VtctldClientProcess) ApplyRoutingRules(json string) error { + return vtctldclient.ExecuteCommand("ApplyRoutingRules", "--rules", json) +} + type ApplySchemaParams struct { DDLStrategy string MigrationContext string @@ -200,6 +206,21 @@ func (vtctldclient *VtctldClientProcess) CreateKeyspace(keyspaceName string, sid return err } +// GetShard executes the vtctldclient command to get a shard, and parses the response. +func (vtctldclient *VtctldClientProcess) GetShard(keyspace string, shard string) (*vtctldatapb.Shard, error) { + data, err := vtctldclient.ExecuteCommandWithOutput("GetShard", fmt.Sprintf("%s/%s", keyspace, shard)) + if err != nil { + return nil, err + } + + var si vtctldatapb.Shard + err = json2.Unmarshal([]byte(data), &si) + if err != nil { + return nil, vterrors.Wrapf(err, "failed to parse shard output: %s", data) + } + return &si, nil +} + // GetTablet executes vtctldclient command to get a tablet, and parses the response. func (vtctldclient *VtctldClientProcess) GetTablet(alias string) (*topodatapb.Tablet, error) { data, err := vtctldclient.ExecuteCommandWithOutput("GetTablet", alias) diff --git a/go/test/endtoend/tabletmanager/tablet_health_test.go b/go/test/endtoend/tabletmanager/tablet_health_test.go index 7dc4bcd97d2..c8eb43b682f 100644 --- a/go/test/endtoend/tabletmanager/tablet_health_test.go +++ b/go/test/endtoend/tabletmanager/tablet_health_test.go @@ -30,7 +30,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/json2" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" @@ -85,7 +84,7 @@ func TestTabletReshuffle(t *testing.T) { require.NoError(t, err) assertExcludeFields(t, string(result)) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Backup", rTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Backup", rTablet.Alias) assert.Error(t, err, "cannot perform backup 
without my.cnf") killTablets(rTablet) @@ -114,7 +113,7 @@ func TestHealthCheck(t *testing.T) { require.NoError(t, err) defer conn.Close() - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", rTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RunHealthCheck", rTablet.Alias) require.NoError(t, err) checkHealth(t, rTablet.HTTPPort, false) @@ -123,9 +122,9 @@ func TestHealthCheck(t *testing.T) { utils.Exec(t, conn, "stop slave") // stop replication, make sure we don't go unhealthy. - err = clusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", rTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("StopReplication", rTablet.Alias) require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", rTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RunHealthCheck", rTablet.Alias) require.NoError(t, err) // make sure the health stream is updated @@ -136,9 +135,9 @@ func TestHealthCheck(t *testing.T) { } // then restart replication, make sure we stay healthy - err = clusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", rTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("StartReplication", rTablet.Alias) require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", rTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RunHealthCheck", rTablet.Alias) require.NoError(t, err) checkHealth(t, rTablet.HTTPPort, false) @@ -173,16 +172,16 @@ func TestHealthCheck(t *testing.T) { // On a MySQL restart, it comes up as a read-only tablet (check default.cnf file). 
// We have to explicitly set it to read-write otherwise heartbeat writer is unable // to write the heartbeats - err = clusterInstance.VtctlclientProcess.ExecuteCommand("SetReadWrite", primaryTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("SetWritable", primaryTablet.Alias, "true") require.NoError(t, err) // explicitly start replication on all of the replicas to avoid any test flakiness as they were all // replicating from the primary instance - err = clusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", rTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("StartReplication", rTablet.Alias) require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", replicaTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("StartReplication", replicaTablet.Alias) require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", rdonlyTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("StartReplication", rdonlyTablet.Alias) require.NoError(t, err) time.Sleep(tabletHealthcheckRefreshInterval) @@ -348,11 +347,7 @@ func checkHealth(t *testing.T, port int, shouldError bool) { } func checkTabletType(t *testing.T, tabletAlias string, typeWant string) { - result, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetTablet", tabletAlias) - require.NoError(t, err) - - var tablet topodatapb.Tablet - err = json2.Unmarshal([]byte(result), &tablet) + tablet, err := clusterInstance.VtctldClientProcess.GetTablet(tabletAlias) require.NoError(t, err) actualType := tablet.GetType() @@ -398,16 +393,16 @@ func TestHealthCheckDrainedStateDoesNotShutdownQueryService(t *testing.T) { // Change from rdonly to drained and stop replication. The tablet will stay // healthy, and the query service is still running. 
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", rdonlyTablet.Alias, "drained") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", rdonlyTablet.Alias, "drained") require.NoError(t, err) // Trying to drain the same tablet again, should error - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", rdonlyTablet.Alias, "drained") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", rdonlyTablet.Alias, "drained") assert.Error(t, err, "already drained") - err = clusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", rdonlyTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("StopReplication", rdonlyTablet.Alias) require.NoError(t, err) // Trigger healthcheck explicitly to avoid waiting for the next interval. - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", rdonlyTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RunHealthCheck", rdonlyTablet.Alias) require.NoError(t, err) checkTabletType(t, rdonlyTablet.Alias, "DRAINED") @@ -417,11 +412,11 @@ func TestHealthCheckDrainedStateDoesNotShutdownQueryService(t *testing.T) { require.NoError(t, err) // Restart replication. Tablet will become healthy again. 
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", rdonlyTablet.Alias, "rdonly") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", rdonlyTablet.Alias, "rdonly") require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", rdonlyTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("StartReplication", rdonlyTablet.Alias) require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", rdonlyTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RunHealthCheck", rdonlyTablet.Alias) require.NoError(t, err) checkHealth(t, rdonlyTablet.HTTPPort, false) } @@ -434,7 +429,7 @@ func killTablets(tablets ...*cluster.Vttablet) { defer wg.Done() _ = tablet.VttabletProcess.TearDown() _ = tablet.MysqlctlProcess.Stop() - _ = clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet", tablet.Alias) + _ = clusterInstance.VtctldClientProcess.ExecuteCommand("DeleteTablets", tablet.Alias) }(tablet) } wg.Wait() diff --git a/go/test/endtoend/tabletmanager/tablet_test.go b/go/test/endtoend/tabletmanager/tablet_test.go index 6212b4a418b..830502268d1 100644 --- a/go/test/endtoend/tabletmanager/tablet_test.go +++ b/go/test/endtoend/tabletmanager/tablet_test.go @@ -47,7 +47,7 @@ func TestEnsureDB(t *testing.T) { require.NoError(t, err) // Make it the primary. - err = clusterInstance.VtctlclientProcess.ExecuteCommand("TabletExternallyReparented", tablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("TabletExternallyReparented", tablet.Alias) require.EqualError(t, err, "exit status 1") // It is still NOT_SERVING because the db is read-only. @@ -56,8 +56,8 @@ func TestEnsureDB(t *testing.T) { assert.Contains(t, status, "read-only") // Switch to read-write and verify that we go serving. 
- // Note: for TabletExternallyReparented, we expect SetReadWrite to be called by the user - err = clusterInstance.VtctlclientProcess.ExecuteCommand("SetReadWrite", tablet.Alias) + // Note: for TabletExternallyReparented, we expect SetWritable to be called by the user + err = clusterInstance.VtctldClientProcess.ExecuteCommand("SetWritable", tablet.Alias, "true") require.NoError(t, err) err = tablet.VttabletProcess.WaitForTabletStatus("SERVING") require.NoError(t, err) diff --git a/go/test/endtoend/tabletmanager/throttler_topo/throttler_test.go b/go/test/endtoend/tabletmanager/throttler_topo/throttler_test.go index 7c0f05bdcc2..9824f28ae2b 100644 --- a/go/test/endtoend/tabletmanager/throttler_topo/throttler_test.go +++ b/go/test/endtoend/tabletmanager/throttler_topo/throttler_test.go @@ -370,7 +370,7 @@ func TestLag(t *testing.T) { defer clusterInstance.EnableVTOrcRecoveries(t) t.Run("stopping replication", func(t *testing.T) { - err := clusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", replicaTablet.Alias) + err := clusterInstance.VtctldClientProcess.ExecuteCommand("StopReplication", replicaTablet.Alias) assert.NoError(t, err) }) t.Run("accumulating lag, expecting throttler push back", func(t *testing.T) { @@ -415,7 +415,7 @@ func TestLag(t *testing.T) { }) t.Run("starting replication", func(t *testing.T) { - err := clusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", replicaTablet.Alias) + err := clusterInstance.VtctldClientProcess.ExecuteCommand("StartReplication", replicaTablet.Alias) assert.NoError(t, err) }) t.Run("expecting replication to catch up and throttler check to return OK", func(t *testing.T) { @@ -439,7 +439,7 @@ func TestLag(t *testing.T) { func TestNoReplicas(t *testing.T) { defer cluster.PanicHandler(t) t.Run("changing replica to RDONLY", func(t *testing.T) { - err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", replicaTablet.Alias, "RDONLY") + err := 
clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", replicaTablet.Alias, "RDONLY") assert.NoError(t, err) // This makes no REPLICA servers available. We expect something like: @@ -447,7 +447,7 @@ func TestNoReplicas(t *testing.T) { waitForThrottleCheckStatus(t, primaryTablet, http.StatusOK) }) t.Run("restoring to REPLICA", func(t *testing.T) { - err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", replicaTablet.Alias, "REPLICA") + err := clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", replicaTablet.Alias, "REPLICA") assert.NoError(t, err) waitForThrottleCheckStatus(t, primaryTablet, http.StatusOK) diff --git a/go/test/endtoend/topoconncache/main_test.go b/go/test/endtoend/topoconncache/main_test.go index 4c17481ec84..26eb3918a0b 100644 --- a/go/test/endtoend/topoconncache/main_test.go +++ b/go/test/endtoend/topoconncache/main_test.go @@ -193,14 +193,14 @@ func TestMain(m *testing.M) { return 1, err } } - if err := clusterInstance.VtctlclientProcess.InitializeShard(keyspaceName, shard1.Name, shard1Primary.Cell, shard1Primary.TabletUID); err != nil { + if err := clusterInstance.VtctldClientProcess.InitializeShard(keyspaceName, shard1.Name, shard1Primary.Cell, shard1Primary.TabletUID); err != nil { return 1, err } // run a health check on source replica so it responds to discovery // (for binlog players) and on the source rdonlys (for workers) for _, tablet := range []string{shard1Replica.Alias, shard1Rdonly.Alias} { - if err := clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", tablet); err != nil { + if err := clusterInstance.VtctldClientProcess.ExecuteCommand("RunHealthCheck", tablet); err != nil { return 1, err } } @@ -211,7 +211,7 @@ func TestMain(m *testing.M) { } } - if err := clusterInstance.VtctlclientProcess.InitializeShard(keyspaceName, shard2.Name, shard2Primary.Cell, shard2Primary.TabletUID); err != nil { + if err := 
clusterInstance.VtctldClientProcess.InitializeShard(keyspaceName, shard2.Name, shard2Primary.Cell, shard2Primary.TabletUID); err != nil { return 1, err } @@ -219,14 +219,14 @@ func TestMain(m *testing.M) { return 1, err } - if err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, fmt.Sprintf(sqlSchema, tableName)); err != nil { + if err := clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, fmt.Sprintf(sqlSchema, tableName)); err != nil { return 1, err } - if err := clusterInstance.VtctlclientProcess.ApplyVSchema(keyspaceName, fmt.Sprintf(vSchema, tableName)); err != nil { + if err := clusterInstance.VtctldClientProcess.ApplyVSchema(keyspaceName, fmt.Sprintf(vSchema, tableName)); err != nil { return 1, err } - _ = clusterInstance.VtctlclientProcess.ExecuteCommand("RebuildKeyspaceGraph", keyspaceName) + _ = clusterInstance.VtctldClientProcess.ExecuteCommand("RebuildKeyspaceGraph", keyspaceName) return m.Run(), nil }() diff --git a/go/test/endtoend/topoconncache/topo_conn_cache_test.go b/go/test/endtoend/topoconncache/topo_conn_cache_test.go index 504ca218047..4ffcc309e29 100644 --- a/go/test/endtoend/topoconncache/topo_conn_cache_test.go +++ b/go/test/endtoend/topoconncache/topo_conn_cache_test.go @@ -74,7 +74,7 @@ func deleteCell(t *testing.T) { deleteTablet(t, shard2Rdonly) // Delete cell2 info from topo - res, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("DeleteCellInfo", "--", "--force", cell2) + res, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("DeleteCellInfo", "--force", cell2) t.Log(res) require.NoError(t, err) @@ -111,7 +111,7 @@ func deleteTablet(t *testing.T, tablet *cluster.Vttablet) { }(tablet) wg.Wait() - err := clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet", tablet.Alias) + err := clusterInstance.VtctldClientProcess.ExecuteCommand("DeleteTablets", tablet.Alias) require.NoError(t, err) } diff --git a/go/test/endtoend/vault/vault_test.go 
b/go/test/endtoend/vault/vault_test.go index 684a374707d..f8e19c07a0c 100644 --- a/go/test/endtoend/vault/vault_test.go +++ b/go/test/endtoend/vault/vault_test.go @@ -283,7 +283,7 @@ func initializeClusterLate(t *testing.T) { tablet.MysqlctlProcess.ExtraArgs = append(tablet.MysqlctlProcess.ExtraArgs, mysqlctlArg...) } - err = clusterInstance.VtctlclientProcess.InitShardPrimary(keyspaceName, shard.Name, cell, primary.TabletUID) + err = clusterInstance.VtctldClientProcess.InitShardPrimary(keyspaceName, shard.Name, cell, primary.TabletUID) require.NoError(t, err) err = clusterInstance.StartVTOrc(keyspaceName) diff --git a/go/test/endtoend/versionupgrade/upgrade_test.go b/go/test/endtoend/versionupgrade/upgrade_test.go index 87f7f9e8675..181b5dfc9ad 100644 --- a/go/test/endtoend/versionupgrade/upgrade_test.go +++ b/go/test/endtoend/versionupgrade/upgrade_test.go @@ -148,7 +148,7 @@ func TestDeploySchema(t *testing.T) { { sqlQuery := fmt.Sprintf(createTable, tableName) - result, err := clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, sqlQuery, cluster.VtctlClientParams{DDLStrategy: ""}) + result, err := clusterInstance.VtctldClientProcess.ApplySchemaWithOutput(keyspaceName, sqlQuery, cluster.ApplySchemaParams{DDLStrategy: ""}) require.Nil(t, err, result) } for i := range clusterInstance.Keyspaces[0].Shards { diff --git a/go/test/endtoend/vtgate/createdb_plugin/main_test.go b/go/test/endtoend/vtgate/createdb_plugin/main_test.go index e712fee7b36..5bfec3890b5 100644 --- a/go/test/endtoend/vtgate/createdb_plugin/main_test.go +++ b/go/test/endtoend/vtgate/createdb_plugin/main_test.go @@ -164,8 +164,8 @@ func shutdown(t *testing.T, ksName string) { } require.NoError(t, - clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteKeyspace", "--", "--recursive", ksName)) + clusterInstance.VtctldClientProcess.ExecuteCommand("DeleteKeyspace", "--recursive", ksName)) require.NoError(t, - clusterInstance.VtctlclientProcess.ExecuteCommand("RebuildVSchemaGraph")) 
+ clusterInstance.VtctldClientProcess.ExecuteCommand("RebuildVSchemaGraph")) } diff --git a/go/test/endtoend/vtgate/foreignkey/main_test.go b/go/test/endtoend/vtgate/foreignkey/main_test.go index 483c1d05e80..63590df9247 100644 --- a/go/test/endtoend/vtgate/foreignkey/main_test.go +++ b/go/test/endtoend/vtgate/foreignkey/main_test.go @@ -139,7 +139,7 @@ func TestMain(m *testing.M) { return 1 } - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RebuildVSchemaGraph") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RebuildVSchemaGraph") if err != nil { return 1 } diff --git a/go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go b/go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go index 60eff73b820..341db836fd8 100644 --- a/go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go +++ b/go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go @@ -597,7 +597,7 @@ func ExecuteFKTest(t *testing.T, tcase *testCase) { artifacts := textutil.SplitDelimitedList(row.AsString("artifacts", "")) for _, artifact := range artifacts { t.Run(artifact, func(t *testing.T) { - err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, "drop table if exists "+artifact) + err := clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, "drop table if exists "+artifact) require.NoError(t, err) }) } @@ -781,7 +781,7 @@ func createInitialSchema(t *testing.T, tcase *testCase) { t.Run("dropping tables", func(t *testing.T) { for _, tableName := range reverseTableNames { - err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, "drop table if exists "+tableName) + err := clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, "drop table if exists "+tableName) require.NoError(t, err) } }) @@ -808,7 +808,7 @@ func createInitialSchema(t *testing.T, tcase *testCase) { } b.WriteString(";") } - err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, b.String()) + err := 
clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, b.String()) require.NoError(t, err) }) if tcase.preStatement != "" { @@ -862,7 +862,7 @@ func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy str } } else { var err error - uuid, err = clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, alterStatement, cluster.VtctlClientParams{DDLStrategy: ddlStrategy}) + uuid, err = clusterInstance.VtctldClientProcess.ApplySchemaWithOutput(keyspaceName, alterStatement, cluster.ApplySchemaParams{DDLStrategy: ddlStrategy}) assert.NoError(t, err) } uuid = strings.TrimSpace(uuid) diff --git a/go/test/endtoend/vtgate/gen4/main_test.go b/go/test/endtoend/vtgate/gen4/main_test.go index 378b2d2969e..4c94e8e2ec8 100644 --- a/go/test/endtoend/vtgate/gen4/main_test.go +++ b/go/test/endtoend/vtgate/gen4/main_test.go @@ -102,12 +102,12 @@ func TestMain(m *testing.M) { } // apply routing rules - err = clusterInstance.VtctlclientProcess.ApplyRoutingRules(routingRules) + err = clusterInstance.VtctldClientProcess.ApplyRoutingRules(routingRules) if err != nil { return 1 } - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RebuildVSchemaGraph") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RebuildVSchemaGraph") if err != nil { return 1 } diff --git a/go/test/endtoend/vtgate/main_test.go b/go/test/endtoend/vtgate/main_test.go index 12abcf4dd01..b276508f269 100644 --- a/go/test/endtoend/vtgate/main_test.go +++ b/go/test/endtoend/vtgate/main_test.go @@ -79,12 +79,12 @@ func TestMain(m *testing.M) { return 1 } - err = clusterInstance.VtctlclientProcess.ApplyRoutingRules(routingRules) + err = clusterInstance.VtctldClientProcess.ApplyRoutingRules(routingRules) if err != nil { return 1 } - _, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("RebuildVSchemaGraph") + _, err = clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("RebuildVSchemaGraph") if err != nil { return 1 } diff --git 
a/go/test/endtoend/vtgate/queries/informationschema/main_test.go b/go/test/endtoend/vtgate/queries/informationschema/main_test.go index 06c5b188d18..3696617281e 100644 --- a/go/test/endtoend/vtgate/queries/informationschema/main_test.go +++ b/go/test/endtoend/vtgate/queries/informationschema/main_test.go @@ -78,12 +78,12 @@ func TestMain(m *testing.M) { return 1 } - err = clusterInstance.VtctlclientProcess.ApplyRoutingRules(routingRules) + err = clusterInstance.VtctldClientProcess.ApplyRoutingRules(routingRules) if err != nil { return 1 } - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RebuildVSchemaGraph") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RebuildVSchemaGraph") if err != nil { return 1 } diff --git a/go/test/endtoend/vtgate/queries/lookup_queries/main_test.go b/go/test/endtoend/vtgate/queries/lookup_queries/main_test.go index c385941502a..9486dc194ff 100644 --- a/go/test/endtoend/vtgate/queries/lookup_queries/main_test.go +++ b/go/test/endtoend/vtgate/queries/lookup_queries/main_test.go @@ -74,7 +74,7 @@ func TestMain(m *testing.M) { return 1 } - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RebuildVSchemaGraph") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RebuildVSchemaGraph") if err != nil { return 1 } diff --git a/go/test/endtoend/vtgate/reservedconn/reconnect1/main_test.go b/go/test/endtoend/vtgate/reservedconn/reconnect1/main_test.go index 11325a0f2f8..491ce6bc6ab 100644 --- a/go/test/endtoend/vtgate/reservedconn/reconnect1/main_test.go +++ b/go/test/endtoend/vtgate/reservedconn/reconnect1/main_test.go @@ -133,7 +133,7 @@ func TestServingChange(t *testing.T) { // changing rdonly tablet to spare (non serving). 
rdonlyTablet := clusterInstance.Keyspaces[0].Shards[0].Rdonly() - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", rdonlyTablet.Alias, "replica") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", rdonlyTablet.Alias, "replica") require.NoError(t, err) rdonlyTablet.Type = "replica" @@ -143,12 +143,12 @@ func TestServingChange(t *testing.T) { // changing replica tablet to rdonly to make rdonly available for serving. replicaTablet := clusterInstance.Keyspaces[0].Shards[0].Replica() - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", replicaTablet.Alias, "rdonly") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", replicaTablet.Alias, "rdonly") require.NoError(t, err) replicaTablet.Type = "rdonly" // to see/make the new rdonly available - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Ping", replicaTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("PingTablet", replicaTablet.Alias) require.NoError(t, err) // this should pass now as there is rdonly present @@ -174,7 +174,7 @@ func TestServingChangeStreaming(t *testing.T) { // changing rdonly tablet to spare (non serving). rdonlyTablet := clusterInstance.Keyspaces[0].Shards[0].Rdonly() - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", rdonlyTablet.Alias, "replica") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", rdonlyTablet.Alias, "replica") require.NoError(t, err) rdonlyTablet.Type = "replica" @@ -192,12 +192,12 @@ func TestServingChangeStreaming(t *testing.T) { // changing replica tablet to rdonly to make rdonly available for serving. 
replicaTablet := clusterInstance.Keyspaces[0].Shards[0].Replica() - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", replicaTablet.Alias, "rdonly") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", replicaTablet.Alias, "rdonly") require.NoError(t, err) replicaTablet.Type = "rdonly" // to see/make the new rdonly available - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Ping", replicaTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("PingTablet", replicaTablet.Alias) require.NoError(t, err) // this should pass now as there is rdonly present diff --git a/go/test/endtoend/vtgate/reservedconn/reconnect2/main_test.go b/go/test/endtoend/vtgate/reservedconn/reconnect2/main_test.go index b66bb15dbd5..a448574c282 100644 --- a/go/test/endtoend/vtgate/reservedconn/reconnect2/main_test.go +++ b/go/test/endtoend/vtgate/reservedconn/reconnect2/main_test.go @@ -129,7 +129,7 @@ func TestTabletChange(t *testing.T) { utils.Exec(t, conn, "select * from test") // Change Primary - err = clusterInstance.VtctlclientProcess.ExecuteCommand("PlannedReparentShard", "--", "--keyspace_shard", fmt.Sprintf("%s/%s", keyspaceName, "-80")) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("PlannedReparentShard", fmt.Sprintf("%s/%s", keyspaceName, "-80")) require.NoError(t, err) // this should pass as there is a new primary tablet and is serving. @@ -150,7 +150,7 @@ func TestTabletChangeStreaming(t *testing.T) { utils.Exec(t, conn, "select * from test") // Change Primary - err = clusterInstance.VtctlclientProcess.ExecuteCommand("PlannedReparentShard", "--", "--keyspace_shard", fmt.Sprintf("%s/%s", keyspaceName, "-80")) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("PlannedReparentShard", fmt.Sprintf("%s/%s", keyspaceName, "-80")) require.NoError(t, err) // this should pass as there is a new primary tablet and is serving. 
diff --git a/go/test/endtoend/vtgate/reservedconn/reconnect3/main_test.go b/go/test/endtoend/vtgate/reservedconn/reconnect3/main_test.go index 25af85acc00..677c24666b2 100644 --- a/go/test/endtoend/vtgate/reservedconn/reconnect3/main_test.go +++ b/go/test/endtoend/vtgate/reservedconn/reconnect3/main_test.go @@ -102,7 +102,7 @@ func TestMysqlDownServingChange(t *testing.T) { require.NoError(t, primaryTablet.MysqlctlProcess.Stop()) require.NoError(t, - clusterInstance.VtctlclientProcess.ExecuteCommand("EmergencyReparentShard", "--", "--keyspace_shard", "ks/0")) + clusterInstance.VtctldClientProcess.ExecuteCommand("EmergencyReparentShard", "ks/0")) // This should work without any error. _ = utils.Exec(t, conn, "select /*vt+ PLANNER=gen4 */ * from test") diff --git a/go/test/endtoend/vtgate/reservedconn/reconnect4/main_test.go b/go/test/endtoend/vtgate/reservedconn/reconnect4/main_test.go index 28367cd597a..1dc53a89506 100644 --- a/go/test/endtoend/vtgate/reservedconn/reconnect4/main_test.go +++ b/go/test/endtoend/vtgate/reservedconn/reconnect4/main_test.go @@ -104,7 +104,7 @@ func TestVttabletDownServingChange(t *testing.T) { // kill vttablet process _ = primaryTablet.VttabletProcess.TearDown() require.NoError(t, - clusterInstance.VtctlclientProcess.ExecuteCommand("EmergencyReparentShard", "--", "--keyspace_shard", "ks/0")) + clusterInstance.VtctldClientProcess.ExecuteCommand("EmergencyReparentShard", "ks/0")) // This should work without any error. 
_ = utils.Exec(t, conn, "select /*vt+ PLANNER=gen4 */ * from test") diff --git a/go/test/endtoend/vtgate/schema/schema_test.go b/go/test/endtoend/vtgate/schema/schema_test.go index 04d91d8d978..14b6c13034e 100644 --- a/go/test/endtoend/vtgate/schema/schema_test.go +++ b/go/test/endtoend/vtgate/schema/schema_test.go @@ -120,7 +120,7 @@ func testWithInitialSchema(t *testing.T) { var sqlQuery = "" // nolint for i := 0; i < totalTableCount; i++ { sqlQuery = fmt.Sprintf(createTable, fmt.Sprintf("vt_select_test_%02d", i)) - err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, sqlQuery) + err := clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, sqlQuery) require.Nil(t, err) } @@ -135,7 +135,7 @@ func testWithInitialSchema(t *testing.T) { // testWithAlterSchema if we alter schema and then apply, the resultant schema should match across shards func testWithAlterSchema(t *testing.T) { sqlQuery := fmt.Sprintf(alterTable, fmt.Sprintf("vt_select_test_%02d", 3), "msg") - err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, sqlQuery) + err := clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, sqlQuery) require.Nil(t, err) matchSchema(t, clusterInstance.Keyspaces[0].Shards[0].Vttablets[0].VttabletProcess.TabletPath, clusterInstance.Keyspaces[0].Shards[1].Vttablets[0].VttabletProcess.TabletPath) } @@ -143,7 +143,7 @@ func testWithAlterSchema(t *testing.T) { // testWithAlterDatabase tests that ALTER DATABASE is accepted by the validator. 
func testWithAlterDatabase(t *testing.T) { sql := "create database alter_database_test; alter database alter_database_test default character set = utf8mb4; drop database alter_database_test" - err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, sql) + err := clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, sql) assert.NoError(t, err) } @@ -157,7 +157,7 @@ func testWithAlterDatabase(t *testing.T) { // See: https://github.com/vitessio/vitess/issues/1731#issuecomment-222914389 func testWithDropCreateSchema(t *testing.T) { dropCreateTable := fmt.Sprintf("DROP TABLE vt_select_test_%02d ;", 2) + fmt.Sprintf(createTable, fmt.Sprintf("vt_select_test_%02d", 2)) - err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, dropCreateTable) + err := clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, dropCreateTable) require.NoError(t, err) checkTables(t, totalTableCount) } @@ -186,10 +186,10 @@ func testWithAutoSchemaFromChangeDir(t *testing.T) { // matchSchema schema for supplied tablets should match func matchSchema(t *testing.T, firstTablet string, secondTablet string) { - firstShardSchema, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetSchema", firstTablet) + firstShardSchema, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("GetSchema", firstTablet) require.Nil(t, err) - secondShardSchema, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetSchema", secondTablet) + secondShardSchema, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("GetSchema", secondTablet) require.Nil(t, err) assert.Equal(t, firstShardSchema, secondShardSchema) @@ -203,12 +203,12 @@ func matchSchema(t *testing.T, firstTablet string, secondTablet string) { // is the MySQL behavior the user expects. 
func testDropNonExistentTables(t *testing.T) { dropNonExistentTable := "DROP TABLE nonexistent_table;" - output, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ApplySchema", "--", "--sql", dropNonExistentTable, keyspaceName) + output, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("ApplySchema", "--sql", dropNonExistentTable, keyspaceName) require.Error(t, err) assert.True(t, strings.Contains(output, "Unknown table")) dropIfExists := "DROP TABLE IF EXISTS nonexistent_table;" - err = clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, dropIfExists) + err = clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, dropIfExists) require.Nil(t, err) checkTables(t, totalTableCount) @@ -219,7 +219,7 @@ func testDropNonExistentTables(t *testing.T) { func testCreateInvalidView(t *testing.T) { for _, ddlStrategy := range []string{"direct", "direct -allow-zero-in-date"} { createInvalidView := "CREATE OR REPLACE VIEW invalid_view AS SELECT * FROM nonexistent_table;" - output, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ApplySchema", "--", "--ddl_strategy", ddlStrategy, "--sql", createInvalidView, keyspaceName) + output, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("ApplySchema", "--ddl-strategy", ddlStrategy, "--sql", createInvalidView, keyspaceName) require.Error(t, err) assert.Contains(t, output, "doesn't exist (errno 1146)") } @@ -228,25 +228,25 @@ func testCreateInvalidView(t *testing.T) { func testApplySchemaBatch(t *testing.T) { { sqls := "create table batch1(id int primary key);create table batch2(id int primary key);create table batch3(id int primary key);create table batch4(id int primary key);create table batch5(id int primary key);" - _, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ApplySchema", "--", "--sql", sqls, "--batch_size", "2", keyspaceName) + _, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("ApplySchema", 
"--sql", sqls, "--batch-size", "2", keyspaceName) require.NoError(t, err) checkTables(t, totalTableCount+5) } { sqls := "drop table batch1; drop table batch2; drop table batch3; drop table batch4; drop table batch5" - _, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ApplySchema", "--", "--sql", sqls, keyspaceName) + _, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("ApplySchema", "--sql", sqls, keyspaceName) require.NoError(t, err) checkTables(t, totalTableCount) } { sqls := "create table batch1(id int primary key);create table batch2(id int primary key);create table batch3(id int primary key);create table batch4(id int primary key);create table batch5(id int primary key);" - _, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ApplySchema", "--", "--ddl_strategy", "direct --allow-zero-in-date", "--sql", sqls, "--batch_size", "2", keyspaceName) + _, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("ApplySchema", "--ddl-strategy", "direct --allow-zero-in-date", "--sql", sqls, "--batch-size", "2", keyspaceName) require.NoError(t, err) checkTables(t, totalTableCount+5) } { sqls := "drop table batch1; drop table batch2; drop table batch3; drop table batch4; drop table batch5" - _, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ApplySchema", "--", "--sql", sqls, keyspaceName) + _, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("ApplySchema", "--sql", sqls, keyspaceName) require.NoError(t, err) checkTables(t, totalTableCount) } @@ -291,7 +291,7 @@ func testCopySchemaShardWithDifferentDB(t *testing.T, shard int) { source := fmt.Sprintf("%s/0", keyspaceName) tabletAlias := clusterInstance.Keyspaces[0].Shards[shard].Vttablets[0].VttabletProcess.TabletPath - schema, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetSchema", tabletAlias) + schema, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("GetSchema", 
tabletAlias) require.Nil(t, err) resultMap := make(map[string]any) @@ -305,7 +305,7 @@ func testCopySchemaShardWithDifferentDB(t *testing.T, shard int) { // (The different charset won't be corrected on the destination shard // because we use "CREATE DATABASE IF NOT EXISTS" and this doesn't fail if // there are differences in the options e.g. the character set.) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ExecuteFetchAsDba", "--", "--json", tabletAlias, "ALTER DATABASE vt_ks CHARACTER SET latin1") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ExecuteFetchAsDBA", "--json", tabletAlias, "ALTER DATABASE vt_ks CHARACTER SET latin1") require.Nil(t, err) output, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("CopySchemaShard", source, fmt.Sprintf("%s/%d", keyspaceName, shard)) diff --git a/go/test/endtoend/vtgate/schematracker/sharded_prs/st_sharded_test.go b/go/test/endtoend/vtgate/schematracker/sharded_prs/st_sharded_test.go index 6ff8e69bb52..09bd97eb9fe 100644 --- a/go/test/endtoend/vtgate/schematracker/sharded_prs/st_sharded_test.go +++ b/go/test/endtoend/vtgate/schematracker/sharded_prs/st_sharded_test.go @@ -181,7 +181,7 @@ func TestMain(m *testing.M) { // This is supposed to change the primary tablet in the shards, meaning that a different tablet // will be responsible for sending schema tracking updates. 
for _, shard := range clusterInstance.Keyspaces[0].Shards { - err := clusterInstance.VtctlclientProcess.InitializeShard(KeyspaceName, shard.Name, Cell, shard.Vttablets[1].TabletUID) + err := clusterInstance.VtctldClientProcess.InitializeShard(KeyspaceName, shard.Name, Cell, shard.Vttablets[1].TabletUID) if err != nil { fmt.Println(err) return 1 diff --git a/go/test/endtoend/vtgate/tablet_healthcheck/reparent_test.go b/go/test/endtoend/vtgate/tablet_healthcheck/reparent_test.go index dbc46bdda77..d6357ce8f2a 100644 --- a/go/test/endtoend/vtgate/tablet_healthcheck/reparent_test.go +++ b/go/test/endtoend/vtgate/tablet_healthcheck/reparent_test.go @@ -145,11 +145,11 @@ func TestHealthCheckExternallyReparentNewTablet(t *testing.T) { tablet := addTablet(t, reparentTabletUID, reparentTabletType) // promote the new tablet to the primary - err = clusterInstance.VtctlclientProcess.ExecuteCommand("TabletExternallyReparented", tablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("TabletExternallyReparented", tablet.Alias) require.NoError(t, err) // update the new primary tablet to be read-write - err = clusterInstance.VtctlclientProcess.ExecuteCommand("SetReadWrite", tablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("SetWritable", tablet.Alias, "true") require.NoError(t, err) // wait for the vtgate to finish updating the new primary tablet @@ -236,7 +236,7 @@ func deleteTablet(t *testing.T, tablet *cluster.Vttablet) { }(tablet) wg.Wait() - err := clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet", tablet.Alias) + err := clusterInstance.VtctldClientProcess.ExecuteCommand("DeleteTablets", tablet.Alias) require.NoError(t, err) t.Logf("Deleted tablet: %s", tablet.Alias) diff --git a/go/test/endtoend/vtgate/tablet_healthcheck_cache/correctness_test.go b/go/test/endtoend/vtgate/tablet_healthcheck_cache/correctness_test.go index 9386c307a12..50529d9fdf9 100644 --- 
a/go/test/endtoend/vtgate/tablet_healthcheck_cache/correctness_test.go +++ b/go/test/endtoend/vtgate/tablet_healthcheck_cache/correctness_test.go @@ -234,7 +234,7 @@ func deleteTablet(t *testing.T, tablet *cluster.Vttablet) { }(tablet) wg.Wait() - err := clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet", tablet.Alias) + err := clusterInstance.VtctldClientProcess.ExecuteCommand("DeleteTablets", tablet.Alias) require.Nil(t, err) t.Logf("Deleted tablet: %s", tablet.Alias) diff --git a/go/test/endtoend/vtorc/api/api_test.go b/go/test/endtoend/vtorc/api/api_test.go index 7dd5c50eefa..7b277fd7a0f 100644 --- a/go/test/endtoend/vtorc/api/api_test.go +++ b/go/test/endtoend/vtorc/api/api_test.go @@ -106,7 +106,7 @@ func TestAPIEndpoints(t *testing.T) { t.Run("Replication Analysis API", func(t *testing.T) { // use vtctlclient to stop replication - _, err := clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("StopReplication", replica.Alias) + _, err := clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("StopReplication", replica.Alias) require.NoError(t, err) // We know VTOrc won't fix this since we disabled global recoveries! 
diff --git a/go/test/endtoend/vtorc/general/vtorc_test.go b/go/test/endtoend/vtorc/general/vtorc_test.go index f7deffe20b3..d79e2964f3e 100644 --- a/go/test/endtoend/vtorc/general/vtorc_test.go +++ b/go/test/endtoend/vtorc/general/vtorc_test.go @@ -163,7 +163,7 @@ func TestVTOrcRepairs(t *testing.T) { t.Run("StopReplication", func(t *testing.T) { // use vtctlclient to stop replication - _, err := clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("StopReplication", replica.Alias) + _, err := clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("StopReplication", replica.Alias) require.NoError(t, err) // check replication is setup correctly @@ -300,7 +300,7 @@ func TestRepairAfterTER(t *testing.T) { } // TER to other tablet - _, err = clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("TabletExternallyReparented", newPrimary.Alias) + _, err = clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("TabletExternallyReparented", newPrimary.Alias) require.NoError(t, err) utils.CheckReplication(t, clusterInfo, newPrimary, []*cluster.Vttablet{curPrimary}, 15*time.Second) @@ -404,11 +404,11 @@ func TestVTOrcWithPrs(t *testing.T) { // check that the replication is setup correctly before we failover utils.CheckReplication(t, clusterInfo, curPrimary, shard0.Vttablets, 10*time.Second) - output, err := clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommandWithOutput( - "PlannedReparentShard", "--", - "--keyspace_shard", fmt.Sprintf("%s/%s", keyspace.Name, shard0.Name), - "--wait_replicas_timeout", "31s", - "--new_primary", replica.Alias) + output, err := clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommandWithOutput( + "PlannedReparentShard", + fmt.Sprintf("%s/%s", keyspace.Name, shard0.Name), + "--wait-replicas-timeout", "31s", + "--new-primary", replica.Alias) require.NoError(t, err, "error in PlannedReparentShard output - %s", output) time.Sleep(40 * time.Second) diff --git 
a/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go b/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go index e226e8d13ae..d91dadddcb4 100644 --- a/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go +++ b/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go @@ -113,7 +113,7 @@ func TestDownPrimaryBeforeVTOrc(t *testing.T) { curPrimary := shard0.Vttablets[0] // Promote the first tablet as the primary - err := clusterInfo.ClusterInstance.VtctlclientProcess.InitializeShard(keyspace.Name, shard0.Name, clusterInfo.ClusterInstance.Cell, curPrimary.TabletUID) + err := clusterInfo.ClusterInstance.VtctldClientProcess.InitializeShard(keyspace.Name, shard0.Name, clusterInfo.ClusterInstance.Cell, curPrimary.TabletUID) require.NoError(t, err) // find the replica and rdonly tablets @@ -442,9 +442,9 @@ func TestLostRdonlyOnPrimaryFailure(t *testing.T) { utils.DisableGlobalRecoveries(t, clusterInfo.ClusterInstance.VTOrcProcesses[0]) // stop replication on the replica and rdonly. - err := clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", replica.Alias) + err := clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommand("StopReplication", replica.Alias) require.NoError(t, err) - err = clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", rdonly.Alias) + err = clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommand("StopReplication", rdonly.Alias) require.NoError(t, err) // check that aheadRdonly is able to replicate. We also want to add some queries to aheadRdonly which will not be there in replica and rdonly @@ -669,7 +669,7 @@ func TestDownPrimaryPromotionRuleWithLag(t *testing.T) { utils.DisableGlobalRecoveries(t, clusterInfo.ClusterInstance.VTOrcProcesses[0]) // stop replication on the crossCellReplica. 
- err := clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", crossCellReplica.Alias) + err := clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommand("StopReplication", crossCellReplica.Alias) require.NoError(t, err) // check that rdonly and replica are able to replicate. We also want to add some queries to replica which will not be there in crossCellReplica @@ -679,7 +679,7 @@ func TestDownPrimaryPromotionRuleWithLag(t *testing.T) { utils.ResetPrimaryLogs(t, curPrimary) // start replication back on the crossCellReplica. - err = clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", crossCellReplica.Alias) + err = clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommand("StartReplication", crossCellReplica.Alias) require.NoError(t, err) // enable recoveries back on vtorc so that it can repair @@ -750,7 +750,7 @@ func TestDownPrimaryPromotionRuleWithLagCrossCenter(t *testing.T) { utils.DisableGlobalRecoveries(t, clusterInfo.ClusterInstance.VTOrcProcesses[0]) // stop replication on the replica. - err := clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", replica.Alias) + err := clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommand("StopReplication", replica.Alias) require.NoError(t, err) // check that rdonly and crossCellReplica are able to replicate. We also want to add some queries to crossCenterReplica which will not be there in replica @@ -760,7 +760,7 @@ func TestDownPrimaryPromotionRuleWithLagCrossCenter(t *testing.T) { utils.ResetPrimaryLogs(t, curPrimary) // start replication back on the replica. 
- err = clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", replica.Alias) + err = clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommand("StartReplication", replica.Alias) require.NoError(t, err) // enable recoveries back on vtorc so that it can repair diff --git a/go/test/endtoend/vtorc/utils/utils.go b/go/test/endtoend/vtorc/utils/utils.go index 11294319658..dca2c7b1e26 100644 --- a/go/test/endtoend/vtorc/utils/utils.go +++ b/go/test/endtoend/vtorc/utils/utils.go @@ -33,7 +33,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/json2" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/endtoend/cluster" @@ -206,7 +205,7 @@ func shutdownVttablets(clusterInfo *VTOrcClusterInfo) error { // Remove the tablet record for this tablet } // Ignoring error here because some tests delete tablets themselves. - _ = clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet", vttablet.Alias) + _ = clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommand("DeleteTablets", vttablet.Alias) } clusterInfo.ClusterInstance.Keyspaces[0].Shards[0].Vttablets = nil return nil @@ -352,19 +351,16 @@ func ShardPrimaryTablet(t *testing.T, clusterInfo *VTOrcClusterInfo, keyspace *c if now.Sub(start) > time.Second*60 { assert.FailNow(t, "failed to elect primary before timeout") } - result, err := clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetShard", fmt.Sprintf("%s/%s", keyspace.Name, shard.Name)) - assert.Nil(t, err) + si, err := clusterInfo.ClusterInstance.VtctldClientProcess.GetShard(keyspace.Name, shard.Name) + require.NoError(t, err) - var shardInfo topodatapb.Shard - err = json2.Unmarshal([]byte(result), &shardInfo) - assert.Nil(t, err) - if shardInfo.PrimaryAlias == nil { + if si.Shard.PrimaryAlias == nil { log.Warningf("Shard %v/%v has no primary yet, sleep for 1 second\n", keyspace.Name, shard.Name) 
time.Sleep(time.Second) continue } for _, tablet := range shard.Vttablets { - if tablet.Alias == topoproto.TabletAliasString(shardInfo.PrimaryAlias) { + if tablet.Alias == topoproto.TabletAliasString(si.Shard.PrimaryAlias) { return tablet } } @@ -381,12 +377,8 @@ func CheckPrimaryTablet(t *testing.T, clusterInfo *VTOrcClusterInfo, tablet *clu //log.Exitf("error") assert.FailNow(t, "failed to elect primary before timeout") } - result, err := clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetTablet", tablet.Alias) - require.NoError(t, err) - var tabletInfo topodatapb.Tablet - err = json2.Unmarshal([]byte(result), &tabletInfo) + tabletInfo, err := clusterInfo.ClusterInstance.VtctldClientProcess.GetTablet(tablet.Alias) require.NoError(t, err) - if topodatapb.TabletType_PRIMARY != tabletInfo.GetType() { log.Warningf("Tablet %v is not primary yet, sleep for 1 second\n", tablet.Alias) time.Sleep(time.Second) @@ -535,9 +527,9 @@ func validateTopology(t *testing.T, clusterInfo *VTOrcClusterInfo, pingTablets b var err error var output string if pingTablets { - output, err = clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("Validate", "--", "--ping-tablets=true") + output, err = clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("Validate", "--ping-tablets") } else { - output, err = clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("Validate") + output, err = clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("Validate") } if err != nil { log.Warningf("Validate failed, retrying, output - %s", output) From e653d822082c1c267e1a31be5cf898f74915d11b Mon Sep 17 00:00:00 2001 From: Andrew Mason Date: Sat, 17 Feb 2024 13:57:40 -0500 Subject: [PATCH 12/17] fix validation assertion to match new output Signed-off-by: Andrew Mason --- go/test/endtoend/reparent/utils/utils.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/go/test/endtoend/reparent/utils/utils.go b/go/test/endtoend/reparent/utils/utils.go index a648b0174d3..1288c84c1d3 100644 --- a/go/test/endtoend/reparent/utils/utils.go +++ b/go/test/endtoend/reparent/utils/utils.go @@ -376,7 +376,7 @@ func ValidateTopology(t *testing.T, clusterInstance *cluster.LocalProcessCluster args = append(args, "--ping-tablets") } out, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput(args...) - require.Empty(t, out) + require.Contains(t, out, "no issues found") require.NoError(t, err) } From ecd73a31d8ead2374e8b4c3e63da42cb4fcbaeb0 Mon Sep 17 00:00:00 2001 From: Andrew Mason Date: Sat, 17 Feb 2024 16:13:02 -0500 Subject: [PATCH 13/17] whoops Signed-off-by: Andrew Mason --- go/test/endtoend/reparent/utils/utils.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go/test/endtoend/reparent/utils/utils.go b/go/test/endtoend/reparent/utils/utils.go index 1288c84c1d3..790fd0028e2 100644 --- a/go/test/endtoend/reparent/utils/utils.go +++ b/go/test/endtoend/reparent/utils/utils.go @@ -332,7 +332,7 @@ func Ers(clusterInstance *cluster.LocalProcessCluster, tab *cluster.Vttablet, to func ErsIgnoreTablet(clusterInstance *cluster.LocalProcessCluster, tab *cluster.Vttablet, timeout, waitReplicasTimeout string, tabletsToIgnore []*cluster.Vttablet, preventCrossCellPromotion bool) (string, error) { var args []string if timeout != "" { - args = append(args, "--action-timeout", timeout) + args = append(args, "--action_timeout", timeout) } args = append(args, "EmergencyReparentShard", fmt.Sprintf("%s/%s", KeyspaceName, ShardName)) if tab != nil { From 6f922388ebbd9c984010bcc30acaadae188f9ca4 Mon Sep 17 00:00:00 2001 From: Andrew Mason Date: Sun, 18 Feb 2024 11:52:16 -0500 Subject: [PATCH 14/17] replace GetKeyspace Signed-off-by: Andrew Mason --- .../endtoend/cluster/vtctldclient_process.go | 15 ++++++++++++++ go/test/endtoend/keyspace/keyspace_test.go | 20 ++++++------------- 2 files changed, 21 insertions(+), 14 
deletions(-) diff --git a/go/test/endtoend/cluster/vtctldclient_process.go b/go/test/endtoend/cluster/vtctldclient_process.go index 88f7490d89c..6d6b64c15d1 100644 --- a/go/test/endtoend/cluster/vtctldclient_process.go +++ b/go/test/endtoend/cluster/vtctldclient_process.go @@ -206,6 +206,21 @@ func (vtctldclient *VtctldClientProcess) CreateKeyspace(keyspaceName string, sid return err } +// GetKeyspace executes the vtctldclient command to get a keyspace, and parses the response. +func (vtctldclient *VtctldClientProcess) GetKeyspace(keyspace string) (*vtctldatapb.Keyspace, error) { + data, err := vtctldclient.ExecuteCommandWithOutput("GetKeyspace", keyspace) + if err != nil { + return nil, err + } + + var ks vtctldatapb.Keyspace + err = json2.Unmarshal([]byte(data), &ks) + if err != nil { + return nil, vterrors.Wrapf(err, "failed to parse keyspace output: %s", data) + } + return &ks, nil +} + // GetShard executes the vtctldclient command to get a shard, and parses the response. func (vtctldclient *VtctldClientProcess) GetShard(keyspace string, shard string) (*vtctldatapb.Shard, error) { data, err := vtctldclient.ExecuteCommandWithOutput("GetShard", fmt.Sprintf("%s/%s", keyspace, shard)) diff --git a/go/test/endtoend/keyspace/keyspace_test.go b/go/test/endtoend/keyspace/keyspace_test.go index 8ac7759feb7..4a84ec928df 100644 --- a/go/test/endtoend/keyspace/keyspace_test.go +++ b/go/test/endtoend/keyspace/keyspace_test.go @@ -152,26 +152,23 @@ func TestDurabilityPolicyField(t *testing.T) { out, err = vtctldClientProcess.ExecuteCommandWithOutput("DeleteKeyspace", "ks_durability") require.NoError(t, err, out) - out, err = clusterForKSTest.VtctlProcess.ExecuteCommandWithOutput("CreateKeyspace", "--", "--durability-policy=semi_sync", "ks_durability") + out, err = clusterForKSTest.VtctldClientProcess.ExecuteCommandWithOutput("CreateKeyspace", "--durability-policy=semi_sync", "ks_durability") require.NoError(t, err, out) checkDurabilityPolicy(t, "semi_sync") - out, err =
clusterForKSTest.VtctlProcess.ExecuteCommandWithOutput("DeleteKeyspace", "ks_durability") + out, err = clusterForKSTest.VtctldClientProcess.ExecuteCommandWithOutput("DeleteKeyspace", "ks_durability") require.NoError(t, err, out) } func checkDurabilityPolicy(t *testing.T, durabilityPolicy string) { - var keyspace topodata.Keyspace - out, err := clusterForKSTest.VtctlclientProcess.ExecuteCommandWithOutput("GetKeyspace", "ks_durability") - require.NoError(t, err, out) - err = json.Unmarshal([]byte(out), &keyspace) + ks, err := clusterForKSTest.VtctldClientProcess.GetKeyspace("ks_durability") require.NoError(t, err) - require.Equal(t, keyspace.DurabilityPolicy, durabilityPolicy) + require.Equal(t, ks.Keyspace.DurabilityPolicy, durabilityPolicy) } func TestGetSrvKeyspaceNames(t *testing.T) { defer cluster.PanicHandler(t) - output, err := clusterForKSTest.VtctlclientProcess.ExecuteCommandWithOutput("GetSrvKeyspaceNames", cell) + output, err := clusterForKSTest.VtctldClientProcess.ExecuteCommandWithOutput("GetSrvKeyspaceNames", cell) require.Nil(t, err) assert.Contains(t, strings.Split(output, "\n"), keyspaceUnshardedName) assert.Contains(t, strings.Split(output, "\n"), keyspaceShardedName) @@ -218,12 +215,7 @@ func TestShardNames(t *testing.T) { func TestGetKeyspace(t *testing.T) { defer cluster.PanicHandler(t) - output, err := clusterForKSTest.VtctlclientProcess.ExecuteCommandWithOutput("GetKeyspace", keyspaceUnshardedName) - require.Nil(t, err) - - var keyspace topodata.Keyspace - - err = json.Unmarshal([]byte(output), &keyspace) + _, err := clusterForKSTest.VtctldClientProcess.GetKeyspace(keyspaceUnshardedName) require.Nil(t, err) } From 22aa6894102e2166ff3b8be98fffa35e70bb8fd5 Mon Sep 17 00:00:00 2001 From: Andrew Mason Date: Mon, 19 Feb 2024 12:17:47 -0500 Subject: [PATCH 15/17] ugh Signed-off-by: Andrew Mason --- go/test/endtoend/tabletmanager/commands_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/go/test/endtoend/tabletmanager/commands_test.go b/go/test/endtoend/tabletmanager/commands_test.go index 3967b36a1d3..537a3b9d0fc 100644 --- a/go/test/endtoend/tabletmanager/commands_test.go +++ b/go/test/endtoend/tabletmanager/commands_test.go @@ -148,7 +148,7 @@ func TestActionAndTimeout(t *testing.T) { time.Sleep(1 * time.Second) // try a frontend RefreshState that should timeout as the tablet is busy running the other one - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RefreshState", primaryTablet.Alias, "--wait-timeout", "2s") + err = clusterInstance.VtctlclientProcess.ExecuteCommand("RefreshState", primaryTablet.Alias, "--wait_timeout", "2s") assert.Error(t, err, "timeout as tablet is in Sleep") } From feeb7f99da6d809f990fb273dac7a5ef52791f11 Mon Sep 17 00:00:00 2001 From: Andrew Mason Date: Tue, 20 Feb 2024 08:54:29 -0500 Subject: [PATCH 16/17] fix GetSrvKeyspaceNames test Signed-off-by: Andrew Mason --- go/test/endtoend/keyspace/keyspace_test.go | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/go/test/endtoend/keyspace/keyspace_test.go b/go/test/endtoend/keyspace/keyspace_test.go index 88f6868cf93..9578b816778 100644 --- a/go/test/endtoend/keyspace/keyspace_test.go +++ b/go/test/endtoend/keyspace/keyspace_test.go @@ -21,10 +21,10 @@ import ( "encoding/json" "flag" "os" - "strings" "testing" "vitess.io/vitess/go/constants/sidecar" + "vitess.io/vitess/go/json2" "vitess.io/vitess/go/vt/key" "github.com/stretchr/testify/assert" @@ -32,6 +32,7 @@ import ( "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/vt/proto/topodata" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" ) var ( @@ -168,10 +169,15 @@ func checkDurabilityPolicy(t *testing.T, durabilityPolicy string) { func TestGetSrvKeyspaceNames(t *testing.T) { defer cluster.PanicHandler(t) - output, err := clusterForKSTest.VtctldClientProcess.ExecuteCommandWithOutput("GetSrvKeyspaceNames", cell) + data, err := 
clusterForKSTest.VtctldClientProcess.ExecuteCommandWithOutput("GetSrvKeyspaceNames", cell) require.Nil(t, err) - assert.Contains(t, strings.Split(output, "\n"), keyspaceUnshardedName) - assert.Contains(t, strings.Split(output, "\n"), keyspaceShardedName) + + var namesByCell = map[string]*vtctldatapb.GetSrvKeyspaceNamesResponse_NameList{} + err = json2.Unmarshal([]byte(data), &namesByCell) + require.NoError(t, err) + + assert.Contains(t, namesByCell[cell].Names, keyspaceUnshardedName) + assert.Contains(t, namesByCell[cell].Names, keyspaceShardedName) } func TestGetSrvKeyspacePartitions(t *testing.T) { From de1ff633b24076996a9a8f3d3d5f49e7efc6aae8 Mon Sep 17 00:00:00 2001 From: Andrew Mason Date: Tue, 20 Feb 2024 08:55:26 -0500 Subject: [PATCH 17/17] tidy imports Signed-off-by: Andrew Mason --- go/test/endtoend/keyspace/keyspace_test.go | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/go/test/endtoend/keyspace/keyspace_test.go b/go/test/endtoend/keyspace/keyspace_test.go index 9578b816778..7f7d4198135 100644 --- a/go/test/endtoend/keyspace/keyspace_test.go +++ b/go/test/endtoend/keyspace/keyspace_test.go @@ -23,15 +23,15 @@ import ( "os" "testing" - "vitess.io/vitess/go/constants/sidecar" - "vitess.io/vitess/go/json2" - "vitess.io/vitess/go/vt/key" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/constants/sidecar" + "vitess.io/vitess/go/json2" "vitess.io/vitess/go/test/endtoend/cluster" - "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/key" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" ) @@ -42,7 +42,7 @@ var ( cell = "zone1" cell2 = "zone2" hostname = "localhost" - servedTypes = map[topodata.TabletType]bool{topodata.TabletType_PRIMARY: true, topodata.TabletType_REPLICA: true, topodata.TabletType_RDONLY: true} + servedTypes = map[topodatapb.TabletType]bool{topodatapb.TabletType_PRIMARY: true, 
topodatapb.TabletType_REPLICA: true, topodatapb.TabletType_RDONLY: true} sqlSchema = `create table vt_insert_test ( id bigint auto_increment, msg varchar(64), @@ -213,7 +213,7 @@ func TestShardNames(t *testing.T) { defer cluster.PanicHandler(t) output, err := clusterForKSTest.VtctlclientProcess.ExecuteCommandWithOutput("GetSrvKeyspace", cell, keyspaceShardedName) require.Nil(t, err) - var srvKeyspace topodata.SrvKeyspace + var srvKeyspace topodatapb.SrvKeyspace err = json.Unmarshal([]byte(output), &srvKeyspace) require.Nil(t, err) @@ -388,7 +388,7 @@ func TestKeyspaceToShardName(t *testing.T) { // for each served type PRIMARY REPLICA RDONLY, the shard ref count should match for _, partition := range srvKeyspace.Partitions { - if partition.ServedType == topodata.TabletType_PRIMARY { + if partition.ServedType == topodatapb.TabletType_PRIMARY { for _, shardRef := range partition.ShardReferences { shardKIDs := shardKIdMap[shardRef.Name] for _, kid := range shardKIDs { @@ -403,7 +403,7 @@ func TestKeyspaceToShardName(t *testing.T) { srvKeyspace = getSrvKeyspace(t, cell, keyspaceUnshardedName) for _, partition := range srvKeyspace.Partitions { - if partition.ServedType == topodata.TabletType_PRIMARY { + if partition.ServedType == topodatapb.TabletType_PRIMARY { for _, shardRef := range partition.ShardReferences { assert.Equal(t, shardRef.Name, keyspaceUnshardedName) } @@ -418,10 +418,10 @@ func packKeyspaceID(keyspaceID uint64) []byte { return (keybytes[:]) } -func getSrvKeyspace(t *testing.T, cell string, ksname string) *topodata.SrvKeyspace { +func getSrvKeyspace(t *testing.T, cell string, ksname string) *topodatapb.SrvKeyspace { output, err := clusterForKSTest.VtctlclientProcess.ExecuteCommandWithOutput("GetSrvKeyspace", cell, ksname) require.Nil(t, err) - var srvKeyspace topodata.SrvKeyspace + var srvKeyspace topodatapb.SrvKeyspace err = json.Unmarshal([]byte(output), &srvKeyspace) require.Nil(t, err)