diff --git a/go/test/endtoend/backup/vtbackup/backup_only_test.go b/go/test/endtoend/backup/vtbackup/backup_only_test.go index 69c12d4376c..3f5389d2726 100644 --- a/go/test/endtoend/backup/vtbackup/backup_only_test.go +++ b/go/test/endtoend/backup/vtbackup/backup_only_test.go @@ -83,10 +83,10 @@ func TestTabletInitialBackup(t *testing.T) { restore(t, primary, "replica", "NOT_SERVING") // Vitess expects that the user has set the database into ReadWrite mode before calling // TabletExternallyReparented - err = localCluster.VtctlclientProcess.ExecuteCommand( - "SetReadWrite", primary.Alias) + err = localCluster.VtctldClientProcess.ExecuteCommand( + "SetWritable", primary.Alias, "true") require.Nil(t, err) - err = localCluster.VtctlclientProcess.ExecuteCommand( + err = localCluster.VtctldClientProcess.ExecuteCommand( "TabletExternallyReparented", primary.Alias) require.Nil(t, err) restore(t, replica1, "replica", "SERVING") @@ -277,7 +277,7 @@ func initTablets(t *testing.T, startTablet bool, initShardPrimary bool) { if initShardPrimary { // choose primary and start replication - err := localCluster.VtctlclientProcess.InitShardPrimary(keyspaceName, shardName, cell, primary.TabletUID) + err := localCluster.VtctldClientProcess.InitShardPrimary(keyspaceName, shardName, cell, primary.TabletUID) require.Nil(t, err) } } @@ -339,7 +339,7 @@ func tearDown(t *testing.T, initMysql bool) { for _, tablet := range []cluster.Vttablet{*primary, *replica1, *replica2} { resetTabletDirectory(t, tablet, initMysql) // DeleteTablet on a primary will cause tablet to shutdown, so should only call it after tablet is already shut down - err := localCluster.VtctlclientProcess.ExecuteCommand("DeleteTablet", "--", "--allow_primary", tablet.Alias) + err := localCluster.VtctldClientProcess.ExecuteCommand("DeleteTablets", "--allow-primary", tablet.Alias) require.Nil(t, err) } } diff --git a/go/test/endtoend/backup/vtctlbackup/backup_utils.go b/go/test/endtoend/backup/vtctlbackup/backup_utils.go index a70d1804028..c20ab70e652 100644 --- a/go/test/endtoend/backup/vtctlbackup/backup_utils.go +++ b/go/test/endtoend/backup/vtctlbackup/backup_utils.go @@ -257,7 +257,7 @@ func LaunchCluster(setupType int, streamMode string, stripes int, cDetails *Comp return replica3, nil } - if err := localCluster.VtctlclientProcess.InitShardPrimary(keyspaceName, shard.Name, cell, primary.TabletUID); err != nil { + if err := localCluster.VtctldClientProcess.InitShardPrimary(keyspaceName, shard.Name, cell, primary.TabletUID); err != nil { return 1, err } @@ -455,7 +455,7 @@ func primaryBackup(t *testing.T) { localCluster.VerifyBackupCount(t, shardKsName, 0) - err = localCluster.VtctlclientProcess.ExecuteCommand("Backup", "--", "--allow_primary=true", primary.Alias) + err = localCluster.VtctldClientProcess.ExecuteCommand("Backup", "--allow-primary", primary.Alias) require.Nil(t, err) // We'll restore this on the primary later to test restores using a backup timestamp @@ -475,7 +475,7 @@ func primaryBackup(t *testing.T) { // And only 1 record after we restore using the first backup timestamp cluster.VerifyRowsInTablet(t, replica2, keyspaceName, 2) - err = localCluster.VtctlclientProcess.ExecuteCommand("Backup", "--", "--allow_primary=true", primary.Alias) + err = localCluster.VtctldClientProcess.ExecuteCommand("Backup", "--allow-primary", primary.Alias) require.Nil(t, err) backups = localCluster.VerifyBackupCount(t, shardKsName, 2) @@ -485,26 +485,25 @@ func primaryBackup(t *testing.T) { // Perform PRS to demote the primary tablet (primary) so that we 
can do a restore there and verify we don't have the // data from after the older/first backup - err = localCluster.VtctlclientProcess.ExecuteCommand("PlannedReparentShard", "--", - "--keyspace_shard", shardKsName, - "--new_primary", replica2.Alias) + err = localCluster.VtctldClientProcess.ExecuteCommand("PlannedReparentShard", + "--new-primary", replica2.Alias, shardKsName) require.Nil(t, err) // Delete the current primary tablet (replica2) so that the original primary tablet (primary) can be restored from the // older/first backup w/o it replicating the subsequent insert done after the first backup was taken - err = localCluster.VtctlclientProcess.ExecuteCommand("DeleteTablet", "--", "--allow_primary=true", replica2.Alias) + err = localCluster.VtctldClientProcess.ExecuteCommand("DeleteTablets", "--allow-primary", replica2.Alias) require.Nil(t, err) err = replica2.VttabletProcess.TearDown() require.Nil(t, err) // Restore the older/first backup -- using the timestamp we saved -- on the original primary tablet (primary) - err = localCluster.VtctlclientProcess.ExecuteCommand("RestoreFromBackup", "--", "--backup_timestamp", firstBackupTimestamp, primary.Alias) + err = localCluster.VtctldClientProcess.ExecuteCommand("RestoreFromBackup", "--backup-timestamp", firstBackupTimestamp, primary.Alias) require.Nil(t, err) verifyTabletRestoreStats(t, primary.VttabletProcess.GetVars()) // Re-init the shard -- making the original primary tablet (primary) primary again -- for subsequent tests - err = localCluster.VtctlclientProcess.InitShardPrimary(keyspaceName, shardName, cell, primary.TabletUID) + err = localCluster.VtctldClientProcess.InitShardPrimary(keyspaceName, shardName, cell, primary.TabletUID) require.Nil(t, err) // Verify that we don't have the record created after the older/first backup @@ -526,7 +525,7 @@ func primaryReplicaSameBackup(t *testing.T) { verifyInitialReplication(t) // backup the replica - err := localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias) + err := localCluster.VtctldClientProcess.ExecuteCommand("Backup", replica1.Alias) require.Nil(t, err) verifyTabletBackupStats(t, replica1.VttabletProcess.GetVars()) @@ -544,9 +543,8 @@ func primaryReplicaSameBackup(t *testing.T) { cluster.VerifyRowsInTablet(t, replica2, keyspaceName, 2) // Promote replica2 to primary - err = localCluster.VtctlclientProcess.ExecuteCommand("PlannedReparentShard", "--", - "--keyspace_shard", shardKsName, - "--new_primary", replica2.Alias) + err = localCluster.VtctldClientProcess.ExecuteCommand("PlannedReparentShard", + "--new-primary", replica2.Alias, shardKsName) require.Nil(t, err) // insert more data on replica2 (current primary) @@ -564,7 +562,7 @@ func primaryReplicaSameBackup(t *testing.T) { // It is written into the MANIFEST and read back from the MANIFEST. // // Take another backup on the replica. - err = localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias) + err = localCluster.VtctldClientProcess.ExecuteCommand("Backup", replica1.Alias) require.Nil(t, err) // Insert more data on replica2 (current primary). 
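Annotation (reviewer note, not part of the patch): the backup_only_test.go and backup_utils.go hunks above all follow one mechanical pattern. `vtctlclient` invocations of the form `Cmd -- --snake_case_flag value` become `vtctldclient` invocations with kebab-case flags, no `--` separator, and the keyspace/shard as a positional argument. A minimal sketch of the new calling convention, reusing `localCluster`, `shardKsName`, and `require` from the surrounding test package (`reparentTo` itself is hypothetical):

```go
// reparentTo promotes newPrimaryAlias using the vtctldclient convention:
//   old: PlannedReparentShard -- --keyspace_shard ks/0 --new_primary zone1-0000000101
//   new: PlannedReparentShard --new-primary zone1-0000000101 ks/0
func reparentTo(t *testing.T, newPrimaryAlias string) {
	err := localCluster.VtctldClientProcess.ExecuteCommand(
		"PlannedReparentShard",
		"--new-primary", newPrimaryAlias,
		shardKsName, // the keyspace/shard is now positional
	)
	require.Nil(t, err)
}
```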
@@ -594,7 +592,7 @@ func primaryReplicaSameBackupModifiedCompressionEngine(t *testing.T) { time.Sleep(5 * time.Second) // backup the replica - err := localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias) + err := localCluster.VtctldClientProcess.ExecuteCommand("Backup", replica1.Alias) require.Nil(t, err) verifyTabletBackupStats(t, replica1.VttabletProcess.GetVars()) @@ -619,9 +617,8 @@ func primaryReplicaSameBackupModifiedCompressionEngine(t *testing.T) { cluster.VerifyRowsInTablet(t, replica2, keyspaceName, 2) // Promote replica2 to primary - err = localCluster.VtctlclientProcess.ExecuteCommand("PlannedReparentShard", "--", - "--keyspace_shard", shardKsName, - "--new_primary", replica2.Alias) + err = localCluster.VtctldClientProcess.ExecuteCommand("PlannedReparentShard", + "--new-primary", replica2.Alias, shardKsName) require.Nil(t, err) // insert more data on replica2 (current primary) @@ -635,9 +632,8 @@ func primaryReplicaSameBackupModifiedCompressionEngine(t *testing.T) { cluster.VerifyRowsInTablet(t, replica1, keyspaceName, 3) // Promote replica1 to primary - err = localCluster.VtctlclientProcess.ExecuteCommand("PlannedReparentShard", "--", - "--keyspace_shard", shardKsName, - "--new_primary", replica1.Alias) + err = localCluster.VtctldClientProcess.ExecuteCommand("PlannedReparentShard", + "--new-primary", replica1.Alias, shardKsName) require.Nil(t, err) // Insert more data on replica1 (current primary). @@ -648,7 +644,7 @@ func primaryReplicaSameBackupModifiedCompressionEngine(t *testing.T) { cluster.VerifyRowsInTablet(t, replica2, keyspaceName, 4) // Now take replica2 backup with gzip (new compressor) - err = localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica2.Alias) + err = localCluster.VtctldClientProcess.ExecuteCommand("Backup", replica2.Alias) require.Nil(t, err) verifyTabletBackupStats(t, replica2.VttabletProcess.GetVars()) @@ -688,7 +684,7 @@ func testRestoreOldPrimary(t *testing.T, method restoreMethod) { time.Sleep(5 * time.Second) // backup the replica - err := localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias) + err := localCluster.VtctldClientProcess.ExecuteCommand("Backup", replica1.Alias) require.Nil(t, err) verifyTabletBackupStats(t, replica1.VttabletProcess.GetVars()) @@ -698,9 +694,8 @@ func testRestoreOldPrimary(t *testing.T, method restoreMethod) { require.Nil(t, err) // reparent to replica1 - err = localCluster.VtctlclientProcess.ExecuteCommand("PlannedReparentShard", "--", - "--keyspace_shard", shardKsName, - "--new_primary", replica1.Alias) + err = localCluster.VtctldClientProcess.ExecuteCommand("PlannedReparentShard", + "--new-primary", replica1.Alias, shardKsName) require.Nil(t, err) // insert more data to new primary @@ -726,7 +721,7 @@ func restoreUsingRestart(t *testing.T, tablet *cluster.Vttablet) { } func restoreInPlace(t *testing.T, tablet *cluster.Vttablet) { - err := localCluster.VtctlclientProcess.ExecuteCommand("RestoreFromBackup", tablet.Alias) + err := localCluster.VtctldClientProcess.ExecuteCommand("RestoreFromBackup", tablet.Alias) require.Nil(t, err) } @@ -756,7 +751,7 @@ func restartPrimaryAndReplica(t *testing.T) { err = tablet.VttabletProcess.Setup() require.Nil(t, err) } - err := localCluster.VtctlclientProcess.InitShardPrimary(keyspaceName, shardName, cell, primary.TabletUID) + err := localCluster.VtctldClientProcess.InitShardPrimary(keyspaceName, shardName, cell, primary.TabletUID) require.Nil(t, err) } @@ -766,12 +761,12 @@ func stopAllTablets() { tablet.VttabletProcess.TearDown() 
if tablet.MysqlctldProcess.TabletUID > 0 { tablet.MysqlctldProcess.Stop() - localCluster.VtctlclientProcess.ExecuteCommand("DeleteTablet", "--", "--allow_primary", tablet.Alias) + localCluster.VtctldClientProcess.ExecuteCommand("DeleteTablets", "--allow-primary", tablet.Alias) continue } proc, _ := tablet.MysqlctlProcess.StopProcess() mysqlProcs = append(mysqlProcs, proc) - localCluster.VtctlclientProcess.ExecuteCommand("DeleteTablet", "--", "--allow_primary", tablet.Alias) + localCluster.VtctldClientProcess.ExecuteCommand("DeleteTablets", "--allow-primary", tablet.Alias) } for _, proc := range mysqlProcs { proc.Wait() @@ -796,7 +791,7 @@ func terminatedRestore(t *testing.T) { checkTabletType(t, replica1.Alias, topodata.TabletType_REPLICA) // backup the replica - err := localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias) + err := localCluster.VtctldClientProcess.ExecuteCommand("Backup", replica1.Alias) require.Nil(t, err) checkTabletType(t, replica1.Alias, topodata.TabletType_REPLICA) @@ -807,9 +802,8 @@ func terminatedRestore(t *testing.T) { require.Nil(t, err) // reparent to replica1 - err = localCluster.VtctlclientProcess.ExecuteCommand("PlannedReparentShard", "--", - "--keyspace_shard", shardKsName, - "--new_primary", replica1.Alias) + err = localCluster.VtctldClientProcess.ExecuteCommand("PlannedReparentShard", + "--new-primary", replica1.Alias, shardKsName) require.Nil(t, err) // insert more data to new primary @@ -821,7 +815,7 @@ func terminatedRestore(t *testing.T) { // If restore fails then the tablet type goes back to original type. checkTabletType(t, primary.Alias, topodata.TabletType_REPLICA) - err = localCluster.VtctlclientProcess.ExecuteCommand("RestoreFromBackup", primary.Alias) + err = localCluster.VtctldClientProcess.ExecuteCommand("RestoreFromBackup", primary.Alias) require.Nil(t, err) checkTabletType(t, primary.Alias, topodata.TabletType_REPLICA) @@ -863,7 +857,7 @@ func doNotDemoteNewlyPromotedPrimaryIfReparentingDuringBackup(t *testing.T) { checkTabletType(t, primary.Alias, topodata.TabletType_PRIMARY) // now backup - err := localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias) + err := localCluster.VtctldClientProcess.ExecuteCommand("Backup", replica1.Alias) require.Nil(t, err) }() @@ -874,10 +868,11 @@ func doNotDemoteNewlyPromotedPrimaryIfReparentingDuringBackup(t *testing.T) { checkTabletType(t, primary.Alias, topodata.TabletType_PRIMARY) // now reparent - _, err := localCluster.VtctlclientProcess.ExecuteCommandWithOutput( - "PlannedReparentShard", "--", - "--keyspace_shard", fmt.Sprintf("%s/%s", keyspaceName, shardName), - "--new_primary", replica1.Alias) + _, err := localCluster.VtctldClientProcess.ExecuteCommandWithOutput( + "PlannedReparentShard", + "--new-primary", replica1.Alias, + fmt.Sprintf("%s/%s", keyspaceName, shardName), + ) require.Nil(t, err) // check that we reparented @@ -906,13 +901,13 @@ func doNotDemoteNewlyPromotedPrimaryIfReparentingDuringBackup(t *testing.T) { func vtctlBackup(t *testing.T, tabletType string) { // StopReplication on replica1. We verify that the replication works fine later in // verifyInitialReplication. So this will also check that VTOrc is running. 
- err := localCluster.VtctlclientProcess.ExecuteCommand("StopReplication", replica1.Alias) + err := localCluster.VtctldClientProcess.ExecuteCommand("StopReplication", replica1.Alias) require.Nil(t, err) verifyInitialReplication(t) restoreWaitForBackup(t, tabletType, nil, true) - err = localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias) + err = localCluster.VtctldClientProcess.ExecuteCommand("Backup", replica1.Alias) require.Nil(t, err) backups := localCluster.VerifyBackupCount(t, shardKsName, 1) @@ -930,7 +925,7 @@ func vtctlBackup(t *testing.T, tabletType string) { err = replica2.VttabletProcess.TearDown() require.Nil(t, err) - err = localCluster.VtctlclientProcess.ExecuteCommand("DeleteTablet", replica2.Alias) + err = localCluster.VtctldClientProcess.ExecuteCommand("DeleteTablets", replica2.Alias) require.Nil(t, err) _, err = primary.VttabletProcess.QueryTablet("DROP TABLE vt_insert_test", keyspaceName, true) require.Nil(t, err) @@ -976,7 +971,7 @@ func restoreWaitForBackup(t *testing.T, tabletType string, cDetails *Compression } func RemoveBackup(t *testing.T, backupName string) { - err := localCluster.VtctlclientProcess.ExecuteCommand("RemoveBackup", shardKsName, backupName) + err := localCluster.VtctldClientProcess.ExecuteCommand("RemoveBackup", shardKsName, backupName) require.Nil(t, err) } @@ -1002,9 +997,9 @@ func verifyRestoreTablet(t *testing.T, tablet *cluster.Vttablet, status string) // We restart replication here because semi-sync will not be set correctly on tablet startup since // we deprecated enable_semi_sync. StartReplication RPC fixes the semi-sync settings by consulting the // durability policies set. - err = localCluster.VtctlclientProcess.ExecuteCommand("StopReplication", tablet.Alias) + err = localCluster.VtctldClientProcess.ExecuteCommand("StopReplication", tablet.Alias) require.NoError(t, err) - err = localCluster.VtctlclientProcess.ExecuteCommand("StartReplication", tablet.Alias) + err = localCluster.VtctldClientProcess.ExecuteCommand("StartReplication", tablet.Alias) require.NoError(t, err) if tablet.Type == "replica" { @@ -1033,13 +1028,13 @@ func terminateBackup(t *testing.T, alias string) { }() } - args := append([]string{"--server", localCluster.VtctlclientProcess.Server, "--alsologtostderr"}, "Backup", "--", alias) + args := append([]string{"--server", localCluster.VtctldClientProcess.Server, "--alsologtostderr"}, "Backup", alias) tmpProcess := exec.Command( - "vtctlclient", + "vtctldclient", args..., ) - reader, _ := tmpProcess.StderrPipe() + reader, _ := tmpProcess.StdoutPipe() err := tmpProcess.Start() require.Nil(t, err) found := false @@ -1067,13 +1062,13 @@ func terminateRestore(t *testing.T) { }() } - args := append([]string{"--server", localCluster.VtctlclientProcess.Server, "--alsologtostderr"}, "RestoreFromBackup", "--", primary.Alias) + args := append([]string{"--server", localCluster.VtctldClientProcess.Server, "--alsologtostderr"}, "RestoreFromBackup", primary.Alias) tmpProcess := exec.Command( - "vtctlclient", + "vtctldclient", args..., ) - reader, _ := tmpProcess.StderrPipe() + reader, _ := tmpProcess.StdoutPipe() err := tmpProcess.Start() require.Nil(t, err) found := false @@ -1289,7 +1284,7 @@ func TestReplicaIncrementalBackup(t *testing.T, replicaIndex int, incrementalFro func TestReplicaFullRestore(t *testing.T, replicaIndex int, expectError string) { replica := getReplica(t, replicaIndex) - output, err := localCluster.VtctlclientProcess.ExecuteCommandWithOutput("RestoreFromBackup", replica.Alias) + output, err := 
localCluster.VtctldClientProcess.ExecuteCommandWithOutput("RestoreFromBackup", replica.Alias) if expectError != "" { require.Errorf(t, err, "expected: %v", expectError) require.Contains(t, output, expectError) @@ -1304,7 +1299,7 @@ func TestReplicaRestoreToPos(t *testing.T, replicaIndex int, restoreToPos replic require.False(t, restoreToPos.IsZero()) restoreToPosArg := replication.EncodePosition(restoreToPos) - output, err := localCluster.VtctlclientProcess.ExecuteCommandWithOutput("RestoreFromBackup", "--", "--restore_to_pos", restoreToPosArg, replica.Alias) + output, err := localCluster.VtctldClientProcess.ExecuteCommandWithOutput("RestoreFromBackup", "--restore-to-pos", restoreToPosArg, replica.Alias) if expectError != "" { require.Errorf(t, err, "expected: %v", expectError) require.Contains(t, output, expectError) diff --git a/go/test/endtoend/cellalias/cell_alias_test.go b/go/test/endtoend/cellalias/cell_alias_test.go index d357331d8cd..07e8d687f4e 100644 --- a/go/test/endtoend/cellalias/cell_alias_test.go +++ b/go/test/endtoend/cellalias/cell_alias_test.go @@ -186,14 +186,14 @@ func TestMain(m *testing.M) { return 1, err } } - if err := localCluster.VtctlclientProcess.InitializeShard(keyspaceName, shard1.Name, shard1Primary.Cell, shard1Primary.TabletUID); err != nil { + if err := localCluster.VtctldClientProcess.InitializeShard(keyspaceName, shard1.Name, shard1Primary.Cell, shard1Primary.TabletUID); err != nil { return 1, err } // run a health check on source replica so it responds to discovery // (for binlog players) and on the source rdonlys (for workers) for _, tablet := range []string{shard1Replica.Alias, shard1Rdonly.Alias} { - if err := localCluster.VtctlclientProcess.ExecuteCommand("RunHealthCheck", tablet); err != nil { + if err := localCluster.VtctldClientProcess.ExecuteCommand("RunHealthCheck", tablet); err != nil { return 1, err } } @@ -204,7 +204,7 @@ func TestMain(m *testing.M) { } } - if err := localCluster.VtctlclientProcess.InitializeShard(keyspaceName, shard2.Name, shard2Primary.Cell, shard2Primary.TabletUID); err != nil { + if err := localCluster.VtctldClientProcess.InitializeShard(keyspaceName, shard2.Name, shard2Primary.Cell, shard2Primary.TabletUID); err != nil { return 1, err } @@ -212,14 +212,14 @@ func TestMain(m *testing.M) { return 1, err } - if err := localCluster.VtctlclientProcess.ApplySchema(keyspaceName, fmt.Sprintf(sqlSchema, tableName)); err != nil { + if err := localCluster.VtctldClientProcess.ApplySchema(keyspaceName, fmt.Sprintf(sqlSchema, tableName)); err != nil { return 1, err } - if err := localCluster.VtctlclientProcess.ApplyVSchema(keyspaceName, fmt.Sprintf(vSchema, tableName)); err != nil { + if err := localCluster.VtctldClientProcess.ApplyVSchema(keyspaceName, fmt.Sprintf(vSchema, tableName)); err != nil { return 1, err } - _ = localCluster.VtctlclientProcess.ExecuteCommand("RebuildKeyspaceGraph", keyspaceName) + _ = localCluster.VtctldClientProcess.ExecuteCommand("RebuildKeyspaceGraph", keyspaceName) return m.Run(), nil }() @@ -237,7 +237,7 @@ func TestAlias(t *testing.T) { insertInitialValues(t) defer deleteInitialValues(t) - err := localCluster.VtctlclientProcess.ExecuteCommand("RebuildKeyspaceGraph", keyspaceName) + err := localCluster.VtctldClientProcess.ExecuteCommand("RebuildKeyspaceGraph", keyspaceName) require.NoError(t, err) shard1 := localCluster.Keyspaces[0].Shards[0] shard2 := localCluster.Keyspaces[0].Shards[1] @@ -251,11 +251,11 @@ func TestAlias(t *testing.T) { cluster.CheckSrvKeyspace(t, cell2, keyspaceName, 
expectedPartitions, *localCluster) // Adds alias so vtgate can route to replica/rdonly tablets that are not in the same cell, but same alias - err = localCluster.VtctlclientProcess.ExecuteCommand("AddCellsAlias", "--", + err = localCluster.VtctldClientProcess.ExecuteCommand("AddCellsAlias", "--cells", allCells, "region_east_coast") require.NoError(t, err) - err = localCluster.VtctlclientProcess.ExecuteCommand("UpdateCellsAlias", "--", + err = localCluster.VtctldClientProcess.ExecuteCommand("UpdateCellsAlias", "--cells", allCells, "region_east_coast") require.NoError(t, err) @@ -277,7 +277,7 @@ func TestAlias(t *testing.T) { testQueriesOnTabletType(t, "rdonly", vtgateInstance.GrpcPort, false) // now, delete the alias, so that if we run above assertions again, it will fail for replica,rdonly target type - err = localCluster.VtctlclientProcess.ExecuteCommand("DeleteCellsAlias", + err = localCluster.VtctldClientProcess.ExecuteCommand("DeleteCellsAlias", "region_east_coast") require.NoError(t, err) @@ -301,7 +301,7 @@ func TestAddAliasWhileVtgateUp(t *testing.T) { insertInitialValues(t) defer deleteInitialValues(t) - err := localCluster.VtctlclientProcess.ExecuteCommand("RebuildKeyspaceGraph", keyspaceName) + err := localCluster.VtctldClientProcess.ExecuteCommand("RebuildKeyspaceGraph", keyspaceName) require.NoError(t, err) shard1 := localCluster.Keyspaces[0].Shards[0] shard2 := localCluster.Keyspaces[0].Shards[1] @@ -328,7 +328,7 @@ func TestAddAliasWhileVtgateUp(t *testing.T) { testQueriesOnTabletType(t, "rdonly", vtgateInstance.GrpcPort, true) // Adds alias so vtgate can route to replica/rdonly tablets that are not in the same cell, but same alias - err = localCluster.VtctlclientProcess.ExecuteCommand("AddCellsAlias", "--", + err = localCluster.VtctldClientProcess.ExecuteCommand("AddCellsAlias", "--cells", allCells, "region_east_coast") require.NoError(t, err) diff --git a/go/test/endtoend/cluster/cluster_process.go b/go/test/endtoend/cluster/cluster_process.go index a9cc482b9e3..98218bcf3fb 100644 --- a/go/test/endtoend/cluster/cluster_process.go +++ b/go/test/endtoend/cluster/cluster_process.go @@ -421,7 +421,7 @@ func (cluster *LocalProcessCluster) startKeyspace(keyspace Keyspace, shardNames } // Make first tablet as primary - if err = cluster.VtctlclientProcess.InitializeShard(keyspace.Name, shardName, cluster.Cell, shard.Vttablets[0].TabletUID); err != nil { + if err = cluster.VtctldClientProcess.InitializeShard(keyspace.Name, shardName, cluster.Cell, shard.Vttablets[0].TabletUID); err != nil { log.Errorf("error running InitializeShard on keyspace %v, shard %v: %v", keyspace.Name, shardName, err) return } @@ -441,7 +441,7 @@ func (cluster *LocalProcessCluster) startKeyspace(keyspace Keyspace, shardNames // Apply Schema SQL if keyspace.SchemaSQL != "" { - if err = cluster.VtctlclientProcess.ApplySchema(keyspace.Name, keyspace.SchemaSQL); err != nil { + if err = cluster.VtctldClientProcess.ApplySchema(keyspace.Name, keyspace.SchemaSQL); err != nil { log.Errorf("error applying schema: %v, %v", keyspace.SchemaSQL, err) return } @@ -449,7 +449,7 @@ func (cluster *LocalProcessCluster) startKeyspace(keyspace Keyspace, shardNames // Apply VSchema if keyspace.VSchema != "" { - if err = cluster.VtctlclientProcess.ApplyVSchema(keyspace.Name, keyspace.VSchema); err != nil { + if err = cluster.VtctldClientProcess.ApplyVSchema(keyspace.Name, keyspace.VSchema); err != nil { log.Errorf("error applying vschema: %v, %v", keyspace.VSchema, err) return } @@ -581,7 +581,7 @@ func (cluster 
*LocalProcessCluster) StartKeyspaceLegacy(keyspace Keyspace, shard } // Make first tablet as primary - if err = cluster.VtctlclientProcess.InitShardPrimary(keyspace.Name, shardName, cluster.Cell, shard.Vttablets[0].TabletUID); err != nil { + if err = cluster.VtctldClientProcess.InitShardPrimary(keyspace.Name, shardName, cluster.Cell, shard.Vttablets[0].TabletUID); err != nil { log.Errorf("error running ISM on keyspace %v, shard %v: %v", keyspace.Name, shardName, err) return } @@ -601,7 +601,7 @@ func (cluster *LocalProcessCluster) StartKeyspaceLegacy(keyspace Keyspace, shard // Apply Schema SQL if keyspace.SchemaSQL != "" { - if err = cluster.VtctlclientProcess.ApplySchema(keyspace.Name, keyspace.SchemaSQL); err != nil { + if err = cluster.VtctldClientProcess.ApplySchema(keyspace.Name, keyspace.SchemaSQL); err != nil { log.Errorf("error applying schema: %v, %v", keyspace.SchemaSQL, err) return } @@ -609,7 +609,7 @@ func (cluster *LocalProcessCluster) StartKeyspaceLegacy(keyspace Keyspace, shard // Apply VSchema if keyspace.VSchema != "" { - if err = cluster.VtctlclientProcess.ApplyVSchema(keyspace.Name, keyspace.VSchema); err != nil { + if err = cluster.VtctldClientProcess.ApplyVSchema(keyspace.Name, keyspace.VSchema); err != nil { log.Errorf("error applying vschema: %v, %v", keyspace.VSchema, err) return } diff --git a/go/test/endtoend/cluster/cluster_util.go b/go/test/endtoend/cluster/cluster_util.go index a35c3bf3769..9fcefba3892 100644 --- a/go/test/endtoend/cluster/cluster_util.go +++ b/go/test/endtoend/cluster/cluster_util.go @@ -138,7 +138,7 @@ func PanicHandler(t testing.TB) { // ListBackups Lists back preset in shard func (cluster LocalProcessCluster) ListBackups(shardKsName string) ([]string, error) { - output, err := cluster.VtctlclientProcess.ExecuteCommandWithOutput("ListBackups", shardKsName) + output, err := cluster.VtctldClientProcess.ExecuteCommandWithOutput("GetBackups", shardKsName) if err != nil { return nil, err } @@ -165,7 +165,7 @@ func (cluster LocalProcessCluster) RemoveAllBackups(t *testing.T, shardKsName st backups, err := cluster.ListBackups(shardKsName) require.Nil(t, err) for _, backup := range backups { - cluster.VtctlclientProcess.ExecuteCommand("RemoveBackup", shardKsName, backup) + cluster.VtctldClientProcess.ExecuteCommand("RemoveBackup", shardKsName, backup) } } diff --git a/go/test/endtoend/cluster/vtctldclient_process.go b/go/test/endtoend/cluster/vtctldclient_process.go index 52e0f985680..959ab5a93b9 100644 --- a/go/test/endtoend/cluster/vtctldclient_process.go +++ b/go/test/endtoend/cluster/vtctldclient_process.go @@ -22,7 +22,11 @@ import ( "strings" "time" + "vitess.io/vitess/go/json2" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/vterrors" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) // VtctldClientProcess is a generic handle for a running vtctldclient command . 
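Annotation (reviewer note, not part of the patch): the hunk below is the heart of the change. It gives `VtctldClientProcess` the helpers that test code previously reached through `VtctlClientProcess` — `ApplySchema`/`ApplySchemaWithOutput` driven by an `ApplySchemaParams` struct, `ApplyVSchema`, `GetSrvKeyspaces`, `InitializeShard`, and `InitShardPrimary` — so call sites can switch process types without restructuring. A usage sketch under the assumption that the helpers behave exactly as written below; the keyspace and cell names are placeholders, and `fmt` plus the `cluster` package are assumed imports:

```go
// newHelpersSketch exercises the helpers added in the next hunk.
func newHelpersSketch(vtctld *cluster.VtctldClientProcess) error {
	// ApplySchema wraps ApplySchemaWithOutput with the default
	// "direct -allow-zero-in-date" DDL strategy, mirroring the old helper.
	if err := vtctld.ApplySchema("ks", "CREATE TABLE t1 (id BIGINT PRIMARY KEY)"); err != nil {
		return err
	}
	// GetSrvKeyspaces decodes the command's JSON output into a
	// cell -> *topodatapb.SrvKeyspace map, one entry per requested cell.
	ksMap, err := vtctld.GetSrvKeyspaces("ks", "zone1")
	if err != nil {
		return err
	}
	if ksMap["zone1"] == nil {
		return fmt.Errorf("ks is not served in zone1")
	}
	return nil
}
```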
@@ -93,6 +97,68 @@ func VtctldClientProcessInstance(hostname string, grpcPort int, tmpDirectory str return vtctldclient } +type ApplySchemaParams struct { + DDLStrategy string + MigrationContext string + UUIDs string + CallerID string + BatchSize int +} + +// ApplySchemaWithOutput applies SQL schema to the keyspace +func (vtctldclient *VtctldClientProcess) ApplySchemaWithOutput(keyspace string, sql string, params ApplySchemaParams) (result string, err error) { + args := []string{ + "ApplySchema", + "--sql", sql, + } + if params.MigrationContext != "" { + args = append(args, "--migration-context", params.MigrationContext) + } + if params.DDLStrategy != "" { + args = append(args, "--ddl-strategy", params.DDLStrategy) + } + if params.UUIDs != "" { + args = append(args, "--uuid", params.UUIDs) + } + if params.BatchSize > 0 { + args = append(args, "--batch-size", fmt.Sprintf("%d", params.BatchSize)) + } + if params.CallerID != "" { + args = append(args, "--caller-id", params.CallerID) + } + args = append(args, keyspace) + return vtctldclient.ExecuteCommandWithOutput(args...) +} + +// ApplySchema applies SQL schema to the keyspace +func (vtctldclient *VtctldClientProcess) ApplySchema(keyspace string, sql string) error { + message, err := vtctldclient.ApplySchemaWithOutput(keyspace, sql, ApplySchemaParams{DDLStrategy: "direct -allow-zero-in-date"}) + + return vterrors.Wrap(err, message) +} + +// ApplyVSchema applies vitess schema (JSON format) to the keyspace +func (vtctldclient *VtctldClientProcess) ApplyVSchema(keyspace string, json string) (err error) { + return vtctldclient.ExecuteCommand( + "ApplyVSchema", + "--vschema", json, + keyspace, + ) +} + +// GetSrvKeyspaces returns a mapping of cell to srv keyspace for the given keyspace. +func (vtctldclient *VtctldClientProcess) GetSrvKeyspaces(keyspace string, cells ...string) (ksMap map[string]*topodatapb.SrvKeyspace, err error) { + args := append([]string{"GetSrvKeyspaces", keyspace}, cells...) + out, err := vtctldclient.ExecuteCommandWithOutput(args...) + if err != nil { + return nil, err + } + + ksMap = map[string]*topodatapb.SrvKeyspace{} + err = json2.Unmarshal([]byte(out), &ksMap) + return ksMap, err +} + // PlannedReparentShard executes vtctlclient command to make specified tablet the primary for the shard. func (vtctldclient *VtctldClientProcess) PlannedReparentShard(Keyspace string, Shard string, alias string) (err error) { output, err := vtctldclient.ExecuteCommandWithOutput( @@ -105,6 +171,32 @@ func (vtctldclient *VtctldClientProcess) PlannedReparentShard(Keyspace string, S return err } +// InitializeShard executes vtctldclient command to make specified tablet the primary for the shard. +func (vtctldclient *VtctldClientProcess) InitializeShard(keyspace string, shard string, cell string, uid int) error { + output, err := vtctldclient.ExecuteCommandWithOutput( + "PlannedReparentShard", + fmt.Sprintf("%s/%s", keyspace, shard), + "--wait-replicas-timeout", "31s", + "--new-primary", fmt.Sprintf("%s-%d", cell, uid)) + if err != nil { + log.Errorf("error in PlannedReparentShard output %s, err %s", output, err.Error()) + } + return err +} + +// InitShardPrimary executes vtctldclient command to make specified tablet the primary for the shard. 
+func (vtctldclient *VtctldClientProcess) InitShardPrimary(keyspace string, shard string, cell string, uid int) error { + output, err := vtctldclient.ExecuteCommandWithOutput( + "InitShardPrimary", + "--force", "--wait-replicas-timeout", "31s", + fmt.Sprintf("%s/%s", keyspace, shard), + fmt.Sprintf("%s-%d", cell, uid)) + if err != nil { + log.Errorf("error in InitShardPrimary output %s, err %s", output, err.Error()) + } + return err +} + // CreateKeyspace executes the vtctl command to create a keyspace func (vtctldclient *VtctldClientProcess) CreateKeyspace(keyspaceName string, sidecarDBName string) (err error) { var output string diff --git a/go/test/endtoend/clustertest/vttablet_test.go b/go/test/endtoend/clustertest/vttablet_test.go index 369deb18cfd..5e7d5e27182 100644 --- a/go/test/endtoend/clustertest/vttablet_test.go +++ b/go/test/endtoend/clustertest/vttablet_test.go @@ -51,6 +51,6 @@ func TestDeleteTablet(t *testing.T) { defer cluster.PanicHandler(t) primary := clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet() require.NotNil(t, primary) - _, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("DeleteTablet", "--", "--allow_primary", primary.Alias) + _, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("DeleteTablets", "--allow-primary", primary.Alias) require.NoError(t, err) } diff --git a/go/test/endtoend/encryption/encryptedreplication/encrypted_replication_test.go b/go/test/endtoend/encryption/encryptedreplication/encrypted_replication_test.go index 725659a5ee1..4c759ff577a 100644 --- a/go/test/endtoend/encryption/encryptedreplication/encrypted_replication_test.go +++ b/go/test/endtoend/encryption/encryptedreplication/encrypted_replication_test.go @@ -74,7 +74,7 @@ func testReplicationBase(t *testing.T, isClientCertPassed bool) { } // Reparent using SSL (this will also check replication works) - err = clusterInstance.VtctlclientProcess.InitializeShard(keyspace, shardName, clusterInstance.Cell, primaryTablet.TabletUID) + err = clusterInstance.VtctldClientProcess.InitializeShard(keyspace, shardName, clusterInstance.Cell, primaryTablet.TabletUID) if isClientCertPassed { require.NoError(t, err) } else { diff --git a/go/test/endtoend/keyspace/keyspace_test.go b/go/test/endtoend/keyspace/keyspace_test.go index 338ad5c8cd2..142e4b4b442 100644 --- a/go/test/endtoend/keyspace/keyspace_test.go +++ b/go/test/endtoend/keyspace/keyspace_test.go @@ -24,6 +24,7 @@ import ( "strings" "testing" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/vt/key" "github.com/stretchr/testify/assert" @@ -109,7 +110,7 @@ func TestMain(m *testing.M) { if err := clusterForKSTest.StartKeyspace(*keyspaceSharded, []string{"-80", "80-"}, 1, false); err != nil { return 1 } - if err := clusterForKSTest.VtctlclientProcess.ExecuteCommand("RebuildKeyspaceGraph", keyspaceShardedName); err != nil { + if err := clusterForKSTest.VtctldClientProcess.ExecuteCommand("RebuildKeyspaceGraph", keyspaceShardedName); err != nil { return 1 } @@ -121,7 +122,7 @@ func TestMain(m *testing.M) { if err := clusterForKSTest.StartKeyspace(*keyspaceUnsharded, []string{keyspaceUnshardedName}, 1, false); err != nil { return 1 } - if err := clusterForKSTest.VtctlclientProcess.ExecuteCommand("RebuildKeyspaceGraph", keyspaceUnshardedName); err != nil { + if err := clusterForKSTest.VtctldClientProcess.ExecuteCommand("RebuildKeyspaceGraph", keyspaceUnshardedName); err != nil { return 1 } @@ -228,48 +229,49 @@ func TestGetKeyspace(t *testing.T) { func TestDeleteKeyspace(t *testing.T) { defer 
cluster.PanicHandler(t) - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("CreateKeyspace", "test_delete_keyspace") - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("CreateShard", "test_delete_keyspace/0") + _ = clusterForKSTest.VtctldClientProcess.CreateKeyspace("test_delete_keyspace", sidecar.DefaultName) + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("CreateShard", "test_delete_keyspace/0") _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("InitTablet", "--", "--keyspace=test_delete_keyspace", "--shard=0", "zone1-0000000100", "primary") // Can't delete keyspace if there are shards present. - err := clusterForKSTest.VtctlclientProcess.ExecuteCommand("DeleteKeyspace", "test_delete_keyspace") + err := clusterForKSTest.VtctldClientProcess.ExecuteCommand("DeleteKeyspace", "test_delete_keyspace") require.Error(t, err) // Can't delete shard if there are tablets present. - err = clusterForKSTest.VtctlclientProcess.ExecuteCommand("DeleteShard", "--", "--even_if_serving", "test_delete_keyspace/0") + err = clusterForKSTest.VtctldClientProcess.ExecuteCommand("DeleteShards", "--even-if-serving", "test_delete_keyspace/0") require.Error(t, err) // Use recursive DeleteShard to remove tablets. - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("DeleteShard", "--", "--even_if_serving", "--recursive", "test_delete_keyspace/0") + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("DeleteShards", "--even-if-serving", "--recursive", "test_delete_keyspace/0") // Now non-recursive DeleteKeyspace should work. - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("DeleteKeyspace", "test_delete_keyspace") + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("DeleteKeyspace", "test_delete_keyspace") // Start over and this time use recursive DeleteKeyspace to do everything. - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("CreateKeyspace", "test_delete_keyspace") - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("CreateShard", "test_delete_keyspace/0") + _ = clusterForKSTest.VtctldClientProcess.CreateKeyspace("test_delete_keyspace", sidecar.DefaultName) + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("CreateShard", "test_delete_keyspace/0") _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("InitTablet", "--", "--port=1234", "--bind-address=127.0.0.1", "--keyspace=test_delete_keyspace", "--shard=0", "zone1-0000000100", "primary") // Create the serving/replication entries and check that they exist, // so we can later check they're deleted. - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("RebuildKeyspaceGraph", "test_delete_keyspace") + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("RebuildKeyspaceGraph", "test_delete_keyspace") _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("GetShardReplication", cell, "test_delete_keyspace/0") - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("GetSrvKeyspace", cell, "test_delete_keyspace") + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("GetSrvKeyspaces", "test_delete_keyspace", cell) // Recursive DeleteKeyspace - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("DeleteKeyspace", "--", "--recursive", "test_delete_keyspace") + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("DeleteKeyspace", "--recursive", "test_delete_keyspace") // Check that everything is gone. 
- err = clusterForKSTest.VtctlclientProcess.ExecuteCommand("GetKeyspace", "test_delete_keyspace") + err = clusterForKSTest.VtctldClientProcess.ExecuteCommand("GetKeyspace", "test_delete_keyspace") require.Error(t, err) - err = clusterForKSTest.VtctlclientProcess.ExecuteCommand("GetShard", "test_delete_keyspace/0") + err = clusterForKSTest.VtctldClientProcess.ExecuteCommand("GetShard", "test_delete_keyspace/0") require.Error(t, err) - err = clusterForKSTest.VtctlclientProcess.ExecuteCommand("GetTablet", "zone1-0000000100") + err = clusterForKSTest.VtctldClientProcess.ExecuteCommand("GetTablet", "zone1-0000000100") require.Error(t, err) err = clusterForKSTest.VtctlclientProcess.ExecuteCommand("GetShardReplication", cell, "test_delete_keyspace/0") require.Error(t, err) - err = clusterForKSTest.VtctlclientProcess.ExecuteCommand("GetSrvKeyspace", cell, "test_delete_keyspace") - require.Error(t, err) + ksMap, err := clusterForKSTest.VtctldClientProcess.GetSrvKeyspaces("test_delete_keyspace", cell) + require.NoError(t, err) + require.Empty(t, ksMap[cell]) } // TODO: Fix this test, not running in CI diff --git a/go/test/endtoend/messaging/message_test.go b/go/test/endtoend/messaging/message_test.go index 3082f295055..7e1190c16bb 100644 --- a/go/test/endtoend/messaging/message_test.go +++ b/go/test/endtoend/messaging/message_test.go @@ -393,12 +393,12 @@ func TestReparenting(t *testing.T) { // do planned reparenting, make one replica as primary // and validate client connection count in correspond tablets - clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput( - "PlannedReparentShard", "--", - "--keyspace_shard", userKeyspace+"/-80", - "--new_primary", shard0Replica.Alias) + clusterInstance.VtctldClientProcess.ExecuteCommand( + "PlannedReparentShard", + userKeyspace+"/-80", + "--new-primary", shard0Replica.Alias) // validate topology - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Validate") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Validate") require.Nil(t, err) // Verify connection has migrated. 
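Annotation (reviewer note, not part of the patch): `TestDeleteKeyspace` above is one of the few places where the migration changes observable behavior rather than just command spelling. `vtctlclient GetSrvKeyspace` returned an error once the keyspace was gone, whereas the `vtctldclient`-backed `GetSrvKeyspaces` succeeds and returns an empty entry for each cell, so the assertion flips from `require.Error` to:

```go
// The srv keyspace lookup now succeeds after deletion; emptiness of the
// per-cell entry is what proves the keyspace is gone.
ksMap, err := clusterForKSTest.VtctldClientProcess.GetSrvKeyspaces("test_delete_keyspace", cell)
require.NoError(t, err)
require.Empty(t, ksMap[cell])
```

Note that the `InitTablet` and `GetShardReplication` calls in that test intentionally stay on `VtctlclientProcess`; this patch does not migrate them, presumably because no direct `vtctldclient` replacement is wired up here.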
@@ -417,12 +417,12 @@ func TestReparenting(t *testing.T) { stream.Next() // make old primary again as new primary - clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput( - "PlannedReparentShard", "--", - "--keyspace_shard", userKeyspace+"/-80", - "--new_primary", shard0Primary.Alias) + clusterInstance.VtctldClientProcess.ExecuteCommand( + "PlannedReparentShard", + userKeyspace+"/-80", + "--new-primary", shard0Primary.Alias) // validate topology - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Validate") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Validate") require.Nil(t, err) time.Sleep(10 * time.Second) assertClientCount(t, 1, shard0Primary) diff --git a/go/test/endtoend/migration/migration_test.go b/go/test/endtoend/migration/migration_test.go index f0b91e2d6df..eca112e388d 100644 --- a/go/test/endtoend/migration/migration_test.go +++ b/go/test/endtoend/migration/migration_test.go @@ -145,7 +145,7 @@ func TestMigration(t *testing.T) { vt.ExtraArgs = append(vt.ExtraArgs, "--tablet_config", yamlFile) } createKeyspace(t, commerce, []string{"0"}, tabletConfig) - err := clusterInstance.VtctlclientProcess.ExecuteCommand("RebuildKeyspaceGraph", "commerce") + err := clusterInstance.VtctldClientProcess.ExecuteCommand("RebuildKeyspaceGraph", "commerce") require.NoError(t, err) err = clusterInstance.StartVtgate() @@ -221,7 +221,7 @@ func migrate(t *testing.T, fromdb, toks string, tables []string) { "('%s', '%s', %s, '', 9999, 9999, 'primary', 0, 0, 'Running')", tables[0], "vt_"+toks, sqlEscaped.String()) fmt.Printf("VReplicationExec: %s\n", query) vttablet := keyspaces[toks].Shards[0].Vttablets[0].VttabletProcess - err := clusterInstance.VtctlclientProcess.ExecuteCommand("VReplicationExec", vttablet.TabletPath, query) + err := clusterInstance.VtctldClientProcess.ExecuteCommand("VReplicationExec", vttablet.TabletPath, query) require.NoError(t, err) } diff --git a/go/test/endtoend/mysqlctl/mysqlctl_test.go b/go/test/endtoend/mysqlctl/mysqlctl_test.go index 3b28c5bcf30..bdea4d3988c 100644 --- a/go/test/endtoend/mysqlctl/mysqlctl_test.go +++ b/go/test/endtoend/mysqlctl/mysqlctl_test.go @@ -156,6 +156,6 @@ func TestAutoDetect(t *testing.T) { require.Nil(t, err, "error should be nil") // Reparent tablets, which requires flavor detection - err = clusterInstance.VtctlclientProcess.InitializeShard(keyspaceName, shardName, cell, primaryTablet.TabletUID) + err = clusterInstance.VtctldClientProcess.InitializeShard(keyspaceName, shardName, cell, primaryTablet.TabletUID) require.Nil(t, err, "error should be nil") } diff --git a/go/test/endtoend/mysqlctld/mysqlctld_test.go b/go/test/endtoend/mysqlctld/mysqlctld_test.go index e1577acfc52..52be2fa4323 100644 --- a/go/test/endtoend/mysqlctld/mysqlctld_test.go +++ b/go/test/endtoend/mysqlctld/mysqlctld_test.go @@ -159,7 +159,7 @@ func TestAutoDetect(t *testing.T) { require.Nil(t, err, "error should be nil") // Reparent tablets, which requires flavor detection - err = clusterInstance.VtctlclientProcess.InitializeShard(keyspaceName, shardName, cell, primaryTablet.TabletUID) + err = clusterInstance.VtctldClientProcess.InitializeShard(keyspaceName, shardName, cell, primaryTablet.TabletUID) require.Nil(t, err, "error should be nil") } diff --git a/go/test/endtoend/onlineddl/ghost/onlineddl_ghost_test.go b/go/test/endtoend/onlineddl/ghost/onlineddl_ghost_test.go index 3dc635c8870..41a9a80086b 100644 --- a/go/test/endtoend/onlineddl/ghost/onlineddl_ghost_test.go +++ b/go/test/endtoend/onlineddl/ghost/onlineddl_ghost_test.go @@ -370,7 +370,7 
@@ func testWithInitialSchema(t *testing.T) { for i := 0; i < totalTableCount; i++ { tableName := fmt.Sprintf("vt_onlineddl_test_%02d", i) sqlQuery = fmt.Sprintf(createTable, tableName) - err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, sqlQuery) + err := clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, sqlQuery) require.Nil(t, err) for _, insert := range insertStatements { @@ -395,7 +395,7 @@ func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy str } } else { var err error - uuid, err = clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, sqlQuery, cluster.VtctlClientParams{DDLStrategy: ddlStrategy, CallerID: callerID}) + uuid, err = clusterInstance.VtctldClientProcess.ApplySchemaWithOutput(keyspaceName, sqlQuery, cluster.ApplySchemaParams{DDLStrategy: ddlStrategy, CallerID: callerID}) assert.NoError(t, err) } uuid = strings.TrimSpace(uuid) diff --git a/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go b/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go index d4517e67aff..672e79c0985 100644 --- a/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go +++ b/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go @@ -1194,7 +1194,7 @@ func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy str } } else { var err error - uuid, err = clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, alterStatement, cluster.VtctlClientParams{DDLStrategy: ddlStrategy}) + uuid, err = clusterInstance.VtctldClientProcess.ApplySchemaWithOutput(keyspaceName, alterStatement, cluster.ApplySchemaParams{DDLStrategy: ddlStrategy}) assert.NoError(t, err) } uuid = strings.TrimSpace(uuid) diff --git a/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go b/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go index 3c2c25ec8b5..01f7024f59c 100644 --- a/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go +++ b/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go @@ -82,7 +82,7 @@ var ( keyspaceName = "ks" cell = "zone1" schemaChangeDirectory = "" - overrideVtctlParams *cluster.VtctlClientParams + overrideVtctlParams *cluster.ApplySchemaParams ) type WriteMetrics struct { @@ -949,7 +949,7 @@ func testScheduler(t *testing.T) { t.Run("Idempotent submission, retry failed migration", func(t *testing.T) { uuid := "00000000_1111_2222_3333_444444444444" - overrideVtctlParams = &cluster.VtctlClientParams{DDLStrategy: ddlStrategy, UUIDList: uuid, MigrationContext: "idempotent:1111-2222-3333"} + overrideVtctlParams = &cluster.ApplySchemaParams{DDLStrategy: ddlStrategy, UUIDs: uuid, MigrationContext: "idempotent:1111-2222-3333"} defer func() { overrideVtctlParams = nil }() // create a migration and cancel it. We don't let it complete. We want it in "failed" state t.Run("start and fail migration", func(t *testing.T) { @@ -985,7 +985,7 @@ func testScheduler(t *testing.T) { t.Run("Idempotent submission, retry failed migration in singleton context", func(t *testing.T) { uuid := "00000000_1111_3333_3333_444444444444" ddlStrategy := ddlStrategy + " --singleton-context" - overrideVtctlParams = &cluster.VtctlClientParams{DDLStrategy: ddlStrategy, UUIDList: uuid, MigrationContext: "idempotent:1111-3333-3333"} + overrideVtctlParams = &cluster.ApplySchemaParams{DDLStrategy: ddlStrategy, UUIDs: uuid, MigrationContext: "idempotent:1111-3333-3333"} defer func() { overrideVtctlParams = nil }() // create a migration and cancel it. We don't let it complete. 
We want it in "failed" state t.Run("start and fail migration", func(t *testing.T) { @@ -2391,7 +2391,7 @@ func testForeignKeys(t *testing.T) { continue } statement := fmt.Sprintf("DROP TABLE IF EXISTS %s", artifact) - _, err := clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, statement, cluster.VtctlClientParams{DDLStrategy: "direct"}) + _, err := clusterInstance.VtctldClientProcess.ApplySchemaWithOutput(keyspaceName, statement, cluster.ApplySchemaParams{DDLStrategy: "direct"}) if err == nil { droppedTables[artifact] = true } @@ -2423,11 +2423,11 @@ func testOnlineDDLStatement(t *testing.T, params *testOnlineDDLStatementParams) } } } else { - vtctlParams := &cluster.VtctlClientParams{DDLStrategy: params.ddlStrategy, MigrationContext: params.migrationContext} + vtctlParams := &cluster.ApplySchemaParams{DDLStrategy: params.ddlStrategy, MigrationContext: params.migrationContext} if overrideVtctlParams != nil { vtctlParams = overrideVtctlParams } - output, err := clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, params.ddlStatement, *vtctlParams) + output, err := clusterInstance.VtctldClientProcess.ApplySchemaWithOutput(keyspaceName, params.ddlStatement, *vtctlParams) switch params.expectError { case anyErrorIndicator: if err != nil { @@ -2472,7 +2472,7 @@ func testRevertMigration(t *testing.T, params *testRevertMigrationParams) (uuid } } } else { - output, err := clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, revertQuery, cluster.VtctlClientParams{DDLStrategy: params.ddlStrategy, MigrationContext: params.migrationContext}) + output, err := clusterInstance.VtctldClientProcess.ApplySchemaWithOutput(keyspaceName, revertQuery, cluster.ApplySchemaParams{DDLStrategy: params.ddlStrategy, MigrationContext: params.migrationContext}) if params.expectError == "" { assert.NoError(t, err) uuid = output diff --git a/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go b/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go index 49e72eda290..e5df3051612 100644 --- a/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go +++ b/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go @@ -458,7 +458,7 @@ func TestSchemaChange(t *testing.T) { time.Sleep(10 * time.Second) onlineddl.CheckCancelAllMigrations(t, &vtParams, 0) // Validate that invoking CANCEL ALL via vtctl works - onlineddl.CheckCancelAllMigrationsViaVtctl(t, &clusterInstance.VtctlclientProcess, keyspaceName) + onlineddl.CheckCancelAllMigrationsViaVtctld(t, &clusterInstance.VtctldClientProcess, keyspaceName) }) t.Run("cancel all migrations: some migrations to cancel", func(t *testing.T) { // Use VTGate for throttling, issue a `ALTER VITESS_MIGRATION THROTTLE ALL ...` @@ -497,7 +497,7 @@ func TestSchemaChange(t *testing.T) { } wg.Wait() // cancelling via vtctl does not return values. We CANCEL ALL via vtctl, then validate via VTGate that nothing remains to be cancelled. - onlineddl.CheckCancelAllMigrationsViaVtctl(t, &clusterInstance.VtctlclientProcess, keyspaceName) + onlineddl.CheckCancelAllMigrationsViaVtctld(t, &clusterInstance.VtctldClientProcess, keyspaceName) onlineddl.CheckCancelAllMigrations(t, &vtParams, 0) }) @@ -555,7 +555,7 @@ func TestSchemaChange(t *testing.T) { }) t.Run("PRS shard -80", func(t *testing.T) { // migration has started and is throttled. 
We now run PRS - err := clusterInstance.VtctlclientProcess.ExecuteCommand("PlannedReparentShard", "--", "--keyspace_shard", keyspaceName+"/-80", "--new_primary", reparentTablet.Alias) + err := clusterInstance.VtctldClientProcess.ExecuteCommand("PlannedReparentShard", keyspaceName+"/-80", "--new-primary", reparentTablet.Alias) require.NoError(t, err, "failed PRS: %v", err) rs := onlineddl.VtgateExecQuery(t, &vtParams, "show vitess_tablets", "") onlineddl.PrintQueryResult(os.Stdout, rs) @@ -650,7 +650,7 @@ func TestSchemaChange(t *testing.T) { }) t.Run("PRS shard -80", func(t *testing.T) { // migration has started and completion is postponed. We now PRS - err := clusterInstance.VtctlclientProcess.ExecuteCommand("PlannedReparentShard", "--", "--keyspace_shard", keyspaceName+"/-80", "--new_primary", reparentTablet.Alias) + err := clusterInstance.VtctldClientProcess.ExecuteCommand("PlannedReparentShard", keyspaceName+"/-80", "--new-primary", reparentTablet.Alias) require.NoError(t, err, "failed PRS: %v", err) rs := onlineddl.VtgateExecQuery(t, &vtParams, "show vitess_tablets", "") onlineddl.PrintQueryResult(os.Stdout, rs) @@ -905,7 +905,7 @@ func testWithInitialSchema(t *testing.T) { var sqlQuery = "" //nolint for i := 0; i < totalTableCount; i++ { sqlQuery = fmt.Sprintf(createTable, fmt.Sprintf("vt_onlineddl_test_%02d", i)) - err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, sqlQuery) + err := clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, sqlQuery) require.Nil(t, err) } @@ -923,8 +923,8 @@ func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy str uuid = row.AsString("uuid", "") } } else { - params := cluster.VtctlClientParams{DDLStrategy: ddlStrategy, UUIDList: providedUUIDList, MigrationContext: providedMigrationContext} - output, err := clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, sqlQuery, params) + params := cluster.ApplySchemaParams{DDLStrategy: ddlStrategy, UUIDs: providedUUIDList, MigrationContext: providedMigrationContext} + output, err := clusterInstance.VtctldClientProcess.ApplySchemaWithOutput(keyspaceName, sqlQuery, params) if expectError == "" { assert.NoError(t, err) uuid = output diff --git a/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go b/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go index 9f442a39c76..84c1ea7165c 100644 --- a/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go +++ b/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go @@ -329,11 +329,11 @@ func TestSchemaChange(t *testing.T) { func testWithInitialSchema(t *testing.T) { for _, statement := range cleanupStatements { - err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, statement) + err := clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, statement) require.Nil(t, err) } // Create the stress table - err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, createStatement) + err := clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, createStatement) require.Nil(t, err) // Check if table is created @@ -349,7 +349,7 @@ func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy str } } else { var err error - uuid, err = clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, alterStatement, cluster.VtctlClientParams{DDLStrategy: ddlStrategy}) + uuid, err = clusterInstance.VtctldClientProcess.ApplySchemaWithOutput(keyspaceName, alterStatement, 
cluster.ApplySchemaParams{DDLStrategy: ddlStrategy}) assert.NoError(t, err) } uuid = strings.TrimSpace(uuid) diff --git a/go/test/endtoend/onlineddl/vrepl_stress_suite/onlineddl_vrepl_stress_suite_test.go b/go/test/endtoend/onlineddl/vrepl_stress_suite/onlineddl_vrepl_stress_suite_test.go index 2d9caaa6703..0db25088bd0 100644 --- a/go/test/endtoend/onlineddl/vrepl_stress_suite/onlineddl_vrepl_stress_suite_test.go +++ b/go/test/endtoend/onlineddl/vrepl_stress_suite/onlineddl_vrepl_stress_suite_test.go @@ -553,10 +553,10 @@ func TestSchemaChange(t *testing.T) { func testWithInitialSchema(t *testing.T) { // Create the stress table for _, statement := range cleanupStatements { - err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, statement) + err := clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, statement) require.Nil(t, err) } - err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, createStatement) + err := clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, createStatement) require.Nil(t, err) // Check if table is created @@ -572,7 +572,7 @@ func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy str } } else { var err error - uuid, err = clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, alterStatement, cluster.VtctlClientParams{DDLStrategy: ddlStrategy}) + uuid, err = clusterInstance.VtctldClientProcess.ApplySchemaWithOutput(keyspaceName, alterStatement, cluster.ApplySchemaParams{DDLStrategy: ddlStrategy}) assert.NoError(t, err) } uuid = strings.TrimSpace(uuid) diff --git a/go/test/endtoend/onlineddl/vtctlutil.go b/go/test/endtoend/onlineddl/vtctlutil.go index 19a6ff79604..52a832f0e1f 100644 --- a/go/test/endtoend/onlineddl/vtctlutil.go +++ b/go/test/endtoend/onlineddl/vtctlutil.go @@ -25,9 +25,9 @@ import ( ) // CheckCancelAllMigrations cancels all pending migrations. There is no validation for affected migrations. 
-func CheckCancelAllMigrationsViaVtctl(t *testing.T, vtctlclient *cluster.VtctlClientProcess, keyspace string) { +func CheckCancelAllMigrationsViaVtctld(t *testing.T, vtctldclient *cluster.VtctldClientProcess, keyspace string) { cancelQuery := "alter vitess_migration cancel all" - _, err := vtctlclient.ApplySchemaWithOutput(keyspace, cancelQuery, cluster.VtctlClientParams{}) + _, err := vtctldclient.ApplySchemaWithOutput(keyspace, cancelQuery, cluster.ApplySchemaParams{}) assert.NoError(t, err) } diff --git a/go/test/endtoend/recovery/pitr/shardedpitr_test.go b/go/test/endtoend/recovery/pitr/shardedpitr_test.go index 7f70a926be3..0aed6573337 100644 --- a/go/test/endtoend/recovery/pitr/shardedpitr_test.go +++ b/go/test/endtoend/recovery/pitr/shardedpitr_test.go @@ -141,7 +141,7 @@ func TestPITRRecovery(t *testing.T) { cluster.VerifyRowsInTabletForTable(t, replica1, keyspaceName, 2, "product") // backup the replica - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Backup", replica1.Alias) require.NoError(t, err) // check that the backup shows up in the listing @@ -181,10 +181,10 @@ func TestPITRRecovery(t *testing.T) { cluster.VerifyRowsInTabletForTable(t, shard1Replica1, keyspaceName, 4, "product") // take the backup (to simulate the regular backup) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Backup", shard0Replica1.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Backup", shard0Replica1.Alias) require.NoError(t, err) // take the backup (to simulate the regular backup) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Backup", shard1Replica1.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Backup", shard1Replica1.Alias) require.NoError(t, err) backups, err := clusterInstance.ListBackups(keyspaceName + "/-80") @@ -295,10 +295,10 @@ func TestPITRRecovery(t *testing.T) { } func performResharding(t *testing.T) { - err := clusterInstance.VtctlclientProcess.ApplyVSchema(keyspaceName, vSchema) + err := clusterInstance.VtctldClientProcess.ApplyVSchema(keyspaceName, vSchema) require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Reshard", "--", "--source_shards=0", "--target_shards=-80,80-", "Create", "ks.reshardWorkflow") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Reshard", "create", "--source-shards=0", "--target-shards=-80,80-", "--target-keyspace", "ks", "--workflow", "reshardWorkflow") require.NoError(t, err) waitTimeout := 30 * time.Second @@ -307,32 +307,32 @@ func performResharding(t *testing.T) { waitForNoWorkflowLag(t, clusterInstance, "ks.reshardWorkflow") - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Reshard", "--", "--tablet_types=rdonly", "SwitchTraffic", "ks.reshardWorkflow") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Reshard", "SwitchTraffic", "--tablet-types=rdonly", "--target-keyspace", "ks", "--workflow", "reshardWorkflow") require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Reshard", "--", "--tablet_types=replica", "SwitchTraffic", "ks.reshardWorkflow") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Reshard", "SwitchTraffic", "--tablet-types=replica", "--target-keyspace", "ks", "--workflow", "reshardWorkflow") require.NoError(t, err) // then serve primary from the split shards - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Reshard", "--", "--tablet_types=primary", "SwitchTraffic", 
"ks.reshardWorkflow") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Reshard", "SwitchTraffic", "--tablet-types=primary", "--target-keyspace", "ks", "--workflow", "reshardWorkflow") require.NoError(t, err) // remove the original tablets in the original shard removeTablets(t, []*cluster.Vttablet{primary, replica1, replica2}) for _, tablet := range []*cluster.Vttablet{replica1, replica2} { - err = clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet", tablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("DeleteTablets", tablet.Alias) require.NoError(t, err) } - err = clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet", "--", "--allow_primary", primary.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("DeleteTablets", "--allow-primary", primary.Alias) require.NoError(t, err) // rebuild the serving graph, all mentions of the old shards should be gone - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RebuildKeyspaceGraph", "ks") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RebuildKeyspaceGraph", "ks") require.NoError(t, err) // delete the original shard - err = clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteShard", "ks/0") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("DeleteShards", "ks/0") require.NoError(t, err) // Restart vtgate process @@ -460,13 +460,13 @@ func initializeCluster(t *testing.T) { } } - err = clusterInstance.VtctlclientProcess.InitShardPrimary(keyspaceName, shard.Name, cell, primary.TabletUID) + err = clusterInstance.VtctldClientProcess.InitShardPrimary(keyspaceName, shard.Name, cell, primary.TabletUID) require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.InitShardPrimary(keyspaceName, shard0.Name, cell, shard0Primary.TabletUID) + err = clusterInstance.VtctldClientProcess.InitShardPrimary(keyspaceName, shard0.Name, cell, shard0Primary.TabletUID) require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.InitShardPrimary(keyspaceName, shard1.Name, cell, shard1Primary.TabletUID) + err = clusterInstance.VtctldClientProcess.InitShardPrimary(keyspaceName, shard1.Name, cell, shard1Primary.TabletUID) require.NoError(t, err) err = clusterInstance.StartVTOrc(keyspaceName) @@ -497,9 +497,9 @@ func insertRow(t *testing.T, id int, productName string, isSlow bool) { } func createRestoreKeyspace(t *testing.T, timeToRecover, restoreKeyspaceName string) { - output, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("CreateKeyspace", "--", - "--keyspace_type=SNAPSHOT", "--base_keyspace="+keyspaceName, - "--snapshot_time", timeToRecover, restoreKeyspaceName) + output, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("CreateKeyspace", + "--type=SNAPSHOT", "--base-keyspace="+keyspaceName, + "--snapshot-timestamp", timeToRecover, restoreKeyspaceName) log.Info(output) require.NoError(t, err) } diff --git a/go/test/endtoend/recovery/unshardedrecovery/recovery.go b/go/test/endtoend/recovery/unshardedrecovery/recovery.go index 8966e66ed47..1ebb7c2647f 100644 --- a/go/test/endtoend/recovery/unshardedrecovery/recovery.go +++ b/go/test/endtoend/recovery/unshardedrecovery/recovery.go @@ -164,7 +164,7 @@ func TestMainImpl(m *testing.M) { if err != nil { return 1, err } - if err := localCluster.VtctlclientProcess.InitializeShard(keyspaceName, shard.Name, cell, primary.TabletUID); err != nil { + if err := localCluster.VtctldClientProcess.InitializeShard(keyspaceName, shard.Name, cell, primary.TabletUID); err != nil { return 1, err } 
diff --git a/go/test/endtoend/recovery/unshardedrecovery/recovery.go b/go/test/endtoend/recovery/unshardedrecovery/recovery.go
index 8966e66ed47..1ebb7c2647f 100644
--- a/go/test/endtoend/recovery/unshardedrecovery/recovery.go
+++ b/go/test/endtoend/recovery/unshardedrecovery/recovery.go
@@ -164,7 +164,7 @@ func TestMainImpl(m *testing.M) {
 		if err != nil {
 			return 1, err
 		}
-		if err := localCluster.VtctlclientProcess.InitializeShard(keyspaceName, shard.Name, cell, primary.TabletUID); err != nil {
+		if err := localCluster.VtctldClientProcess.InitializeShard(keyspaceName, shard.Name, cell, primary.TabletUID); err != nil {
 			return 1, err
 		}
 		if err := localCluster.StartVTOrc(keyspaceName); err != nil {
@@ -206,17 +206,17 @@ func TestRecoveryImpl(t *testing.T) {
 	verifyInitialReplication(t)

 	// take first backup of value = test1
-	err := localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias)
+	err := localCluster.VtctldClientProcess.ExecuteCommand("Backup", replica1.Alias)
 	assert.NoError(t, err)

 	backups := listBackups(t)
 	require.Equal(t, len(backups), 1)
 	assert.Contains(t, backups[0], replica1.Alias)

-	err = localCluster.VtctlclientProcess.ApplyVSchema(keyspaceName, vSchema)
+	err = localCluster.VtctldClientProcess.ApplyVSchema(keyspaceName, vSchema)
 	assert.NoError(t, err)

-	output, err := localCluster.VtctlclientProcess.ExecuteCommandWithOutput("GetVSchema", keyspaceName)
+	output, err := localCluster.VtctldClientProcess.ExecuteCommandWithOutput("GetVSchema", keyspaceName)
 	assert.NoError(t, err)
 	assert.Contains(t, output, "vt_insert_test")

@@ -224,12 +224,12 @@ func TestRecoveryImpl(t *testing.T) {
 	restoreTime := time.Now().UTC()
 	recovery.RestoreTablet(t, localCluster, replica2, recoveryKS1, "0", keyspaceName, commonTabletArg, restoreTime)

-	output, err = localCluster.VtctlclientProcess.ExecuteCommandWithOutput("GetSrvVSchema", cell)
+	output, err = localCluster.VtctldClientProcess.ExecuteCommandWithOutput("GetSrvVSchema", cell)
 	assert.NoError(t, err)
 	assert.Contains(t, output, keyspaceName)
 	assert.Contains(t, output, recoveryKS1)

-	output, err = localCluster.VtctlclientProcess.ExecuteCommandWithOutput("GetVSchema", recoveryKS1)
+	output, err = localCluster.VtctldClientProcess.ExecuteCommandWithOutput("GetVSchema", recoveryKS1)
 	assert.NoError(t, err)
 	assert.Contains(t, output, "vt_insert_test")

@@ -277,13 +277,13 @@ func TestRecoveryImpl(t *testing.T) {
 	}

 	// take second backup of value = msgx1
-	err = localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias)
+	err = localCluster.VtctldClientProcess.ExecuteCommand("Backup", replica1.Alias)
 	assert.NoError(t, err)

 	// restore to first backup
 	recovery.RestoreTablet(t, localCluster, replica3, recoveryKS2, "0", keyspaceName, commonTabletArg, restoreTime)

-	output, err = localCluster.VtctlclientProcess.ExecuteCommandWithOutput("GetVSchema", recoveryKS2)
+	output, err = localCluster.VtctldClientProcess.ExecuteCommandWithOutput("GetVSchema", recoveryKS2)
 	assert.NoError(t, err)
 	assert.Contains(t, output, "vt_insert_test")
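Reviewer note: for commands whose names are unchanged (Backup, GetVSchema, GetSrvVSchema), the migration is purely a swap of the process wrapper. A condensed sketch of the pattern above; the wrapper function is illustrative, and the table name is the test's own:

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"vitess.io/vitess/go/test/endtoend/cluster"
)

// backupAndCheckVSchemaSketch condenses the Backup + GetVSchema pattern above.
func backupAndCheckVSchemaSketch(t *testing.T, c *cluster.LocalProcessCluster, replicaAlias, keyspace string) {
	err := c.VtctldClientProcess.ExecuteCommand("Backup", replicaAlias)
	require.NoError(t, err)

	output, err := c.VtctldClientProcess.ExecuteCommandWithOutput("GetVSchema", keyspace)
	require.NoError(t, err)
	assert.Contains(t, output, "vt_insert_test")
}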
diff --git a/go/test/endtoend/reparent/emergencyreparent/ers_test.go b/go/test/endtoend/reparent/emergencyreparent/ers_test.go
index fbd4770e15e..f0cf4f2cd6a 100644
--- a/go/test/endtoend/reparent/emergencyreparent/ers_test.go
+++ b/go/test/endtoend/reparent/emergencyreparent/ers_test.go
@@ -233,10 +233,10 @@ func TestERSPromoteRdonly(t *testing.T) {
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
 	var err error

-	err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", tablets[1].Alias, "rdonly")
+	err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", tablets[1].Alias, "rdonly")
 	require.NoError(t, err)

-	err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", tablets[2].Alias, "rdonly")
+	err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", tablets[2].Alias, "rdonly")
 	require.NoError(t, err)

 	utils.ConfirmReplication(t, tablets[0], tablets[1:])
@@ -248,7 +248,7 @@ func TestERSPromoteRdonly(t *testing.T) {
 	out, err := utils.ErsIgnoreTablet(clusterInstance, nil, "30s", "30s", []*cluster.Vttablet{tablets[3]}, false)
 	require.NotNil(t, err, out)

-	out, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetShard", utils.KeyspaceShard)
+	out, err = clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("GetShard", utils.KeyspaceShard)
 	require.NoError(t, err)
 	require.Contains(t, out, `"uid": 101`, "the primary should still be 101 in the shard info")
 }
@@ -288,16 +288,16 @@ func TestPullFromRdonly(t *testing.T) {
 	// make tablets[1] a rdonly tablet.
 	// rename tablet so that the test is not confusing
 	rdonly := tablets[1]
-	err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", rdonly.Alias, "rdonly")
+	err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", rdonly.Alias, "rdonly")
 	require.NoError(t, err)

 	// confirm that all the tablets can replicate successfully right now
 	utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{rdonly, tablets[2], tablets[3]})

 	// stop replication on the other two tablets
-	err = clusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", tablets[2].Alias)
+	err = clusterInstance.VtctldClientProcess.ExecuteCommand("StopReplication", tablets[2].Alias)
 	require.NoError(t, err)
-	err = clusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", tablets[3].Alias)
+	err = clusterInstance.VtctldClientProcess.ExecuteCommand("StopReplication", tablets[3].Alias)
 	require.NoError(t, err)

 	// stop semi-sync on the primary so that any transaction now added does not require an ack
@@ -311,9 +311,9 @@ func TestPullFromRdonly(t *testing.T) {
 	utils.StopTablet(t, tablets[0], true)

 	// start the replication back on the two tablets
-	err = clusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", tablets[2].Alias)
+	err = clusterInstance.VtctldClientProcess.ExecuteCommand("StartReplication", tablets[2].Alias)
 	require.NoError(t, err)
-	err = clusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", tablets[3].Alias)
+	err = clusterInstance.VtctldClientProcess.ExecuteCommand("StartReplication", tablets[3].Alias)
 	require.NoError(t, err)

 	// check that tablets[2] and tablets[3] still only has 1 value
@@ -349,12 +349,12 @@ func TestNoReplicationStatusAndIOThreadStopped(t *testing.T) {
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
 	utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]})

-	err := clusterInstance.VtctlclientProcess.ExecuteCommand("ExecuteFetchAsDba", tablets[1].Alias, `STOP SLAVE`)
+	err := clusterInstance.VtctldClientProcess.ExecuteCommand("ExecuteFetchAsDBA", tablets[1].Alias, `STOP SLAVE`)
 	require.NoError(t, err)
-	err = clusterInstance.VtctlclientProcess.ExecuteCommand("ExecuteFetchAsDba", tablets[1].Alias, `RESET SLAVE ALL`)
+	err = clusterInstance.VtctldClientProcess.ExecuteCommand("ExecuteFetchAsDBA", tablets[1].Alias, `RESET SLAVE ALL`)
 	require.NoError(t, err)
 	//
-	err = clusterInstance.VtctlclientProcess.ExecuteCommand("ExecuteFetchAsDba", tablets[3].Alias, `STOP SLAVE IO_THREAD;`)
+	err = clusterInstance.VtctldClientProcess.ExecuteCommand("ExecuteFetchAsDBA", tablets[3].Alias, `STOP SLAVE IO_THREAD;`)
 	require.NoError(t, err)
 	// Run an additional command in the current primary which will only be acked by tablets[2] and be in its relay log.
 	insertedVal := utils.ConfirmReplication(t, tablets[0], nil)
@@ -450,7 +450,7 @@ func TestRecoverWithMultipleFailures(t *testing.T) {
 	utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]})

 	// make tablets[1] a rdonly tablet.
-	err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", tablets[1].Alias, "rdonly")
+	err := clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", tablets[1].Alias, "rdonly")
 	require.NoError(t, err)

 	// Confirm that replication is still working as intended
@@ -478,7 +478,7 @@ func TestERSFailFast(t *testing.T) {
 	utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]})

 	// make tablets[1] a rdonly tablet.
-	err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", tablets[1].Alias, "rdonly")
+	err := clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", tablets[1].Alias, "rdonly")
 	require.NoError(t, err)

 	// Confirm that replication is still working as intended
@@ -517,9 +517,9 @@ func TestReplicationStopped(t *testing.T) {
 	tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
 	utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]})

-	err := clusterInstance.VtctlclientProcess.ExecuteCommand("ExecuteFetchAsDba", tablets[1].Alias, `STOP SLAVE SQL_THREAD;`)
+	err := clusterInstance.VtctldClientProcess.ExecuteCommand("ExecuteFetchAsDBA", tablets[1].Alias, `STOP SLAVE SQL_THREAD;`)
 	require.NoError(t, err)
-	err = clusterInstance.VtctlclientProcess.ExecuteCommand("ExecuteFetchAsDba", tablets[2].Alias, `STOP SLAVE;`)
+	err = clusterInstance.VtctldClientProcess.ExecuteCommand("ExecuteFetchAsDBA", tablets[2].Alias, `STOP SLAVE;`)
 	require.NoError(t, err)
 	// Run an additional command in the current primary which will only be acked by tablets[3] and be in its relay log.
 	insertedVal := utils.ConfirmReplication(t, tablets[0], nil)
@@ -528,7 +528,7 @@ func TestReplicationStopped(t *testing.T) {
 	require.Error(t, err, "ERS should fail with 2 replicas having replication stopped")

 	// Start replication back on tablet[1]
-	err = clusterInstance.VtctlclientProcess.ExecuteCommand("ExecuteFetchAsDba", tablets[1].Alias, `START SLAVE;`)
+	err = clusterInstance.VtctldClientProcess.ExecuteCommand("ExecuteFetchAsDBA", tablets[1].Alias, `START SLAVE;`)
 	require.NoError(t, err)
 	// Failover to tablets[3] again. This time it should succeed
 	out, err := utils.Ers(clusterInstance, tablets[3], "60s", "30s")
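Reviewer note: the last recurring rename in this file is a casing fix — vtctlclient's ExecuteFetchAsDba is spelled ExecuteFetchAsDBA under vtctldclient, while the SQL argument is unchanged. A sketch of the new spelling; the wrapper function and alias parameter are illustrative:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"

	"vitess.io/vitess/go/test/endtoend/cluster"
)

// stopReplicationSketch shows the renamed command; the statement still runs
// through the DBA connection on the target tablet, as in the tests above.
func stopReplicationSketch(t *testing.T, c *cluster.LocalProcessCluster, tabletAlias string) {
	err := c.VtctldClientProcess.ExecuteCommand(
		"ExecuteFetchAsDBA", tabletAlias, `STOP SLAVE;`)
	require.NoError(t, err)
}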