Merge pull request #459 from planetscale/create-pull-request/patch
Upgrade Vitess Dependency to Latest
GuptaManan100 authored Aug 24, 2023
2 parents 0fee311 + 8196712 commit 782268c
Showing 4 changed files with 20 additions and 18 deletions.
2 changes: 1 addition & 1 deletion go.mod
@@ -21,7 +21,7 @@ require (
 	sigs.k8s.io/controller-runtime v0.14.3
 	sigs.k8s.io/controller-tools v0.11.3
 	sigs.k8s.io/kustomize v2.0.3+incompatible
-	vitess.io/vitess v0.10.3-0.20230728152621-700e93e59cde
+	vitess.io/vitess v0.10.3-0.20230818175137-5c82d1dd7548
 )

 require (
4 changes: 2 additions & 2 deletions go.sum
@@ -1152,5 +1152,5 @@ sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ih
 sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
 sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
 sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
-vitess.io/vitess v0.10.3-0.20230728152621-700e93e59cde h1:pmTalDAo9XMDZQX5snTILv8SPGxzLZJQtDItckNvm/4=
-vitess.io/vitess v0.10.3-0.20230728152621-700e93e59cde/go.mod h1:gvSNRUp8tcPhzbbyLZX+XQ52lON8A6B55C809R1vHEE=
+vitess.io/vitess v0.10.3-0.20230818175137-5c82d1dd7548 h1:gsm0AUTiaVHr1DJ7ZpW6ESA6AWK+nNQHidRVfV7Pmlc=
+vitess.io/vitess v0.10.3-0.20230818175137-5c82d1dd7548/go.mod h1:gvSNRUp8tcPhzbbyLZX+XQ52lON8A6B55C809R1vHEE=
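(Note, not part of the commit: a Go pseudo-version encodes a base version, a UTC commit timestamp, and a 12-character commit-hash prefix, so the new pin v0.10.3-0.20230818175137-5c82d1dd7548 points at Vitess commit 5c82d1dd7548 committed 2023-08-18 17:51:37 UTC. The PR itself was opened by automation, but an equivalent bump can be reproduced by hand, a sketch run from the module root:)

	go get vitess.io/vitess@5c82d1dd7548
	go mod tidy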
8 changes: 5 additions & 3 deletions pkg/controller/vitessshardreplication/init_restored_shard.go
@@ -27,6 +27,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"

 	"vitess.io/vitess/go/mysql"
+	"vitess.io/vitess/go/mysql/replication"
 	topodatapb "vitess.io/vitess/go/vt/proto/topodata"
 	"vitess.io/vitess/go/vt/topo"
 	"vitess.io/vitess/go/vt/topo/topoproto"
@@ -281,7 +282,7 @@ func electInitialShardPrimary(ctx context.Context, keyspaceName, shardName strin

 type tabletStatus struct {
 	replicationConfigured bool
-	replicationPosition   mysql.Position
+	replicationPosition   replication.Position
 	databaseExists        bool
 	tablet                *topo.TabletInfo
 	err                   error
@@ -312,7 +313,7 @@ func getTabletStatus(ctx context.Context, tmc tmclient.TabletManagerClient, tabl
 		status.err = fmt.Errorf("couldn't get replication position for tablet %v: %v", tabletName, err)
 		return status
 	}
-	status.replicationPosition, err = mysql.DecodePosition(positionStr)
+	status.replicationPosition, err = replication.DecodePosition(positionStr)
 	if err != nil {
 		status.err = fmt.Errorf("couldn't get replication position for tablet %v: %v", tabletName, err)
 		return status
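(Note, not part of the commit: the change above is the pattern repeated throughout this PR, since position parsing moved from vitess.io/vitess/go/mysql into the new vitess.io/vitess/go/mysql/replication package. A minimal standalone sketch of the relocated API; the GTID set literal is illustrative:)

package main

import (
	"fmt"

	"vitess.io/vitess/go/mysql/replication"
)

func main() {
	// DecodePosition parses a flavor-prefixed GTID set string into a Position.
	pos, err := replication.DecodePosition("MySQL56/3e11fa47-71ca-11e1-9e33-c80aa9429562:1-5")
	if err != nil {
		panic(err)
	}
	fmt.Println(pos.String()) // prints the normalized position back out
}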
@@ -344,7 +345,8 @@ func getTabletStatus(ctx context.Context, tmc tmclient.TabletManagerClient, tabl
 // values as well as the latest known value pulled from our build dependency.
 //
 // TODO: Add an officially-supported signal in the Vitess RPC to recognize this
-// important state programmatically.
+//
+// important state programmatically.
 func isErrNotReplica(err error) bool {
 	errString := err.Error()

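(Note, not part of the commit: the isErrNotReplica body is truncated above. Per its comment, it substring-matches the error text against known server messages, both historical values and the latest one pulled from the Vitess dependency. A hypothetical sketch of that shape; the message literals are illustrative, not the operator's actual list:)

package sketch

import "strings"

func isErrNotReplica(err error) bool {
	errString := err.Error()
	// Illustrative candidates: an older MySQL wording plus the current one,
	// which the real code takes from the Vitess build dependency.
	for _, msg := range []string{
		"not configured as a slave",
		"not configured as a replica",
	} {
		if strings.Contains(errString, msg) {
			return true
		}
	}
	return false
}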
24 changes: 12 additions & 12 deletions pkg/controller/vitessshardreplication/reconcile_drain.go
@@ -21,7 +21,7 @@ import (
 	"fmt"
 	"time"

-	"vitess.io/vitess/go/mysql"
+	"vitess.io/vitess/go/mysql/replication"
 	topodatapb "vitess.io/vitess/go/vt/proto/topodata"
 	"vitess.io/vitess/go/vt/topo"
 	"vitess.io/vitess/go/vt/topo/topoproto"
@@ -69,17 +69,17 @@ This operates in four phases:

 We guarantee this invariant:

-- Only one tablet is marked as finished, and once it is, no other tablet will be
-marked as finished until this tablet is deleted or the drain is aborted
-(aborting the drain is considered an emergency situation and our invariant
-could break here).
+  - Only one tablet is marked as finished, and once it is, no other tablet will be
+    marked as finished until this tablet is deleted or the drain is aborted
+    (aborting the drain is considered an emergency situation and our invariant
+    could break here).

 This has implications for these situations:

-- If the shard becomes unhealthy, anything marked as "finished" will stay
-"finished".
-- If the primary is reparented to a "finished" tablet, that tablet will stay
-"finished".
+  - If the shard becomes unhealthy, anything marked as "finished" will stay
+    "finished".
+  - If the primary is reparented to a "finished" tablet, that tablet will stay
+    "finished".

 These are necessary because if we ever remove the "finished" annotation we could
 then later mark something else as "finished".
@@ -429,7 +429,7 @@ func candidatePrimary(ctx context.Context, wr *wrangler.Wrangler, shard *topo.Sh
 			status, err := wr.TabletManagerClient().ReplicationStatus(ctx, tablet.Tablet)
 			result := candidateInfo{tablet: tablet, err: err}
 			if err == nil {
-				result.position, result.err = mysql.DecodePosition(status.Position)
+				result.position, result.err = replication.DecodePosition(status.Position)
 			}
 			results <- result
 		}(tablet)
@@ -438,7 +438,7 @@ func candidatePrimary(ctx context.Context, wr *wrangler.Wrangler, shard *topo.Sh
 	// Read results channel and remember the high point so far.
 	// No one ever closes the results chan, but we know how many to expect.
 	var bestCandidate *topo.TabletInfo
-	var highestPosition mysql.Position
+	var highestPosition replication.Position
 	for range candidates {
 		result := <-results
 		if result.err != nil {
@@ -460,6 +460,6 @@ func candidatePrimary(ctx context.Context, wr *wrangler.Wrangler, shard *topo.Sh

 type candidateInfo struct {
 	tablet   *topo.TabletInfo
-	position mysql.Position
+	position replication.Position
 	err      error
 }
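(Note, not part of the commit: candidatePrimary keeps the highest position seen while draining the results channel. With the relocated package, that comparison is Position.AtLeast, a GTID-set containment check. A small sketch; higherPosition is our name, not the operator's:)

package sketch

import "vitess.io/vitess/go/mysql/replication"

// higherPosition returns whichever position subsumes the other; AtLeast
// reports whether a's GTID set contains every GTID in b's.
func higherPosition(a, b replication.Position) replication.Position {
	if a.AtLeast(b) {
		return a
	}
	return b
}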

