diff --git a/go/vt/vtgate/planbuilder/operator_transformers.go b/go/vt/vtgate/planbuilder/operator_transformers.go index 5f965b55ad9..3974a307e71 100644 --- a/go/vt/vtgate/planbuilder/operator_transformers.go +++ b/go/vt/vtgate/planbuilder/operator_transformers.go @@ -30,13 +30,11 @@ import ( "vitess.io/vitess/go/vt/vtgate/engine/opcode" "vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/vindexes" ) -func transformToLogicalPlan(ctx *plancontext.PlanningContext, op ops.Operator) (logicalPlan, error) { +func transformToLogicalPlan(ctx *plancontext.PlanningContext, op operators.Operator) (logicalPlan, error) { switch op := op.(type) { case *operators.Route: return transformRoutePlan(ctx, op) @@ -190,10 +188,7 @@ func transformSubQuery(ctx *plancontext.PlanningContext, op *operators.SubQuery) return newUncorrelatedSubquery(op.FilterType, op.SubqueryValueName, op.HasValuesName, inner, outer), nil } - lhsCols, err := op.OuterExpressionsNeeded(ctx, op.Outer) - if err != nil { - return nil, err - } + lhsCols := op.OuterExpressionsNeeded(ctx, op.Outer) return newSemiJoin(outer, inner, op.Vars, lhsCols), nil } @@ -251,7 +246,7 @@ func transformAggregator(ctx *plancontext.PlanningContext, op *operators.Aggrega oa.groupByKeys = append(oa.groupByKeys, &engine.GroupByParams{ KeyCol: groupBy.ColOffset, WeightStringCol: groupBy.WSOffset, - Expr: groupBy.AsAliasedExpr().Expr, + Expr: groupBy.SimplifiedExpr, Type: typ, }) } @@ -435,7 +430,7 @@ func routeToEngineRoute(ctx *plancontext.PlanningContext, op *operators.Route, h } rp := newRoutingParams(ctx, op.Routing.OpCode()) - err = op.Routing.UpdateRoutingParams(ctx, rp) + op.Routing.UpdateRoutingParams(ctx, rp) if err != nil { return nil, err } @@ -544,7 +539,7 @@ func buildRouteLogicalPlan(ctx *plancontext.PlanningContext, op *operators.Route } func buildInsertLogicalPlan( - rb *operators.Route, op ops.Operator, stmt *sqlparser.Insert, + rb *operators.Route, op operators.Operator, stmt *sqlparser.Insert, hints *queryHints, ) (logicalPlan, error) { ins := op.(*operators.Insert) @@ -635,16 +630,13 @@ func dmlFormatter(buf *sqlparser.TrackedBuffer, node sqlparser.SQLNode) { func buildUpdateLogicalPlan( ctx *plancontext.PlanningContext, rb *operators.Route, - dmlOp ops.Operator, + dmlOp operators.Operator, stmt *sqlparser.Update, hints *queryHints, ) (logicalPlan, error) { upd := dmlOp.(*operators.Update) rp := newRoutingParams(ctx, rb.Routing.OpCode()) - err := rb.Routing.UpdateRoutingParams(ctx, rp) - if err != nil { - return nil, err - } + rb.Routing.UpdateRoutingParams(ctx, rp) edml := &engine.DML{ Query: generateQuery(stmt), TableNames: []string{upd.VTable.Name.String()}, @@ -670,15 +662,12 @@ func buildUpdateLogicalPlan( func buildDeleteLogicalPlan( ctx *plancontext.PlanningContext, rb *operators.Route, - dmlOp ops.Operator, + dmlOp operators.Operator, hints *queryHints, ) (logicalPlan, error) { del := dmlOp.(*operators.Delete) rp := newRoutingParams(ctx, rb.Routing.OpCode()) - err := rb.Routing.UpdateRoutingParams(ctx, rp) - if err != nil { - return nil, err - } + rb.Routing.UpdateRoutingParams(ctx, rp) edml := &engine.DML{ Query: generateQuery(del.AST), TableNames: []string{del.VTable.Name.String()}, @@ -739,7 +728,7 @@ func updateSelectedVindexPredicate(op *operators.Route) sqlparser.Expr { func 
getAllTableNames(op *operators.Route) ([]string, error) { tableNameMap := map[string]any{} - err := rewrite.Visit(op, func(op ops.Operator) error { + err := operators.Visit(op, func(op operators.Operator) error { tbl, isTbl := op.(*operators.Table) var name string if isTbl { @@ -764,7 +753,7 @@ func getAllTableNames(op *operators.Route) ([]string, error) { } func transformUnionPlan(ctx *plancontext.PlanningContext, op *operators.Union) (logicalPlan, error) { - sources, err := slice.MapWithError(op.Sources, func(src ops.Operator) (logicalPlan, error) { + sources, err := slice.MapWithError(op.Sources, func(src operators.Operator) (logicalPlan, error) { plan, err := transformToLogicalPlan(ctx, src) if err != nil { return nil, err diff --git a/go/vt/vtgate/planbuilder/operators/SQL_builder.go b/go/vt/vtgate/planbuilder/operators/SQL_builder.go index 5201818951d..961a7d252ff 100644 --- a/go/vt/vtgate/planbuilder/operators/SQL_builder.go +++ b/go/vt/vtgate/planbuilder/operators/SQL_builder.go @@ -23,7 +23,6 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) @@ -33,7 +32,7 @@ type ( ctx *plancontext.PlanningContext stmt sqlparser.Statement tableNames []string - dmlOperator ops.Operator + dmlOperator Operator } ) @@ -41,7 +40,7 @@ func (qb *queryBuilder) asSelectStatement() sqlparser.SelectStatement { return qb.stmt.(sqlparser.SelectStatement) } -func ToSQL(ctx *plancontext.PlanningContext, op ops.Operator) (_ sqlparser.Statement, _ ops.Operator, err error) { +func ToSQL(ctx *plancontext.PlanningContext, op Operator) (_ sqlparser.Statement, _ Operator, err error) { defer PanicHandler(&err) q := &queryBuilder{ctx: ctx} @@ -347,7 +346,7 @@ func stripDownQuery(from, to sqlparser.SelectStatement) { } // buildQuery recursively builds the query into an AST, from an operator tree -func buildQuery(op ops.Operator, qb *queryBuilder) { +func buildQuery(op Operator, qb *queryBuilder) { switch op := op.(type) { case *Table: buildTable(op, qb) @@ -415,7 +414,7 @@ func buildUpdate(op *Update, qb *queryBuilder) { } type OpWithAST interface { - ops.Operator + Operator Statement() sqlparser.Statement } diff --git a/go/vt/vtgate/planbuilder/operators/aggregation_pushing.go b/go/vt/vtgate/planbuilder/operators/aggregation_pushing.go index edba5c51256..e50483ce8d2 100644 --- a/go/vt/vtgate/planbuilder/operators/aggregation_pushing.go +++ b/go/vt/vtgate/planbuilder/operators/aggregation_pushing.go @@ -24,25 +24,23 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine/opcode" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) -func tryPushAggregator(ctx *plancontext.PlanningContext, aggregator *Aggregator) (output ops.Operator, applyResult *rewrite.ApplyResult, err error) { +func tryPushAggregator(ctx *plancontext.PlanningContext, aggregator *Aggregator) (output Operator, applyResult *ApplyResult) { if aggregator.Pushed { - return aggregator, rewrite.SameTree, nil + return aggregator, NoRewrite } // this rewrite is always valid, and we should do it whenever possible if route, ok := aggregator.Source.(*Route); ok && (route.IsSingleShard() || overlappingUniqueVindex(ctx, aggregator.Grouping)) { - 
return rewrite.Swap(aggregator, route, "push down aggregation under route - remove original") + return Swap(aggregator, route, "push down aggregation under route - remove original") } // other rewrites require us to have reached this phase before we can consider them if !reachedPhase(ctx, delegateAggregation) { - return aggregator, rewrite.SameTree, nil + return aggregator, NoRewrite } // if we have not yet been able to push this aggregation down, @@ -54,23 +52,19 @@ func tryPushAggregator(ctx *plancontext.PlanningContext, aggregator *Aggregator) switch src := aggregator.Source.(type) { case *Route: // if we have a single sharded route, we can push it down - output, applyResult, err = pushAggregationThroughRoute(ctx, aggregator, src) + output, applyResult = pushAggregationThroughRoute(ctx, aggregator, src) case *ApplyJoin: - output, applyResult, err = pushAggregationThroughJoin(ctx, aggregator, src) + output, applyResult = pushAggregationThroughJoin(ctx, aggregator, src) case *Filter: - output, applyResult, err = pushAggregationThroughFilter(ctx, aggregator, src) + output, applyResult = pushAggregationThroughFilter(ctx, aggregator, src) case *SubQueryContainer: - output, applyResult, err = pushAggregationThroughSubquery(ctx, aggregator, src) + output, applyResult = pushAggregationThroughSubquery(ctx, aggregator, src) default: - return aggregator, rewrite.SameTree, nil - } - - if err != nil { - return nil, nil, err + return aggregator, NoRewrite } if output == nil { - return aggregator, rewrite.SameTree, nil + return aggregator, NoRewrite } aggregator.Pushed = true @@ -92,16 +86,13 @@ func pushAggregationThroughSubquery( ctx *plancontext.PlanningContext, rootAggr *Aggregator, src *SubQueryContainer, -) (ops.Operator, *rewrite.ApplyResult, error) { - pushedAggr := rootAggr.Clone([]ops.Operator{src.Outer}).(*Aggregator) +) (Operator, *ApplyResult) { + pushedAggr := rootAggr.Clone([]Operator{src.Outer}).(*Aggregator) pushedAggr.Original = false pushedAggr.Pushed = false for _, subQuery := range src.Inner { - lhsCols, err := subQuery.OuterExpressionsNeeded(ctx, src.Outer) - if err != nil { - return nil, nil, err - } + lhsCols := subQuery.OuterExpressionsNeeded(ctx, src.Outer) for _, colName := range lhsCols { idx := slices.IndexFunc(pushedAggr.Columns, func(ae *sqlparser.AliasedExpr) bool { return ctx.SemTable.EqualsExpr(ae.Expr, colName) @@ -116,12 +107,12 @@ func pushAggregationThroughSubquery( src.Outer = pushedAggr if !rootAggr.Original { - return src, rewrite.NewTree("push Aggregation under subquery - keep original"), nil + return src, Rewrote("push Aggregation under subquery - keep original") } rootAggr.aggregateTheAggregates() - return rootAggr, rewrite.NewTree("push Aggregation under subquery"), nil + return rootAggr, Rewrote("push Aggregation under subquery") } func (a *Aggregator) aggregateTheAggregates() { @@ -145,15 +136,12 @@ func pushAggregationThroughRoute( ctx *plancontext.PlanningContext, aggregator *Aggregator, route *Route, -) (ops.Operator, *rewrite.ApplyResult, error) { +) (Operator, *ApplyResult) { // Create a new aggregator to be placed below the route. aggrBelowRoute := aggregator.SplitAggregatorBelowRoute(route.Inputs()) aggrBelowRoute.Aggregations = nil - err := pushAggregations(ctx, aggregator, aggrBelowRoute) - if err != nil { - return nil, nil, err - } + pushAggregations(ctx, aggregator, aggrBelowRoute) // Set the source of the route to the new aggregator placed below the route. 
route.Source = aggrBelowRoute @@ -161,18 +149,15 @@ func pushAggregationThroughRoute( if !aggregator.Original { // we only keep the root aggregation, if this aggregator was created // by splitting one and pushing under a join, we can get rid of this one - return aggregator.Source, rewrite.NewTree("push aggregation under route - remove original"), nil + return aggregator.Source, Rewrote("push aggregation under route - remove original") } - return aggregator, rewrite.NewTree("push aggregation under route - keep original"), nil + return aggregator, Rewrote("push aggregation under route - keep original") } // pushAggregations splits aggregations between the original aggregator and the one we are pushing down -func pushAggregations(ctx *plancontext.PlanningContext, aggregator *Aggregator, aggrBelowRoute *Aggregator) error { - canPushDistinctAggr, distinctExpr, err := checkIfWeCanPush(ctx, aggregator) - if err != nil { - return err - } +func pushAggregations(ctx *plancontext.PlanningContext, aggregator *Aggregator, aggrBelowRoute *Aggregator) { + canPushDistinctAggr, distinctExpr := checkIfWeCanPush(ctx, aggregator) distinctAggrGroupByAdded := false @@ -192,7 +177,7 @@ func pushAggregations(ctx *plancontext.PlanningContext, aggregator *Aggregator, // doing the aggregating on the vtgate level instead // Adding to group by can be done only once even though there are multiple distinct aggregation with same expression. if !distinctAggrGroupByAdded { - groupBy := NewGroupBy(distinctExpr, distinctExpr, aeDistinctExpr) + groupBy := NewGroupBy(distinctExpr, distinctExpr) groupBy.ColOffset = aggr.ColOffset aggrBelowRoute.Grouping = append(aggrBelowRoute.Grouping, groupBy) distinctAggrGroupByAdded = true @@ -202,11 +187,9 @@ func pushAggregations(ctx *plancontext.PlanningContext, aggregator *Aggregator, if !canPushDistinctAggr { aggregator.DistinctExpr = distinctExpr } - - return nil } -func checkIfWeCanPush(ctx *plancontext.PlanningContext, aggregator *Aggregator) (bool, sqlparser.Expr, error) { +func checkIfWeCanPush(ctx *plancontext.PlanningContext, aggregator *Aggregator) (bool, sqlparser.Expr) { canPush := true var distinctExpr sqlparser.Expr var differentExpr *sqlparser.AliasedExpr @@ -229,22 +212,22 @@ func checkIfWeCanPush(ctx *plancontext.PlanningContext, aggregator *Aggregator) } if !canPush && differentExpr != nil { - return false, nil, vterrors.VT12001(fmt.Sprintf("only one DISTINCT aggregation is allowed in a SELECT: %s", sqlparser.String(differentExpr))) + panic(vterrors.VT12001(fmt.Sprintf("only one DISTINCT aggregation is allowed in a SELECT: %s", sqlparser.String(differentExpr)))) } - return canPush, distinctExpr, nil + return canPush, distinctExpr } func pushAggregationThroughFilter( ctx *plancontext.PlanningContext, aggregator *Aggregator, filter *Filter, -) (ops.Operator, *rewrite.ApplyResult, error) { +) (Operator, *ApplyResult) { columnsNeeded := collectColNamesNeeded(ctx, filter) // Create a new aggregator to be placed below the route. 
- pushedAggr := aggregator.Clone([]ops.Operator{filter.Source}).(*Aggregator) + pushedAggr := aggregator.Clone([]Operator{filter.Source}).(*Aggregator) pushedAggr.Pushed = false pushedAggr.Original = false @@ -264,10 +247,10 @@ withNextColumn: if !aggregator.Original { // we only keep the root aggregation, if this aggregator was created // by splitting one and pushing under a join, we can get rid of this one - return aggregator.Source, rewrite.NewTree("push aggregation under filter - remove original"), nil + return aggregator.Source, Rewrote("push aggregation under filter - remove original") } aggregator.aggregateTheAggregates() - return aggregator, rewrite.NewTree("push aggregation under filter - keep original"), nil + return aggregator, Rewrote("push aggregation under filter - keep original") } func collectColNamesNeeded(ctx *plancontext.PlanningContext, f *Filter) (columnsNeeded []*sqlparser.ColName) { @@ -363,7 +346,7 @@ Transformed: / \ R1 R2 */ -func pushAggregationThroughJoin(ctx *plancontext.PlanningContext, rootAggr *Aggregator, join *ApplyJoin) (ops.Operator, *rewrite.ApplyResult, error) { +func pushAggregationThroughJoin(ctx *plancontext.PlanningContext, rootAggr *Aggregator, join *ApplyJoin) (Operator, *ApplyResult) { lhs := &joinPusher{ orig: rootAggr, pushed: &Aggregator{ @@ -387,23 +370,17 @@ func pushAggregationThroughJoin(ctx *plancontext.PlanningContext, rootAggr *Aggr if err != nil { // if we get this error, we just abort the splitting and fall back on simpler ways of solving the same query if errors.Is(err, errAbortAggrPushing) { - return nil, nil, nil + return nil, nil } - return nil, nil, err + panic(err) } - groupingJCs, err := splitGroupingToLeftAndRight(ctx, rootAggr, lhs, rhs) - if err != nil { - return nil, nil, err - } + groupingJCs := splitGroupingToLeftAndRight(ctx, rootAggr, lhs, rhs) joinColumns = append(joinColumns, groupingJCs...) 
// We need to add any columns coming from the lhs of the join to the group by on that side // If we don't, the LHS will not be able to return the column, and it can't be used to send down to the RHS - err = addColumnsFromLHSInJoinPredicates(ctx, rootAggr, join, lhs) - if err != nil { - return nil, nil, err - } + addColumnsFromLHSInJoinPredicates(ctx, rootAggr, join, lhs) join.LHS, join.RHS = lhs.pushed, rhs.pushed join.JoinColumns = joinColumns @@ -411,23 +388,23 @@ func pushAggregationThroughJoin(ctx *plancontext.PlanningContext, rootAggr *Aggr if !rootAggr.Original { // we only keep the root aggregation, if this aggregator was created // by splitting one and pushing under a join, we can get rid of this one - return output, rewrite.NewTree("push Aggregation under join - keep original"), nil + return output, Rewrote("push Aggregation under join - keep original") } rootAggr.aggregateTheAggregates() rootAggr.Source = output - return rootAggr, rewrite.NewTree("push Aggregation under join"), nil + return rootAggr, Rewrote("push Aggregation under join") } var errAbortAggrPushing = fmt.Errorf("abort aggregation pushing") -func addColumnsFromLHSInJoinPredicates(ctx *plancontext.PlanningContext, rootAggr *Aggregator, join *ApplyJoin, lhs *joinPusher) error { +func addColumnsFromLHSInJoinPredicates(ctx *plancontext.PlanningContext, rootAggr *Aggregator, join *ApplyJoin, lhs *joinPusher) { for _, pred := range join.JoinPredicates { for _, bve := range pred.LHSExprs { expr := bve.Expr wexpr, err := rootAggr.QP.GetSimplifiedExpr(ctx, expr) if err != nil { - return err + panic(err) } idx, found := canReuseColumn(ctx, lhs.pushed.Columns, expr, extractExpr) if !found { @@ -450,10 +427,9 @@ func addColumnsFromLHSInJoinPredicates(ctx *plancontext.PlanningContext, rootAgg }) } } - return nil } -func splitGroupingToLeftAndRight(ctx *plancontext.PlanningContext, rootAggr *Aggregator, lhs, rhs *joinPusher) ([]JoinColumn, error) { +func splitGroupingToLeftAndRight(ctx *plancontext.PlanningContext, rootAggr *Aggregator, lhs, rhs *joinPusher) []JoinColumn { var groupingJCs []JoinColumn for _, groupBy := range rootAggr.Grouping { @@ -463,30 +439,27 @@ func splitGroupingToLeftAndRight(ctx *plancontext.PlanningContext, rootAggr *Agg case deps.IsSolvedBy(lhs.tableID): lhs.addGrouping(ctx, groupBy) groupingJCs = append(groupingJCs, JoinColumn{ - Original: aeWrap(groupBy.Inner), + Original: groupBy.Inner, LHSExprs: []BindVarExpr{{Expr: expr}}, }) case deps.IsSolvedBy(rhs.tableID): rhs.addGrouping(ctx, groupBy) groupingJCs = append(groupingJCs, JoinColumn{ - Original: aeWrap(groupBy.Inner), + Original: groupBy.Inner, RHSExpr: expr, }) case deps.IsSolvedBy(lhs.tableID.Merge(rhs.tableID)): - jc, err := breakExpressionInLHSandRHSForApplyJoin(ctx, groupBy.SimplifiedExpr, lhs.tableID) - if err != nil { - return nil, err - } + jc := breakExpressionInLHSandRHSForApplyJoin(ctx, groupBy.SimplifiedExpr, lhs.tableID) for _, lhsExpr := range jc.LHSExprs { e := lhsExpr.Expr - lhs.addGrouping(ctx, NewGroupBy(e, e, aeWrap(e))) + lhs.addGrouping(ctx, NewGroupBy(e, e)) } - rhs.addGrouping(ctx, NewGroupBy(jc.RHSExpr, jc.RHSExpr, aeWrap(jc.RHSExpr))) + rhs.addGrouping(ctx, NewGroupBy(jc.RHSExpr, jc.RHSExpr)) default: - return nil, vterrors.VT13001(fmt.Sprintf("grouping with bad dependencies %s", groupBy.SimplifiedExpr)) + panic(vterrors.VT13001(fmt.Sprintf("grouping with bad dependencies %s", groupBy.SimplifiedExpr))) } } - return groupingJCs, nil + return groupingJCs } // splitAggrColumnsToLeftAndRight pushes all aggregations on the 
aggregator above a join and @@ -497,7 +470,7 @@ func splitAggrColumnsToLeftAndRight( aggregator *Aggregator, join *ApplyJoin, lhs, rhs *joinPusher, -) ([]JoinColumn, ops.Operator, error) { +) ([]JoinColumn, Operator, error) { proj := newAliasedProjection(join) proj.FromAggr = true builder := &aggBuilder{ @@ -507,10 +480,7 @@ func splitAggrColumnsToLeftAndRight( outerJoin: join.LeftJoin, } - canPushDistinctAggr, distinctExpr, err := checkIfWeCanPush(ctx, aggregator) - if err != nil { - return nil, nil, err - } + canPushDistinctAggr, distinctExpr := checkIfWeCanPush(ctx, aggregator) // Distinct aggregation cannot be pushed down in the join. // We keep node of the distinct aggregation expression to be used later for ordering. @@ -531,10 +501,7 @@ outer: continue outer } } - _, err := builder.proj.addUnexploredExpr(col, col.Expr) - if err != nil { - return nil, nil, err - } + builder.proj.addUnexploredExpr(col, col.Expr) } return builder.joinColumns, builder.proj, nil } @@ -566,7 +533,7 @@ func (ab *aggBuilder) leftCountStar(ctx *plancontext.PlanningContext) *sqlparser ae, created := ab.lhs.countStar(ctx) if created { ab.joinColumns = append(ab.joinColumns, JoinColumn{ - Original: ae, + Original: ae.Expr, LHSExprs: []BindVarExpr{{Expr: ae.Expr}}, }) } @@ -577,7 +544,7 @@ func (ab *aggBuilder) rightCountStar(ctx *plancontext.PlanningContext) *sqlparse ae, created := ab.rhs.countStar(ctx) if created { ab.joinColumns = append(ab.joinColumns, JoinColumn{ - Original: ae, + Original: ae.Expr, RHSExpr: ae.Expr, }) } @@ -599,7 +566,8 @@ func (p *joinPusher) countStar(ctx *plancontext.PlanningContext) (*sqlparser.Ali func (ab *aggBuilder) handleAggr(ctx *plancontext.PlanningContext, aggr Aggr) error { switch aggr.OpCode { case opcode.AggregateCountStar: - return ab.handleCountStar(ctx, aggr) + ab.handleCountStar(ctx, aggr) + return nil case opcode.AggregateCount, opcode.AggregateSum: return ab.handleAggrWithCountStarMultiplier(ctx, aggr) case opcode.AggregateMax, opcode.AggregateMin, opcode.AggregateAnyValue: @@ -632,7 +600,7 @@ func (ab *aggBuilder) handleAggr(ctx *plancontext.PlanningContext, aggr Aggr) er func (ab *aggBuilder) pushThroughLeft(aggr Aggr) { ab.lhs.pushThroughAggr(aggr) ab.joinColumns = append(ab.joinColumns, JoinColumn{ - Original: aggr.Original, + Original: aggr.Original.Expr, LHSExprs: []BindVarExpr{{Expr: aggr.Original.Expr}}, }) } @@ -640,16 +608,13 @@ func (ab *aggBuilder) pushThroughLeft(aggr Aggr) { func (ab *aggBuilder) pushThroughRight(aggr Aggr) { ab.rhs.pushThroughAggr(aggr) ab.joinColumns = append(ab.joinColumns, JoinColumn{ - Original: aggr.Original, + Original: aggr.Original.Expr, RHSExpr: aggr.Original.Expr, }) } func (ab *aggBuilder) handlePushThroughAggregation(ctx *plancontext.PlanningContext, aggr Aggr) error { - _, err := ab.proj.addUnexploredExpr(aggr.Original, aggr.Original.Expr) - if err != nil { - return err - } + ab.proj.addUnexploredExpr(aggr.Original, aggr.Original.Expr) deps := ctx.SemTable.RecursiveDeps(aggr.Original.Expr) switch { @@ -663,12 +628,12 @@ func (ab *aggBuilder) handlePushThroughAggregation(ctx *plancontext.PlanningCont return nil } -func (ab *aggBuilder) handleCountStar(ctx *plancontext.PlanningContext, aggr Aggr) error { +func (ab *aggBuilder) handleCountStar(ctx *plancontext.PlanningContext, aggr Aggr) { // Add the aggregate to both sides of the join. 
lhsAE := ab.leftCountStar(ctx) rhsAE := ab.rightCountStar(ctx) - return ab.buildProjectionForAggr(lhsAE, rhsAE, aggr, true) + ab.buildProjectionForAggr(lhsAE, rhsAE, aggr, true) } func (ab *aggBuilder) handleAggrWithCountStarMultiplier(ctx *plancontext.PlanningContext, aggr Aggr) error { @@ -694,10 +659,11 @@ func (ab *aggBuilder) handleAggrWithCountStarMultiplier(ctx *plancontext.Plannin return errAbortAggrPushing } - return ab.buildProjectionForAggr(lhsAE, rhsAE, aggr, addCoalesce) + ab.buildProjectionForAggr(lhsAE, rhsAE, aggr, addCoalesce) + return nil } -func (ab *aggBuilder) buildProjectionForAggr(lhsAE *sqlparser.AliasedExpr, rhsAE *sqlparser.AliasedExpr, aggr Aggr, coalesce bool) error { +func (ab *aggBuilder) buildProjectionForAggr(lhsAE *sqlparser.AliasedExpr, rhsAE *sqlparser.AliasedExpr, aggr Aggr, coalesce bool) { // We expect the expressions to be different on each side of the join, otherwise it's an error. if lhsAE.Expr == rhsAE.Expr { panic(fmt.Sprintf("Need the two produced expressions to be different. %T %T", lhsAE, rhsAE)) @@ -726,8 +692,7 @@ func (ab *aggBuilder) buildProjectionForAggr(lhsAE *sqlparser.AliasedExpr, rhsAE As: sqlparser.NewIdentifierCI(aggr.Original.ColumnName()), } - _, err := ab.proj.addUnexploredExpr(projAE, projExpr) - return err + ab.proj.addUnexploredExpr(projAE, projExpr) } func coalesceFunc(e sqlparser.Expr) sqlparser.Expr { @@ -820,7 +785,7 @@ func needAvgBreaking(aggrs []Aggr) bool { // splitAvgAggregations takes an aggregator that has AVG aggregations in it and splits // these into sum/count expressions that can be spread out to shards -func splitAvgAggregations(ctx *plancontext.PlanningContext, aggr *Aggregator) (ops.Operator, *rewrite.ApplyResult, error) { +func splitAvgAggregations(ctx *plancontext.PlanningContext, aggr *Aggregator) (Operator, *ApplyResult) { proj := newAliasedProjection(aggr) var columns []*sqlparser.AliasedExpr @@ -848,10 +813,7 @@ func splitAvgAggregations(ctx *plancontext.PlanningContext, aggr *Aggregator) (o outputColumn := aeWrap(col.Expr) outputColumn.As = sqlparser.NewIdentifierCI(col.ColumnName()) - _, err := proj.addUnexploredExpr(sqlparser.CloneRefOfAliasedExpr(col), calcExpr) - if err != nil { - return nil, nil, err - } + proj.addUnexploredExpr(sqlparser.CloneRefOfAliasedExpr(col), calcExpr) col.Expr = sumExpr found := false for aggrOffset, aggregation := range aggr.Aggregations { @@ -877,5 +839,5 @@ func splitAvgAggregations(ctx *plancontext.PlanningContext, aggr *Aggregator) (o aggr.Columns = append(aggr.Columns, columns...) aggr.Aggregations = append(aggr.Aggregations, aggregations...) 
- return proj, rewrite.NewTree("split avg aggregation"), nil + return proj, Rewrote("split avg aggregation") } diff --git a/go/vt/vtgate/planbuilder/operators/aggregator.go b/go/vt/vtgate/planbuilder/operators/aggregator.go index e1848752e75..6c07343498b 100644 --- a/go/vt/vtgate/planbuilder/operators/aggregator.go +++ b/go/vt/vtgate/planbuilder/operators/aggregator.go @@ -25,7 +25,6 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine/opcode" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) @@ -35,7 +34,7 @@ type ( // Both all aggregations and no grouping, and the inverse // of all grouping and no aggregations are valid configurations of this operator Aggregator struct { - Source ops.Operator + Source Operator Columns []*sqlparser.AliasedExpr Grouping []GroupBy @@ -60,7 +59,7 @@ type ( } ) -func (a *Aggregator) Clone(inputs []ops.Operator) ops.Operator { +func (a *Aggregator) Clone(inputs []Operator) Operator { kopy := *a kopy.Source = inputs[0] kopy.Columns = slices.Clone(a.Columns) @@ -69,18 +68,18 @@ func (a *Aggregator) Clone(inputs []ops.Operator) ops.Operator { return &kopy } -func (a *Aggregator) Inputs() []ops.Operator { - return []ops.Operator{a.Source} +func (a *Aggregator) Inputs() []Operator { + return []Operator{a.Source} } -func (a *Aggregator) SetInputs(operators []ops.Operator) { +func (a *Aggregator) SetInputs(operators []Operator) { if len(operators) != 1 { panic(fmt.Sprintf("unexpected number of operators as input in aggregator: %d", len(operators))) } a.Source = operators[0] } -func (a *Aggregator) AddPredicate(_ *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (a *Aggregator) AddPredicate(_ *plancontext.PlanningContext, expr sqlparser.Expr) Operator { return &Filter{ Source: a, Predicates: []sqlparser.Expr{expr}, @@ -92,7 +91,7 @@ func (a *Aggregator) addColumnWithoutPushing(ctx *plancontext.PlanningContext, e a.Columns = append(a.Columns, expr) if addToGroupBy { - groupBy := NewGroupBy(expr.Expr, expr.Expr, expr) + groupBy := NewGroupBy(expr.Expr, expr.Expr) groupBy.ColOffset = offset a.Grouping = append(a.Grouping, groupBy) } else { @@ -193,12 +192,6 @@ func (a *Aggregator) findColInternal(ctx *plancontext.PlanningContext, ae *sqlpa if offset, found := canReuseColumn(ctx, a.Columns, expr, extractExpr); found { return offset } - colName, isColName := expr.(*sqlparser.ColName) - for i, col := range a.Columns { - if isColName && colName.Name.EqualString(col.As.String()) { - return i - } - } if addToGroupBy { panic(vterrors.VT13001(fmt.Sprintf("did not expect to add group by here: %s", sqlparser.String(expr)))) @@ -254,11 +247,11 @@ func (a *Aggregator) ShortDescription() string { return fmt.Sprintf("%s%s group by %s", org, strings.Join(columns, ", "), strings.Join(grouping, ",")) } -func (a *Aggregator) GetOrdering(ctx *plancontext.PlanningContext) []ops.OrderBy { +func (a *Aggregator) GetOrdering(ctx *plancontext.PlanningContext) []OrderBy { return a.Source.GetOrdering(ctx) } -func (a *Aggregator) planOffsets(ctx *plancontext.PlanningContext) ops.Operator { +func (a *Aggregator) planOffsets(ctx *plancontext.PlanningContext) Operator { if a.offsetPlanned { return nil } @@ -408,7 +401,7 @@ func (a *Aggregator) internalAddColumn(ctx *plancontext.PlanningContext, aliased // SplitAggregatorBelowRoute returns the aggregator that will live under the Route. 
// This is used when we are splitting the aggregation so one part is done // at the mysql level and one part at the vtgate level -func (a *Aggregator) SplitAggregatorBelowRoute(input []ops.Operator) *Aggregator { +func (a *Aggregator) SplitAggregatorBelowRoute(input []Operator) *Aggregator { newOp := a.Clone(input).(*Aggregator) newOp.Pushed = false newOp.Original = false @@ -420,4 +413,4 @@ func (a *Aggregator) introducesTableID() semantics.TableSet { return a.DT.introducesTableID() } -var _ ops.Operator = (*Aggregator)(nil) +var _ Operator = (*Aggregator)(nil) diff --git a/go/vt/vtgate/planbuilder/operators/apply_join.go b/go/vt/vtgate/planbuilder/operators/apply_join.go index 95d7d962738..7e2c100c944 100644 --- a/go/vt/vtgate/planbuilder/operators/apply_join.go +++ b/go/vt/vtgate/planbuilder/operators/apply_join.go @@ -25,7 +25,6 @@ import ( "vitess.io/vitess/go/slice" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) @@ -33,7 +32,7 @@ type ( // ApplyJoin is a nested loop join - for each row on the LHS, // we'll execute the plan on the RHS, feeding data from left to right ApplyJoin struct { - LHS, RHS ops.Operator + LHS, RHS Operator // LeftJoin will be true in the case of an outer join LeftJoin bool @@ -72,7 +71,7 @@ type ( // so they can be used for the result of this expression that is using data from both sides. // All fields will be used for these JoinColumn struct { - Original *sqlparser.AliasedExpr // this is the original expression being passed through + Original sqlparser.Expr // this is the original expression being passed through LHSExprs []BindVarExpr RHSExpr sqlparser.Expr GroupBy bool // if this is true, we need to push this down to our inputs with addToGroupBy set to true @@ -86,7 +85,7 @@ type ( } ) -func NewApplyJoin(lhs, rhs ops.Operator, predicate sqlparser.Expr, leftOuterJoin bool) *ApplyJoin { +func NewApplyJoin(lhs, rhs Operator, predicate sqlparser.Expr, leftOuterJoin bool) *ApplyJoin { return &ApplyJoin{ LHS: lhs, RHS: rhs, @@ -97,7 +96,7 @@ func NewApplyJoin(lhs, rhs ops.Operator, predicate sqlparser.Expr, leftOuterJoin } // Clone implements the Operator interface -func (aj *ApplyJoin) Clone(inputs []ops.Operator) ops.Operator { +func (aj *ApplyJoin) Clone(inputs []Operator) Operator { kopy := *aj kopy.LHS = inputs[0] kopy.RHS = inputs[1] @@ -110,33 +109,33 @@ func (aj *ApplyJoin) Clone(inputs []ops.Operator) ops.Operator { return &kopy } -func (aj *ApplyJoin) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (aj *ApplyJoin) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { return AddPredicate(ctx, aj, expr, false, newFilter) } // Inputs implements the Operator interface -func (aj *ApplyJoin) Inputs() []ops.Operator { - return []ops.Operator{aj.LHS, aj.RHS} +func (aj *ApplyJoin) Inputs() []Operator { + return []Operator{aj.LHS, aj.RHS} } // SetInputs implements the Operator interface -func (aj *ApplyJoin) SetInputs(inputs []ops.Operator) { +func (aj *ApplyJoin) SetInputs(inputs []Operator) { aj.LHS, aj.RHS = inputs[0], inputs[1] } -func (aj *ApplyJoin) GetLHS() ops.Operator { +func (aj *ApplyJoin) GetLHS() Operator { return aj.LHS } -func (aj *ApplyJoin) GetRHS() ops.Operator { +func (aj *ApplyJoin) GetRHS() Operator { return aj.RHS } -func (aj *ApplyJoin) SetLHS(operator ops.Operator) { +func (aj *ApplyJoin) SetLHS(operator Operator) { aj.LHS = operator } 
-func (aj *ApplyJoin) SetRHS(operator ops.Operator) { +func (aj *ApplyJoin) SetRHS(operator Operator) { aj.RHS = operator } @@ -151,10 +150,7 @@ func (aj *ApplyJoin) IsInner() bool { func (aj *ApplyJoin) AddJoinPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) { aj.Predicate = ctx.SemTable.AndExpressions(expr, aj.Predicate) - col, err := breakExpressionInLHSandRHSForApplyJoin(ctx, expr, TableID(aj.LHS)) - if err != nil { - panic(err) - } + col := breakExpressionInLHSandRHSForApplyJoin(ctx, expr, TableID(aj.LHS)) aj.JoinPredicates = append(aj.JoinPredicates, col) rhs := aj.RHS.AddPredicate(ctx, col.RHSExpr) aj.RHS = rhs @@ -173,21 +169,21 @@ func (aj *ApplyJoin) GetSelectExprs(ctx *plancontext.PlanningContext) sqlparser. return transformColumnsToSelectExprs(ctx, aj) } -func (aj *ApplyJoin) GetOrdering(ctx *plancontext.PlanningContext) []ops.OrderBy { +func (aj *ApplyJoin) GetOrdering(ctx *plancontext.PlanningContext) []OrderBy { return aj.LHS.GetOrdering(ctx) } func joinColumnToAliasedExpr(c JoinColumn) *sqlparser.AliasedExpr { - return c.Original + return aeWrap(c.Original) } func joinColumnToExpr(column JoinColumn) sqlparser.Expr { - return column.Original.Expr + return column.Original } -func (aj *ApplyJoin) getJoinColumnFor(ctx *plancontext.PlanningContext, orig *sqlparser.AliasedExpr, e sqlparser.Expr, addToGroupBy bool) (col JoinColumn, err error) { +func (aj *ApplyJoin) getJoinColumnFor(ctx *plancontext.PlanningContext, orig *sqlparser.AliasedExpr, e sqlparser.Expr, addToGroupBy bool) (col JoinColumn) { defer func() { - col.Original = orig + col.Original = orig.Expr }() lhs := TableID(aj.LHS) rhs := TableID(aj.RHS) @@ -201,12 +197,9 @@ func (aj *ApplyJoin) getJoinColumnFor(ctx *plancontext.PlanningContext, orig *sq case deps.IsSolvedBy(rhs): col.RHSExpr = e case deps.IsSolvedBy(both): - col, err = breakExpressionInLHSandRHSForApplyJoin(ctx, e, TableID(aj.LHS)) - if err != nil { - return JoinColumn{}, err - } + col = breakExpressionInLHSandRHSForApplyJoin(ctx, e, TableID(aj.LHS)) default: - return JoinColumn{}, vterrors.VT13002(sqlparser.String(e)) + panic(vterrors.VT13002(sqlparser.String(e))) } return @@ -232,16 +225,13 @@ func (aj *ApplyJoin) AddColumn( return offset } } - col, err := aj.getJoinColumnFor(ctx, expr, expr.Expr, groupBy) - if err != nil { - panic(err) - } + col := aj.getJoinColumnFor(ctx, expr, expr.Expr, groupBy) offset := len(aj.JoinColumns) aj.JoinColumns = append(aj.JoinColumns, col) return offset } -func (aj *ApplyJoin) planOffsets(ctx *plancontext.PlanningContext) ops.Operator { +func (aj *ApplyJoin) planOffsets(ctx *plancontext.PlanningContext) Operator { for _, col := range aj.JoinColumns { // Read the type description for JoinColumn to understand the following code for _, lhsExpr := range col.LHSExprs { diff --git a/go/vt/vtgate/planbuilder/operators/ast_to_op.go b/go/vt/vtgate/planbuilder/operators/ast_to_op.go index f6acbadd35a..f8c8891f8f9 100644 --- a/go/vt/vtgate/planbuilder/operators/ast_to_op.go +++ b/go/vt/vtgate/planbuilder/operators/ast_to_op.go @@ -21,7 +21,6 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" @@ -31,39 +30,28 @@ const foreignKeyConstraintValues = "fkc_vals" const foreignKeyUpdateExpr = "fkc_upd" // translateQueryToOp creates an operator tree that represents the input SELECT or UNION query 
-func translateQueryToOp(ctx *plancontext.PlanningContext, selStmt sqlparser.Statement) (op ops.Operator, err error) { +func translateQueryToOp(ctx *plancontext.PlanningContext, selStmt sqlparser.Statement) Operator { switch node := selStmt.(type) { case *sqlparser.Select: - op, err = createOperatorFromSelect(ctx, node) + return createOperatorFromSelect(ctx, node) case *sqlparser.Union: - op, err = createOperatorFromUnion(ctx, node) + return createOperatorFromUnion(ctx, node) case *sqlparser.Update: - op, err = createOperatorFromUpdate(ctx, node) + return createOperatorFromUpdate(ctx, node) case *sqlparser.Delete: - op, err = createOperatorFromDelete(ctx, node) + return createOperatorFromDelete(ctx, node) case *sqlparser.Insert: - op, err = createOperatorFromInsert(ctx, node) + return createOperatorFromInsert(ctx, node) default: - err = vterrors.VT12001(fmt.Sprintf("operator: %T", selStmt)) + panic(vterrors.VT12001(fmt.Sprintf("operator: %T", selStmt))) } - if err != nil { - return nil, err - } - - return op, nil } -func createOperatorFromSelect(ctx *plancontext.PlanningContext, sel *sqlparser.Select) (ops.Operator, error) { - op, err := crossJoin(ctx, sel.From) - if err != nil { - return nil, err - } +func createOperatorFromSelect(ctx *plancontext.PlanningContext, sel *sqlparser.Select) Operator { + op := crossJoin(ctx, sel.From) if sel.Where != nil { - op, err = addWherePredicates(ctx, sel.Where.Expr, op) - if err != nil { - return nil, err - } + op = addWherePredicates(ctx, sel.Where.Expr, op) } if sel.Comments != nil || sel.Lock != sqlparser.NoLock { @@ -76,26 +64,23 @@ func createOperatorFromSelect(ctx *plancontext.PlanningContext, sel *sqlparser.S op = newHorizon(op, sel) - return op, nil + return op } -func addWherePredicates(ctx *plancontext.PlanningContext, expr sqlparser.Expr, op ops.Operator) (ops.Operator, error) { +func addWherePredicates(ctx *plancontext.PlanningContext, expr sqlparser.Expr, op Operator) Operator { sqc := &SubQueryBuilder{} outerID := TableID(op) exprs := sqlparser.SplitAndExpression(nil, expr) for _, expr := range exprs { sqlparser.RemoveKeyspaceFromColName(expr) - subq, err := sqc.handleSubquery(ctx, expr, outerID) - if err != nil { - return nil, err - } + subq := sqc.handleSubquery(ctx, expr, outerID) if subq != nil { continue } op = op.AddPredicate(ctx, expr) addColumnEquality(ctx, expr) } - return sqc.getRootOperator(op, nil), nil + return sqc.getRootOperator(op, nil) } // cloneASTAndSemState clones the AST and the semantic state of the input node. 
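Context for the hunks above and below: throughout this patch, helpers such as translateQueryToOp and addWherePredicates stop returning errors and instead panic with vterrors values, with the error recovered at the package boundary via defer PanicHandler(&err), as visible in the new ToSQL signature earlier in this diff. The body of PanicHandler is not part of the patch; the following is only a minimal, self-contained sketch of how such a recover-into-error helper can work. The stand-in function doWork and the exact conversion logic are assumptions for illustration, not the Vitess implementation.

package main

import (
	"errors"
	"fmt"
)

// PanicHandler is a hypothetical stand-in for the helper this patch defers
// (defer PanicHandler(&err)); the real body is not shown in the diff.
func PanicHandler(err *error) {
	if r := recover(); r != nil {
		if e, ok := r.(error); ok {
			*err = e
			return
		}
		*err = fmt.Errorf("panic: %v", r)
	}
}

// doWork mimics the new style: helpers panic with an error value instead of
// returning it, and the boundary function recovers it back into err.
func doWork(fail bool) (result string, err error) {
	defer PanicHandler(&err)
	if fail {
		panic(errors.New("unsupported construct"))
	}
	return "ok", nil
}

func main() {
	if _, err := doWork(true); err != nil {
		fmt.Println(err) // prints: unsupported construct
	}
}

The trade-off this patch makes is that deep rewriter call chains no longer thread an error value through every return, at the cost of relying on a single recover point at the planner boundary.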
@@ -158,56 +143,44 @@ type joinPredicateCollector struct { func (jpc *joinPredicateCollector) inspectPredicate( ctx *plancontext.PlanningContext, predicate sqlparser.Expr, -) error { +) { pred := predicate deps := ctx.SemTable.RecursiveDeps(predicate) // if the subquery is not enough, but together we have all we need, // then we can use this predicate to connect the subquery to the outer query if !deps.IsSolvedBy(jpc.subqID) && deps.IsSolvedBy(jpc.totalID) { jpc.predicates = append(jpc.predicates, predicate) - jc, err := breakExpressionInLHSandRHSForApplyJoin(ctx, predicate, jpc.outerID) - if err != nil { - return err - } + jc := breakExpressionInLHSandRHSForApplyJoin(ctx, predicate, jpc.outerID) jpc.joinColumns = append(jpc.joinColumns, jc) pred = jc.RHSExpr } jpc.remainingPredicates = append(jpc.remainingPredicates, pred) - return nil } -func createOperatorFromUnion(ctx *plancontext.PlanningContext, node *sqlparser.Union) (ops.Operator, error) { - opLHS, err := translateQueryToOp(ctx, node.Left) - if err != nil { - return nil, err - } - +func createOperatorFromUnion(ctx *plancontext.PlanningContext, node *sqlparser.Union) Operator { _, isRHSUnion := node.Right.(*sqlparser.Union) if isRHSUnion { - return nil, vterrors.VT12001("nesting of UNIONs on the right-hand side") + panic(vterrors.VT12001("nesting of UNIONs on the right-hand side")) } - opRHS, err := translateQueryToOp(ctx, node.Right) - if err != nil { - return nil, err - } - + opLHS := translateQueryToOp(ctx, node.Left) + opRHS := translateQueryToOp(ctx, node.Right) lexprs := ctx.SemTable.SelectExprs(node.Left) rexprs := ctx.SemTable.SelectExprs(node.Right) unionCols := ctx.SemTable.SelectExprs(node) - union := newUnion([]ops.Operator{opLHS, opRHS}, []sqlparser.SelectExprs{lexprs, rexprs}, unionCols, node.Distinct) - return newHorizon(union, node), nil + union := newUnion([]Operator{opLHS, opRHS}, []sqlparser.SelectExprs{lexprs, rexprs}, unionCols, node.Distinct) + return newHorizon(union, node) } // createOpFromStmt creates an operator from the given statement. It takes in two additional arguments— // 1. verifyAllFKs: For this given statement, do we need to verify validity of all the foreign keys on the vtgate level. // 2. fkToIgnore: The foreign key constraint to specifically ignore while planning the statement. This field is used in UPDATE CASCADE planning, wherein while planning the child update // query, we need to ignore the parent foreign key constraint that caused the cascade in question. -func createOpFromStmt(ctx *plancontext.PlanningContext, stmt sqlparser.Statement, verifyAllFKs bool, fkToIgnore string) (ops.Operator, error) { +func createOpFromStmt(ctx *plancontext.PlanningContext, stmt sqlparser.Statement, verifyAllFKs bool, fkToIgnore string) Operator { var err error ctx, err = plancontext.CreatePlanningContext(stmt, ctx.ReservedVars, ctx.VSchema, ctx.PlannerVersion) if err != nil { - return nil, err + panic(err) } // TODO (@GuptaManan100, @harshit-gangal): When we add cross-shard foreign keys support, @@ -222,7 +195,7 @@ func createOpFromStmt(ctx *plancontext.PlanningContext, stmt sqlparser.Statement // From all the parent foreign keys involved, we should remove the one that we need to ignore. 
err = ctx.SemTable.RemoveParentForeignKey(fkToIgnore) if err != nil { - return nil, err + panic(err) } // Now, we can filter the foreign keys further based on the planning context, specifically whether we are running @@ -236,13 +209,17 @@ func createOpFromStmt(ctx *plancontext.PlanningContext, stmt sqlparser.Statement err = ctx.SemTable.RemoveNonRequiredForeignKeys(ctx.VerifyAllFKs, vindexes.DeleteAction) } if err != nil { - return nil, err + panic(err) } - return PlanQuery(ctx, stmt) + op, err := PlanQuery(ctx, stmt) + if err != nil { + panic(err) + } + return op } -func getOperatorFromTableExpr(ctx *plancontext.PlanningContext, tableExpr sqlparser.TableExpr, onlyTable bool) (ops.Operator, error) { +func getOperatorFromTableExpr(ctx *plancontext.PlanningContext, tableExpr sqlparser.TableExpr, onlyTable bool) Operator { switch tableExpr := tableExpr.(type) { case *sqlparser.AliasedTableExpr: return getOperatorFromAliasedTableExpr(ctx, tableExpr, onlyTable) @@ -251,19 +228,13 @@ func getOperatorFromTableExpr(ctx *plancontext.PlanningContext, tableExpr sqlpar case *sqlparser.ParenTableExpr: return crossJoin(ctx, tableExpr.Exprs) default: - return nil, vterrors.VT13001(fmt.Sprintf("unable to use: %T table type", tableExpr)) + panic(vterrors.VT13001(fmt.Sprintf("unable to use: %T table type", tableExpr))) } } -func getOperatorFromJoinTableExpr(ctx *plancontext.PlanningContext, tableExpr *sqlparser.JoinTableExpr) (ops.Operator, error) { - lhs, err := getOperatorFromTableExpr(ctx, tableExpr.LeftExpr, false) - if err != nil { - return nil, err - } - rhs, err := getOperatorFromTableExpr(ctx, tableExpr.RightExpr, false) - if err != nil { - return nil, err - } +func getOperatorFromJoinTableExpr(ctx *plancontext.PlanningContext, tableExpr *sqlparser.JoinTableExpr) Operator { + lhs := getOperatorFromTableExpr(ctx, tableExpr.LeftExpr, false) + rhs := getOperatorFromTableExpr(ctx, tableExpr.RightExpr, false) switch tableExpr.Join { case sqlparser.NormalJoinType: @@ -271,17 +242,17 @@ func getOperatorFromJoinTableExpr(ctx *plancontext.PlanningContext, tableExpr *s case sqlparser.LeftJoinType, sqlparser.RightJoinType: return createOuterJoin(tableExpr, lhs, rhs) default: - return nil, vterrors.VT13001("unsupported: %s", tableExpr.Join.ToString()) + panic(vterrors.VT13001("unsupported: %s", tableExpr.Join.ToString())) } } -func getOperatorFromAliasedTableExpr(ctx *plancontext.PlanningContext, tableExpr *sqlparser.AliasedTableExpr, onlyTable bool) (ops.Operator, error) { +func getOperatorFromAliasedTableExpr(ctx *plancontext.PlanningContext, tableExpr *sqlparser.AliasedTableExpr, onlyTable bool) Operator { tableID := ctx.SemTable.TableSetFor(tableExpr) switch tbl := tableExpr.Expr.(type) { case sqlparser.TableName: tableInfo, err := ctx.SemTable.TableInfoFor(tableID) if err != nil { - return nil, err + panic(err) } if vt, isVindex := tableInfo.(*semantics.VindexTable); isVindex { @@ -295,73 +266,71 @@ func getOperatorFromAliasedTableExpr(ctx *plancontext.PlanningContext, tableExpr }, Vindex: vt.Vindex, Solved: solves, - }, nil + } } qg := newQueryGraph() isInfSchema := tableInfo.IsInfSchema() qt := &QueryTable{Alias: tableExpr, Table: tbl, ID: tableID, IsInfSchema: isInfSchema} qg.Tables = append(qg.Tables, qt) - return qg, nil + return qg case *sqlparser.DerivedTable: if onlyTable && tbl.Select.GetLimit() == nil { tbl.Select.SetOrderBy(nil) } - inner, err := translateQueryToOp(ctx, tbl.Select) - if err != nil { - return nil, err - } + inner := translateQueryToOp(ctx, tbl.Select) if horizon, ok := 
inner.(*Horizon); ok { horizon.TableId = &tableID horizon.Alias = tableExpr.As.String() horizon.ColumnAliases = tableExpr.Columns qp, err := CreateQPFromSelectStatement(ctx, tbl.Select) if err != nil { - return nil, err + panic(err) } horizon.QP = qp } - return inner, nil + return inner default: - return nil, vterrors.VT13001(fmt.Sprintf("unable to use: %T", tbl)) + panic(vterrors.VT13001(fmt.Sprintf("unable to use: %T", tbl))) } } -func crossJoin(ctx *plancontext.PlanningContext, exprs sqlparser.TableExprs) (ops.Operator, error) { - var output ops.Operator +func crossJoin(ctx *plancontext.PlanningContext, exprs sqlparser.TableExprs) Operator { + var output Operator for _, tableExpr := range exprs { - op, err := getOperatorFromTableExpr(ctx, tableExpr, len(exprs) == 1) - if err != nil { - return nil, err - } + op := getOperatorFromTableExpr(ctx, tableExpr, len(exprs) == 1) if output == nil { output = op } else { output = createJoin(ctx, output, op) } } - return output, nil + return output } -func createQueryTableForDML(ctx *plancontext.PlanningContext, tableExpr sqlparser.TableExpr, whereClause *sqlparser.Where) (semantics.TableInfo, *QueryTable, error) { +func createQueryTableForDML( + ctx *plancontext.PlanningContext, + tableExpr sqlparser.TableExpr, + whereClause *sqlparser.Where, +) (semantics.TableInfo, *QueryTable) { alTbl, ok := tableExpr.(*sqlparser.AliasedTableExpr) if !ok { - return nil, nil, vterrors.VT13001("expected AliasedTableExpr") + panic(vterrors.VT13001("expected AliasedTableExpr")) } tblName, ok := alTbl.Expr.(sqlparser.TableName) if !ok { - return nil, nil, vterrors.VT13001("expected TableName") + panic(vterrors.VT13001("expected TableName")) } tableID := ctx.SemTable.TableSetFor(alTbl) tableInfo, err := ctx.SemTable.TableInfoFor(tableID) if err != nil { - return nil, nil, err + panic(err) } if tableInfo.IsInfSchema() { - return nil, nil, vterrors.VT12001("update information schema tables") + panic(vterrors.VT12001("update information schema tables")) } var predicates []sqlparser.Expr @@ -374,7 +343,7 @@ func createQueryTableForDML(ctx *plancontext.PlanningContext, tableExpr sqlparse Table: tblName, Predicates: predicates, } - return tableInfo, qt, nil + return tableInfo, qt } func addColumnEquality(ctx *plancontext.PlanningContext, expr sqlparser.Expr) { @@ -404,7 +373,7 @@ func createSelectionOp( orderBy sqlparser.OrderBy, limit *sqlparser.Limit, lock sqlparser.Lock, -) (ops.Operator, error) { +) Operator { selectionStmt := &sqlparser.Select{ SelectExprs: selectExprs, From: tableExprs, diff --git a/go/vt/vtgate/planbuilder/operators/comments.go b/go/vt/vtgate/planbuilder/operators/comments.go index 9ede4b9e0da..912fa4138d9 100644 --- a/go/vt/vtgate/planbuilder/operators/comments.go +++ b/go/vt/vtgate/planbuilder/operators/comments.go @@ -21,32 +21,31 @@ import ( "strings" "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) // LockAndComment contains any comments or locking directives we want on all queries down from this operator type LockAndComment struct { - Source ops.Operator + Source Operator Comments *sqlparser.ParsedComments Lock sqlparser.Lock } -func (l *LockAndComment) Clone(inputs []ops.Operator) ops.Operator { +func (l *LockAndComment) Clone(inputs []Operator) Operator { klon := *l klon.Source = inputs[0] return &klon } -func (l *LockAndComment) Inputs() []ops.Operator { - return []ops.Operator{l.Source} +func (l *LockAndComment) Inputs() []Operator { + 
return []Operator{l.Source} } -func (l *LockAndComment) SetInputs(operators []ops.Operator) { +func (l *LockAndComment) SetInputs(operators []Operator) { l.Source = operators[0] } -func (l *LockAndComment) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (l *LockAndComment) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { l.Source = l.Source.AddPredicate(ctx, expr) return l } @@ -76,6 +75,6 @@ func (l *LockAndComment) ShortDescription() string { return strings.Join(s, " ") } -func (l *LockAndComment) GetOrdering(ctx *plancontext.PlanningContext) []ops.OrderBy { +func (l *LockAndComment) GetOrdering(ctx *plancontext.PlanningContext) []OrderBy { return l.Source.GetOrdering(ctx) } diff --git a/go/vt/vtgate/planbuilder/operators/delete.go b/go/vt/vtgate/planbuilder/operators/delete.go index 8b7841bdcdd..17f6125992f 100644 --- a/go/vt/vtgate/planbuilder/operators/delete.go +++ b/go/vt/vtgate/planbuilder/operators/delete.go @@ -22,7 +22,6 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" @@ -45,7 +44,7 @@ func (d *Delete) introducesTableID() semantics.TableSet { } // Clone implements the Operator interface -func (d *Delete) Clone([]ops.Operator) ops.Operator { +func (d *Delete) Clone([]Operator) Operator { return &Delete{ QTable: d.QTable, VTable: d.VTable, @@ -61,7 +60,7 @@ func (d *Delete) TablesUsed() []string { return nil } -func (d *Delete) GetOrdering(*plancontext.PlanningContext) []ops.OrderBy { +func (d *Delete) GetOrdering(*plancontext.PlanningContext) []OrderBy { return nil } @@ -73,24 +72,13 @@ func (d *Delete) Statement() sqlparser.Statement { return d.AST } -func createOperatorFromDelete(ctx *plancontext.PlanningContext, deleteStmt *sqlparser.Delete) (ops.Operator, error) { - tableInfo, qt, err := createQueryTableForDML(ctx, deleteStmt.TableExprs[0], deleteStmt.Where) - if err != nil { - return nil, err - } - - vindexTable, routing, err := buildVindexTableForDML(ctx, tableInfo, qt, "delete") - if err != nil { - return nil, err - } +func createOperatorFromDelete(ctx *plancontext.PlanningContext, deleteStmt *sqlparser.Delete) Operator { + tableInfo, qt := createQueryTableForDML(ctx, deleteStmt.TableExprs[0], deleteStmt.Where) + vindexTable, routing := buildVindexTableForDML(ctx, tableInfo, qt, "delete") delClone := sqlparser.CloneRefOfDelete(deleteStmt) // Create the delete operator first. - delOp, err := createDeleteOperator(ctx, deleteStmt, qt, vindexTable, routing) - if err != nil { - return nil, err - } - + delOp := createDeleteOperator(ctx, deleteStmt, qt, vindexTable, routing) if deleteStmt.Comments != nil { delOp = &LockAndComment{ Source: delOp, @@ -101,11 +89,11 @@ func createOperatorFromDelete(ctx *plancontext.PlanningContext, deleteStmt *sqlp childFks := ctx.SemTable.GetChildForeignKeysList() // If there are no foreign key constraints, then we don't need to do anything. if len(childFks) == 0 { - return delOp, nil + return delOp } // If the delete statement has a limit, we don't support it yet. 
if deleteStmt.Limit != nil { - return nil, vterrors.VT12001("foreign keys management at vitess with limit") + panic(vterrors.VT12001("foreign keys management at vitess with limit")) } return createFkCascadeOpForDelete(ctx, delOp, delClone, childFks) @@ -116,7 +104,7 @@ func createDeleteOperator( deleteStmt *sqlparser.Delete, qt *QueryTable, vindexTable *vindexes.Table, - routing Routing) (ops.Operator, error) { + routing Routing) Operator { del := &Delete{ QTable: qt, VTable: vindexTable, @@ -128,13 +116,10 @@ func createDeleteOperator( } if !vindexTable.Keyspace.Sharded { - return route, nil + return route } - primaryVindex, vindexAndPredicates, err := getVindexInformation(qt.ID, vindexTable) - if err != nil { - return nil, err - } + primaryVindex, vindexAndPredicates := getVindexInformation(qt.ID, vindexTable) tr, ok := routing.(*ShardedRouting) if ok { @@ -151,58 +136,49 @@ func createDeleteOperator( sqc := &SubQueryBuilder{} for _, predicate := range qt.Predicates { - if subq, err := sqc.handleSubquery(ctx, predicate, qt.ID); err != nil { - return nil, err - } else if subq != nil { + subq := sqc.handleSubquery(ctx, predicate, qt.ID) + if subq != nil { continue } - routing, err = UpdateRoutingLogic(ctx, predicate, routing) - if err != nil { - return nil, err - } + + routing = UpdateRoutingLogic(ctx, predicate, routing) } if routing.OpCode() == engine.Scatter && deleteStmt.Limit != nil { // TODO systay: we should probably check for other op code types - IN could also hit multiple shards (2022-04-07) - return nil, vterrors.VT12001("multi shard DELETE with LIMIT") + panic(vterrors.VT12001("multi shard DELETE with LIMIT")) } - return sqc.getRootOperator(route, nil), nil + return sqc.getRootOperator(route, nil) } -func createFkCascadeOpForDelete(ctx *plancontext.PlanningContext, parentOp ops.Operator, delStmt *sqlparser.Delete, childFks []vindexes.ChildFKInfo) (ops.Operator, error) { +func createFkCascadeOpForDelete(ctx *plancontext.PlanningContext, parentOp Operator, delStmt *sqlparser.Delete, childFks []vindexes.ChildFKInfo) Operator { var fkChildren []*FkChild var selectExprs []sqlparser.SelectExpr for _, fk := range childFks { // Any RESTRICT type foreign keys that arrive here, // are cross-shard/cross-keyspace RESTRICT cases, which we don't currently support. if fk.OnDelete.IsRestrict() { - return nil, vterrors.VT12002() + panic(vterrors.VT12002()) } // We need to select all the parent columns for the foreign key constraint, to use in the update of the child table. 
var offsets []int offsets, selectExprs = addColumns(ctx, fk.ParentColumns, selectExprs) - fkChild, err := createFkChildForDelete(ctx, fk, offsets) - if err != nil { - return nil, err - } - fkChildren = append(fkChildren, fkChild) - } - selectionOp, err := createSelectionOp(ctx, selectExprs, delStmt.TableExprs, delStmt.Where, nil, nil, sqlparser.ForUpdateLockNoWait) - if err != nil { - return nil, err + fkChildren = append(fkChildren, + createFkChildForDelete(ctx, fk, offsets)) } + selectionOp := createSelectionOp(ctx, selectExprs, delStmt.TableExprs, delStmt.Where, nil, nil, sqlparser.ForUpdateLockNoWait) return &FkCascade{ Selection: selectionOp, Children: fkChildren, Parent: parentOp, - }, nil + } } -func createFkChildForDelete(ctx *plancontext.PlanningContext, fk vindexes.ChildFKInfo, cols []int) (*FkChild, error) { +func createFkChildForDelete(ctx *plancontext.PlanningContext, fk vindexes.ChildFKInfo, cols []int) *FkChild { bvName := ctx.ReservedVars.ReserveVariable(foreignKeyConstraintValues) parsedComments := getParsedCommentsForFkChecks(ctx) var childStmt sqlparser.Statement @@ -240,18 +216,15 @@ func createFkChildForDelete(ctx *plancontext.PlanningContext, fk vindexes.ChildF Where: &sqlparser.Where{Type: sqlparser.WhereClause, Expr: compExpr}, } case sqlparser.SetDefault: - return nil, vterrors.VT09016() + panic(vterrors.VT09016()) } // For the child statement of a DELETE query, we don't need to verify all the FKs on VTgate or ignore any foreign key explicitly. - childOp, err := createOpFromStmt(ctx, childStmt, false /* verifyAllFKs */, "" /* fkToIgnore */) - if err != nil { - return nil, err - } + childOp := createOpFromStmt(ctx, childStmt, false /* verifyAllFKs */, "" /* fkToIgnore */) return &FkChild{ BVName: bvName, Cols: cols, Op: childOp, - }, nil + } } diff --git a/go/vt/vtgate/planbuilder/operators/distinct.go b/go/vt/vtgate/planbuilder/operators/distinct.go index d7aad08d206..74f4495374c 100644 --- a/go/vt/vtgate/planbuilder/operators/distinct.go +++ b/go/vt/vtgate/planbuilder/operators/distinct.go @@ -21,13 +21,12 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) type ( Distinct struct { - Source ops.Operator + Source Operator QP *QueryProjection // When we go from AST to operator, we place DISTINCT ops in the required places in the op tree @@ -46,7 +45,7 @@ type ( } ) -func (d *Distinct) planOffsets(ctx *plancontext.PlanningContext) ops.Operator { +func (d *Distinct) planOffsets(ctx *plancontext.PlanningContext) Operator { columns := d.GetColumns(ctx) for idx, col := range columns { e, err := d.QP.GetSimplifiedExpr(ctx, col.Expr) @@ -71,7 +70,7 @@ func (d *Distinct) planOffsets(ctx *plancontext.PlanningContext) ops.Operator { return nil } -func (d *Distinct) Clone(inputs []ops.Operator) ops.Operator { +func (d *Distinct) Clone(inputs []Operator) Operator { return &Distinct{ Required: d.Required, Source: inputs[0], @@ -82,15 +81,15 @@ func (d *Distinct) Clone(inputs []ops.Operator) ops.Operator { } } -func (d *Distinct) Inputs() []ops.Operator { - return []ops.Operator{d.Source} +func (d *Distinct) Inputs() []Operator { + return []Operator{d.Source} } -func (d *Distinct) SetInputs(operators []ops.Operator) { +func (d *Distinct) SetInputs(operators []Operator) { d.Source = operators[0] } -func (d *Distinct) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (d *Distinct) 
AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { d.Source = d.Source.AddPredicate(ctx, expr) return d } @@ -118,7 +117,7 @@ func (d *Distinct) ShortDescription() string { return "Performance" } -func (d *Distinct) GetOrdering(ctx *plancontext.PlanningContext) []ops.OrderBy { +func (d *Distinct) GetOrdering(ctx *plancontext.PlanningContext) []OrderBy { return d.Source.GetOrdering(ctx) } diff --git a/go/vt/vtgate/planbuilder/operators/dml_planning.go b/go/vt/vtgate/planbuilder/operators/dml_planning.go index 8f87a71c95f..3140142858c 100644 --- a/go/vt/vtgate/planbuilder/operators/dml_planning.go +++ b/go/vt/vtgate/planbuilder/operators/dml_planning.go @@ -19,12 +19,11 @@ package operators import ( "fmt" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" "vitess.io/vitess/go/vt/vtgate/evalengine" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" ) @@ -33,12 +32,11 @@ import ( // If it cannot find a unique vindex match, it returns an error. func getVindexInformation(id semantics.TableSet, table *vindexes.Table) ( *vindexes.ColumnVindex, - []*VindexPlusPredicates, - error) { + []*VindexPlusPredicates) { // Check that we have a primary vindex which is valid if len(table.ColumnVindexes) == 0 || !table.ColumnVindexes[0].IsUnique() { - return nil, nil, vterrors.VT09001(table.Name) + panic(vterrors.VT09001(table.Name)) } primaryVindex := table.ColumnVindexes[0] @@ -55,10 +53,16 @@ func getVindexInformation(id semantics.TableSet, table *vindexes.Table) ( TableID: id, }) } - return primaryVindex, vindexesAndPredicates, nil + return primaryVindex, vindexesAndPredicates } -func buildChangedVindexesValues(ctx *plancontext.PlanningContext, update *sqlparser.Update, table *vindexes.Table, ksidCols []sqlparser.IdentifierCI, assignments []SetExpr) (vv map[string]*engine.VindexValues, ownedVindexQuery string, subQueriesArgOnChangedVindex []string, err error) { +func buildChangedVindexesValues( + ctx *plancontext.PlanningContext, + update *sqlparser.Update, + table *vindexes.Table, + ksidCols []sqlparser.IdentifierCI, + assignments []SetExpr, +) (vv map[string]*engine.VindexValues, ownedVindexQuery string, subQueriesArgOnChangedVindex []string) { changedVindexes := make(map[string]*engine.VindexValues) buf, offset := initialQuery(ksidCols, table) for i, vindex := range table.ColumnVindexes { @@ -72,7 +76,7 @@ func buildChangedVindexesValues(ctx *plancontext.PlanningContext, update *sqlpar continue } if found { - return nil, "", nil, vterrors.VT03015(assignment.Name.Name) + panic(vterrors.VT03015(assignment.Name.Name)) } found = true pv, err := evalengine.Translate(assignment.Expr.EvalExpr, &evalengine.Config{ @@ -80,7 +84,7 @@ func buildChangedVindexesValues(ctx *plancontext.PlanningContext, update *sqlpar Collation: ctx.SemTable.Collation, }) if err != nil { - return nil, "", nil, invalidUpdateExpr(assignment.Name.Name.String(), assignment.Expr.EvalExpr) + panic(invalidUpdateExpr(assignment.Name.Name.String(), assignment.Expr.EvalExpr)) } if assignment.Expr.Info != nil { @@ -107,13 +111,13 @@ func buildChangedVindexesValues(ctx *plancontext.PlanningContext, update *sqlpar } if update.Limit != nil && len(update.OrderBy) == 0 { - return nil, "", nil, vterrors.VT12001(fmt.Sprintf("you need to provide the ORDER BY clause when using LIMIT; invalid update on vindex: %v", 
vindex.Name)) + panic(vterrors.VT12001(fmt.Sprintf("you need to provide the ORDER BY clause when using LIMIT; invalid update on vindex: %v", vindex.Name))) } if i == 0 { - return nil, "", nil, vterrors.VT12001(fmt.Sprintf("you cannot UPDATE primary vindex columns; invalid update on vindex: %v", vindex.Name)) + panic(vterrors.VT12001(fmt.Sprintf("you cannot UPDATE primary vindex columns; invalid update on vindex: %v", vindex.Name))) } if _, ok := vindex.Vindex.(vindexes.Lookup); !ok { - return nil, "", nil, vterrors.VT12001(fmt.Sprintf("you can only UPDATE lookup vindexes; invalid update on vindex: %v", vindex.Name)) + panic(vterrors.VT12001(fmt.Sprintf("you can only UPDATE lookup vindexes; invalid update on vindex: %v", vindex.Name))) } changedVindexes[vindex.Name] = &engine.VindexValues{ EvalExprMap: vindexValueMap, @@ -122,16 +126,16 @@ func buildChangedVindexesValues(ctx *plancontext.PlanningContext, update *sqlpar offset++ } if len(changedVindexes) == 0 { - return nil, "", nil, nil + return nil, "", nil } // generate rest of the owned vindex query. aTblExpr, ok := update.TableExprs[0].(*sqlparser.AliasedTableExpr) if !ok { - return nil, "", nil, vterrors.VT12001("UPDATE on complex table expression") + panic(vterrors.VT12001("UPDATE on complex table expression")) } tblExpr := &sqlparser.AliasedTableExpr{Expr: sqlparser.TableName{Name: table.Name}, As: aTblExpr.As} buf.Myprintf(" from %v%v%v%v for update", tblExpr, update.Where, update.OrderBy, update.Limit) - return changedVindexes, buf.String(), subQueriesArgOnChangedVindex, nil + return changedVindexes, buf.String(), subQueriesArgOnChangedVindex } func initialQuery(ksidCols []sqlparser.IdentifierCI, table *vindexes.Table) (*sqlparser.TrackedBuffer, int) { diff --git a/go/vt/vtgate/planbuilder/operators/expressions.go b/go/vt/vtgate/planbuilder/operators/expressions.go index 0df875a6fbd..65600155631 100644 --- a/go/vt/vtgate/planbuilder/operators/expressions.go +++ b/go/vt/vtgate/planbuilder/operators/expressions.go @@ -28,7 +28,7 @@ func breakExpressionInLHSandRHSForApplyJoin( ctx *plancontext.PlanningContext, expr sqlparser.Expr, lhs semantics.TableSet, -) (col JoinColumn, err error) { +) (col JoinColumn) { rewrittenExpr := sqlparser.CopyOnRewrite(expr, nil, func(cursor *sqlparser.CopyOnWriteCursor) { nodeExpr, ok := cursor.Node().(sqlparser.Expr) if !ok || !fetchByOffset(nodeExpr) { @@ -51,9 +51,6 @@ func breakExpressionInLHSandRHSForApplyJoin( cursor.Replace(arg) }, nil).(sqlparser.Expr) - if err != nil { - return JoinColumn{}, err - } ctx.JoinPredicates[expr] = append(ctx.JoinPredicates[expr], rewrittenExpr) col.RHSExpr = rewrittenExpr return diff --git a/go/vt/vtgate/planbuilder/operators/filter.go b/go/vt/vtgate/planbuilder/operators/filter.go index cee57c74943..f2171c43a1b 100644 --- a/go/vt/vtgate/planbuilder/operators/filter.go +++ b/go/vt/vtgate/planbuilder/operators/filter.go @@ -24,14 +24,12 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) type Filter struct { - Source ops.Operator + Source Operator Predicates []sqlparser.Expr // PredicateWithOffsets is the evalengine expression that will finally be used. 
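Note on the error-handling convention adopted across these hunks: the removed (value, error) returns are replaced by panics carrying vterrors values, which are meant to be recovered at the planner's entry points and handed back as ordinary errors. A minimal, self-contained sketch of that convention follows; the helper name and details are illustrative assumptions, not the package's actual recovery code:

	// runPlanning converts a panic raised anywhere inside fn back into an error.
	// Non-error panics are re-raised so genuine bugs still crash loudly.
	func runPlanning(fn func()) (err error) {
		defer func() {
			r := recover()
			if r == nil {
				return
			}
			if e, ok := r.(error); ok {
				err = e
				return
			}
			panic(r)
		}()
		fn()
		return nil
	}

	// Example: a deep rewrite can simply panic(vterrors.VT12001("...")) and the
	// caller of runPlanning still receives a normal error value.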
@@ -41,14 +39,14 @@ type Filter struct { Truncate int } -func newFilter(op ops.Operator, expr sqlparser.Expr) ops.Operator { +func newFilter(op Operator, expr sqlparser.Expr) Operator { return &Filter{ Source: op, Predicates: []sqlparser.Expr{expr}, } } // Clone implements the Operator interface -func (f *Filter) Clone(inputs []ops.Operator) ops.Operator { +func (f *Filter) Clone(inputs []Operator) Operator { return &Filter{ Source: inputs[0], Predicates: slices.Clone(f.Predicates), @@ -58,12 +56,12 @@ func (f *Filter) Clone(inputs []ops.Operator) ops.Operator { } // Inputs implements the Operator interface -func (f *Filter) Inputs() []ops.Operator { - return []ops.Operator{f.Source} +func (f *Filter) Inputs() []Operator { + return []Operator{f.Source} } // SetInputs implements the Operator interface -func (f *Filter) SetInputs(ops []ops.Operator) { +func (f *Filter) SetInputs(ops []Operator) { f.Source = ops[0] } @@ -80,7 +78,7 @@ func (f *Filter) UnsolvedPredicates(st *semantics.SemTable) []sqlparser.Expr { return result } -func (f *Filter) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (f *Filter) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { f.Source = f.Source.AddPredicate(ctx, expr) return f } @@ -101,25 +99,25 @@ func (f *Filter) GetSelectExprs(ctx *plancontext.PlanningContext) sqlparser.Sele return f.Source.GetSelectExprs(ctx) } -func (f *Filter) GetOrdering(ctx *plancontext.PlanningContext) []ops.OrderBy { +func (f *Filter) GetOrdering(ctx *plancontext.PlanningContext) []OrderBy { return f.Source.GetOrdering(ctx) } -func (f *Filter) Compact(*plancontext.PlanningContext) (ops.Operator, *rewrite.ApplyResult, error) { +func (f *Filter) Compact(*plancontext.PlanningContext) (Operator, *ApplyResult) { if len(f.Predicates) == 0 { - return f.Source, rewrite.NewTree("filter with no predicates removed"), nil + return f.Source, Rewrote("filter with no predicates removed") } other, isFilter := f.Source.(*Filter) if !isFilter { - return f, rewrite.SameTree, nil + return f, NoRewrite } f.Source = other.Source f.Predicates = append(f.Predicates, other.Predicates...) - return f, rewrite.NewTree("two filters merged into one"), nil + return f, Rewrote("two filters merged into one") } -func (f *Filter) planOffsets(ctx *plancontext.PlanningContext) ops.Operator { +func (f *Filter) planOffsets(ctx *plancontext.PlanningContext) Operator { cfg := &evalengine.Config{ ResolveType: ctx.SemTable.TypeForExpr, Collation: ctx.SemTable.Collation, diff --git a/go/vt/vtgate/planbuilder/operators/fk_cascade.go b/go/vt/vtgate/planbuilder/operators/fk_cascade.go index 73b902a4980..f24b59ca5ab 100644 --- a/go/vt/vtgate/planbuilder/operators/fk_cascade.go +++ b/go/vt/vtgate/planbuilder/operators/fk_cascade.go @@ -20,7 +20,6 @@ import ( "slices" "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) @@ -29,7 +28,7 @@ type FkChild struct { BVName string Cols []int // indexes NonLiteralInfo []engine.NonLiteralUpdateInfo - Op ops.Operator + Op Operator noColumns noPredicates @@ -39,19 +38,19 @@ type FkChild struct { // as an operator. This operator is created for DML queries that require // cascades (for example, ON DELETE CASCADE). 
type FkCascade struct { - Selection ops.Operator + Selection Operator Children []*FkChild - Parent ops.Operator + Parent Operator noColumns noPredicates } -var _ ops.Operator = (*FkCascade)(nil) +var _ Operator = (*FkCascade)(nil) // Inputs implements the Operator interface -func (fkc *FkCascade) Inputs() []ops.Operator { - var inputs []ops.Operator +func (fkc *FkCascade) Inputs() []Operator { + var inputs []Operator inputs = append(inputs, fkc.Parent) inputs = append(inputs, fkc.Selection) for _, child := range fkc.Children { @@ -61,7 +60,7 @@ func (fkc *FkCascade) Inputs() []ops.Operator { } // SetInputs implements the Operator interface -func (fkc *FkCascade) SetInputs(operators []ops.Operator) { +func (fkc *FkCascade) SetInputs(operators []Operator) { if len(operators) < 2 { panic("incorrect count of inputs for FkCascade") } @@ -76,7 +75,7 @@ func (fkc *FkCascade) SetInputs(operators []ops.Operator) { } // Clone implements the Operator interface -func (fkc *FkCascade) Clone(inputs []ops.Operator) ops.Operator { +func (fkc *FkCascade) Clone(inputs []Operator) Operator { if len(inputs) < 2 { panic("incorrect count of inputs for FkCascade") } @@ -100,7 +99,7 @@ func (fkc *FkCascade) Clone(inputs []ops.Operator) ops.Operator { } // GetOrdering implements the Operator interface -func (fkc *FkCascade) GetOrdering(*plancontext.PlanningContext) []ops.OrderBy { +func (fkc *FkCascade) GetOrdering(*plancontext.PlanningContext) []OrderBy { return nil } diff --git a/go/vt/vtgate/planbuilder/operators/fk_verify.go b/go/vt/vtgate/planbuilder/operators/fk_verify.go index 39e1092c8d9..8275a8d462f 100644 --- a/go/vt/vtgate/planbuilder/operators/fk_verify.go +++ b/go/vt/vtgate/planbuilder/operators/fk_verify.go @@ -17,14 +17,13 @@ limitations under the License. package operators import ( - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) // VerifyOp keeps the information about the foreign key verification operation. // It is a Parent verification or a Child verification. type VerifyOp struct { - Op ops.Operator + Op Operator Typ string } @@ -33,17 +32,17 @@ type VerifyOp struct { // verifications on the existence of the rows in the parent table (for example, INSERT and UPDATE). 
type FkVerify struct { Verify []*VerifyOp - Input ops.Operator + Input Operator noColumns noPredicates } -var _ ops.Operator = (*FkVerify)(nil) +var _ Operator = (*FkVerify)(nil) // Inputs implements the Operator interface -func (fkv *FkVerify) Inputs() []ops.Operator { - inputs := []ops.Operator{fkv.Input} +func (fkv *FkVerify) Inputs() []Operator { + inputs := []Operator{fkv.Input} for _, v := range fkv.Verify { inputs = append(inputs, v.Op) } @@ -51,7 +50,7 @@ func (fkv *FkVerify) Inputs() []ops.Operator { } // SetInputs implements the Operator interface -func (fkv *FkVerify) SetInputs(operators []ops.Operator) { +func (fkv *FkVerify) SetInputs(operators []Operator) { fkv.Input = operators[0] if len(fkv.Verify) != len(operators)-1 { panic("mismatched number of verify inputs") @@ -62,7 +61,7 @@ func (fkv *FkVerify) SetInputs(operators []ops.Operator) { } // Clone implements the Operator interface -func (fkv *FkVerify) Clone(inputs []ops.Operator) ops.Operator { +func (fkv *FkVerify) Clone(inputs []Operator) Operator { newFkv := &FkVerify{ Verify: fkv.Verify, } @@ -71,7 +70,7 @@ func (fkv *FkVerify) Clone(inputs []ops.Operator) ops.Operator { } // GetOrdering implements the Operator interface -func (fkv *FkVerify) GetOrdering(*plancontext.PlanningContext) []ops.OrderBy { +func (fkv *FkVerify) GetOrdering(*plancontext.PlanningContext) []OrderBy { return nil } diff --git a/go/vt/vtgate/planbuilder/operators/hash_join.go b/go/vt/vtgate/planbuilder/operators/hash_join.go index e9cfeb7d107..ce23e510c09 100644 --- a/go/vt/vtgate/planbuilder/operators/hash_join.go +++ b/go/vt/vtgate/planbuilder/operators/hash_join.go @@ -25,14 +25,13 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) type ( HashJoin struct { - LHS, RHS ops.Operator + LHS, RHS Operator // LeftJoin will be true in the case of an outer join LeftJoin bool @@ -62,10 +61,10 @@ type ( } ) -var _ ops.Operator = (*HashJoin)(nil) +var _ Operator = (*HashJoin)(nil) var _ JoinOp = (*HashJoin)(nil) -func NewHashJoin(lhs, rhs ops.Operator, outerJoin bool) *HashJoin { +func NewHashJoin(lhs, rhs Operator, outerJoin bool) *HashJoin { hj := &HashJoin{ LHS: lhs, RHS: rhs, @@ -74,7 +73,7 @@ func NewHashJoin(lhs, rhs ops.Operator, outerJoin bool) *HashJoin { return hj } -func (hj *HashJoin) Clone(inputs []ops.Operator) ops.Operator { +func (hj *HashJoin) Clone(inputs []Operator) Operator { kopy := *hj kopy.LHS, kopy.RHS = inputs[0], inputs[1] kopy.columns = slices.Clone(hj.columns) @@ -83,15 +82,15 @@ func (hj *HashJoin) Clone(inputs []ops.Operator) ops.Operator { return &kopy } -func (hj *HashJoin) Inputs() []ops.Operator { - return []ops.Operator{hj.LHS, hj.RHS} +func (hj *HashJoin) Inputs() []Operator { + return []Operator{hj.LHS, hj.RHS} } -func (hj *HashJoin) SetInputs(operators []ops.Operator) { +func (hj *HashJoin) SetInputs(operators []Operator) { hj.LHS, hj.RHS = operators[0], operators[1] } -func (hj *HashJoin) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (hj *HashJoin) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { return AddPredicate(ctx, hj, expr, false, newFilter) } @@ -107,7 +106,7 @@ func (hj *HashJoin) AddColumn(ctx *plancontext.PlanningContext, reuseExisting bo return len(hj.columns) - 1 } -func (hj *HashJoin) planOffsets(ctx 
*plancontext.PlanningContext) ops.Operator { +func (hj *HashJoin) planOffsets(ctx *plancontext.PlanningContext) Operator { if hj.offset { return nil } @@ -124,15 +123,11 @@ func (hj *HashJoin) planOffsets(ctx *plancontext.PlanningContext) ops.Operator { }) proj := newAliasedProjection(hj) - _, err := proj.addProjExpr(eexprs...) - if err != nil { - panic(err) - } - + proj.addProjExpr(eexprs...) return proj } -func (hj *HashJoin) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, underRoute bool) int { +func (hj *HashJoin) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, _ bool) int { for offset, col := range hj.columns { if ctx.SemTable.EqualsExprWithDeps(expr, col) { return offset @@ -162,23 +157,23 @@ func (hj *HashJoin) ShortDescription() string { return cmp } -func (hj *HashJoin) GetOrdering(ctx *plancontext.PlanningContext) []ops.OrderBy { +func (hj *HashJoin) GetOrdering(ctx *plancontext.PlanningContext) []OrderBy { return nil // hash joins will never promise an output order } -func (hj *HashJoin) GetLHS() ops.Operator { +func (hj *HashJoin) GetLHS() Operator { return hj.LHS } -func (hj *HashJoin) GetRHS() ops.Operator { +func (hj *HashJoin) GetRHS() Operator { return hj.RHS } -func (hj *HashJoin) SetLHS(op ops.Operator) { +func (hj *HashJoin) SetLHS(op Operator) { hj.LHS = op } -func (hj *HashJoin) SetRHS(op ops.Operator) { +func (hj *HashJoin) SetRHS(op Operator) { hj.RHS = op } @@ -239,7 +234,7 @@ func (hj *HashJoin) addColumn(ctx *plancontext.PlanningContext, in sqlparser.Exp return true } deps := ctx.SemTable.RecursiveDeps(expr) - check := func(id semantics.TableSet, op ops.Operator, offsetter func(int) int) int { + check := func(id semantics.TableSet, op Operator, offsetter func(int) int) int { if !deps.IsSolvedBy(id) { return -1 } diff --git a/go/vt/vtgate/planbuilder/operators/helpers.go b/go/vt/vtgate/planbuilder/operators/helpers.go index 21be634d7d8..e5801f6b36f 100644 --- a/go/vt/vtgate/planbuilder/operators/helpers.go +++ b/go/vt/vtgate/planbuilder/operators/helpers.go @@ -21,36 +21,34 @@ import ( "sort" "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" ) // compact will optimise the operator tree into a smaller but equivalent version -func compact(ctx *plancontext.PlanningContext, op ops.Operator) (ops.Operator, error) { +func compact(ctx *plancontext.PlanningContext, op Operator) Operator { type compactable interface { // Compact implement this interface for operators that have easy to see optimisations - Compact(ctx *plancontext.PlanningContext) (ops.Operator, *rewrite.ApplyResult, error) + Compact(ctx *plancontext.PlanningContext) (Operator, *ApplyResult) } - newOp, err := rewrite.BottomUp(op, TableID, func(op ops.Operator, _ semantics.TableSet, _ bool) (ops.Operator, *rewrite.ApplyResult, error) { + newOp := BottomUp(op, TableID, func(op Operator, _ semantics.TableSet, _ bool) (Operator, *ApplyResult) { newOp, ok := op.(compactable) if !ok { - return op, rewrite.SameTree, nil + return op, NoRewrite } return newOp.Compact(ctx) }, stopAtRoute) - return newOp, err + return newOp } -func checkValid(op ops.Operator) error { +func checkValid(op Operator) error { type checkable interface { CheckValid() error } - return rewrite.Visit(op, func(this ops.Operator) error { + return Visit(op, func(this 
Operator) error { if chk, ok := this.(checkable); ok { return chk.CheckValid() } @@ -58,9 +56,9 @@ func checkValid(op ops.Operator) error { }) } -func Clone(op ops.Operator) ops.Operator { +func Clone(op Operator) Operator { inputs := op.Inputs() - clones := make([]ops.Operator, len(inputs)) + clones := make([]Operator, len(inputs)) for i, input := range inputs { clones[i] = Clone(input) } @@ -72,8 +70,8 @@ type tableIDIntroducer interface { introducesTableID() semantics.TableSet } -func TableID(op ops.Operator) (result semantics.TableSet) { - _ = rewrite.Visit(op, func(this ops.Operator) error { +func TableID(op Operator) (result semantics.TableSet) { + _ = Visit(op, func(this Operator) error { if tbl, ok := this.(tableIDIntroducer); ok { result = result.Merge(tbl.introducesTableID()) } @@ -87,9 +85,9 @@ type TableUser interface { TablesUsed() []string } -func TablesUsed(op ops.Operator) []string { +func TablesUsed(op Operator) []string { addString, collect := collectSortedUniqueStrings() - _ = rewrite.Visit(op, func(this ops.Operator) error { + _ = Visit(op, func(this Operator) error { if tbl, ok := this.(TableUser); ok { for _, u := range tbl.TablesUsed() { addString(u) @@ -100,29 +98,7 @@ func TablesUsed(op ops.Operator) []string { return collect() } -func UnresolvedPredicates(op ops.Operator, st *semantics.SemTable) (result []sqlparser.Expr) { - type unresolved interface { - // UnsolvedPredicates returns any predicates that have dependencies on the given Operator and - // on the outside of it (a parent Select expression, any other table not used by Operator, etc.). - // This is used for sub-queries. An example query could be: - // SELECT * FROM tbl WHERE EXISTS (SELECT 1 FROM otherTbl WHERE tbl.col = otherTbl.col) - // The subquery would have one unsolved predicate: `tbl.col = otherTbl.col` - // It's a predicate that belongs to the inner query, but it needs data from the outer query - // These predicates dictate which data we have to send from the outer side to the inner - UnsolvedPredicates(semTable *semantics.SemTable) []sqlparser.Expr - } - - _ = rewrite.Visit(op, func(this ops.Operator) error { - if tbl, ok := this.(unresolved); ok { - result = append(result, tbl.UnsolvedPredicates(st)...) - } - - return nil - }) - return -} - -func CostOf(op ops.Operator) (cost int) { +func CostOf(op Operator) (cost int) { type costly interface { // Cost returns the cost for this operator. All the costly operators in the tree are summed together to get the // total cost of the operator tree. @@ -131,7 +107,7 @@ func CostOf(op ops.Operator) (cost int) { Cost() int } - _ = rewrite.Visit(op, func(op ops.Operator) error { + _ = Visit(op, func(op Operator) error { if costlyOp, ok := op.(costly); ok { cost += costlyOp.Cost() } diff --git a/go/vt/vtgate/planbuilder/operators/horizon.go b/go/vt/vtgate/planbuilder/operators/horizon.go index c58db4f3964..1a6fc6331ea 100644 --- a/go/vt/vtgate/planbuilder/operators/horizon.go +++ b/go/vt/vtgate/planbuilder/operators/horizon.go @@ -22,7 +22,6 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) @@ -35,7 +34,7 @@ import ( // Project/Aggregate/Sort/Limit operations, some which can be pushed down, // and some that have to be evaluated at the vtgate level. 
type Horizon struct { - Source ops.Operator + Source Operator // If this is a derived table, the two following fields will contain the tableID and name of it TableId *semantics.TableSet @@ -52,12 +51,12 @@ type Horizon struct { ColumnsOffset []int } -func newHorizon(src ops.Operator, query sqlparser.SelectStatement) *Horizon { +func newHorizon(src Operator, query sqlparser.SelectStatement) *Horizon { return &Horizon{Source: src, Query: query} } // Clone implements the Operator interface -func (h *Horizon) Clone(inputs []ops.Operator) ops.Operator { +func (h *Horizon) Clone(inputs []Operator) Operator { klone := *h klone.Source = inputs[0] klone.ColumnAliases = sqlparser.CloneColumns(h.ColumnAliases) @@ -77,16 +76,16 @@ func (h *Horizon) IsMergeable(ctx *plancontext.PlanningContext) bool { } // Inputs implements the Operator interface -func (h *Horizon) Inputs() []ops.Operator { - return []ops.Operator{h.Source} +func (h *Horizon) Inputs() []Operator { + return []Operator{h.Source} } // SetInputs implements the Operator interface -func (h *Horizon) SetInputs(ops []ops.Operator) { +func (h *Horizon) SetInputs(ops []Operator) { h.Source = ops[0] } -func (h *Horizon) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (h *Horizon) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { if _, isUNion := h.Source.(*Union); isUNion { // If we have a derived table on top of a UNION, we can let the UNION do the expression rewriting h.Source = h.Source.AddPredicate(ctx, expr) @@ -181,12 +180,9 @@ func (h *Horizon) GetSelectExprs(*plancontext.PlanningContext) sqlparser.SelectE return sqlparser.GetFirstSelect(h.Query).SelectExprs } -func (h *Horizon) GetOrdering(ctx *plancontext.PlanningContext) []ops.OrderBy { +func (h *Horizon) GetOrdering(ctx *plancontext.PlanningContext) []OrderBy { if h.QP == nil { - _, err := h.getQP(ctx) - if err != nil { - panic(err) - } + h.getQP(ctx) } return h.QP.OrderExprs } @@ -196,20 +192,20 @@ func (h *Horizon) selectStatement() sqlparser.SelectStatement { return h.Query } -func (h *Horizon) src() ops.Operator { +func (h *Horizon) src() Operator { return h.Source } -func (h *Horizon) getQP(ctx *plancontext.PlanningContext) (*QueryProjection, error) { +func (h *Horizon) getQP(ctx *plancontext.PlanningContext) *QueryProjection { if h.QP != nil { - return h.QP, nil + return h.QP } qp, err := CreateQPFromSelectStatement(ctx, h.Query) if err != nil { - return nil, err + panic(err) } h.QP = qp - return h.QP, nil + return h.QP } func (h *Horizon) ShortDescription() string { diff --git a/go/vt/vtgate/planbuilder/operators/horizon_expanding.go b/go/vt/vtgate/planbuilder/operators/horizon_expanding.go index 06bcf2aaeb5..e3ddc5d9232 100644 --- a/go/vt/vtgate/planbuilder/operators/horizon_expanding.go +++ b/go/vt/vtgate/planbuilder/operators/horizon_expanding.go @@ -23,12 +23,10 @@ import ( "vitess.io/vitess/go/slice" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) -func expandHorizon(ctx *plancontext.PlanningContext, horizon *Horizon) (ops.Operator, *rewrite.ApplyResult, error) { +func expandHorizon(ctx *plancontext.PlanningContext, horizon *Horizon) (Operator, *ApplyResult) { statement := horizon.selectStatement() switch sel := statement.(type) { case *sqlparser.Select: @@ -36,16 +34,13 @@ func expandHorizon(ctx 
*plancontext.PlanningContext, horizon *Horizon) (ops.Oper case *sqlparser.Union: return expandUnionHorizon(ctx, horizon, sel) } - return nil, nil, vterrors.VT13001(fmt.Sprintf("unexpected statement type %T", statement)) + panic(vterrors.VT13001(fmt.Sprintf("unexpected statement type %T", statement))) } -func expandUnionHorizon(ctx *plancontext.PlanningContext, horizon *Horizon, union *sqlparser.Union) (ops.Operator, *rewrite.ApplyResult, error) { +func expandUnionHorizon(ctx *plancontext.PlanningContext, horizon *Horizon, union *sqlparser.Union) (Operator, *ApplyResult) { op := horizon.Source - qp, err := horizon.getQP(ctx) - if err != nil { - return nil, nil, err - } + qp := horizon.getQP(ctx) if len(qp.OrderExprs) > 0 { op = &Ordering{ @@ -72,20 +67,15 @@ func expandUnionHorizon(ctx *plancontext.PlanningContext, horizon *Horizon, unio } if op == horizon.Source { - return op, rewrite.NewTree("removed UNION horizon not used"), nil + return op, Rewrote("removed UNION horizon not used") } - return op, rewrite.NewTree("expand UNION horizon into smaller components"), nil + return op, Rewrote("expand UNION horizon into smaller components") } -func expandSelectHorizon(ctx *plancontext.PlanningContext, horizon *Horizon, sel *sqlparser.Select) (ops.Operator, *rewrite.ApplyResult, error) { +func expandSelectHorizon(ctx *plancontext.PlanningContext, horizon *Horizon, sel *sqlparser.Select) (Operator, *ApplyResult) { op := createProjectionFromSelect(ctx, horizon) - - qp, err := horizon.getQP(ctx) - if err != nil { - return nil, nil, err - } - + qp := horizon.getQP(ctx) var extracted []string if qp.HasAggr { extracted = append(extracted, "Aggregation") @@ -103,10 +93,7 @@ func expandSelectHorizon(ctx *plancontext.PlanningContext, horizon *Horizon, sel } if sel.Having != nil { - op, err = addWherePredicates(ctx, sel.Having.Expr, op) - if err != nil { - return nil, nil, err - } + op = addWherePredicates(ctx, sel.Having.Expr, op) extracted = append(extracted, "Filter") } @@ -126,14 +113,11 @@ func expandSelectHorizon(ctx *plancontext.PlanningContext, horizon *Horizon, sel extracted = append(extracted, "Limit") } - return op, rewrite.NewTree(fmt.Sprintf("expand SELECT horizon into (%s)", strings.Join(extracted, ", "))), nil + return op, Rewrote(fmt.Sprintf("expand SELECT horizon into (%s)", strings.Join(extracted, ", "))) } -func createProjectionFromSelect(ctx *plancontext.PlanningContext, horizon *Horizon) (out ops.Operator) { - qp, err := horizon.getQP(ctx) - if err != nil { - panic(err) - } +func createProjectionFromSelect(ctx *plancontext.PlanningContext, horizon *Horizon) (out Operator) { + qp := horizon.getQP(ctx) var dt *DerivedTable if horizon.TableId != nil { @@ -172,7 +156,7 @@ func createProjectionFromSelect(ctx *plancontext.PlanningContext, horizon *Horiz return createProjectionForSimpleAggregation(ctx, a, qp) } -func createProjectionForSimpleAggregation(ctx *plancontext.PlanningContext, a *Aggregator, qp *QueryProjection) ops.Operator { +func createProjectionForSimpleAggregation(ctx *plancontext.PlanningContext, a *Aggregator, qp *QueryProjection) Operator { outer: for colIdx, expr := range qp.SelectExprs { ae, err := expr.GetAliasedExpr() @@ -206,7 +190,7 @@ outer: return a } -func createProjectionForComplexAggregation(a *Aggregator, qp *QueryProjection) ops.Operator { +func createProjectionForComplexAggregation(a *Aggregator, qp *QueryProjection) Operator { p := newAliasedProjection(a) p.DT = a.DT for _, expr := range qp.SelectExprs { @@ -215,10 +199,7 @@ func 
createProjectionForComplexAggregation(a *Aggregator, qp *QueryProjection) o panic(err) } - _, err = p.addProjExpr(newProjExpr(ae)) - if err != nil { - panic(err) - } + p.addProjExpr(newProjExpr(ae)) } for i, by := range a.Grouping { a.Grouping[i].ColOffset = len(a.Columns) @@ -231,7 +212,7 @@ func createProjectionForComplexAggregation(a *Aggregator, qp *QueryProjection) o return p } -func createProjectionWithoutAggr(ctx *plancontext.PlanningContext, qp *QueryProjection, src ops.Operator) *Projection { +func createProjectionWithoutAggr(ctx *plancontext.PlanningContext, qp *QueryProjection, src Operator) *Projection { // first we need to check if we have all columns or there are still unexpanded stars aes, err := slice.MapWithError(qp.SelectExprs, func(from SelectExpr) (*sqlparser.AliasedExpr, error) { ae, ok := from.Col.(*sqlparser.AliasedExpr) @@ -252,28 +233,19 @@ func createProjectionWithoutAggr(ctx *plancontext.PlanningContext, qp *QueryProj for _, ae := range aes { org := sqlparser.CloneRefOfAliasedExpr(ae) expr := ae.Expr - newExpr, subqs, err := sqc.pullOutValueSubqueries(ctx, expr, outerID, false) - if err != nil { - panic(err) - } + newExpr, subqs := sqc.pullOutValueSubqueries(ctx, expr, outerID, false) if newExpr == nil { // there was no subquery in this expression - _, err := proj.addUnexploredExpr(org, expr) - if err != nil { - panic(err) - } + proj.addUnexploredExpr(org, expr) } else { - err := proj.addSubqueryExpr(org, newExpr, subqs...) - if err != nil { - panic(err) - } + proj.addSubqueryExpr(org, newExpr, subqs...) } } proj.Source = sqc.getRootOperator(src, nil) return proj } -func newStarProjection(src ops.Operator, qp *QueryProjection) *Projection { +func newStarProjection(src Operator, qp *QueryProjection) *Projection { cols := sqlparser.SelectExprs{} for _, expr := range qp.SelectExprs { diff --git a/go/vt/vtgate/planbuilder/operators/info_schema_planning.go b/go/vt/vtgate/planbuilder/operators/info_schema_planning.go index 4f096e1ac65..f7de09c4857 100644 --- a/go/vt/vtgate/planbuilder/operators/info_schema_planning.go +++ b/go/vt/vtgate/planbuilder/operators/info_schema_planning.go @@ -41,7 +41,7 @@ type InfoSchemaRouting struct { Table *QueryTable } -func (isr *InfoSchemaRouting) UpdateRoutingParams(_ *plancontext.PlanningContext, rp *engine.RoutingParameters) error { +func (isr *InfoSchemaRouting) UpdateRoutingParams(_ *plancontext.PlanningContext, rp *engine.RoutingParameters) { rp.SysTableTableSchema = nil for _, expr := range isr.SysTableTableSchema { eexpr, err := evalengine.Translate(expr, &evalengine.Config{ @@ -49,7 +49,7 @@ func (isr *InfoSchemaRouting) UpdateRoutingParams(_ *plancontext.PlanningContext ResolveColumn: NotImplementedSchemaInfoResolver, }) if err != nil { - return err + panic(err) } rp.SysTableTableSchema = append(rp.SysTableTableSchema, eexpr) } @@ -61,12 +61,11 @@ func (isr *InfoSchemaRouting) UpdateRoutingParams(_ *plancontext.PlanningContext ResolveColumn: NotImplementedSchemaInfoResolver, }) if err != nil { - return err + panic(err) } rp.SysTableTableName[k] = eexpr } - return nil } func (isr *InfoSchemaRouting) Clone() Routing { @@ -77,10 +76,10 @@ func (isr *InfoSchemaRouting) Clone() Routing { } } -func (isr *InfoSchemaRouting) updateRoutingLogic(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (Routing, error) { +func (isr *InfoSchemaRouting) updateRoutingLogic(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Routing { isTableSchema, bvName, out := extractInfoSchemaRoutingPredicate(ctx, expr) if out == nil { - return isr, 
nil + return isr } if isr.SysTableTableName == nil { @@ -92,14 +91,14 @@ func (isr *InfoSchemaRouting) updateRoutingLogic(ctx *plancontext.PlanningContex if sqlparser.Equals.Expr(out, s) { // we already have this expression in the list // stating it again does not add value - return isr, nil + return isr } } isr.SysTableTableSchema = append(isr.SysTableTableSchema, out) } else { isr.SysTableTableName[bvName] = out } - return isr, nil + return isr } func (isr *InfoSchemaRouting) Cost() int { diff --git a/go/vt/vtgate/planbuilder/operators/insert.go b/go/vt/vtgate/planbuilder/operators/insert.go index fa2f60dcecc..f783ac7a5bc 100644 --- a/go/vt/vtgate/planbuilder/operators/insert.go +++ b/go/vt/vtgate/planbuilder/operators/insert.go @@ -24,7 +24,6 @@ import ( "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/vindexes" ) @@ -79,13 +78,13 @@ func (i *Insert) ShortDescription() string { return i.VTable.String() } -func (i *Insert) GetOrdering(*plancontext.PlanningContext) []ops.OrderBy { +func (i *Insert) GetOrdering(*plancontext.PlanningContext) []OrderBy { return nil } -var _ ops.Operator = (*Insert)(nil) +var _ Operator = (*Insert)(nil) -func (i *Insert) Clone([]ops.Operator) ops.Operator { +func (i *Insert) Clone([]Operator) Operator { return &Insert{ VTable: i.VTable, AST: i.AST, @@ -105,16 +104,10 @@ func (i *Insert) Statement() sqlparser.Statement { return i.AST } -func createOperatorFromInsert(ctx *plancontext.PlanningContext, ins *sqlparser.Insert) (ops.Operator, error) { - tableInfo, qt, err := createQueryTableForDML(ctx, ins.Table, nil) - if err != nil { - return nil, err - } +func createOperatorFromInsert(ctx *plancontext.PlanningContext, ins *sqlparser.Insert) Operator { + tableInfo, qt := createQueryTableForDML(ctx, ins.Table, nil) - vTbl, routing, err := buildVindexTableForDML(ctx, tableInfo, qt, "insert") - if err != nil { - return nil, err - } + vTbl, routing := buildVindexTableForDML(ctx, tableInfo, qt, "insert") deleteBeforeInsert := false if ins.Action == sqlparser.ReplaceAct && @@ -125,37 +118,27 @@ func createOperatorFromInsert(ctx *plancontext.PlanningContext, ins *sqlparser.I deleteBeforeInsert = true } - insOp, err := checkAndCreateInsertOperator(ctx, ins, vTbl, routing) - if err != nil { - return nil, err - } + insOp := checkAndCreateInsertOperator(ctx, ins, vTbl, routing) if !deleteBeforeInsert { - return insOp, nil + return insOp } rows, isRows := ins.Rows.(sqlparser.Values) if !isRows { - return nil, vterrors.VT12001("REPLACE INTO using select statement") + panic(vterrors.VT12001("REPLACE INTO using select statement")) } pkCompExpr := pkCompExpression(vTbl, ins, rows) - uniqKeyCompExprs, err := uniqKeyCompExpressions(vTbl, ins, rows) - if err != nil { - return nil, err - } - + uniqKeyCompExprs := uniqKeyCompExpressions(vTbl, ins, rows) whereExpr := getWhereCondExpr(append(uniqKeyCompExprs, pkCompExpr)) delStmt := &sqlparser.Delete{ TableExprs: sqlparser.TableExprs{sqlparser.CloneRefOfAliasedTableExpr(ins.Table)}, Where: sqlparser.NewWhere(sqlparser.WhereClause, whereExpr), } - delOp, err := createOpFromStmt(ctx, delStmt, false, "") - if err != nil { - return nil, err - } - return &Sequential{Sources: []ops.Operator{delOp, insOp}}, nil + delOp := createOpFromStmt(ctx, delStmt, false, "") + return &Sequential{Sources: []Operator{delOp, insOp}} } func 
getWhereCondExpr(compExprs []*sqlparser.ComparisonExpr) sqlparser.Expr { @@ -229,10 +212,10 @@ type uComp struct { def sqlparser.Expr } -func uniqKeyCompExpressions(vTbl *vindexes.Table, ins *sqlparser.Insert, rows sqlparser.Values) (comps []*sqlparser.ComparisonExpr, err error) { +func uniqKeyCompExpressions(vTbl *vindexes.Table, ins *sqlparser.Insert, rows sqlparser.Values) (comps []*sqlparser.ComparisonExpr) { noOfUniqKeys := len(vTbl.UniqueKeys) if noOfUniqKeys == 0 { - return nil, nil + return nil } type uIdx struct { @@ -248,10 +231,7 @@ func uniqKeyCompExpressions(vTbl *vindexes.Table, ins *sqlparser.Insert, rows sq skipKey := false for _, expr := range uniqKey { var offsets []uComp - offsets, skipKey, err = createUniqueKeyComp(ins, expr, vTbl) - if err != nil { - return nil, err - } + offsets, skipKey = createUniqueKeyComp(ins, expr, vTbl) if skipKey { break } @@ -293,10 +273,10 @@ func uniqKeyCompExpressions(vTbl *vindexes.Table, ins *sqlparser.Insert, rows sq for i, valTuple := range allValTuples { compExprs = append(compExprs, sqlparser.NewComparisonExpr(sqlparser.InOp, allColTuples[i], valTuple, nil)) } - return compExprs, nil + return compExprs } -func createUniqueKeyComp(ins *sqlparser.Insert, expr sqlparser.Expr, vTbl *vindexes.Table) ([]uComp, bool, error) { +func createUniqueKeyComp(ins *sqlparser.Insert, expr sqlparser.Expr, vTbl *vindexes.Table) ([]uComp, bool) { col, isCol := expr.(*sqlparser.ColName) if isCol { var def sqlparser.Expr @@ -305,13 +285,13 @@ func createUniqueKeyComp(ins *sqlparser.Insert, expr sqlparser.Expr, vTbl *vinde def = findDefault(vTbl, col.Name) if def == nil { // default value is empty, nothing to compare as it will always be false. - return nil, true, nil + return nil, true } } - return []uComp{{idx, def}}, false, nil + return []uComp{{idx, def}}, false } var offsets []uComp - err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { col, ok := node.(*sqlparser.ColName) if !ok { return true, nil @@ -328,14 +308,11 @@ func createUniqueKeyComp(ins *sqlparser.Insert, expr sqlparser.Expr, vTbl *vinde offsets = append(offsets, uComp{idx, def}) return false, nil }, expr) - return offsets, false, err + return offsets, false } -func checkAndCreateInsertOperator(ctx *plancontext.PlanningContext, ins *sqlparser.Insert, vTbl *vindexes.Table, routing Routing) (ops.Operator, error) { - insOp, err := createInsertOperator(ctx, ins, vTbl, routing) - if err != nil { - return nil, err - } +func checkAndCreateInsertOperator(ctx *plancontext.PlanningContext, ins *sqlparser.Insert, vTbl *vindexes.Table, routing Routing) Operator { + insOp := createInsertOperator(ctx, ins, vTbl, routing) if ins.Comments != nil { insOp = &LockAndComment{ @@ -347,31 +324,31 @@ func checkAndCreateInsertOperator(ctx *plancontext.PlanningContext, ins *sqlpars // Find the foreign key mode and for unmanaged foreign-key-mode, we don't need to do anything. 
ksMode, err := ctx.VSchema.ForeignKeyMode(vTbl.Keyspace.Name) if err != nil { - return nil, err + return nil } if ksMode != vschemapb.Keyspace_managed { - return insOp, nil + return insOp } parentFKs := ctx.SemTable.GetParentForeignKeysList() childFks := ctx.SemTable.GetChildForeignKeysList() if len(parentFKs) > 0 { - return nil, vterrors.VT12002() + panic(vterrors.VT12002()) } if len(childFks) > 0 { if ins.Action == sqlparser.ReplaceAct { - return nil, vterrors.VT12001("REPLACE INTO with foreign keys") + panic(vterrors.VT12001("REPLACE INTO with foreign keys")) } if len(ins.OnDup) > 0 { - return nil, vterrors.VT12001("ON DUPLICATE KEY UPDATE with foreign keys") + panic(vterrors.VT12001("ON DUPLICATE KEY UPDATE with foreign keys")) } } - return insOp, nil + return insOp } -func createInsertOperator(ctx *plancontext.PlanningContext, insStmt *sqlparser.Insert, vTbl *vindexes.Table, routing Routing) (ops.Operator, error) { +func createInsertOperator(ctx *plancontext.PlanningContext, insStmt *sqlparser.Insert, vTbl *vindexes.Table, routing Routing) Operator { if _, target := routing.(*TargetedRouting); target { - return nil, vterrors.VT09017("INSERT with a target destination is not allowed") + panic(vterrors.VT09017("INSERT with a target destination is not allowed")) } insOp := &Insert{ @@ -390,15 +367,12 @@ func createInsertOperator(ctx *plancontext.PlanningContext, insStmt *sqlparser.I if vTbl.ColumnListAuthoritative { insStmt = populateInsertColumnlist(insStmt, vTbl) } else { - return nil, vterrors.VT09004() + panic(vterrors.VT09004()) } } // modify column list or values for autoincrement column. - autoIncGen, err := modifyForAutoinc(ctx, insStmt, vTbl) - if err != nil { - return nil, err - } + autoIncGen := modifyForAutoinc(ctx, insStmt, vTbl) insOp.AutoIncrement = autoIncGen // set insert ignore. @@ -407,24 +381,27 @@ func createInsertOperator(ctx *plancontext.PlanningContext, insStmt *sqlparser.I insOp.ColVindexes = getColVindexes(insOp) switch rows := insStmt.Rows.(type) { case sqlparser.Values: - route.Source, err = insertRowsPlan(ctx, insOp, insStmt, rows) - if err != nil { - return nil, err - } + route.Source = insertRowsPlan(ctx, insOp, insStmt, rows) case sqlparser.SelectStatement: return insertSelectPlan(ctx, insOp, route, insStmt, rows) } - return route, nil + return route } -func insertSelectPlan(ctx *plancontext.PlanningContext, insOp *Insert, routeOp *Route, ins *sqlparser.Insert, sel sqlparser.SelectStatement) (*InsertSelection, error) { +func insertSelectPlan( + ctx *plancontext.PlanningContext, + insOp *Insert, + routeOp *Route, + ins *sqlparser.Insert, + sel sqlparser.SelectStatement, +) *InsertSelection { if columnMismatch(insOp.AutoIncrement, ins, sel) { - return nil, vterrors.VT03006() + panic(vterrors.VT03006()) } selOp, err := PlanQuery(ctx, sel) if err != nil { - return nil, err + panic(err) } // output of the select plan will be used to insert rows into the table. 
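A note on the INSERT ... SELECT path reshaped here: the SELECT is planned as its own operator tree and paired with the insert route (the InsertSelection operator later in this diff holds both), and ForceNonStreaming runs the selection to completion before any row is inserted, to avoid the selection holding row locks while inserting (per the ForceNonStreaming comment further down). For a sharded target the insert's column list must include the sharding column; its position in the selected row is recorded in VindexValueOffset below so each row can be routed. A hedged example with an assumed schema: with user_extra sharded on user_id, INSERT INTO user_extra(user_id, email) SELECT id, email FROM user is accepted because user_id is listed, while omitting it would hit the VT09003 panic seen below.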
@@ -449,28 +426,24 @@ func insertSelectPlan(ctx *plancontext.PlanningContext, insOp *Insert, routeOp * } if len(insOp.ColVindexes) == 0 { - return insertSelect, nil + return insertSelect } colVindexes := insOp.ColVindexes vv := make([][]int, len(colVindexes)) for idx, colVindex := range colVindexes { for _, col := range colVindex.Columns { - err := checkAndErrIfVindexChanging(sqlparser.UpdateExprs(ins.OnDup), col) - if err != nil { - return nil, err - } - + checkAndErrIfVindexChanging(sqlparser.UpdateExprs(ins.OnDup), col) colNum := findColumn(ins, col) // sharding column values should be provided in the insert. if colNum == -1 && idx == 0 { - return nil, vterrors.VT09003(col) + panic(vterrors.VT09003(col)) } vv[idx] = append(vv[idx], colNum) } } insOp.VindexValueOffset = vv - return insertSelect, nil + return insertSelect } func columnMismatch(gen *Generate, ins *sqlparser.Insert, sel sqlparser.SelectStatement) bool { @@ -498,15 +471,15 @@ func columnMismatch(gen *Generate, ins *sqlparser.Insert, sel sqlparser.SelectSt return false } -func insertRowsPlan(ctx *plancontext.PlanningContext, insOp *Insert, ins *sqlparser.Insert, rows sqlparser.Values) (*Insert, error) { +func insertRowsPlan(ctx *plancontext.PlanningContext, insOp *Insert, ins *sqlparser.Insert, rows sqlparser.Values) *Insert { for _, row := range rows { if len(ins.Columns) != len(row) { - return nil, vterrors.VT03006() + panic(vterrors.VT03006()) } } if len(insOp.ColVindexes) == 0 { - return insOp, nil + return insOp } colVindexes := insOp.ColVindexes @@ -514,10 +487,7 @@ func insertRowsPlan(ctx *plancontext.PlanningContext, insOp *Insert, ins *sqlpar for vIdx, colVindex := range colVindexes { routeValues[vIdx] = make([][]evalengine.Expr, len(colVindex.Columns)) for colIdx, col := range colVindex.Columns { - err := checkAndErrIfVindexChanging(sqlparser.UpdateExprs(ins.OnDup), col) - if err != nil { - return nil, err - } + checkAndErrIfVindexChanging(sqlparser.UpdateExprs(ins.OnDup), col) routeValues[vIdx][colIdx] = make([]evalengine.Expr, len(rows)) colNum, _ := findOrAddColumn(ins, col) for rowNum, row := range rows { @@ -526,7 +496,7 @@ func insertRowsPlan(ctx *plancontext.PlanningContext, insOp *Insert, ins *sqlpar Collation: ctx.SemTable.Collation, }) if err != nil { - return nil, err + panic(err) } routeValues[vIdx][colIdx][rowNum] = innerpv } @@ -543,7 +513,7 @@ func insertRowsPlan(ctx *plancontext.PlanningContext, insOp *Insert, ins *sqlpar } } insOp.VindexValues = routeValues - return insOp, nil + return insOp } func valuesProvided(rows sqlparser.InsertRows) bool { @@ -571,18 +541,17 @@ func getColVindexes(insOp *Insert) (colVindexes []*vindexes.ColumnVindex) { return } -func checkAndErrIfVindexChanging(setClauses sqlparser.UpdateExprs, col sqlparser.IdentifierCI) error { +func checkAndErrIfVindexChanging(setClauses sqlparser.UpdateExprs, col sqlparser.IdentifierCI) { for _, assignment := range setClauses { if col.Equal(assignment.Name.Name) { valueExpr, isValuesFuncExpr := assignment.Expr.(*sqlparser.ValuesFuncExpr) // update on duplicate key is changing the vindex column, not supported. if !isValuesFuncExpr || !valueExpr.Name.Name.Equal(assignment.Name.Name) { - return vterrors.VT12001("DML cannot update vindex column") + panic(vterrors.VT12001("DML cannot update vindex column")) } - return nil + return } } - return nil } // findOrAddColumn finds the position of a column in the insert. 
If it's @@ -625,9 +594,9 @@ func populateInsertColumnlist(ins *sqlparser.Insert, table *vindexes.Table) *sql // modifyForAutoinc modifies the AST and the plan to generate necessary autoinc values. // For row values cases, bind variable names are generated using baseName. -func modifyForAutoinc(ctx *plancontext.PlanningContext, ins *sqlparser.Insert, vTable *vindexes.Table) (*Generate, error) { +func modifyForAutoinc(ctx *plancontext.PlanningContext, ins *sqlparser.Insert, vTable *vindexes.Table) *Generate { if vTable.AutoIncrement == nil { - return nil, nil + return nil } gen := &Generate{ Keyspace: vTable.AutoIncrement.Sequence.Keyspace, @@ -642,7 +611,7 @@ func modifyForAutoinc(ctx *plancontext.PlanningContext, ins *sqlparser.Insert, v autoIncValues := make(sqlparser.ValTuple, 0, len(rows)) for rowNum, row := range rows { if len(ins.Columns) != len(row) { - return nil, vterrors.VT03006() + panic(vterrors.VT03006()) } // Support the DEFAULT keyword by treating it as null if _, ok := row[colNum].(*sqlparser.Default); ok { @@ -657,8 +626,8 @@ func modifyForAutoinc(ctx *plancontext.PlanningContext, ins *sqlparser.Insert, v Collation: ctx.SemTable.Collation, }) if err != nil { - return nil, err + panic(err) } } - return gen, nil + return gen } diff --git a/go/vt/vtgate/planbuilder/operators/insert_selection.go b/go/vt/vtgate/planbuilder/operators/insert_selection.go index 5ae49ee2c55..70bda0a990a 100644 --- a/go/vt/vtgate/planbuilder/operators/insert_selection.go +++ b/go/vt/vtgate/planbuilder/operators/insert_selection.go @@ -17,15 +17,14 @@ limitations under the License. package operators import ( - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) // InsertSelection operator represents an INSERT into SELECT FROM query. // It holds the operators for running the selection and insertion. type InsertSelection struct { - Select ops.Operator - Insert ops.Operator + Select Operator + Insert Operator // ForceNonStreaming when true, select first then insert, this is to avoid locking rows by select for insert. 
ForceNonStreaming bool @@ -34,7 +33,7 @@ type InsertSelection struct { noPredicates } -func (is *InsertSelection) Clone(inputs []ops.Operator) ops.Operator { +func (is *InsertSelection) Clone(inputs []Operator) Operator { return &InsertSelection{ Select: inputs[0], Insert: inputs[1], @@ -42,11 +41,11 @@ func (is *InsertSelection) Clone(inputs []ops.Operator) ops.Operator { } } -func (is *InsertSelection) Inputs() []ops.Operator { - return []ops.Operator{is.Select, is.Insert} +func (is *InsertSelection) Inputs() []Operator { + return []Operator{is.Select, is.Insert} } -func (is *InsertSelection) SetInputs(inputs []ops.Operator) { +func (is *InsertSelection) SetInputs(inputs []Operator) { is.Select = inputs[0] is.Insert = inputs[1] } @@ -58,8 +57,8 @@ func (is *InsertSelection) ShortDescription() string { return "" } -func (is *InsertSelection) GetOrdering(*plancontext.PlanningContext) []ops.OrderBy { +func (is *InsertSelection) GetOrdering(*plancontext.PlanningContext) []OrderBy { return nil } -var _ ops.Operator = (*InsertSelection)(nil) +var _ Operator = (*InsertSelection)(nil) diff --git a/go/vt/vtgate/planbuilder/operators/join.go b/go/vt/vtgate/planbuilder/operators/join.go index 1d50a688df4..35bf26f9793 100644 --- a/go/vt/vtgate/planbuilder/operators/join.go +++ b/go/vt/vtgate/planbuilder/operators/join.go @@ -19,24 +19,22 @@ package operators import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) // Join represents a join. If we have a predicate, this is an inner join. If no predicate exists, it is a cross join type Join struct { - LHS, RHS ops.Operator + LHS, RHS Operator Predicate sqlparser.Expr LeftJoin bool noColumns } -var _ ops.Operator = (*Join)(nil) +var _ Operator = (*Join)(nil) // Clone implements the Operator interface -func (j *Join) Clone(inputs []ops.Operator) ops.Operator { +func (j *Join) Clone(inputs []Operator) Operator { clone := *j clone.LHS = inputs[0] clone.RHS = inputs[1] @@ -48,30 +46,30 @@ func (j *Join) Clone(inputs []ops.Operator) ops.Operator { } } -func (j *Join) GetOrdering(*plancontext.PlanningContext) []ops.OrderBy { +func (j *Join) GetOrdering(*plancontext.PlanningContext) []OrderBy { return nil } // Inputs implements the Operator interface -func (j *Join) Inputs() []ops.Operator { - return []ops.Operator{j.LHS, j.RHS} +func (j *Join) Inputs() []Operator { + return []Operator{j.LHS, j.RHS} } // SetInputs implements the Operator interface -func (j *Join) SetInputs(ops []ops.Operator) { +func (j *Join) SetInputs(ops []Operator) { j.LHS, j.RHS = ops[0], ops[1] } -func (j *Join) Compact(ctx *plancontext.PlanningContext) (ops.Operator, *rewrite.ApplyResult, error) { +func (j *Join) Compact(ctx *plancontext.PlanningContext) (Operator, *ApplyResult) { if j.LeftJoin { // we can't merge outer joins into a single QG - return j, rewrite.SameTree, nil + return j, NoRewrite } lqg, lok := j.LHS.(*QueryGraph) rqg, rok := j.RHS.(*QueryGraph) if !lok || !rok { - return j, rewrite.SameTree, nil + return j, NoRewrite } newOp := &QueryGraph{ @@ -82,23 +80,23 @@ func (j *Join) Compact(ctx *plancontext.PlanningContext) (ops.Operator, *rewrite if j.Predicate != nil { newOp.collectPredicate(ctx, j.Predicate) } - return newOp, rewrite.NewTree("merge querygraphs into a single one"), nil + return newOp, Rewrote("merge querygraphs into a single one") } -func 
createOuterJoin(tableExpr *sqlparser.JoinTableExpr, lhs, rhs ops.Operator) (ops.Operator, error) { +func createOuterJoin(tableExpr *sqlparser.JoinTableExpr, lhs, rhs Operator) Operator { if tableExpr.Join == sqlparser.RightJoinType { lhs, rhs = rhs, lhs } subq, _ := getSubQuery(tableExpr.Condition.On) if subq != nil { - return nil, vterrors.VT12001("subquery in outer join predicate") + panic(vterrors.VT12001("subquery in outer join predicate")) } predicate := tableExpr.Condition.On sqlparser.RemoveKeyspaceFromColName(predicate) - return &Join{LHS: lhs, RHS: rhs, LeftJoin: true, Predicate: predicate}, nil + return &Join{LHS: lhs, RHS: rhs, LeftJoin: true, Predicate: predicate} } -func createJoin(ctx *plancontext.PlanningContext, LHS, RHS ops.Operator) ops.Operator { +func createJoin(ctx *plancontext.PlanningContext, LHS, RHS Operator) Operator { lqg, lok := LHS.(*QueryGraph) rqg, rok := RHS.(*QueryGraph) if lok && rok { @@ -112,7 +110,7 @@ func createJoin(ctx *plancontext.PlanningContext, LHS, RHS ops.Operator) ops.Ope return &Join{LHS: LHS, RHS: RHS} } -func createInnerJoin(ctx *plancontext.PlanningContext, tableExpr *sqlparser.JoinTableExpr, lhs, rhs ops.Operator) (ops.Operator, error) { +func createInnerJoin(ctx *plancontext.PlanningContext, tableExpr *sqlparser.JoinTableExpr, lhs, rhs Operator) Operator { op := createJoin(ctx, lhs, rhs) sqc := &SubQueryBuilder{} outerID := TableID(op) @@ -120,37 +118,34 @@ func createInnerJoin(ctx *plancontext.PlanningContext, tableExpr *sqlparser.Join sqlparser.RemoveKeyspaceFromColName(joinPredicate) exprs := sqlparser.SplitAndExpression(nil, joinPredicate) for _, pred := range exprs { - subq, err := sqc.handleSubquery(ctx, pred, outerID) - if err != nil { - return nil, err - } + subq := sqc.handleSubquery(ctx, pred, outerID) if subq != nil { continue } op = op.AddPredicate(ctx, pred) } - return sqc.getRootOperator(op, nil), nil + return sqc.getRootOperator(op, nil) } -func (j *Join) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (j *Join) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { return AddPredicate(ctx, j, expr, false, newFilter) } var _ JoinOp = (*Join)(nil) -func (j *Join) GetLHS() ops.Operator { +func (j *Join) GetLHS() Operator { return j.LHS } -func (j *Join) GetRHS() ops.Operator { +func (j *Join) GetRHS() Operator { return j.RHS } -func (j *Join) SetLHS(operator ops.Operator) { +func (j *Join) SetLHS(operator Operator) { j.LHS = operator } -func (j *Join) SetRHS(operator ops.Operator) { +func (j *Join) SetRHS(operator Operator) { j.RHS = operator } diff --git a/go/vt/vtgate/planbuilder/operators/join_merging.go b/go/vt/vtgate/planbuilder/operators/join_merging.go index 52c9c4e5837..dfd89013e94 100644 --- a/go/vt/vtgate/planbuilder/operators/join_merging.go +++ b/go/vt/vtgate/planbuilder/operators/join_merging.go @@ -21,14 +21,13 @@ import ( "reflect" "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) // mergeJoinInputs checks whether two operators can be merged into a single one. // If they can be merged, a new operator with the merged routing is returned // If they cannot be merged, nil is returned. 
-func mergeJoinInputs(ctx *plancontext.PlanningContext, lhs, rhs ops.Operator, joinPredicates []sqlparser.Expr, m merger) *Route { +func mergeJoinInputs(ctx *plancontext.PlanningContext, lhs, rhs Operator, joinPredicates []sqlparser.Expr, m merger) *Route { lhsRoute, rhsRoute, routingA, routingB, a, b, sameKeyspace := prepareInputRoutes(lhs, rhs) if lhsRoute == nil { return nil @@ -66,7 +65,7 @@ func mergeJoinInputs(ctx *plancontext.PlanningContext, lhs, rhs ops.Operator, jo } } -func prepareInputRoutes(lhs ops.Operator, rhs ops.Operator) (*Route, *Route, Routing, Routing, routingType, routingType, bool) { +func prepareInputRoutes(lhs Operator, rhs Operator) (*Route, *Route, Routing, Routing, routingType, routingType, bool) { lhsRoute, rhsRoute := operatorsToRoutes(lhs, rhs) if lhsRoute == nil || rhsRoute == nil { return nil, nil, nil, nil, 0, 0, false diff --git a/go/vt/vtgate/planbuilder/operators/joins.go b/go/vt/vtgate/planbuilder/operators/joins.go index ad61a6c5a00..266b9b8288f 100644 --- a/go/vt/vtgate/planbuilder/operators/joins.go +++ b/go/vt/vtgate/planbuilder/operators/joins.go @@ -18,17 +18,16 @@ package operators import ( "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) type JoinOp interface { - ops.Operator - GetLHS() ops.Operator - GetRHS() ops.Operator - SetLHS(ops.Operator) - SetRHS(ops.Operator) + Operator + GetLHS() Operator + GetRHS() Operator + SetLHS(Operator) + SetRHS(Operator) MakeInner() IsInner() bool AddJoinPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) @@ -39,8 +38,8 @@ func AddPredicate( join JoinOp, expr sqlparser.Expr, joinPredicates bool, - newFilter func(ops.Operator, sqlparser.Expr) ops.Operator, -) ops.Operator { + newFilter func(Operator, sqlparser.Expr) Operator, +) Operator { deps := ctx.SemTable.RecursiveDeps(expr) switch { case deps.IsSolvedBy(TableID(join.GetLHS())): diff --git a/go/vt/vtgate/planbuilder/operators/limit.go b/go/vt/vtgate/planbuilder/operators/limit.go index a6ea925b135..1ba6b61149d 100644 --- a/go/vt/vtgate/planbuilder/operators/limit.go +++ b/go/vt/vtgate/planbuilder/operators/limit.go @@ -18,12 +18,11 @@ package operators import ( "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) type Limit struct { - Source ops.Operator + Source Operator AST *sqlparser.Limit // Pushed marks whether the limit has been pushed down to the inputs but still need to keep the operator around. 
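On the Pushed flag documented just above: pushing a LIMIT into the shard queries is not sufficient on its own, because every shard may return up to the limit and their union can exceed it, so the vtgate-side Limit operator is kept to trim the merged result. A trivial illustrative sketch of that final trim (not the engine's actual code; OFFSET handling is ignored here):

	func trimToLimit[T any](rows []T, count int) []T {
		// each shard already applied the limit; the merged set may still be larger
		if len(rows) <= count {
			return rows
		}
		return rows[:count]
	}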
@@ -32,22 +31,22 @@ type Limit struct { Pushed bool } -func (l *Limit) Clone(inputs []ops.Operator) ops.Operator { +func (l *Limit) Clone(inputs []Operator) Operator { return &Limit{ Source: inputs[0], AST: sqlparser.CloneRefOfLimit(l.AST), } } -func (l *Limit) Inputs() []ops.Operator { - return []ops.Operator{l.Source} +func (l *Limit) Inputs() []Operator { + return []Operator{l.Source} } -func (l *Limit) SetInputs(operators []ops.Operator) { +func (l *Limit) SetInputs(operators []Operator) { l.Source = operators[0] } -func (l *Limit) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (l *Limit) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { l.Source = l.Source.AddPredicate(ctx, expr) return l } @@ -68,7 +67,7 @@ func (l *Limit) GetSelectExprs(ctx *plancontext.PlanningContext) sqlparser.Selec return l.Source.GetSelectExprs(ctx) } -func (l *Limit) GetOrdering(ctx *plancontext.PlanningContext) []ops.OrderBy { +func (l *Limit) GetOrdering(ctx *plancontext.PlanningContext) []OrderBy { return l.Source.GetOrdering(ctx) } diff --git a/go/vt/vtgate/planbuilder/operators/misc_routing.go b/go/vt/vtgate/planbuilder/operators/misc_routing.go index 81301f975b4..575aa7b4e9a 100644 --- a/go/vt/vtgate/planbuilder/operators/misc_routing.go +++ b/go/vt/vtgate/planbuilder/operators/misc_routing.go @@ -64,10 +64,9 @@ var ( _ Routing = (*SequenceRouting)(nil) ) -func (tr *TargetedRouting) UpdateRoutingParams(_ *plancontext.PlanningContext, rp *engine.RoutingParameters) error { +func (tr *TargetedRouting) UpdateRoutingParams(_ *plancontext.PlanningContext, rp *engine.RoutingParameters) { rp.Keyspace = tr.keyspace rp.TargetDestination = tr.TargetDestination - return nil } func (tr *TargetedRouting) Clone() Routing { @@ -75,8 +74,8 @@ func (tr *TargetedRouting) Clone() Routing { return &newTr } -func (tr *TargetedRouting) updateRoutingLogic(_ *plancontext.PlanningContext, _ sqlparser.Expr) (Routing, error) { - return tr, nil +func (tr *TargetedRouting) updateRoutingLogic(_ *plancontext.PlanningContext, _ sqlparser.Expr) Routing { + return tr } func (tr *TargetedRouting) Cost() int { @@ -91,17 +90,16 @@ func (tr *TargetedRouting) Keyspace() *vindexes.Keyspace { return tr.keyspace } -func (n *NoneRouting) UpdateRoutingParams(_ *plancontext.PlanningContext, rp *engine.RoutingParameters) error { +func (n *NoneRouting) UpdateRoutingParams(_ *plancontext.PlanningContext, rp *engine.RoutingParameters) { rp.Keyspace = n.keyspace - return nil } func (n *NoneRouting) Clone() Routing { return n } -func (n *NoneRouting) updateRoutingLogic(*plancontext.PlanningContext, sqlparser.Expr) (Routing, error) { - return n, nil +func (n *NoneRouting) updateRoutingLogic(*plancontext.PlanningContext, sqlparser.Expr) Routing { + return n } func (n *NoneRouting) Cost() int { @@ -116,9 +114,8 @@ func (n *NoneRouting) Keyspace() *vindexes.Keyspace { return n.keyspace } -func (rr *AnyShardRouting) UpdateRoutingParams(_ *plancontext.PlanningContext, rp *engine.RoutingParameters) error { +func (rr *AnyShardRouting) UpdateRoutingParams(_ *plancontext.PlanningContext, rp *engine.RoutingParameters) { rp.Keyspace = rr.keyspace - return nil } func (rr *AnyShardRouting) Clone() Routing { @@ -128,8 +125,8 @@ func (rr *AnyShardRouting) Clone() Routing { } } -func (rr *AnyShardRouting) updateRoutingLogic(*plancontext.PlanningContext, sqlparser.Expr) (Routing, error) { - return rr, nil +func (rr *AnyShardRouting) updateRoutingLogic(*plancontext.PlanningContext, sqlparser.Expr) 
Routing { + return rr } func (rr *AnyShardRouting) Cost() int { @@ -159,16 +156,14 @@ func (rr *AnyShardRouting) AlternateInKeyspace(keyspace *vindexes.Keyspace) *Rou return nil } -func (dr *DualRouting) UpdateRoutingParams(*plancontext.PlanningContext, *engine.RoutingParameters) error { - return nil -} +func (dr *DualRouting) UpdateRoutingParams(*plancontext.PlanningContext, *engine.RoutingParameters) {} func (dr *DualRouting) Clone() Routing { return &DualRouting{} } -func (dr *DualRouting) updateRoutingLogic(*plancontext.PlanningContext, sqlparser.Expr) (Routing, error) { - return dr, nil +func (dr *DualRouting) updateRoutingLogic(*plancontext.PlanningContext, sqlparser.Expr) Routing { + return dr } func (dr *DualRouting) Cost() int { @@ -183,18 +178,17 @@ func (dr *DualRouting) Keyspace() *vindexes.Keyspace { return nil } -func (sr *SequenceRouting) UpdateRoutingParams(_ *plancontext.PlanningContext, rp *engine.RoutingParameters) error { +func (sr *SequenceRouting) UpdateRoutingParams(_ *plancontext.PlanningContext, rp *engine.RoutingParameters) { rp.Opcode = engine.Next rp.Keyspace = sr.keyspace - return nil } func (sr *SequenceRouting) Clone() Routing { return &SequenceRouting{keyspace: sr.keyspace} } -func (sr *SequenceRouting) updateRoutingLogic(*plancontext.PlanningContext, sqlparser.Expr) (Routing, error) { - return sr, nil +func (sr *SequenceRouting) updateRoutingLogic(*plancontext.PlanningContext, sqlparser.Expr) Routing { + return sr } func (sr *SequenceRouting) Cost() int { diff --git a/go/vt/vtgate/planbuilder/operators/offset_planning.go b/go/vt/vtgate/planbuilder/operators/offset_planning.go index d2fc266790c..6de7a2be2b0 100644 --- a/go/vt/vtgate/planbuilder/operators/offset_planning.go +++ b/go/vt/vtgate/planbuilder/operators/offset_planning.go @@ -21,36 +21,30 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) // planOffsets will walk the tree top down, adding offset information to columns in the tree for use in further optimization, -func planOffsets(ctx *plancontext.PlanningContext, root ops.Operator) (ops.Operator, error) { +func planOffsets(ctx *plancontext.PlanningContext, root Operator) Operator { type offsettable interface { - planOffsets(ctx *plancontext.PlanningContext) ops.Operator + planOffsets(ctx *plancontext.PlanningContext) Operator } - visitor := func(in ops.Operator, _ semantics.TableSet, _ bool) (ops.Operator, *rewrite.ApplyResult, error) { - var err error + visitor := func(in Operator, _ semantics.TableSet, _ bool) (Operator, *ApplyResult) { switch op := in.(type) { case *Horizon: - return nil, nil, vterrors.VT13001(fmt.Sprintf("should not see %T here", in)) + panic(vterrors.VT13001(fmt.Sprintf("should not see %T here", in))) case offsettable: newOp := op.planOffsets(ctx) if newOp != nil { - return newOp, rewrite.NewTree("new operator after offset planning"), nil + return newOp, Rewrote("new operator after offset planning") } } - if err != nil { - return nil, nil, err - } - return in, rewrite.SameTree, nil + return in, NoRewrite } - return rewrite.TopDown(root, TableID, visitor, stopAtRoute) + return TopDown(root, TableID, visitor, stopAtRoute) } func fetchByOffset(e sqlparser.SQLNode) bool { @@ -63,7 +57,7 @@ func fetchByOffset(e sqlparser.SQLNode) bool { } // useOffsets rewrites an expression to use 
values from the input -func useOffsets(ctx *plancontext.PlanningContext, expr sqlparser.Expr, op ops.Operator) sqlparser.Expr { +func useOffsets(ctx *plancontext.PlanningContext, expr sqlparser.Expr, op Operator) sqlparser.Expr { var exprOffset *sqlparser.Offset in := op.Inputs()[0] @@ -93,17 +87,17 @@ func useOffsets(ctx *plancontext.PlanningContext, expr sqlparser.Expr, op ops.Op // addColumnsToInput adds columns needed by an operator to its input. // This happens only when the filter expression can be retrieved as an offset from the underlying mysql. -func addColumnsToInput(ctx *plancontext.PlanningContext, root ops.Operator) (ops.Operator, error) { - visitor := func(in ops.Operator, _ semantics.TableSet, isRoot bool) (ops.Operator, *rewrite.ApplyResult, error) { +func addColumnsToInput(ctx *plancontext.PlanningContext, root Operator) Operator { + visitor := func(in Operator, _ semantics.TableSet, isRoot bool) (Operator, *ApplyResult) { filter, ok := in.(*Filter) if !ok { - return in, rewrite.SameTree, nil + return in, NoRewrite } proj, areOnTopOfProj := filter.Source.(selectExpressions) if !areOnTopOfProj { // not much we can do here - return in, rewrite.SameTree, nil + return in, NoRewrite } addedColumns := false found := func(expr sqlparser.Expr, i int) {} @@ -119,22 +113,22 @@ func addColumnsToInput(ctx *plancontext.PlanningContext, root ops.Operator) (ops _ = sqlparser.CopyOnRewrite(expr, visitor, nil, ctx.SemTable.CopySemanticInfo) } if addedColumns { - return in, rewrite.NewTree("added columns because filter needs it"), nil + return in, Rewrote("added columns because filter needs it") } - return in, rewrite.SameTree, nil + return in, NoRewrite } - return rewrite.TopDown(root, TableID, visitor, stopAtRoute) + return TopDown(root, TableID, visitor, stopAtRoute) } // addColumnsToInput adds columns needed by an operator to its input. // This happens only when the filter expression can be retrieved as an offset from the underlying mysql. -func pullDistinctFromUNION(_ *plancontext.PlanningContext, root ops.Operator) (ops.Operator, error) { - visitor := func(in ops.Operator, _ semantics.TableSet, isRoot bool) (ops.Operator, *rewrite.ApplyResult, error) { +func pullDistinctFromUNION(_ *plancontext.PlanningContext, root Operator) Operator { + visitor := func(in Operator, _ semantics.TableSet, isRoot bool) (Operator, *ApplyResult) { union, ok := in.(*Union) if !ok || !union.distinct { - return in, rewrite.SameTree, nil + return in, NoRewrite } union.distinct = false @@ -143,10 +137,10 @@ func pullDistinctFromUNION(_ *plancontext.PlanningContext, root ops.Operator) (o Required: true, Source: union, } - return distinct, rewrite.NewTree("pulled out DISTINCT from union"), nil + return distinct, Rewrote("pulled out DISTINCT from union") } - return rewrite.TopDown(root, TableID, visitor, stopAtRoute) + return TopDown(root, TableID, visitor, stopAtRoute) } func getOffsetRewritingVisitor( diff --git a/go/vt/vtgate/planbuilder/operators/operator.go b/go/vt/vtgate/planbuilder/operators/operator.go index b165c5345b0..d639643dda1 100644 --- a/go/vt/vtgate/planbuilder/operators/operator.go +++ b/go/vt/vtgate/planbuilder/operators/operator.go @@ -1,5 +1,5 @@ /* -Copyright 2021 The Vitess Authors. +Copyright 2022 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -36,138 +36,61 @@ The operators go through a few phases while planning: package operators import ( - "fmt" - - "vitess.io/vitess/go/slice" "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) type ( - // helper type that implements Inputs() returning nil - noInputs struct{} + // Operator forms the tree of operators, representing the declarative query provided. + // The operator tree is not actually runnable; it's an intermediate representation used + // while query planning. + // The mental model is operators that pull data from each other, the root being the + // full query output, and the leaves most often being `Route`s, representing communication + // with one or more shards. We want to push down as much work as possible under these Routes + Operator interface { + // Clone returns a copy of this operator, so that changes to the original will not impact the clone + Clone(inputs []Operator) Operator - // helper type that implements AddColumn() returning an error - noColumns struct{} + // Inputs returns the inputs for this operator + Inputs() []Operator - // helper type that implements AddPredicate() returning an error - noPredicates struct{} -) + // SetInputs changes the inputs for this op + SetInputs([]Operator) -// PlanQuery creates a query plan for a given SQL statement -func PlanQuery(ctx *plancontext.PlanningContext, stmt sqlparser.Statement) (result ops.Operator, err error) { - defer PanicHandler(&err) + // AddPredicate is used to push predicates. It pushes them as far down as possible in the tree. + // If we encounter a join and the predicate depends on both sides of the join, the predicate will be split into two parts, + // where data is fetched from the LHS of the join to be used in the evaluation on the RHS + // TODO: we should remove this and replace it with rewriters + AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator - op, err := translateQueryToOp(ctx, stmt) - if err != nil { - return nil, err - } + AddColumn(ctx *plancontext.PlanningContext, reuseExisting bool, addToGroupBy bool, expr *sqlparser.AliasedExpr) int - if rewrite.DebugOperatorTree { - fmt.Println("Initial tree:") - fmt.Println(ops.ToTree(op)) - } + FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, underRoute bool) int - if op, err = compact(ctx, op); err != nil { - return nil, err - } + GetColumns(ctx *plancontext.PlanningContext) []*sqlparser.AliasedExpr + GetSelectExprs(ctx *plancontext.PlanningContext) sqlparser.SelectExprs - if err = checkValid(op); err != nil { - return nil, err - } - - if op, err = planQuery(ctx, op); err != nil { - return nil, err - } + ShortDescription() string - _, isRoute := op.(*Route) - if !isRoute && ctx.SemTable.NotSingleRouteErr != nil { - // If we got here, we don't have a single shard plan - return nil, ctx.SemTable.NotSingleRouteErr + GetOrdering(ctx *plancontext.PlanningContext) []OrderBy } - return op, err -} + // OrderBy contains the expression used in ORDER BY and, if ordering is needed at the VTGate level, the weight_string function expression to be sent down for evaluation.
+ OrderBy struct { + Inner *sqlparser.Order -func PanicHandler(err *error) { - if r := recover(); r != nil { - badness, ok := r.(error) - if !ok { - panic(r) - } - - *err = badness - } -} - -// Inputs implements the Operator interface -func (noInputs) Inputs() []ops.Operator { - return nil -} - -// SetInputs implements the Operator interface -func (noInputs) SetInputs(ops []ops.Operator) { - if len(ops) > 0 { - panic("the noInputs operator does not have inputs") - } -} - -// AddColumn implements the Operator interface -func (noColumns) AddColumn(*plancontext.PlanningContext, bool, bool, *sqlparser.AliasedExpr) int { - panic(vterrors.VT13001("noColumns operators have no column")) -} - -func (noColumns) GetColumns(*plancontext.PlanningContext) []*sqlparser.AliasedExpr { - panic(vterrors.VT13001("noColumns operators have no column")) -} - -func (noColumns) FindCol(*plancontext.PlanningContext, sqlparser.Expr, bool) int { - panic(vterrors.VT13001("noColumns operators have no column")) -} - -func (noColumns) GetSelectExprs(*plancontext.PlanningContext) sqlparser.SelectExprs { - panic(vterrors.VT13001("noColumns operators have no column")) -} - -// AddPredicate implements the Operator interface -func (noPredicates) AddPredicate(*plancontext.PlanningContext, sqlparser.Expr) ops.Operator { - panic(vterrors.VT13001("the noColumns operator cannot accept predicates")) -} - -// tryTruncateColumnsAt will see if we can truncate the columns by just asking the operator to do it for us -func tryTruncateColumnsAt(op ops.Operator, truncateAt int) bool { - type columnTruncator interface { - setTruncateColumnCount(offset int) - } - - truncator, ok := op.(columnTruncator) - if ok { - truncator.setTruncateColumnCount(truncateAt) - return true + // See GroupBy#SimplifiedExpr for more details about this + SimplifiedExpr sqlparser.Expr } +) - switch op := op.(type) { - case *Limit: - return tryTruncateColumnsAt(op.Source, truncateAt) - case *SubQuery: - for _, offset := range op.Vars { - if offset >= truncateAt { - return false - } - } - return tryTruncateColumnsAt(op.Outer, truncateAt) - default: - return false +// Map takes in a mapping function and applies it to both the expression in OrderBy. 
+func (ob OrderBy) Map(mappingFunc func(sqlparser.Expr) sqlparser.Expr) OrderBy { + return OrderBy{ + Inner: &sqlparser.Order{ + Expr: mappingFunc(ob.Inner.Expr), + Direction: ob.Inner.Direction, + }, + SimplifiedExpr: mappingFunc(ob.SimplifiedExpr), } } - -func transformColumnsToSelectExprs(ctx *plancontext.PlanningContext, op ops.Operator) sqlparser.SelectExprs { - columns := op.GetColumns(ctx) - selExprs := slice.Map(columns, func(from *sqlparser.AliasedExpr) sqlparser.SelectExpr { - return from - }) - return selExprs -} diff --git a/go/vt/vtgate/planbuilder/operators/operator_funcs.go b/go/vt/vtgate/planbuilder/operators/operator_funcs.go index 7f7aaff29c5..cc3007438fa 100644 --- a/go/vt/vtgate/planbuilder/operators/operator_funcs.go +++ b/go/vt/vtgate/planbuilder/operators/operator_funcs.go @@ -21,13 +21,12 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) // RemovePredicate is used when we turn a predicate into a plan operator, // and the predicate needs to be removed as an AST construct -func RemovePredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr, op ops.Operator) (ops.Operator, error) { +func RemovePredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr, op Operator) (Operator, error) { switch op := op.(type) { case *Route: newSrc, err := RemovePredicate(ctx, expr, op.Source) diff --git a/go/vt/vtgate/planbuilder/operators/ops/op.go b/go/vt/vtgate/planbuilder/operators/ops/op.go deleted file mode 100644 index 1117b947814..00000000000 --- a/go/vt/vtgate/planbuilder/operators/ops/op.go +++ /dev/null @@ -1,77 +0,0 @@ -/* -Copyright 2022 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package ops - -import ( - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" -) - -type ( - // Operator forms the tree of operators, representing the declarative query provided. - // The operator tree is no actually runnable, it's an intermediate representation used - // while query planning - // The mental model are operators that pull data from each other, the root being the - // full query output, and the leaves are most often `Route`s, representing communication - // with one or more shards. We want to push down as much work as possible under these Routes - Operator interface { - // Clone will return a copy of this operator, protected so changed to the original will not impact the clone - Clone(inputs []Operator) Operator - - // Inputs returns the inputs for this operator - Inputs() []Operator - - // SetInputs changes the inputs for this op - SetInputs([]Operator) - - // AddPredicate is used to push predicates. It pushed it as far down as is possible in the tree. 
- // If we encounter a join and the predicate depends on both sides of the join, the predicate will be split into two parts, - // where data is fetched from the LHS of the join to be used in the evaluation on the RHS - // TODO: we should remove this and replace it with rewriters - AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator - - AddColumn(ctx *plancontext.PlanningContext, reuseExisting bool, addToGroupBy bool, expr *sqlparser.AliasedExpr) int - - FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, underRoute bool) int - - GetColumns(ctx *plancontext.PlanningContext) []*sqlparser.AliasedExpr - GetSelectExprs(ctx *plancontext.PlanningContext) sqlparser.SelectExprs - - ShortDescription() string - - GetOrdering(ctx *plancontext.PlanningContext) []OrderBy - } - - // OrderBy contains the expression to used in order by and also if ordering is needed at VTGate level then what the weight_string function expression to be sent down for evaluation. - OrderBy struct { - Inner *sqlparser.Order - - // See GroupBy#SimplifiedExpr for more details about this - SimplifiedExpr sqlparser.Expr - } -) - -// Map takes in a mapping function and applies it to both the expression in OrderBy. -func (ob OrderBy) Map(mappingFunc func(sqlparser.Expr) sqlparser.Expr) OrderBy { - return OrderBy{ - Inner: &sqlparser.Order{ - Expr: mappingFunc(ob.Inner.Expr), - Direction: ob.Inner.Direction, - }, - SimplifiedExpr: mappingFunc(ob.SimplifiedExpr), - } -} diff --git a/go/vt/vtgate/planbuilder/operators/ordering.go b/go/vt/vtgate/planbuilder/operators/ordering.go index 66436f6a47d..bc088ca2220 100644 --- a/go/vt/vtgate/planbuilder/operators/ordering.go +++ b/go/vt/vtgate/planbuilder/operators/ordering.go @@ -22,20 +22,19 @@ import ( "vitess.io/vitess/go/slice" "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) type Ordering struct { - Source ops.Operator + Source Operator Offset []int WOffset []int - Order []ops.OrderBy + Order []OrderBy ResultColumns int } -func (o *Ordering) Clone(inputs []ops.Operator) ops.Operator { +func (o *Ordering) Clone(inputs []Operator) Operator { return &Ordering{ Source: inputs[0], Offset: slices.Clone(o.Offset), @@ -45,15 +44,15 @@ func (o *Ordering) Clone(inputs []ops.Operator) ops.Operator { } } -func (o *Ordering) Inputs() []ops.Operator { - return []ops.Operator{o.Source} +func (o *Ordering) Inputs() []Operator { + return []Operator{o.Source} } -func (o *Ordering) SetInputs(operators []ops.Operator) { +func (o *Ordering) SetInputs(operators []Operator) { o.Source = operators[0] } -func (o *Ordering) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (o *Ordering) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { o.Source = o.Source.AddPredicate(ctx, expr) return o } @@ -74,11 +73,11 @@ func (o *Ordering) GetSelectExprs(ctx *plancontext.PlanningContext) sqlparser.Se return o.Source.GetSelectExprs(ctx) } -func (o *Ordering) GetOrdering(*plancontext.PlanningContext) []ops.OrderBy { +func (o *Ordering) GetOrdering(*plancontext.PlanningContext) []OrderBy { return o.Order } -func (o *Ordering) planOffsets(ctx *plancontext.PlanningContext) ops.Operator { +func (o *Ordering) planOffsets(ctx *plancontext.PlanningContext) Operator { for _, order := range o.Order { offset := o.Source.AddColumn(ctx, true, false, aeWrap(order.SimplifiedExpr)) o.Offset = append(o.Offset, offset) @@ -96,7 +95,7 @@ 
func (o *Ordering) planOffsets(ctx *plancontext.PlanningContext) ops.Operator { } func (o *Ordering) ShortDescription() string { - ordering := slice.Map(o.Order, func(o ops.OrderBy) string { + ordering := slice.Map(o.Order, func(o OrderBy) string { return sqlparser.String(o.SimplifiedExpr) }) return strings.Join(ordering, ", ") diff --git a/go/vt/vtgate/planbuilder/operators/phases.go b/go/vt/vtgate/planbuilder/operators/phases.go index 557124e9320..8a47507a526 100644 --- a/go/vt/vtgate/planbuilder/operators/phases.go +++ b/go/vt/vtgate/planbuilder/operators/phases.go @@ -20,8 +20,6 @@ import ( "vitess.io/vitess/go/slice" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) @@ -79,7 +77,7 @@ func (p Phase) shouldRun(s semantics.QuerySignature) bool { } } -func (p Phase) act(ctx *plancontext.PlanningContext, op ops.Operator) (ops.Operator, error) { +func (p Phase) act(ctx *plancontext.PlanningContext, op Operator) Operator { switch p { case pullDistinctFromUnion: return pullDistinctFromUNION(ctx, op) @@ -90,9 +88,9 @@ func (p Phase) act(ctx *plancontext.PlanningContext, op ops.Operator) (ops.Opera case cleanOutPerfDistinct: return removePerformanceDistinctAboveRoute(ctx, op) case subquerySettling: - return settleSubqueries(ctx, op), nil + return settleSubqueries(ctx, op) default: - return op, nil + return op } } @@ -115,51 +113,47 @@ func (p *phaser) next(ctx *plancontext.PlanningContext) Phase { } } -func removePerformanceDistinctAboveRoute(_ *plancontext.PlanningContext, op ops.Operator) (ops.Operator, error) { - return rewrite.BottomUp(op, TableID, func(innerOp ops.Operator, _ semantics.TableSet, _ bool) (ops.Operator, *rewrite.ApplyResult, error) { +func removePerformanceDistinctAboveRoute(_ *plancontext.PlanningContext, op Operator) Operator { + return BottomUp(op, TableID, func(innerOp Operator, _ semantics.TableSet, _ bool) (Operator, *ApplyResult) { d, ok := innerOp.(*Distinct) if !ok || d.Required { - return innerOp, rewrite.SameTree, nil + return innerOp, NoRewrite } - return d.Source, rewrite.NewTree("removed distinct not required that was not pushed under route"), nil + return d.Source, Rewrote("removed distinct not required that was not pushed under route") }, stopAtRoute) } -func enableDelegateAggregation(ctx *plancontext.PlanningContext, op ops.Operator) (ops.Operator, error) { +func enableDelegateAggregation(ctx *plancontext.PlanningContext, op Operator) Operator { return addColumnsToInput(ctx, op) } // addOrderingForAllAggregations is run we have pushed down Aggregators as far down as possible. 
-func addOrderingForAllAggregations(ctx *plancontext.PlanningContext, root ops.Operator) (ops.Operator, error) { - visitor := func(in ops.Operator, _ semantics.TableSet, isRoot bool) (ops.Operator, *rewrite.ApplyResult, error) { +func addOrderingForAllAggregations(ctx *plancontext.PlanningContext, root Operator) Operator { + visitor := func(in Operator, _ semantics.TableSet, isRoot bool) (Operator, *ApplyResult) { aggrOp, ok := in.(*Aggregator) if !ok { - return in, rewrite.SameTree, nil + return in, NoRewrite } - requireOrdering, err := needsOrdering(ctx, aggrOp) - if err != nil { - return nil, nil, err - } - - var res *rewrite.ApplyResult + requireOrdering := needsOrdering(ctx, aggrOp) + var res *ApplyResult if requireOrdering { addOrderingFor(aggrOp) - res = rewrite.NewTree("added ordering before aggregation") + res = Rewrote("added ordering before aggregation") } - return in, res, nil + return in, res } - return rewrite.BottomUp(root, TableID, visitor, stopAtRoute) + return BottomUp(root, TableID, visitor, stopAtRoute) } func addOrderingFor(aggrOp *Aggregator) { - orderBys := slice.Map(aggrOp.Grouping, func(from GroupBy) ops.OrderBy { + orderBys := slice.Map(aggrOp.Grouping, func(from GroupBy) OrderBy { return from.AsOrderBy() }) if aggrOp.DistinctExpr != nil { - orderBys = append(orderBys, ops.OrderBy{ + orderBys = append(orderBys, OrderBy{ Inner: &sqlparser.Order{ Expr: aggrOp.DistinctExpr, }, @@ -172,7 +166,7 @@ func addOrderingFor(aggrOp *Aggregator) { } } -func needsOrdering(ctx *plancontext.PlanningContext, in *Aggregator) (bool, error) { +func needsOrdering(ctx *plancontext.PlanningContext, in *Aggregator) bool { requiredOrder := slice.Map(in.Grouping, func(from GroupBy) sqlparser.Expr { return from.SimplifiedExpr }) @@ -180,44 +174,44 @@ func needsOrdering(ctx *plancontext.PlanningContext, in *Aggregator) (bool, erro requiredOrder = append(requiredOrder, in.DistinctExpr) } if len(requiredOrder) == 0 { - return false, nil + return false } srcOrdering := in.Source.GetOrdering(ctx) if len(srcOrdering) < len(requiredOrder) { - return true, nil + return true } for idx, gb := range requiredOrder { if !ctx.SemTable.EqualsExprWithDeps(srcOrdering[idx].SimplifiedExpr, gb) { - return true, nil + return true } } - return false, nil + return false } -func addGroupByOnRHSOfJoin(root ops.Operator) (ops.Operator, error) { - visitor := func(in ops.Operator, _ semantics.TableSet, isRoot bool) (ops.Operator, *rewrite.ApplyResult, error) { +func addGroupByOnRHSOfJoin(root Operator) Operator { + visitor := func(in Operator, _ semantics.TableSet, isRoot bool) (Operator, *ApplyResult) { join, ok := in.(*ApplyJoin) if !ok { - return in, rewrite.SameTree, nil + return in, NoRewrite } return addLiteralGroupingToRHS(join) } - return rewrite.TopDown(root, TableID, visitor, stopAtRoute) + return TopDown(root, TableID, visitor, stopAtRoute) } -func addLiteralGroupingToRHS(in *ApplyJoin) (ops.Operator, *rewrite.ApplyResult, error) { - _ = rewrite.Visit(in.RHS, func(op ops.Operator) error { +func addLiteralGroupingToRHS(in *ApplyJoin) (Operator, *ApplyResult) { + _ = Visit(in.RHS, func(op Operator) error { aggr, isAggr := op.(*Aggregator) if !isAggr { return nil } if len(aggr.Grouping) == 0 { gb := sqlparser.NewIntLiteral(".0") - aggr.Grouping = append(aggr.Grouping, NewGroupBy(gb, gb, aeWrap(gb))) + aggr.Grouping = append(aggr.Grouping, NewGroupBy(gb, gb)) } return nil }) - return in, rewrite.SameTree, nil + return in, NoRewrite } diff --git a/go/vt/vtgate/planbuilder/operators/plan_query.go 
b/go/vt/vtgate/planbuilder/operators/plan_query.go new file mode 100644 index 00000000000..811f0c8dc76 --- /dev/null +++ b/go/vt/vtgate/planbuilder/operators/plan_query.go @@ -0,0 +1,165 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package operators contains the operators used to plan queries. +/* +The operators go through a few phases while planning: +1. Initial plan + In this first pass, we build an operator tree from the incoming parsed query. + At the leaves, it will contain QueryGraphs - these are the tables in the FROM clause + that we can easily do join ordering on because they are all inner joins. + All the post-processing - aggregations, sorting, limit, etc. - is at this stage + contained in Horizon structs. We try to push these down under routes, and expand + the ones that can't be pushed down into individual operators such as Projection, + Aggregation, Limit, etc. +2. Planning + Once the initial plan has been fully built, we go through a number of phases, + recursively running rewriters on the tree in a fixed point fashion, until we've gone + over all phases and the tree has stopped changing. +3. Offset planning + Now is the time to stop working with AST objects and transform remaining expressions being + used on top of vtgate to either offsets on inputs or evalengine expressions.
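(Editor's illustrative aside, not part of this change: phase 2's fixed-point rewriting can be pictured with the toy Go sketch below. The node type, rewriteOnce, and fixedPoint names are made up for illustration; they stand in for the real Operator tree and rewriters such as runRewriters and FixedPointBottomUp. The point of the loop is that one rewrite can enable further rewrites, so the planner repeats full passes until a pass reports no change.)

    package main

    import "fmt"

    // node is a toy stand-in for an operator tree: a name plus child nodes.
    type node struct {
        name     string
        children []*node
    }

    // rewriteOnce applies a single bottom-up pass of one toy rule
    // ("replace a single-child noop node with its child") and reports whether anything changed.
    func rewriteOnce(n *node) (*node, bool) {
        changed := false
        for i, c := range n.children {
            newChild, childChanged := rewriteOnce(c)
            n.children[i] = newChild
            changed = changed || childChanged
        }
        if n.name == "noop" && len(n.children) == 1 {
            return n.children[0], true
        }
        return n, changed
    }

    // fixedPoint keeps re-running the pass until nothing changes anymore,
    // mirroring how the planner reruns its rewriters until the tree is stable.
    func fixedPoint(root *node) *node {
        for {
            newRoot, changed := rewriteOnce(root)
            root = newRoot
            if !changed {
                return root
            }
        }
    }

    func main() {
        tree := &node{name: "route", children: []*node{
            {name: "noop", children: []*node{{name: "table"}}},
        }}
        fmt.Println(fixedPoint(tree).children[0].name) // prints "table"
    }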
+*/ +package operators + +import ( + "fmt" + + "vitess.io/vitess/go/slice" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" +) + +type ( + // helper type that implements Inputs() returning nil + noInputs struct{} + + // helper type that implements AddColumn() returning an error + noColumns struct{} + + // helper type that implements AddPredicate() returning an error + noPredicates struct{} +) + +// PlanQuery creates a query plan for a given SQL statement +func PlanQuery(ctx *plancontext.PlanningContext, stmt sqlparser.Statement) (result Operator, err error) { + defer PanicHandler(&err) + + op := translateQueryToOp(ctx, stmt) + + if DebugOperatorTree { + fmt.Println("Initial tree:") + fmt.Println(ToTree(op)) + } + + op = compact(ctx, op) + if err = checkValid(op); err != nil { + return nil, err + } + + if op, err = planQuery(ctx, op); err != nil { + return nil, err + } + + _, isRoute := op.(*Route) + if !isRoute && ctx.SemTable.NotSingleRouteErr != nil { + // If we got here, we don't have a single shard plan + return nil, ctx.SemTable.NotSingleRouteErr + } + + return op, err +} + +func PanicHandler(err *error) { + if r := recover(); r != nil { + badness, ok := r.(error) + if !ok { + panic(r) + } + + *err = badness + } +} + +// Inputs implements the Operator interface +func (noInputs) Inputs() []Operator { + return nil +} + +// SetInputs implements the Operator interface +func (noInputs) SetInputs(ops []Operator) { + if len(ops) > 0 { + panic("the noInputs operator does not have inputs") + } +} + +// AddColumn implements the Operator interface +func (noColumns) AddColumn(*plancontext.PlanningContext, bool, bool, *sqlparser.AliasedExpr) int { + panic(vterrors.VT13001("noColumns operators have no column")) +} + +func (noColumns) GetColumns(*plancontext.PlanningContext) []*sqlparser.AliasedExpr { + panic(vterrors.VT13001("noColumns operators have no column")) +} + +func (noColumns) FindCol(*plancontext.PlanningContext, sqlparser.Expr, bool) int { + panic(vterrors.VT13001("noColumns operators have no column")) +} + +func (noColumns) GetSelectExprs(*plancontext.PlanningContext) sqlparser.SelectExprs { + panic(vterrors.VT13001("noColumns operators have no column")) +} + +// AddPredicate implements the Operator interface +func (noPredicates) AddPredicate(*plancontext.PlanningContext, sqlparser.Expr) Operator { + panic(vterrors.VT13001("the noColumns operator cannot accept predicates")) +} + +// tryTruncateColumnsAt will see if we can truncate the columns by just asking the operator to do it for us +func tryTruncateColumnsAt(op Operator, truncateAt int) bool { + type columnTruncator interface { + setTruncateColumnCount(offset int) + } + + truncator, ok := op.(columnTruncator) + if ok { + truncator.setTruncateColumnCount(truncateAt) + return true + } + + switch op := op.(type) { + case *Limit: + return tryTruncateColumnsAt(op.Source, truncateAt) + case *SubQuery: + for _, offset := range op.Vars { + if offset >= truncateAt { + return false + } + } + return tryTruncateColumnsAt(op.Outer, truncateAt) + default: + return false + } +} + +func transformColumnsToSelectExprs(ctx *plancontext.PlanningContext, op Operator) sqlparser.SelectExprs { + columns := op.GetColumns(ctx) + selExprs := slice.Map(columns, func(from *sqlparser.AliasedExpr) sqlparser.SelectExpr { + return from + }) + return selExprs +} diff --git a/go/vt/vtgate/planbuilder/operators/projection.go b/go/vt/vtgate/planbuilder/operators/projection.go index 
7e9f2d71a71..12b70d3e4ef 100644 --- a/go/vt/vtgate/planbuilder/operators/projection.go +++ b/go/vt/vtgate/planbuilder/operators/projection.go @@ -25,8 +25,6 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) @@ -34,7 +32,7 @@ import ( // Projection is used when we need to evaluate expressions on the vtgate // It uses the evalengine to accomplish its goal type Projection struct { - Source ops.Operator + Source Operator // Columns contain the expressions as viewed from the outside of this operator Columns ProjCols @@ -128,7 +126,7 @@ func newProjExprWithInner(ae *sqlparser.AliasedExpr, in sqlparser.Expr) *ProjExp } } -func newAliasedProjection(src ops.Operator) *Projection { +func newAliasedProjection(src Operator) *Projection { return &Projection{ Source: src, Columns: AliasedProjections{}, @@ -194,22 +192,19 @@ var _ selectExpressions = (*Projection)(nil) // createSimpleProjection returns a projection where all columns are offsets. // used to change the name and order of the columns in the final output -func createSimpleProjection(ctx *plancontext.PlanningContext, qp *QueryProjection, src ops.Operator) (*Projection, error) { +func createSimpleProjection(ctx *plancontext.PlanningContext, qp *QueryProjection, src Operator) *Projection { p := newAliasedProjection(src) for _, e := range qp.SelectExprs { ae, err := e.GetAliasedExpr() if err != nil { - return nil, err + panic(err) } offset := p.Source.AddColumn(ctx, true, false, ae) expr := newProjExpr(ae) expr.Info = Offset(offset) - _, err = p.addProjExpr(expr) - if err != nil { - return nil, err - } + p.addProjExpr(expr) } - return p, nil + return p } // canPush returns false if the projection has subquery expressions in it and the subqueries have not yet @@ -263,57 +258,45 @@ func (p *Projection) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Ex return -1 } -func (p *Projection) addProjExpr(pe ...*ProjExpr) (int, error) { +func (p *Projection) addProjExpr(pe ...*ProjExpr) int { ap, err := p.GetAliasedProjections() if err != nil { - return 0, err + panic(err) } offset := len(ap) ap = append(ap, pe...) 
p.Columns = ap - return offset, nil + return offset } -func (p *Projection) addUnexploredExpr(ae *sqlparser.AliasedExpr, e sqlparser.Expr) (int, error) { +func (p *Projection) addUnexploredExpr(ae *sqlparser.AliasedExpr, e sqlparser.Expr) int { return p.addProjExpr(newProjExprWithInner(ae, e)) } -func (p *Projection) addSubqueryExpr(ae *sqlparser.AliasedExpr, expr sqlparser.Expr, sqs ...*SubQuery) error { +func (p *Projection) addSubqueryExpr(ae *sqlparser.AliasedExpr, expr sqlparser.Expr, sqs ...*SubQuery) { pe := newProjExprWithInner(ae, expr) pe.Info = SubQueryExpression(sqs) - _, err := p.addProjExpr(pe) - return err + _ = p.addProjExpr(pe) } func (p *Projection) addColumnWithoutPushing(ctx *plancontext.PlanningContext, expr *sqlparser.AliasedExpr, _ bool) int { - column, err := p.addColumn(ctx, true, false, expr, false) - if err != nil { - panic(err) - } - return column + return p.addColumn(ctx, true, false, expr, false) } func (p *Projection) addColumnsWithoutPushing(ctx *plancontext.PlanningContext, reuse bool, _ []bool, exprs []*sqlparser.AliasedExpr) []int { offsets := make([]int, len(exprs)) for idx, expr := range exprs { - offset, err := p.addColumn(ctx, reuse, false, expr, false) - if err != nil { - panic(err) - } + offset := p.addColumn(ctx, reuse, false, expr, false) offsets[idx] = offset } return offsets } func (p *Projection) AddColumn(ctx *plancontext.PlanningContext, reuse bool, addToGroupBy bool, ae *sqlparser.AliasedExpr) int { - column, err := p.addColumn(ctx, reuse, addToGroupBy, ae, true) - if err != nil { - panic(err) - } - return column + return p.addColumn(ctx, reuse, addToGroupBy, ae, true) } func (p *Projection) addColumn( @@ -322,13 +305,13 @@ func (p *Projection) addColumn( addToGroupBy bool, ae *sqlparser.AliasedExpr, push bool, -) (int, error) { +) int { expr := p.DT.RewriteExpression(ctx, ae.Expr) if reuse { offset := p.FindCol(ctx, expr, false) if offset >= 0 { - return offset, nil + return offset } } @@ -337,7 +320,7 @@ func (p *Projection) addColumn( if ok { cols, ok := p.Columns.(AliasedProjections) if !ok { - return 0, vterrors.VT09015() + panic(vterrors.VT09015()) } for _, projExpr := range cols { if ctx.SemTable.EqualsExprWithDeps(ws.Expr, projExpr.ColExpr) { @@ -364,7 +347,7 @@ func (po Offset) expr() {} func (po *EvalEngine) expr() {} func (po SubQueryExpression) expr() {} -func (p *Projection) Clone(inputs []ops.Operator) ops.Operator { +func (p *Projection) Clone(inputs []Operator) Operator { return &Projection{ Source: inputs[0], Columns: p.Columns, // TODO don't think we need to deep clone here @@ -373,15 +356,15 @@ func (p *Projection) Clone(inputs []ops.Operator) ops.Operator { } } -func (p *Projection) Inputs() []ops.Operator { - return []ops.Operator{p.Source} +func (p *Projection) Inputs() []Operator { + return []Operator{p.Source} } -func (p *Projection) SetInputs(operators []ops.Operator) { +func (p *Projection) SetInputs(operators []Operator) { p.Source = operators[0] } -func (p *Projection) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (p *Projection) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { // we just pass through the predicate to our source p.Source = p.Source.AddPredicate(ctx, expr) return p @@ -412,7 +395,7 @@ func (p *Projection) GetSelectExprs(*plancontext.PlanningContext) sqlparser.Sele } } -func (p *Projection) GetOrdering(ctx *plancontext.PlanningContext) []ops.OrderBy { +func (p *Projection) GetOrdering(ctx *plancontext.PlanningContext) []OrderBy 
{ return p.Source.GetOrdering(ctx) } @@ -454,10 +437,10 @@ func (p *Projection) ShortDescription() string { return strings.Join(result, ", ") } -func (p *Projection) Compact(ctx *plancontext.PlanningContext) (ops.Operator, *rewrite.ApplyResult, error) { +func (p *Projection) Compact(ctx *plancontext.PlanningContext) (Operator, *ApplyResult) { ap, err := p.GetAliasedProjections() if err != nil { - return p, rewrite.SameTree, nil + return p, NoRewrite } // for projections that are not derived tables, we can check if it is safe to remove or not @@ -471,7 +454,7 @@ func (p *Projection) Compact(ctx *plancontext.PlanningContext) (ops.Operator, *r } if !needed { - return p.Source, rewrite.NewTree("removed projection only passing through the input"), nil + return p.Source, Rewrote("removed projection only passing through the input") } switch src := p.Source.(type) { @@ -480,13 +463,13 @@ func (p *Projection) Compact(ctx *plancontext.PlanningContext) (ops.Operator, *r case *ApplyJoin: return p.compactWithJoin(ctx, src) } - return p, rewrite.SameTree, nil + return p, NoRewrite } -func (p *Projection) compactWithJoin(ctx *plancontext.PlanningContext, join *ApplyJoin) (ops.Operator, *rewrite.ApplyResult, error) { +func (p *Projection) compactWithJoin(ctx *plancontext.PlanningContext, join *ApplyJoin) (Operator, *ApplyResult) { ap, err := p.GetAliasedProjections() if err != nil { - return p, rewrite.SameTree, nil + return p, NoRewrite } var newColumns []int @@ -499,49 +482,46 @@ func (p *Projection) compactWithJoin(ctx *plancontext.PlanningContext, join *App case nil: if !ctx.SemTable.EqualsExprWithDeps(col.EvalExpr, col.ColExpr) { // the inner expression is different from what we are presenting to the outside - this means we need to evaluate - return p, rewrite.SameTree, nil + return p, NoRewrite } offset := slices.IndexFunc(join.JoinColumns, func(jc JoinColumn) bool { - return ctx.SemTable.EqualsExprWithDeps(jc.Original.Expr, col.ColExpr) + return ctx.SemTable.EqualsExprWithDeps(jc.Original, col.ColExpr) }) if offset < 0 { - return p, rewrite.SameTree, nil + return p, NoRewrite } if len(join.Columns) > 0 { newColumns = append(newColumns, join.Columns[offset]) } newColumnsAST = append(newColumnsAST, join.JoinColumns[offset]) default: - return p, rewrite.SameTree, nil + return p, NoRewrite } } join.Columns = newColumns join.JoinColumns = newColumnsAST - return join, rewrite.NewTree("remove projection from before join"), nil + return join, Rewrote("remove projection from before join") } -func (p *Projection) compactWithRoute(ctx *plancontext.PlanningContext, rb *Route) (ops.Operator, *rewrite.ApplyResult, error) { +func (p *Projection) compactWithRoute(ctx *plancontext.PlanningContext, rb *Route) (Operator, *ApplyResult) { ap, err := p.GetAliasedProjections() if err != nil { - return p, rewrite.SameTree, nil + return p, NoRewrite } for i, col := range ap { offset, ok := col.Info.(Offset) if !ok || int(offset) != i { - return p, rewrite.SameTree, nil + return p, NoRewrite } } columns := rb.GetColumns(ctx) - if err != nil { - return nil, nil, err - } if len(columns) == len(ap) { - return rb, rewrite.NewTree("remove projection from before route"), nil + return rb, Rewrote("remove projection from before route") } rb.ResultColumns = len(columns) - return rb, rewrite.SameTree, nil + return rb, NoRewrite } // needsEvaluation finds the expression given by this argument and checks if the inside and outside expressions match @@ -561,7 +541,7 @@ func (p *Projection) needsEvaluation(ctx 
*plancontext.PlanningContext, e sqlpars return false } -func (p *Projection) planOffsets(ctx *plancontext.PlanningContext) ops.Operator { +func (p *Projection) planOffsets(ctx *plancontext.PlanningContext) Operator { ap, err := p.GetAliasedProjections() if err != nil { panic(err) diff --git a/go/vt/vtgate/planbuilder/operators/query_planning.go b/go/vt/vtgate/planbuilder/operators/query_planning.go index ab140faf9b9..0994ee4402a 100644 --- a/go/vt/vtgate/planbuilder/operators/query_planning.go +++ b/go/vt/vtgate/planbuilder/operators/query_planning.go @@ -21,8 +21,6 @@ import ( "io" "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) @@ -35,26 +33,20 @@ type ( } ) -func planQuery(ctx *plancontext.PlanningContext, root ops.Operator) (output ops.Operator, err error) { +func planQuery(ctx *plancontext.PlanningContext, root Operator) (output Operator, err error) { output, err = runPhases(ctx, root) if err != nil { return nil, err } - output, err = planOffsets(ctx, output) - if err != nil { - return nil, err - } + output = planOffsets(ctx, output) - if rewrite.DebugOperatorTree { + if DebugOperatorTree { fmt.Println("After offset planning:") - fmt.Println(ops.ToTree(output)) + fmt.Println(ToTree(output)) } - output, err = compact(ctx, output) - if err != nil { - return nil, err - } + output = compact(ctx, output) return addTruncationOrProjectionToReturnOutput(ctx, root, output) } @@ -63,17 +55,17 @@ func planQuery(ctx *plancontext.PlanningContext, root ops.Operator) (output ops. // If we can push it under a route - done. // If we can't, we will instead expand the Horizon into // smaller operators and try to push these down as far as possible -func runPhases(ctx *plancontext.PlanningContext, root ops.Operator) (op ops.Operator, err error) { +func runPhases(ctx *plancontext.PlanningContext, root Operator) (op Operator, err error) { op = root p := phaser{} for phase := p.next(ctx); phase != DONE; phase = p.next(ctx) { ctx.CurrentPhase = int(phase) - if rewrite.DebugOperatorTree { + if DebugOperatorTree { fmt.Printf("PHASE: %s\n", phase.String()) } - op, err = phase.act(ctx, op) + op = phase.act(ctx, op) if err != nil { return nil, err } @@ -83,17 +75,17 @@ func runPhases(ctx *plancontext.PlanningContext, root ops.Operator) (op ops.Oper return nil, err } - op, err = compact(ctx, op) + op = compact(ctx, op) if err != nil { return nil, err } } - return addGroupByOnRHSOfJoin(op) + return addGroupByOnRHSOfJoin(op), nil } -func runRewriters(ctx *plancontext.PlanningContext, root ops.Operator) (ops.Operator, error) { - visitor := func(in ops.Operator, _ semantics.TableSet, isRoot bool) (ops.Operator, *rewrite.ApplyResult, error) { +func runRewriters(ctx *plancontext.PlanningContext, root Operator) (Operator, error) { + visitor := func(in Operator, _ semantics.TableSet, isRoot bool) (Operator, *ApplyResult) { switch in := in.(type) { case *Horizon: return pushOrExpandHorizon(ctx, in) @@ -120,23 +112,23 @@ func runRewriters(ctx *plancontext.PlanningContext, root ops.Operator) (ops.Oper case *LockAndComment: return pushLockAndComment(in) default: - return in, rewrite.SameTree, nil + return in, NoRewrite } } - return rewrite.FixedPointBottomUp(root, TableID, visitor, stopAtRoute) + return FixedPointBottomUp(root, TableID, visitor, stopAtRoute), nil } -func pushLockAndComment(l *LockAndComment) 
(ops.Operator, *rewrite.ApplyResult, error) { +func pushLockAndComment(l *LockAndComment) (Operator, *ApplyResult) { switch src := l.Source.(type) { case *Horizon, *QueryGraph: // we want to wait until the horizons have been pushed under a route or expanded // that way we know that we've replaced the QueryGraphs with Routes - return l, rewrite.SameTree, nil + return l, NoRewrite case *Route: src.Comments = l.Comments src.Lock = l.Lock - return src, rewrite.NewTree("put lock and comment into route"), nil + return src, Rewrote("put lock and comment into route") default: inputs := src.Inputs() for i, op := range inputs { @@ -147,23 +139,20 @@ func pushLockAndComment(l *LockAndComment) (ops.Operator, *rewrite.ApplyResult, } } src.SetInputs(inputs) - return src, rewrite.NewTree("pushed down lock and comments"), nil + return src, Rewrote("pushed down lock and comments") } } -func pushOrExpandHorizon(ctx *plancontext.PlanningContext, in *Horizon) (ops.Operator, *rewrite.ApplyResult, error) { +func pushOrExpandHorizon(ctx *plancontext.PlanningContext, in *Horizon) (Operator, *ApplyResult) { if in.IsDerived() { - newOp, result, err := pushDerived(ctx, in) - if err != nil { - return nil, nil, err - } - if result != rewrite.SameTree { - return newOp, result, nil + newOp, result := pushDerived(ctx, in) + if result != NoRewrite { + return newOp, result } } if !reachedPhase(ctx, initialPlanning) { - return in, rewrite.SameTree, nil + return in, NoRewrite } if ctx.SemTable.QuerySignature.SubQueries { @@ -172,15 +161,12 @@ func pushOrExpandHorizon(ctx *plancontext.PlanningContext, in *Horizon) (ops.Ope rb, isRoute := in.src().(*Route) if isRoute && rb.IsSingleShard() { - return rewrite.Swap(in, rb, "push horizon into route") + return Swap(in, rb, "push horizon into route") } sel, isSel := in.selectStatement().(*sqlparser.Select) - qp, err := in.getQP(ctx) - if err != nil { - return nil, nil, err - } + qp := in.getQP(ctx) needsOrdering := len(qp.OrderExprs) > 0 hasHaving := isSel && sel.Having != nil @@ -193,7 +179,7 @@ func pushOrExpandHorizon(ctx *plancontext.PlanningContext, in *Horizon) (ops.Ope in.selectStatement().GetLimit() == nil if canPush { - return rewrite.Swap(in, rb, "push horizon into route") + return Swap(in, rb, "push horizon into route") } return expandHorizon(ctx, in) @@ -202,42 +188,42 @@ func pushOrExpandHorizon(ctx *plancontext.PlanningContext, in *Horizon) (ops.Ope func tryPushProjection( ctx *plancontext.PlanningContext, p *Projection, -) (ops.Operator, *rewrite.ApplyResult, error) { +) (Operator, *ApplyResult) { switch src := p.Source.(type) { case *Route: - return rewrite.Swap(p, src, "push projection under route") + return Swap(p, src, "push projection under route") case *ApplyJoin: if p.FromAggr || !p.canPush(ctx) { - return p, rewrite.SameTree, nil + return p, NoRewrite } return pushProjectionInApplyJoin(ctx, p, src) case *Vindex: if !p.canPush(ctx) { - return p, rewrite.SameTree, nil + return p, NoRewrite } return pushProjectionInVindex(ctx, p, src) case *SubQueryContainer: if !p.canPush(ctx) { - return p, rewrite.SameTree, nil + return p, NoRewrite } return pushProjectionToOuterContainer(ctx, p, src) case *SubQuery: return pushProjectionToOuter(ctx, p, src) case *Limit: - return rewrite.Swap(p, src, "push projection under limit") + return Swap(p, src, "push projection under limit") default: - return p, rewrite.SameTree, nil + return p, NoRewrite } } -func pushProjectionToOuter(ctx *plancontext.PlanningContext, p *Projection, sq *SubQuery) (ops.Operator, *rewrite.ApplyResult, 
error) { +func pushProjectionToOuter(ctx *plancontext.PlanningContext, p *Projection, sq *SubQuery) (Operator, *ApplyResult) { ap, err := p.GetAliasedProjections() if err != nil { - return p, rewrite.SameTree, nil + return p, NoRewrite } if !reachedPhase(ctx, subquerySettling) || err != nil { - return p, rewrite.SameTree, nil + return p, NoRewrite } outer := TableID(sq.Outer) @@ -248,7 +234,7 @@ func pushProjectionToOuter(ctx *plancontext.PlanningContext, p *Projection, sq * } if !ctx.SemTable.RecursiveDeps(pe.EvalExpr).IsSolvedBy(outer) { - return p, rewrite.SameTree, nil + return p, NoRewrite } se, ok := pe.Info.(SubQueryExpression) @@ -258,22 +244,22 @@ func pushProjectionToOuter(ctx *plancontext.PlanningContext, p *Projection, sq * } // all projections can be pushed to the outer sq.Outer, p.Source = p, sq.Outer - return sq, rewrite.NewTree("push projection into outer side of subquery"), nil + return sq, Rewrote("push projection into outer side of subquery") } func pushProjectionInVindex( ctx *plancontext.PlanningContext, p *Projection, src *Vindex, -) (ops.Operator, *rewrite.ApplyResult, error) { +) (Operator, *ApplyResult) { ap, err := p.GetAliasedProjections() if err != nil { - return nil, nil, err + panic(err) } for _, pe := range ap { src.AddColumn(ctx, true, false, aeWrap(pe.EvalExpr)) } - return src, rewrite.NewTree("push projection into vindex"), nil + return src, Rewrote("push projection into vindex") } func (p *projector) add(pe *ProjExpr, col *sqlparser.IdentifierCI) { @@ -291,11 +277,11 @@ func pushProjectionInApplyJoin( ctx *plancontext.PlanningContext, p *Projection, src *ApplyJoin, -) (ops.Operator, *rewrite.ApplyResult, error) { +) (Operator, *ApplyResult) { ap, err := p.GetAliasedProjections() if src.LeftJoin || err != nil { // we can't push down expression evaluation to the rhs if we are not sure if it will even be executed - return p, rewrite.SameTree, nil + return p, NoRewrite } lhs, rhs := &projector{}, &projector{} if p.DT != nil && len(p.DT.Columns) > 0 { @@ -309,31 +295,18 @@ func pushProjectionInApplyJoin( if p.DT != nil && idx < len(p.DT.Columns) { col = &p.DT.Columns[idx] } - err := splitProjectionAcrossJoin(ctx, src, lhs, rhs, pe, col) - if err != nil { - return nil, nil, err - } + splitProjectionAcrossJoin(ctx, src, lhs, rhs, pe, col) } if p.isDerived() { - err := exposeColumnsThroughDerivedTable(ctx, p, src, lhs) - if err != nil { - return nil, nil, err - } + exposeColumnsThroughDerivedTable(ctx, p, src, lhs) } // Create and update the Projection operators for the left and right children, if needed. - src.LHS, err = createProjectionWithTheseColumns(ctx, src.LHS, lhs, p.DT) - if err != nil { - return nil, nil, err - } + src.LHS = createProjectionWithTheseColumns(ctx, src.LHS, lhs, p.DT) + src.RHS = createProjectionWithTheseColumns(ctx, src.RHS, rhs, p.DT) - src.RHS, err = createProjectionWithTheseColumns(ctx, src.RHS, rhs, p.DT) - if err != nil { - return nil, nil, err - } - - return src, rewrite.NewTree("split projection to either side of join"), nil + return src, Rewrote("split projection to either side of join") } // splitProjectionAcrossJoin creates JoinPredicates for all projections, @@ -344,21 +317,16 @@ func splitProjectionAcrossJoin( lhs, rhs *projector, pe *ProjExpr, colAlias *sqlparser.IdentifierCI, -) error { +) { // Check if the current expression can reuse an existing column in the ApplyJoin. 
if _, found := canReuseColumn(ctx, join.JoinColumns, pe.EvalExpr, joinColumnToExpr); found { - return nil - } - - col, err := splitUnexploredExpression(ctx, join, lhs, rhs, pe, colAlias) - if err != nil { - return err + return } // Add the new JoinColumn to the ApplyJoin's JoinPredicates. - join.JoinColumns = append(join.JoinColumns, col) - return nil + join.JoinColumns = append(join.JoinColumns, + splitUnexploredExpression(ctx, join, lhs, rhs, pe, colAlias)) } func splitUnexploredExpression( @@ -367,12 +335,9 @@ func splitUnexploredExpression( lhs, rhs *projector, pe *ProjExpr, colAlias *sqlparser.IdentifierCI, -) (JoinColumn, error) { +) JoinColumn { // Get a JoinColumn for the current expression. - col, err := join.getJoinColumnFor(ctx, pe.Original, pe.ColExpr, false) - if err != nil { - return JoinColumn{}, err - } + col := join.getJoinColumnFor(ctx, pe.Original, pe.ColExpr, false) // Update the left and right child columns and names based on the JoinColumn type. switch { @@ -395,7 +360,7 @@ func splitUnexploredExpression( innerPE.Info = pe.Info rhs.add(innerPE, colAlias) } - return col, nil + return col } // exposeColumnsThroughDerivedTable rewrites expressions within a join that is inside a derived table @@ -410,25 +375,25 @@ func splitUnexploredExpression( // The function iterates through each join predicate, rewriting the expressions in the predicate's // LHS expressions to include the derived table. This allows the expressions to be accessed outside // the derived table. -func exposeColumnsThroughDerivedTable(ctx *plancontext.PlanningContext, p *Projection, src *ApplyJoin, lhs *projector) error { +func exposeColumnsThroughDerivedTable(ctx *plancontext.PlanningContext, p *Projection, src *ApplyJoin, lhs *projector) { derivedTbl, err := ctx.SemTable.TableInfoFor(p.DT.TableID) if err != nil { - return err + panic(err) } derivedTblName, err := derivedTbl.Name() if err != nil { - return err + panic(err) } for _, predicate := range src.JoinPredicates { for idx, bve := range predicate.LHSExprs { expr := bve.Expr tbl, err := ctx.SemTable.TableInfoForExpr(expr) if err != nil { - return err + panic(err) } tblName, err := tbl.Name() if err != nil { - return err + panic(err) } expr = semantics.RewriteDerivedTableExpression(expr, derivedTbl) @@ -445,7 +410,6 @@ func exposeColumnsThroughDerivedTable(ctx *plancontext.PlanningContext, p *Proje lhs.add(projExpr, colAlias) } } - return nil } // prefixColNames adds qualifier prefixes to all ColName:s. 
@@ -462,17 +426,14 @@ func prefixColNames(ctx *plancontext.PlanningContext, tblName sqlparser.TableNam func createProjectionWithTheseColumns( ctx *plancontext.PlanningContext, - src ops.Operator, + src Operator, p *projector, dt *DerivedTable, -) (ops.Operator, error) { +) Operator { if len(p.columns) == 0 { - return src, nil - } - proj, err := createProjection(ctx, src) - if err != nil { - return nil, err + return src } + proj := createProjection(ctx, src) proj.Columns = AliasedProjections(p.columns) if dt != nil { kopy := *dt @@ -480,42 +441,42 @@ func createProjectionWithTheseColumns( proj.DT = &kopy } - return proj, nil + return proj } -func tryPushLimit(in *Limit) (ops.Operator, *rewrite.ApplyResult, error) { +func tryPushLimit(in *Limit) (Operator, *ApplyResult) { switch src := in.Source.(type) { case *Route: return tryPushingDownLimitInRoute(in, src) case *Aggregator: - return in, rewrite.SameTree, nil + return in, NoRewrite default: return setUpperLimit(in) } } -func tryPushingDownLimitInRoute(in *Limit, src *Route) (ops.Operator, *rewrite.ApplyResult, error) { +func tryPushingDownLimitInRoute(in *Limit, src *Route) (Operator, *ApplyResult) { if src.IsSingleShard() { - return rewrite.Swap(in, src, "push limit under route") + return Swap(in, src, "push limit under route") } return setUpperLimit(in) } -func setUpperLimit(in *Limit) (ops.Operator, *rewrite.ApplyResult, error) { +func setUpperLimit(in *Limit) (Operator, *ApplyResult) { if in.Pushed { - return in, rewrite.SameTree, nil + return in, NoRewrite } in.Pushed = true - visitor := func(op ops.Operator, _ semantics.TableSet, _ bool) (ops.Operator, *rewrite.ApplyResult, error) { - return op, rewrite.SameTree, nil + visitor := func(op Operator, _ semantics.TableSet, _ bool) (Operator, *ApplyResult) { + return op, NoRewrite } - var result *rewrite.ApplyResult - shouldVisit := func(op ops.Operator) rewrite.VisitRule { + var result *ApplyResult + shouldVisit := func(op Operator) VisitRule { switch op := op.(type) { case *Join, *ApplyJoin, *SubQueryContainer, *SubQuery: // we can't push limits down on either side - return rewrite.SkipChildren + return SkipChildren case *Route: newSrc := &Limit{ Source: op.Source, @@ -523,48 +484,46 @@ func setUpperLimit(in *Limit) (ops.Operator, *rewrite.ApplyResult, error) { Pushed: false, } op.Source = newSrc - result = result.Merge(rewrite.NewTree("push limit under route")) - return rewrite.SkipChildren + result = result.Merge(Rewrote("push limit under route")) + return SkipChildren default: - return rewrite.VisitChildren + return VisitChildren } } - _, err := rewrite.TopDown(in.Source, TableID, visitor, shouldVisit) - if err != nil { - return nil, nil, err - } - return in, result, nil + TopDown(in.Source, TableID, visitor, shouldVisit) + + return in, result } -func tryPushOrdering(ctx *plancontext.PlanningContext, in *Ordering) (ops.Operator, *rewrite.ApplyResult, error) { +func tryPushOrdering(ctx *plancontext.PlanningContext, in *Ordering) (Operator, *ApplyResult) { switch src := in.Source.(type) { case *Route: - return rewrite.Swap(in, src, "push ordering under route") + return Swap(in, src, "push ordering under route") case *Filter: - return rewrite.Swap(in, src, "push ordering under filter") + return Swap(in, src, "push ordering under filter") case *ApplyJoin: if canPushLeft(ctx, src, in.Order) { // ApplyJoin is stable in regard to the columns coming from the LHS, // so if all the ordering columns come from the LHS, we can push down the Ordering there src.LHS, in.Source = in, src.LHS - return 
src, rewrite.NewTree("push down ordering on the LHS of a join"), nil + return src, Rewrote("push down ordering on the LHS of a join") } case *Ordering: // we'll just remove the order underneath. The top order replaces whatever was incoming in.Source = src.Source - return in, rewrite.NewTree("remove double ordering"), nil + return in, Rewrote("remove double ordering") case *Projection: // we can move ordering under a projection if it's not introducing a column we're sorting by for _, by := range in.Order { if !fetchByOffset(by.SimplifiedExpr) { - return in, rewrite.SameTree, nil + return in, NoRewrite } } - return rewrite.Swap(in, src, "push ordering under projection") + return Swap(in, src, "push ordering under projection") case *Aggregator: if !src.QP.AlignGroupByAndOrderBy(ctx) && !overlaps(ctx, in.Order, src.Grouping) { - return in, rewrite.SameTree, nil + return in, NoRewrite } return pushOrderingUnderAggr(ctx, in, src) @@ -573,26 +532,26 @@ func tryPushOrdering(ctx *plancontext.PlanningContext, in *Ordering) (ops.Operat for _, order := range in.Order { deps := ctx.SemTable.RecursiveDeps(order.Inner.Expr) if !deps.IsSolvedBy(outerTableID) { - return in, rewrite.SameTree, nil + return in, NoRewrite } } src.Outer, in.Source = in, src.Outer - return src, rewrite.NewTree("push ordering into outer side of subquery"), nil + return src, Rewrote("push ordering into outer side of subquery") case *SubQuery: outerTableID := TableID(src.Outer) for _, order := range in.Order { deps := ctx.SemTable.RecursiveDeps(order.Inner.Expr) if !deps.IsSolvedBy(outerTableID) { - return in, rewrite.SameTree, nil + return in, NoRewrite } } src.Outer, in.Source = in, src.Outer - return src, rewrite.NewTree("push ordering into outer side of subquery"), nil + return src, Rewrote("push ordering into outer side of subquery") } - return in, rewrite.SameTree, nil + return in, NoRewrite } -func overlaps(ctx *plancontext.PlanningContext, order []ops.OrderBy, grouping []GroupBy) bool { +func overlaps(ctx *plancontext.PlanningContext, order []OrderBy, grouping []GroupBy) bool { ordering: for _, orderBy := range order { for _, groupBy := range grouping { @@ -606,13 +565,13 @@ ordering: return true } -func pushOrderingUnderAggr(ctx *plancontext.PlanningContext, order *Ordering, aggregator *Aggregator) (ops.Operator, *rewrite.ApplyResult, error) { +func pushOrderingUnderAggr(ctx *plancontext.PlanningContext, order *Ordering, aggregator *Aggregator) (Operator, *ApplyResult) { // If Aggregator is a derived table, then we should rewrite the ordering before pushing. 
if aggregator.isDerived() { for idx, orderExpr := range order.Order { ti, err := ctx.SemTable.TableInfoFor(aggregator.DT.TableID) if err != nil { - return nil, nil, err + panic(err) } newOrderExpr := orderExpr.Map(func(expr sqlparser.Expr) sqlparser.Expr { return semantics.RewriteDerivedTableExpression(expr, ti) @@ -666,12 +625,12 @@ func pushOrderingUnderAggr(ctx *plancontext.PlanningContext, order *Ordering, ag order.Source = aggrSource.Source aggrSource.Source = nil // removing from plan tree aggregator.Source = order - return aggregator, rewrite.NewTree("push ordering under aggregation, removing extra ordering"), nil + return aggregator, Rewrote("push ordering under aggregation, removing extra ordering") } - return rewrite.Swap(order, aggregator, "push ordering under aggregation") + return Swap(order, aggregator, "push ordering under aggregation") } -func canPushLeft(ctx *plancontext.PlanningContext, aj *ApplyJoin, order []ops.OrderBy) bool { +func canPushLeft(ctx *plancontext.PlanningContext, aj *ApplyJoin, order []OrderBy) bool { lhs := TableID(aj.LHS) for _, order := range order { deps := ctx.SemTable.DirectDeps(order.Inner.Expr) @@ -682,7 +641,7 @@ func canPushLeft(ctx *plancontext.PlanningContext, aj *ApplyJoin, order []ops.Or return true } -func isOuterTable(op ops.Operator, ts semantics.TableSet) bool { +func isOuterTable(op Operator, ts semantics.TableSet) bool { aj, ok := op.(*ApplyJoin) if ok && aj.LeftJoin && TableID(aj.RHS).IsOverlapping(ts) { return true @@ -697,39 +656,35 @@ func isOuterTable(op ops.Operator, ts semantics.TableSet) bool { return false } -func tryPushFilter(ctx *plancontext.PlanningContext, in *Filter) (ops.Operator, *rewrite.ApplyResult, error) { +func tryPushFilter(ctx *plancontext.PlanningContext, in *Filter) (Operator, *ApplyResult) { switch src := in.Source.(type) { case *Projection: return pushFilterUnderProjection(ctx, in, src) case *Route: for _, pred := range in.Predicates { - var err error deps := ctx.SemTable.RecursiveDeps(pred) if !isOuterTable(src, deps) { // we can only update based on predicates on inner tables - src.Routing, err = src.Routing.updateRoutingLogic(ctx, pred) - if err != nil { - return nil, nil, err - } + src.Routing = src.Routing.updateRoutingLogic(ctx, pred) } } - return rewrite.Swap(in, src, "push filter into Route") + return Swap(in, src, "push filter into Route") case *SubQuery: outerTableID := TableID(src.Outer) for _, pred := range in.Predicates { deps := ctx.SemTable.RecursiveDeps(pred) if !deps.IsSolvedBy(outerTableID) { - return in, rewrite.SameTree, nil + return in, NoRewrite } } src.Outer, in.Source = in, src.Outer - return src, rewrite.NewTree("push filter to outer query in subquery container"), nil + return src, Rewrote("push filter to outer query in subquery container") } - return in, rewrite.SameTree, nil + return in, NoRewrite } -func pushFilterUnderProjection(ctx *plancontext.PlanningContext, filter *Filter, projection *Projection) (ops.Operator, *rewrite.ApplyResult, error) { +func pushFilterUnderProjection(ctx *plancontext.PlanningContext, filter *Filter, projection *Projection) (Operator, *ApplyResult) { for _, p := range filter.Predicates { cantPush := false _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { @@ -746,64 +701,64 @@ func pushFilterUnderProjection(ctx *plancontext.PlanningContext, filter *Filter, }, p) if cantPush { - return filter, rewrite.SameTree, nil + return filter, NoRewrite } } - return rewrite.Swap(filter, projection, "push filter under projection") + return 
Swap(filter, projection, "push filter under projection") } -func tryPushDistinct(in *Distinct) (ops.Operator, *rewrite.ApplyResult, error) { +func tryPushDistinct(in *Distinct) (Operator, *ApplyResult) { if in.Required && in.PushedPerformance { - return in, rewrite.SameTree, nil + return in, NoRewrite } switch src := in.Source.(type) { case *Route: if isDistinct(src.Source) && src.IsSingleShard() { - return src, rewrite.NewTree("distinct not needed"), nil + return src, Rewrote("distinct not needed") } if src.IsSingleShard() || !in.Required { - return rewrite.Swap(in, src, "push distinct under route") + return Swap(in, src, "push distinct under route") } if isDistinct(src.Source) { - return in, rewrite.SameTree, nil + return in, NoRewrite } src.Source = &Distinct{Source: src.Source} in.PushedPerformance = true - return in, rewrite.NewTree("added distinct under route - kept original"), nil + return in, Rewrote("added distinct under route - kept original") case *Distinct: src.Required = false src.PushedPerformance = false - return src, rewrite.NewTree("remove double distinct"), nil + return src, Rewrote("remove double distinct") case *Union: for i := range src.Sources { src.Sources[i] = &Distinct{Source: src.Sources[i]} } in.PushedPerformance = true - return in, rewrite.NewTree("push down distinct under union"), nil + return in, Rewrote("push down distinct under union") case *ApplyJoin: src.LHS = &Distinct{Source: src.LHS} src.RHS = &Distinct{Source: src.RHS} in.PushedPerformance = true if in.Required { - return in, rewrite.NewTree("push distinct under join - kept original"), nil + return in, Rewrote("push distinct under join - kept original") } - return in.Source, rewrite.NewTree("push distinct under join"), nil + return in.Source, Rewrote("push distinct under join") case *Ordering: in.Source = src.Source - return in, rewrite.NewTree("remove ordering under distinct"), nil + return in, Rewrote("remove ordering under distinct") } - return in, rewrite.SameTree, nil + return in, NoRewrite } -func isDistinct(op ops.Operator) bool { +func isDistinct(op Operator) bool { switch op := op.(type) { case *Distinct: return true @@ -818,44 +773,40 @@ func isDistinct(op ops.Operator) bool { } } -func tryPushUnion(ctx *plancontext.PlanningContext, op *Union) (ops.Operator, *rewrite.ApplyResult, error) { - if res := compactUnion(op); res != rewrite.SameTree { - return op, res, nil +func tryPushUnion(ctx *plancontext.PlanningContext, op *Union) (Operator, *ApplyResult) { + if res := compactUnion(op); res != NoRewrite { + return op, res } - var sources []ops.Operator + var sources []Operator var selects []sqlparser.SelectExprs - var err error if op.distinct { - sources, selects, err = mergeUnionInputInAnyOrder(ctx, op) + sources, selects = mergeUnionInputInAnyOrder(ctx, op) } else { - sources, selects, err = mergeUnionInputsInOrder(ctx, op) - } - if err != nil { - return nil, nil, err + sources, selects = mergeUnionInputsInOrder(ctx, op) } if len(sources) == 1 { result := sources[0].(*Route) if result.IsSingleShard() || !op.distinct { - return result, rewrite.NewTree("push union under route"), nil + return result, Rewrote("push union under route") } return &Distinct{ Source: result, Required: true, - }, rewrite.NewTree("push union under route"), nil + }, Rewrote("push union under route") } if len(sources) == len(op.Sources) { - return op, rewrite.SameTree, nil + return op, NoRewrite } - return newUnion(sources, selects, op.unionColumns, op.distinct), rewrite.NewTree("merge union inputs"), nil + return 
newUnion(sources, selects, op.unionColumns, op.distinct), Rewrote("merge union inputs") } // addTruncationOrProjectionToReturnOutput uses the original Horizon to make sure that the output columns line up with what the user asked for -func addTruncationOrProjectionToReturnOutput(ctx *plancontext.PlanningContext, oldHorizon ops.Operator, output ops.Operator) (ops.Operator, error) { +func addTruncationOrProjectionToReturnOutput(ctx *plancontext.PlanningContext, oldHorizon Operator, output Operator) (Operator, error) { horizon, ok := oldHorizon.(*Horizon) if !ok { return output, nil @@ -871,20 +822,14 @@ func addTruncationOrProjectionToReturnOutput(ctx *plancontext.PlanningContext, o return output, nil } - qp, err := horizon.getQP(ctx) - if err != nil { - return nil, err - } - proj, err := createSimpleProjection(ctx, qp, output) - if err != nil { - return nil, err - } + qp := horizon.getQP(ctx) + proj := createSimpleProjection(ctx, qp, output) return proj, nil } -func stopAtRoute(operator ops.Operator) rewrite.VisitRule { +func stopAtRoute(operator Operator) VisitRule { _, isRoute := operator.(*Route) - return rewrite.VisitRule(!isRoute) + return VisitRule(!isRoute) } func aeWrap(e sqlparser.Expr) *sqlparser.AliasedExpr { diff --git a/go/vt/vtgate/planbuilder/operators/querygraph.go b/go/vt/vtgate/planbuilder/operators/querygraph.go index b0e6b4440be..bc731f29df6 100644 --- a/go/vt/vtgate/planbuilder/operators/querygraph.go +++ b/go/vt/vtgate/planbuilder/operators/querygraph.go @@ -20,7 +20,6 @@ import ( "strings" "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) @@ -63,7 +62,7 @@ type ( } ) -var _ ops.Operator = (*QueryGraph)(nil) +var _ Operator = (*QueryGraph)(nil) // Introduces implements the tableIDIntroducer interface func (qg *QueryGraph) introducesTableID() semantics.TableSet { @@ -163,7 +162,7 @@ func (qg *QueryGraph) UnsolvedPredicates(_ *semantics.SemTable) []sqlparser.Expr } // Clone implements the Operator interface -func (qg *QueryGraph) Clone([]ops.Operator) ops.Operator { +func (qg *QueryGraph) Clone([]Operator) Operator { result := &QueryGraph{ Tables: nil, innerJoins: nil, @@ -176,11 +175,11 @@ func (qg *QueryGraph) Clone([]ops.Operator) ops.Operator { return result } -func (qg *QueryGraph) GetOrdering(*plancontext.PlanningContext) []ops.OrderBy { +func (qg *QueryGraph) GetOrdering(*plancontext.PlanningContext) []OrderBy { return nil } -func (qg *QueryGraph) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (qg *QueryGraph) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { for _, e := range sqlparser.SplitAndExpression(nil, expr) { qg.collectPredicate(ctx, e) } diff --git a/go/vt/vtgate/planbuilder/operators/queryprojection.go b/go/vt/vtgate/planbuilder/operators/queryprojection.go index 1cef6706a9f..f9f6f7fa15d 100644 --- a/go/vt/vtgate/planbuilder/operators/queryprojection.go +++ b/go/vt/vtgate/planbuilder/operators/queryprojection.go @@ -28,7 +28,6 @@ import ( "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine/opcode" "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) @@ -47,7 +46,7 @@ type ( HasAggr bool Distinct bool groupByExprs []GroupBy - OrderExprs []ops.OrderBy + OrderExprs []OrderBy 
HasStar bool // AddedColumn keeps a counter for expressions added to solve HAVING expressions the user is not selecting @@ -72,9 +71,6 @@ type ( // The index at which the user expects to see this column. Set to nil, if the user does not ask for it InnerIndex *int - // The original aliased expression that this group by is referring - aliasedExpr *sqlparser.AliasedExpr - // points to the column on the same aggregator ColOffset int WSOffset int @@ -127,11 +123,10 @@ func (aggr Aggr) GetTypeCollation(ctx *plancontext.PlanningContext) evalengine.T } // NewGroupBy creates a new group by from the given fields. -func NewGroupBy(inner, simplified sqlparser.Expr, aliasedExpr *sqlparser.AliasedExpr) GroupBy { +func NewGroupBy(inner, simplified sqlparser.Expr) GroupBy { return GroupBy{ Inner: inner, SimplifiedExpr: simplified, - aliasedExpr: aliasedExpr, ColOffset: -1, WSOffset: -1, } @@ -148,8 +143,8 @@ func NewAggr(opCode opcode.AggregateOpcode, f sqlparser.AggrFunc, original *sqlp } } -func (b GroupBy) AsOrderBy() ops.OrderBy { - return ops.OrderBy{ +func (b GroupBy) AsOrderBy() OrderBy { + return OrderBy{ Inner: &sqlparser.Order{ Expr: b.Inner, Direction: sqlparser.AscOrder, @@ -158,26 +153,6 @@ func (b GroupBy) AsOrderBy() ops.OrderBy { } } -func (b GroupBy) AsAliasedExpr() *sqlparser.AliasedExpr { - if b.aliasedExpr != nil { - return b.aliasedExpr - } - col, isColName := b.Inner.(*sqlparser.ColName) - if isColName && b.SimplifiedExpr != b.Inner { - return &sqlparser.AliasedExpr{ - Expr: b.SimplifiedExpr, - As: col.Name, - } - } - if !isColName && b.SimplifiedExpr != b.Inner { - panic("this should not happen - different inner and weighStringExpr and not a column alias") - } - - return &sqlparser.AliasedExpr{ - Expr: b.SimplifiedExpr, - } -} - // GetExpr returns the underlying sqlparser.Expr of our SelectExpr func (s SelectExpr) GetExpr() (sqlparser.Expr, error) { switch sel := s.Col.(type) { @@ -316,7 +291,7 @@ func containsAggr(e sqlparser.SQLNode) (hasAggr bool) { _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { switch node.(type) { case *sqlparser.Offset: - // offsets here indicate that a possible aggregation has already been handled by an input + // offsets here indicate that a possible aggregation has already been handled by an input, // so we don't need to worry about aggregation in the original return false, nil case sqlparser.AggrFunc: @@ -381,7 +356,7 @@ func (qp *QueryProjection) addOrderBy(ctx *plancontext.PlanningContext, orderBy if !es.add(ctx, simpleExpr) { continue } - qp.OrderExprs = append(qp.OrderExprs, ops.OrderBy{ + qp.OrderExprs = append(qp.OrderExprs, OrderBy{ Inner: sqlparser.CloneRefOfOrder(order), SimplifiedExpr: simpleExpr, }) @@ -436,7 +411,7 @@ func (qp *QueryProjection) calculateDistinct(ctx *plancontext.PlanningContext) e func (qp *QueryProjection) addGroupBy(ctx *plancontext.PlanningContext, groupBy sqlparser.GroupBy) error { es := &expressionSet{} for _, group := range groupBy { - selectExprIdx, aliasExpr := qp.FindSelectExprIndexForExpr(ctx, group) + selectExprIdx := qp.FindSelectExprIndexForExpr(ctx, group) simpleExpr, err := qp.GetSimplifiedExpr(ctx, group) if err != nil { return err @@ -450,7 +425,7 @@ func (qp *QueryProjection) addGroupBy(ctx *plancontext.PlanningContext, groupBy continue } - groupBy := NewGroupBy(group, simpleExpr, aliasExpr) + groupBy := NewGroupBy(group, simpleExpr) groupBy.InnerIndex = selectExprIdx qp.groupByExprs = append(qp.groupByExprs, groupBy) @@ -809,7 +784,7 @@ func createAggrFromAggrFunc(fnc 
sqlparser.AggrFunc, aliasedExpr *sqlparser.Alias // FindSelectExprIndexForExpr returns the index of the given expression in the select expressions, if it is part of it // returns -1 otherwise. -func (qp *QueryProjection) FindSelectExprIndexForExpr(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (*int, *sqlparser.AliasedExpr) { +func (qp *QueryProjection) FindSelectExprIndexForExpr(ctx *plancontext.PlanningContext, expr sqlparser.Expr) *int { colExpr, isCol := expr.(*sqlparser.ColName) for idx, selectExpr := range qp.SelectExprs { @@ -820,14 +795,14 @@ func (qp *QueryProjection) FindSelectExprIndexForExpr(ctx *plancontext.PlanningC if isCol { isAliasExpr := aliasedExpr.As.NotEmpty() if isAliasExpr && colExpr.Name.Equal(aliasedExpr.As) { - return &idx, aliasedExpr + return &idx } } if ctx.SemTable.EqualsExprWithDeps(aliasedExpr.Expr, expr) { - return &idx, aliasedExpr + return &idx } } - return nil, nil + return nil } // OldAlignGroupByAndOrderBy TODO Remove once all of horizon planning is done on the operators @@ -920,7 +895,7 @@ func (qp *QueryProjection) GetColumnCount() int { func (qp *QueryProjection) orderByOverlapWithSelectExpr(ctx *plancontext.PlanningContext) bool { for _, expr := range qp.OrderExprs { - idx, _ := qp.FindSelectExprIndexForExpr(ctx, expr.SimplifiedExpr) + idx := qp.FindSelectExprIndexForExpr(ctx, expr.SimplifiedExpr) if idx != nil { return true } @@ -950,7 +925,7 @@ func (qp *QueryProjection) useGroupingOverDistinct(ctx *plancontext.PlanningCont if found != -1 { continue } - groupBy := NewGroupBy(ae.Expr, sExpr, ae) + groupBy := NewGroupBy(ae.Expr, sExpr) selectExprIdx := idx groupBy.InnerIndex = &selectExprIdx diff --git a/go/vt/vtgate/planbuilder/operators/queryprojection_test.go b/go/vt/vtgate/planbuilder/operators/queryprojection_test.go index 7c92b716d7c..1319ad7f9f6 100644 --- a/go/vt/vtgate/planbuilder/operators/queryprojection_test.go +++ b/go/vt/vtgate/planbuilder/operators/queryprojection_test.go @@ -23,7 +23,6 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) @@ -33,7 +32,7 @@ func TestQP(t *testing.T) { sql string expErr string - expOrder []ops.OrderBy + expOrder []OrderBy }{ { sql: "select * from user", @@ -46,20 +45,20 @@ func TestQP(t *testing.T) { }, { sql: "select 1, count(1) from user order by 1", - expOrder: []ops.OrderBy{ + expOrder: []OrderBy{ {Inner: &sqlparser.Order{Expr: sqlparser.NewIntLiteral("1")}, SimplifiedExpr: sqlparser.NewIntLiteral("1")}, }, }, { sql: "select id from user order by col, id, 1", - expOrder: []ops.OrderBy{ + expOrder: []OrderBy{ {Inner: &sqlparser.Order{Expr: sqlparser.NewColName("col")}, SimplifiedExpr: sqlparser.NewColName("col")}, {Inner: &sqlparser.Order{Expr: sqlparser.NewColName("id")}, SimplifiedExpr: sqlparser.NewColName("id")}, }, }, { sql: "SELECT CONCAT(last_name,', ',first_name) AS full_name FROM mytable ORDER BY full_name", // alias in order not supported - expOrder: []ops.OrderBy{ + expOrder: []OrderBy{ { Inner: &sqlparser.Order{Expr: sqlparser.NewColName("full_name")}, SimplifiedExpr: &sqlparser.FuncExpr{ diff --git a/go/vt/vtgate/planbuilder/operators/rewrite/rewriters.go b/go/vt/vtgate/planbuilder/operators/rewriters.go similarity index 67% rename from go/vt/vtgate/planbuilder/operators/rewrite/rewriters.go rename to go/vt/vtgate/planbuilder/operators/rewriters.go index 1ecc0cd8e76..6a329860b4b 100644 --- 
a/go/vt/vtgate/planbuilder/operators/rewrite/rewriters.go +++ b/go/vt/vtgate/planbuilder/operators/rewriters.go @@ -14,27 +14,26 @@ See the License for the specific language governing permissions and limitations under the License. */ -package rewrite +package operators import ( "fmt" "slices" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/semantics" ) type ( // VisitF is the visitor that walks an operator tree VisitF func( - op ops.Operator, // op is the operator being visited + op Operator, // op is the operator being visited lhsTables semantics.TableSet, // lhsTables contains the TableSet for all table on the LHS of our parent isRoot bool, // isRoot will be true for the root of the operator tree - ) (ops.Operator, *ApplyResult, error) + ) (Operator, *ApplyResult) // ShouldVisit is used when we want to control which nodes and ancestors to visit and which to skip - ShouldVisit func(ops.Operator) VisitRule + ShouldVisit func(Operator) VisitRule // ApplyResult tracks modifications to node and expression trees. // Only return SameTree when it is acceptable to return the original @@ -52,7 +51,7 @@ type ( ) var ( - SameTree *ApplyResult = nil + NoRewrite *ApplyResult = nil ) const ( @@ -60,7 +59,7 @@ const ( SkipChildren VisitRule = false ) -func NewTree(message string) *ApplyResult { +func Rewrote(message string) *ApplyResult { if DebugOperatorTree { fmt.Println(">>>>>>>> " + message) } @@ -82,13 +81,13 @@ func (ar *ApplyResult) Changed() bool { } // Visit allows for the walking of the operator tree. If any error is returned, the walk is aborted -func Visit(root ops.Operator, visitor func(ops.Operator) error) error { - _, _, err := breakableTopDown(root, func(op ops.Operator) (ops.Operator, *ApplyResult, VisitRule, error) { +func Visit(root Operator, visitor func(Operator) error) error { + _, _, err := breakableTopDown(root, func(op Operator) (Operator, *ApplyResult, VisitRule, error) { err := visitor(op) if err != nil { - return nil, SameTree, SkipChildren, err + return nil, NoRewrite, SkipChildren, err } - return op, SameTree, VisitChildren, nil + return op, NoRewrite, VisitChildren, nil }) return err } @@ -97,16 +96,13 @@ func Visit(root ops.Operator, visitor func(ops.Operator) error) error { // the given operator tree from the bottom up. Each callback [f] returns a ApplyResult that is aggregated // into a final output indicating whether the operator tree was changed. func BottomUp( - root ops.Operator, - resolveID func(ops.Operator) semantics.TableSet, + root Operator, + resolveID func(Operator) semantics.TableSet, visit VisitF, shouldVisit ShouldVisit, -) (ops.Operator, error) { - op, _, err := bottomUp(root, semantics.EmptyTableSet(), resolveID, visit, shouldVisit, true) - if err != nil { - return nil, err - } - return op, nil +) Operator { + op, _ := bottomUp(root, semantics.EmptyTableSet(), resolveID, visit, shouldVisit, true) + return op } var DebugOperatorTree = false @@ -122,26 +118,23 @@ func EnableDebugPrinting() (reset func()) { // FixedPointBottomUp rewrites an operator tree much like BottomUp does, // but does the rewriting repeatedly, until a tree walk is done with no changes to the tree. 
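As the comment above says, the fixed-point variant keeps re-running a full rewrite pass until a pass reports no change. A small stand-alone illustration of that loop over a toy tree (node, collapseZeroAdd and fixedPoint are invented for the sketch and are not the planner's types):

package main

import "fmt"

// node is a toy operator tree: a value with children.
type node struct {
	val      int
	children []*node
}

// collapseZeroAdd is one bottom-up pass: children are rewritten first, then
// the current node, and the pass reports whether anything changed.
func collapseZeroAdd(n *node) (out *node, changed bool) {
	for i, c := range n.children {
		newChild, childChanged := collapseZeroAdd(c)
		n.children[i] = newChild
		changed = changed || childChanged
	}
	// toy rule: a zero-valued node with a single child is redundant
	if n.val == 0 && len(n.children) == 1 {
		return n.children[0], true
	}
	return n, changed
}

// fixedPoint keeps applying the pass until one run makes no change,
// mirroring the "loop while the rewriting changes anything" idea.
func fixedPoint(root *node) *node {
	for {
		var changed bool
		root, changed = collapseZeroAdd(root)
		if !changed {
			return root
		}
	}
}

func main() {
	// 0 -> 0 -> 7 collapses down to the leaf 7; the loop stops as soon as
	// a pass reports no further change.
	tree := &node{val: 0, children: []*node{{val: 0, children: []*node{{val: 7}}}}}
	fmt.Println(fixedPoint(tree).val) // 7
}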
func FixedPointBottomUp( - root ops.Operator, - resolveID func(ops.Operator) semantics.TableSet, + root Operator, + resolveID func(Operator) semantics.TableSet, visit VisitF, shouldVisit ShouldVisit, -) (op ops.Operator, err error) { +) (op Operator) { var id *ApplyResult op = root // will loop while the rewriting changes anything - for ok := true; ok; ok = id != SameTree { + for ok := true; ok; ok = id != NoRewrite { if DebugOperatorTree { - fmt.Println(ops.ToTree(op)) + fmt.Println(ToTree(op)) } // Continue the top-down rewriting process as long as changes were made during the last traversal - op, id, err = bottomUp(op, semantics.EmptyTableSet(), resolveID, visit, shouldVisit, true) - if err != nil { - return nil, err - } + op, id = bottomUp(op, semantics.EmptyTableSet(), resolveID, visit, shouldVisit, true) } - return op, nil + return op } // TopDown rewrites an operator tree from the bottom up. BottomUp applies a transformation function to @@ -155,31 +148,28 @@ func FixedPointBottomUp( // - shouldVisit: The ShouldVisit function to control which nodes and ancestors to visit and which to skip. // // Returns: -// - ops.Operator: The root of the (potentially) transformed operator tree. +// - Operator: The root of the (potentially) transformed operator tree. // - error: An error if any occurred during the traversal. func TopDown( - root ops.Operator, - resolveID func(ops.Operator) semantics.TableSet, + root Operator, + resolveID func(Operator) semantics.TableSet, visit VisitF, shouldVisit ShouldVisit, -) (op ops.Operator, err error) { - op, _, err = topDown(root, semantics.EmptyTableSet(), resolveID, visit, shouldVisit, true) - if err != nil { - return nil, err - } +) Operator { + op, _ := topDown(root, semantics.EmptyTableSet(), resolveID, visit, shouldVisit, true) - return op, nil + return op } // Swap takes a tree like a->b->c and swaps `a` and `b`, so we end up with b->a->c -func Swap(parent, child ops.Operator, message string) (ops.Operator, *ApplyResult, error) { +func Swap(parent, child Operator, message string) (Operator, *ApplyResult) { c := child.Inputs() if len(c) != 1 { - return nil, nil, vterrors.VT13001("Swap can only be used on single input operators") + panic(vterrors.VT13001("Swap can only be used on single input operators")) } aInputs := slices.Clone(parent.Inputs()) - var tmp ops.Operator + var tmp Operator for i, in := range aInputs { if in == child { tmp = aInputs[i] @@ -188,30 +178,30 @@ func Swap(parent, child ops.Operator, message string) (ops.Operator, *ApplyResul } } if tmp == nil { - return nil, nil, vterrors.VT13001("Swap can only be used when the second argument is an input to the first") + panic(vterrors.VT13001("Swap can only be used when the second argument is an input to the first")) } - child.SetInputs([]ops.Operator{parent}) + child.SetInputs([]Operator{parent}) parent.SetInputs(aInputs) - return child, NewTree(message), nil + return child, Rewrote(message) } func bottomUp( - root ops.Operator, + root Operator, rootID semantics.TableSet, - resolveID func(ops.Operator) semantics.TableSet, + resolveID func(Operator) semantics.TableSet, rewriter VisitF, shouldVisit ShouldVisit, isRoot bool, -) (ops.Operator, *ApplyResult, error) { +) (Operator, *ApplyResult) { if shouldVisit != nil && !shouldVisit(root) { - return root, SameTree, nil + return root, NoRewrite } oldInputs := root.Inputs() var anythingChanged *ApplyResult - newInputs := make([]ops.Operator, len(oldInputs)) + newInputs := make([]Operator, len(oldInputs)) childID := rootID // noLHSTableSet is used 
to mark which operators that do not send data from the LHS to the RHS @@ -227,10 +217,7 @@ func bottomUp( if _, isUnion := root.(noLHSTableSet); !isUnion && i > 0 { childID = childID.Merge(resolveID(oldInputs[0])) } - in, changed, err := bottomUp(operator, childID, resolveID, rewriter, shouldVisit, false) - if err != nil { - return nil, nil, err - } + in, changed := bottomUp(operator, childID, resolveID, rewriter, shouldVisit, false) anythingChanged = anythingChanged.Merge(changed) newInputs[i] = in } @@ -239,18 +226,15 @@ func bottomUp( root = root.Clone(newInputs) } - newOp, treeIdentity, err := rewriter(root, rootID, isRoot) - if err != nil { - return nil, nil, err - } + newOp, treeIdentity := rewriter(root, rootID, isRoot) anythingChanged = anythingChanged.Merge(treeIdentity) - return newOp, anythingChanged, nil + return newOp, anythingChanged } func breakableTopDown( - in ops.Operator, - rewriter func(ops.Operator) (ops.Operator, *ApplyResult, VisitRule, error), -) (ops.Operator, *ApplyResult, error) { + in Operator, + rewriter func(Operator) (Operator, *ApplyResult, VisitRule, error), +) (Operator, *ApplyResult, error) { newOp, identity, visit, err := rewriter(in) if err != nil || visit == SkipChildren { return newOp, identity, err @@ -259,17 +243,17 @@ func breakableTopDown( var anythingChanged *ApplyResult oldInputs := newOp.Inputs() - newInputs := make([]ops.Operator, len(oldInputs)) + newInputs := make([]Operator, len(oldInputs)) for i, oldInput := range oldInputs { newInputs[i], identity, err = breakableTopDown(oldInput, rewriter) anythingChanged = anythingChanged.Merge(identity) if err != nil { - return nil, SameTree, err + return nil, NoRewrite, err } } if anythingChanged.Changed() { - return newOp, SameTree, nil + return newOp, NoRewrite, nil } return newOp.Clone(newInputs), anythingChanged, nil @@ -279,20 +263,17 @@ func breakableTopDown( // top down and applies the given transformation function. 
It also returns the ApplyResult // indicating whether the tree was changed func topDown( - root ops.Operator, + root Operator, rootID semantics.TableSet, - resolveID func(ops.Operator) semantics.TableSet, + resolveID func(Operator) semantics.TableSet, rewriter VisitF, shouldVisit ShouldVisit, isRoot bool, -) (ops.Operator, *ApplyResult, error) { - newOp, anythingChanged, err := rewriter(root, rootID, isRoot) - if err != nil { - return nil, nil, err - } +) (Operator, *ApplyResult) { + newOp, anythingChanged := rewriter(root, rootID, isRoot) if !shouldVisit(root) { - return newOp, anythingChanged, nil + return newOp, anythingChanged } if anythingChanged.Changed() { @@ -300,7 +281,7 @@ func topDown( } oldInputs := root.Inputs() - newInputs := make([]ops.Operator, len(oldInputs)) + newInputs := make([]Operator, len(oldInputs)) childID := rootID type noLHSTableSet interface{ NoLHSTableSet() } @@ -309,17 +290,14 @@ func topDown( if _, isUnion := root.(noLHSTableSet); !isUnion && i > 0 { childID = childID.Merge(resolveID(oldInputs[0])) } - in, changed, err := topDown(operator, childID, resolveID, rewriter, shouldVisit, false) - if err != nil { - return nil, nil, err - } + in, changed := topDown(operator, childID, resolveID, rewriter, shouldVisit, false) anythingChanged = anythingChanged.Merge(changed) newInputs[i] = in } - if anythingChanged != SameTree { - return root.Clone(newInputs), anythingChanged, nil + if anythingChanged != NoRewrite { + return root.Clone(newInputs), anythingChanged } - return root, SameTree, nil + return root, NoRewrite } diff --git a/go/vt/vtgate/planbuilder/operators/route.go b/go/vt/vtgate/planbuilder/operators/route.go index acbc28553dd..d5eee19e5dd 100644 --- a/go/vt/vtgate/planbuilder/operators/route.go +++ b/go/vt/vtgate/planbuilder/operators/route.go @@ -25,7 +25,6 @@ import ( "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" @@ -33,7 +32,7 @@ import ( type ( Route struct { - Source ops.Operator + Source Operator // Routes that have been merged into this one. MergedWith []*Route @@ -89,7 +88,7 @@ type ( Routing interface { // UpdateRoutingParams allows a Routing to control the routing params that will be used by the engine Route // OpCode is already set, and the default keyspace is set for read queries - UpdateRoutingParams(ctx *plancontext.PlanningContext, rp *engine.RoutingParameters) error + UpdateRoutingParams(ctx *plancontext.PlanningContext, rp *engine.RoutingParameters) // Clone returns a copy of the routing. Since we are trying different variation of merging, // one Routing can be used in different constellations. @@ -103,27 +102,27 @@ type ( // updateRoutingLogic updates the routing to take predicates into account. This can be used for routing // using vindexes or for figuring out which keyspace an information_schema query should be sent to. 
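A toy illustration of what "updating the routing to take predicates into account" means in practice: an equality predicate on the sharding key can narrow a scatter route down to a single shard, and the updated signature below returns the new Routing directly instead of a (Routing, error) pair. All names in the sketch (routing, scatter, equalUnique) are invented, not the real vtgate types:

package main

import "fmt"

// routing is a stand-in for a routing strategy that predicates can narrow.
type routing interface {
	opCode() string
	// updatePredicate returns a (possibly different) routing that also
	// accounts for the given predicate.
	updatePredicate(col string, value int) routing
}

type scatter struct{}

func (scatter) opCode() string { return "Scatter" }
func (s scatter) updatePredicate(col string, value int) routing {
	if col == "id" { // pretend "id" is the sharding key
		return equalUnique{shardKey: value}
	}
	return s
}

type equalUnique struct{ shardKey int }

func (equalUnique) opCode() string                        { return "EqualUnique" }
func (e equalUnique) updatePredicate(string, int) routing { return e }

func main() {
	var r routing = scatter{}
	r = r.updatePredicate("name", 3) // no vindex on "name": still a scatter
	fmt.Println(r.opCode())          // Scatter
	r = r.updatePredicate("id", 42)  // equality on the sharding key narrows it
	fmt.Println(r.opCode())          // EqualUnique
}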
- updateRoutingLogic(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (Routing, error) + updateRoutingLogic(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Routing } ) // UpdateRoutingLogic first checks if we are dealing with a predicate that -func UpdateRoutingLogic(ctx *plancontext.PlanningContext, expr sqlparser.Expr, r Routing) (Routing, error) { +func UpdateRoutingLogic(ctx *plancontext.PlanningContext, expr sqlparser.Expr, r Routing) Routing { ks := r.Keyspace() if ks == nil { var err error ks, err = ctx.VSchema.AnyKeyspace() if err != nil { - return nil, err + panic(err) } } nr := &NoneRouting{keyspace: ks} if isConstantFalse(expr) { - return nr, nil + return nr } - exit := func() (Routing, error) { + exit := func() Routing { return r.updateRoutingLogic(ctx, expr) } @@ -135,7 +134,7 @@ func UpdateRoutingLogic(ctx *plancontext.PlanningContext, expr sqlparser.Expr, r if cmp.Operator != sqlparser.NullSafeEqualOp && (sqlparser.IsNull(cmp.Left) || sqlparser.IsNull(cmp.Right)) { // any comparison against a literal null, except a null safe equality (<=>), will return null - return nr, nil + return nr } tuples, ok := cmp.Right.(sqlparser.ValTuple) @@ -148,13 +147,13 @@ func UpdateRoutingLogic(ctx *plancontext.PlanningContext, expr sqlparser.Expr, r for _, n := range tuples { // If any of the values in the tuple is a literal null, we know that this comparison will always return NULL if sqlparser.IsNull(n) { - return nr, nil + return nr } } case sqlparser.InOp: // WHERE col IN (null) if len(tuples) == 1 && sqlparser.IsNull(tuples[0]) { - return nr, nil + return nr } } @@ -189,7 +188,7 @@ func (r *Route) Cost() int { } // Clone implements the Operator interface -func (r *Route) Clone(inputs []ops.Operator) ops.Operator { +func (r *Route) Clone(inputs []Operator) Operator { cloneRoute := *r cloneRoute.Source = inputs[0] cloneRoute.Routing = r.Routing.Clone() @@ -197,12 +196,12 @@ func (r *Route) Clone(inputs []ops.Operator) ops.Operator { } // Inputs implements the Operator interface -func (r *Route) Inputs() []ops.Operator { - return []ops.Operator{r.Source} +func (r *Route) Inputs() []Operator { + return []Operator{r.Source} } // SetInputs implements the Operator interface -func (r *Route) SetInputs(ops []ops.Operator) { +func (r *Route) SetInputs(ops []Operator) { r.Source = ops[0] } @@ -357,7 +356,7 @@ func createRoute( ctx *plancontext.PlanningContext, queryTable *QueryTable, solves semantics.TableSet, -) (ops.Operator, error) { +) Operator { if queryTable.IsInfSchema { return createInfSchemaRoute(ctx, queryTable) } @@ -372,13 +371,13 @@ func findVSchemaTableAndCreateRoute( tableName sqlparser.TableName, solves semantics.TableSet, planAlternates bool, -) (*Route, error) { +) *Route { vschemaTable, _, _, _, target, err := ctx.VSchema.FindTableOrVindex(tableName) if target != nil { - return nil, vterrors.VT09017("SELECT with a target destination is not allowed") + panic(vterrors.VT09017("SELECT with a target destination is not allowed")) } if err != nil { - return nil, err + panic(err) } return createRouteFromVSchemaTable( @@ -397,7 +396,7 @@ func createRouteFromVSchemaTable( vschemaTable *vindexes.Table, solves semantics.TableSet, planAlternates bool, -) (*Route, error) { +) *Route { if vschemaTable.Name.String() != queryTable.Table.Name.String() { // we are dealing with a routed table queryTable = queryTable.Clone() @@ -405,7 +404,7 @@ func createRouteFromVSchemaTable( queryTable.Table.Name = vschemaTable.Name astTable, ok := queryTable.Alias.Expr.(sqlparser.TableName) if !ok 
{ - return nil, vterrors.VT13001("a derived table should never be a routed table") + panic(vterrors.VT13001("a derived table should never be a routed table")) } realTableName := sqlparser.NewIdentifierCS(vschemaTable.Name.String()) astTable.Name = realTableName @@ -424,11 +423,7 @@ func createRouteFromVSchemaTable( // We create the appropiate Routing struct here, depending on the type of table we are dealing with. routing := createRoutingForVTable(vschemaTable, solves) for _, predicate := range queryTable.Predicates { - var err error - routing, err = UpdateRoutingLogic(ctx, predicate, routing) - if err != nil { - return nil, err - } + routing = UpdateRoutingLogic(ctx, predicate, routing) } plan.Routing = routing @@ -436,24 +431,16 @@ func createRouteFromVSchemaTable( switch routing := routing.(type) { case *ShardedRouting: if routing.isScatter() && len(queryTable.Predicates) > 0 { - var err error // If we have a scatter query, it's worth spending a little extra time seeing if we can't improve it - plan.Routing, err = routing.tryImprove(ctx, queryTable) - if err != nil { - return nil, err - } + plan.Routing = routing.tryImprove(ctx, queryTable) } case *AnyShardRouting: if planAlternates { - alternates, err := createAlternateRoutesFromVSchemaTable(ctx, queryTable, vschemaTable, solves) - if err != nil { - return nil, err - } - routing.Alternates = alternates + routing.Alternates = createAlternateRoutesFromVSchemaTable(ctx, queryTable, vschemaTable, solves) } } - return plan, nil + return plan } func createRoutingForVTable(vschemaTable *vindexes.Table, id semantics.TableSet) Routing { @@ -474,13 +461,13 @@ func createAlternateRoutesFromVSchemaTable( queryTable *QueryTable, vschemaTable *vindexes.Table, solves semantics.TableSet, -) (map[*vindexes.Keyspace]*Route, error) { +) map[*vindexes.Keyspace]*Route { routes := make(map[*vindexes.Keyspace]*Route) switch vschemaTable.Type { case "", vindexes.TypeReference: for ksName, referenceTable := range vschemaTable.ReferencedBy { - route, err := findVSchemaTableAndCreateRoute( + route := findVSchemaTableAndCreateRoute( ctx, queryTable, sqlparser.TableName{ @@ -490,23 +477,17 @@ func createAlternateRoutesFromVSchemaTable( solves, false, /*planAlternates*/ ) - if err != nil { - return nil, err - } routes[referenceTable.Keyspace] = route } if vschemaTable.Source != nil { - route, err := findVSchemaTableAndCreateRoute( + route := findVSchemaTableAndCreateRoute( ctx, queryTable, vschemaTable.Source.TableName, solves, false, /*planAlternates*/ ) - if err != nil { - return nil, err - } keyspace := route.Routing.Keyspace() if keyspace != nil { routes[keyspace] = route @@ -514,15 +495,12 @@ func createAlternateRoutesFromVSchemaTable( } } - return routes, nil + return routes } -func (r *Route) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (r *Route) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { // first we see if the predicate changes how we route - newRouting, err := UpdateRoutingLogic(ctx, expr, r.Routing) - if err != nil { - panic(err) - } + newRouting := UpdateRoutingLogic(ctx, expr, r.Routing) r.Routing = newRouting // we also need to push the predicate down into the query @@ -530,16 +508,13 @@ func (r *Route) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Ex return r } -func createProjection(ctx *plancontext.PlanningContext, src ops.Operator) (*Projection, error) { +func createProjection(ctx *plancontext.PlanningContext, src Operator) *Projection { proj := 
newAliasedProjection(src) cols := src.GetColumns(ctx) for _, col := range cols { - _, err := proj.addUnexploredExpr(col, col.Expr) - if err != nil { - return nil, err - } + proj.addUnexploredExpr(col, col.Expr) } - return proj, nil + return proj } func (r *Route) AddColumn(ctx *plancontext.PlanningContext, reuse bool, gb bool, expr *sqlparser.AliasedExpr) int { @@ -561,10 +536,7 @@ func (r *Route) AddColumn(ctx *plancontext.PlanningContext, reuse bool, gb bool, } // If no-one could be found, we probably don't have one yet, so we add one here - src, err := createProjection(ctx, r.Source) - if err != nil { - panic(err) - } + src := createProjection(ctx, r.Source) r.Source = src offsets = src.addColumnsWithoutPushing(ctx, reuse, []bool{gb}, []*sqlparser.AliasedExpr{expr}) @@ -572,7 +544,7 @@ func (r *Route) AddColumn(ctx *plancontext.PlanningContext, reuse bool, gb bool, } type selectExpressions interface { - ops.Operator + Operator addColumnWithoutPushing(ctx *plancontext.PlanningContext, expr *sqlparser.AliasedExpr, addToGroupBy bool) int addColumnsWithoutPushing(ctx *plancontext.PlanningContext, reuse bool, addToGroupBy []bool, exprs []*sqlparser.AliasedExpr) []int isDerived() bool @@ -581,7 +553,7 @@ type selectExpressions interface { // addColumnToInput adds a column to an operator without pushing it down. // It will return a bool indicating whether the addition was successful or not, // and an offset to where the column can be found -func addMultipleColumnsToInput(ctx *plancontext.PlanningContext, operator ops.Operator, reuse bool, addToGroupBy []bool, exprs []*sqlparser.AliasedExpr) (ops.Operator, bool, []int) { +func addMultipleColumnsToInput(ctx *plancontext.PlanningContext, operator Operator, reuse bool, addToGroupBy []bool, exprs []*sqlparser.AliasedExpr) (Operator, bool, []int) { switch op := operator.(type) { case *SubQuery: src, added, offset := addMultipleColumnsToInput(ctx, op.Outer, reuse, addToGroupBy, exprs) @@ -657,7 +629,7 @@ func (r *Route) GetSelectExprs(ctx *plancontext.PlanningContext) sqlparser.Selec return r.Source.GetSelectExprs(ctx) } -func (r *Route) GetOrdering(ctx *plancontext.PlanningContext) []ops.OrderBy { +func (r *Route) GetOrdering(ctx *plancontext.PlanningContext) []OrderBy { return r.Source.GetOrdering(ctx) } @@ -673,7 +645,7 @@ func (r *Route) TablesUsed() []string { return collect() } -func isSpecialOrderBy(o ops.OrderBy) bool { +func isSpecialOrderBy(o OrderBy) bool { if sqlparser.IsNull(o.Inner.Expr) { return true } @@ -681,7 +653,7 @@ func isSpecialOrderBy(o ops.OrderBy) bool { return isFunction && f.Name.Lowered() == "rand" } -func (r *Route) planOffsets(ctx *plancontext.PlanningContext) ops.Operator { +func (r *Route) planOffsets(ctx *plancontext.PlanningContext) Operator { // if operator is returning data from a single shard, we don't need to do anything more if r.IsSingleShard() { return nil diff --git a/go/vt/vtgate/planbuilder/operators/route_planning.go b/go/vt/vtgate/planbuilder/operators/route_planning.go index 5bce334a609..30c187ac955 100644 --- a/go/vt/vtgate/planbuilder/operators/route_planning.go +++ b/go/vt/vtgate/planbuilder/operators/route_planning.go @@ -26,8 +26,6 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" 
"vitess.io/vitess/go/vt/vtgate/vindexes" @@ -38,34 +36,34 @@ type ( left, right semantics.TableSet } - opCacheMap map[tableSetPair]ops.Operator + opCacheMap map[tableSetPair]Operator ) -func pushDerived(ctx *plancontext.PlanningContext, op *Horizon) (ops.Operator, *rewrite.ApplyResult, error) { +func pushDerived(ctx *plancontext.PlanningContext, op *Horizon) (Operator, *ApplyResult) { innerRoute, ok := op.Source.(*Route) if !ok { - return op, rewrite.SameTree, nil + return op, NoRewrite } if !(innerRoute.Routing.OpCode() == engine.EqualUnique) && !op.IsMergeable(ctx) { // no need to check anything if we are sure that we will only hit a single shard - return op, rewrite.SameTree, nil + return op, NoRewrite } - return rewrite.Swap(op, op.Source, "push derived under route") + return Swap(op, op.Source, "push derived under route") } -func optimizeJoin(ctx *plancontext.PlanningContext, op *Join) (ops.Operator, *rewrite.ApplyResult, error) { +func optimizeJoin(ctx *plancontext.PlanningContext, op *Join) (Operator, *ApplyResult) { return mergeOrJoin(ctx, op.LHS, op.RHS, sqlparser.SplitAndExpression(nil, op.Predicate), !op.LeftJoin) } -func optimizeQueryGraph(ctx *plancontext.PlanningContext, op *QueryGraph) (result ops.Operator, changed *rewrite.ApplyResult, err error) { +func optimizeQueryGraph(ctx *plancontext.PlanningContext, op *QueryGraph) (result Operator, changed *ApplyResult) { switch { case ctx.PlannerVersion == querypb.ExecuteOptions_Gen4Left2Right: - result, err = leftToRightSolve(ctx, op) + result = leftToRightSolve(ctx, op) default: - result, err = greedySolve(ctx, op) + result = greedySolve(ctx, op) } unresolved := op.UnsolvedPredicates(ctx.SemTable) @@ -75,7 +73,7 @@ func optimizeQueryGraph(ctx *plancontext.PlanningContext, op *QueryGraph) (resul result = newFilter(result, ctx.SemTable.AndExpressions(unresolved...)) } - changed = rewrite.NewTree("solved query graph") + changed = Rewrote("solved query graph") return } @@ -84,18 +82,18 @@ func buildVindexTableForDML( tableInfo semantics.TableInfo, table *QueryTable, dmlType string, -) (*vindexes.Table, Routing, error) { +) (*vindexes.Table, Routing) { vindexTable := tableInfo.GetVindexTable() if vindexTable.Source != nil { sourceTable, _, _, _, _, err := ctx.VSchema.FindTableOrVindex(vindexTable.Source.TableName) if err != nil { - return nil, nil, err + panic(err) } vindexTable = sourceTable } if !vindexTable.Keyspace.Sharded { - return vindexTable, &AnyShardRouting{keyspace: vindexTable.Keyspace}, nil + return vindexTable, &AnyShardRouting{keyspace: vindexTable.Keyspace} } var dest key.Destination @@ -103,23 +101,23 @@ func buildVindexTableForDML( var err error tblName, ok := table.Alias.Expr.(sqlparser.TableName) if !ok { - return nil, nil, vterrors.VT12001("multi shard UPDATE with LIMIT") + panic(vterrors.VT12001("multi shard UPDATE with LIMIT")) } _, _, _, typ, dest, err = ctx.VSchema.FindTableOrVindex(tblName) if err != nil { - return nil, nil, err + panic(err) } if dest == nil { routing := &ShardedRouting{ keyspace: vindexTable.Keyspace, RouteOpCode: engine.Scatter, } - return vindexTable, routing, nil + return vindexTable, routing } if typ != topodatapb.TabletType_PRIMARY { - return nil, nil, vterrors.VT09002(dmlType) + panic(vterrors.VT09002(dmlType)) } // we are dealing with an explicitly targeted DML @@ -127,7 +125,7 @@ func buildVindexTableForDML( keyspace: vindexTable.Keyspace, TargetDestination: dest, } - return vindexTable, routing, nil + return vindexTable, routing } func generateOwnedVindexQuery(tblExpr 
sqlparser.TableExpr, del *sqlparser.Delete, table *vindexes.Table, ksidCols []sqlparser.IdentifierCI) string { @@ -154,21 +152,14 @@ func getUpdateVindexInformation( vindexTable *vindexes.Table, tableID semantics.TableSet, assignments []SetExpr, -) ([]*VindexPlusPredicates, map[string]*engine.VindexValues, string, []string, error) { +) ([]*VindexPlusPredicates, map[string]*engine.VindexValues, string, []string) { if !vindexTable.Keyspace.Sharded { - return nil, nil, "", nil, nil + return nil, nil, "", nil } - primaryVindex, vindexAndPredicates, err := getVindexInformation(tableID, vindexTable) - if err != nil { - return nil, nil, "", nil, err - } - - changedVindexValues, ownedVindexQuery, subQueriesArgOnChangedVindex, err := buildChangedVindexesValues(ctx, updStmt, vindexTable, primaryVindex.Columns, assignments) - if err != nil { - return nil, nil, "", nil, err - } - return vindexAndPredicates, changedVindexValues, ownedVindexQuery, subQueriesArgOnChangedVindex, nil + primaryVindex, vindexAndPredicates := getVindexInformation(tableID, vindexTable) + changedVindexValues, ownedVindexQuery, subQueriesArgOnChangedVindex := buildChangedVindexesValues(ctx, updStmt, vindexTable, primaryVindex.Columns, assignments) + return vindexAndPredicates, changedVindexValues, ownedVindexQuery, subQueriesArgOnChangedVindex } /* @@ -177,67 +168,51 @@ func getUpdateVindexInformation( and removes the two inputs to this cheapest plan and instead adds the join. As an optimization, it first only considers joining tables that have predicates defined between them */ -func greedySolve(ctx *plancontext.PlanningContext, qg *QueryGraph) (ops.Operator, error) { - routeOps, err := seedOperatorList(ctx, qg) +func greedySolve(ctx *plancontext.PlanningContext, qg *QueryGraph) Operator { + routeOps := seedOperatorList(ctx, qg) planCache := opCacheMap{} - if err != nil { - return nil, err - } - op, err := mergeRoutes(ctx, qg, routeOps, planCache, false) - if err != nil { - return nil, err - } - return op, nil + return mergeRoutes(ctx, qg, routeOps, planCache, false) } -func leftToRightSolve(ctx *plancontext.PlanningContext, qg *QueryGraph) (ops.Operator, error) { - plans, err := seedOperatorList(ctx, qg) - if err != nil { - return nil, err - } +func leftToRightSolve(ctx *plancontext.PlanningContext, qg *QueryGraph) Operator { + plans := seedOperatorList(ctx, qg) - var acc ops.Operator + var acc Operator for _, plan := range plans { if acc == nil { acc = plan continue } joinPredicates := qg.GetPredicates(TableID(acc), TableID(plan)) - acc, _, err = mergeOrJoin(ctx, acc, plan, joinPredicates, true) - if err != nil { - return nil, err - } + acc, _ = mergeOrJoin(ctx, acc, plan, joinPredicates, true) } - return acc, nil + return acc } // seedOperatorList returns a route for each table in the qg -func seedOperatorList(ctx *plancontext.PlanningContext, qg *QueryGraph) ([]ops.Operator, error) { - plans := make([]ops.Operator, len(qg.Tables)) +func seedOperatorList(ctx *plancontext.PlanningContext, qg *QueryGraph) []Operator { + plans := make([]Operator, len(qg.Tables)) // we start by seeding the table with the single routes for i, table := range qg.Tables { solves := ctx.SemTable.TableSetFor(table.Alias) - plan, err := createRoute(ctx, table, solves) - if err != nil { - return nil, err - } + plan := createRoute(ctx, table, solves) if qg.NoDeps != nil { plan = plan.AddPredicate(ctx, qg.NoDeps) } plans[i] = plan } - return plans, nil + return plans } -func createInfSchemaRoute(ctx *plancontext.PlanningContext, table *QueryTable) 
(ops.Operator, error) { +func createInfSchemaRoute(ctx *plancontext.PlanningContext, table *QueryTable) Operator { ks, err := ctx.VSchema.AnyKeyspace() if err != nil { - return nil, err + panic(err) } - var src ops.Operator = &Table{ + var src Operator = &Table{ QTable: table, VTable: &vindexes.Table{ Name: table.Table.Name, @@ -246,26 +221,20 @@ func createInfSchemaRoute(ctx *plancontext.PlanningContext, table *QueryTable) ( } var routing Routing = &InfoSchemaRouting{} for _, pred := range table.Predicates { - routing, err = UpdateRoutingLogic(ctx, pred, routing) - if err != nil { - return nil, err - } + routing = UpdateRoutingLogic(ctx, pred, routing) } return &Route{ Source: src, Routing: routing, - }, nil + } } -func mergeRoutes(ctx *plancontext.PlanningContext, qg *QueryGraph, physicalOps []ops.Operator, planCache opCacheMap, crossJoinsOK bool) (ops.Operator, error) { +func mergeRoutes(ctx *plancontext.PlanningContext, qg *QueryGraph, physicalOps []Operator, planCache opCacheMap, crossJoinsOK bool) Operator { if len(physicalOps) == 0 { - return nil, nil + return nil } for len(physicalOps) > 1 { - bestTree, lIdx, rIdx, err := findBestJoin(ctx, qg, physicalOps, planCache, crossJoinsOK) - if err != nil { - return nil, err - } + bestTree, lIdx, rIdx := findBestJoin(ctx, qg, physicalOps, planCache, crossJoinsOK) // if we found a plan, we'll replace the two plans that were joined with the join plan created if bestTree != nil { // we remove one plan, and replace the other @@ -279,7 +248,7 @@ func mergeRoutes(ctx *plancontext.PlanningContext, qg *QueryGraph, physicalOps [ physicalOps = append(physicalOps, bestTree) } else { if crossJoinsOK { - return nil, vterrors.VT13001("should not happen: we should be able to merge cross joins") + panic(vterrors.VT13001("should not happen: we should be able to merge cross joins")) } // we will only fail to find a join plan when there are only cross joins left // when that happens, we switch over to allow cross joins as well. @@ -287,20 +256,20 @@ func mergeRoutes(ctx *plancontext.PlanningContext, qg *QueryGraph, physicalOps [ crossJoinsOK = true } } - return physicalOps[0], nil + return physicalOps[0] } -func removeAt(plans []ops.Operator, idx int) []ops.Operator { +func removeAt(plans []Operator, idx int) []Operator { return append(plans[:idx], plans[idx+1:]...) 
} func findBestJoin( ctx *plancontext.PlanningContext, qg *QueryGraph, - plans []ops.Operator, + plans []Operator, planCache opCacheMap, crossJoinsOK bool, -) (bestPlan ops.Operator, lIdx int, rIdx int, err error) { +) (bestPlan Operator, lIdx int, rIdx int) { for i, lhs := range plans { for j, rhs := range plans { if i == j { @@ -313,10 +282,7 @@ func findBestJoin( // cartesian product, which is almost always a bad idea continue } - plan, err := getJoinFor(ctx, planCache, lhs, rhs, joinPredicates) - if err != nil { - return nil, 0, 0, err - } + plan := getJoinFor(ctx, planCache, lhs, rhs, joinPredicates) if bestPlan == nil || CostOf(plan) < CostOf(bestPlan) { bestPlan = plan // remember which plans we based on, so we can remove them later @@ -325,30 +291,25 @@ func findBestJoin( } } } - return bestPlan, lIdx, rIdx, nil + return bestPlan, lIdx, rIdx } -func getJoinFor(ctx *plancontext.PlanningContext, cm opCacheMap, lhs, rhs ops.Operator, joinPredicates []sqlparser.Expr) (ops.Operator, error) { +func getJoinFor(ctx *plancontext.PlanningContext, cm opCacheMap, lhs, rhs Operator, joinPredicates []sqlparser.Expr) Operator { solves := tableSetPair{left: TableID(lhs), right: TableID(rhs)} cachedPlan := cm[solves] if cachedPlan != nil { - return cachedPlan, nil + return cachedPlan } - join, _, err := mergeOrJoin(ctx, lhs, rhs, joinPredicates, true) - if err != nil { - return nil, err - } + join, _ := mergeOrJoin(ctx, lhs, rhs, joinPredicates, true) cm[solves] = join - return join, nil + return join } // requiresSwitchingSides will return true if any of the operators with the root from the given operator tree // is of the type that should not be on the RHS of a join -func requiresSwitchingSides(ctx *plancontext.PlanningContext, op ops.Operator) bool { - required := false - - _ = rewrite.Visit(op, func(current ops.Operator) error { +func requiresSwitchingSides(ctx *plancontext.PlanningContext, op Operator) (required bool) { + _ = Visit(op, func(current Operator) error { horizon, isHorizon := current.(*Horizon) if isHorizon && !horizon.IsMergeable(ctx) { @@ -358,14 +319,13 @@ func requiresSwitchingSides(ctx *plancontext.PlanningContext, op ops.Operator) b return nil }) - - return required + return } -func mergeOrJoin(ctx *plancontext.PlanningContext, lhs, rhs ops.Operator, joinPredicates []sqlparser.Expr, inner bool) (ops.Operator, *rewrite.ApplyResult, error) { +func mergeOrJoin(ctx *plancontext.PlanningContext, lhs, rhs Operator, joinPredicates []sqlparser.Expr, inner bool) (Operator, *ApplyResult) { newPlan := mergeJoinInputs(ctx, lhs, rhs, joinPredicates, newJoinMerge(joinPredicates, inner)) if newPlan != nil { - return newPlan, rewrite.NewTree("merge routes into single operator"), nil + return newPlan, Rewrote("merge routes into single operator") } if len(joinPredicates) > 0 && requiresSwitchingSides(ctx, rhs) { @@ -376,26 +336,20 @@ func mergeOrJoin(ctx *plancontext.PlanningContext, lhs, rhs ops.Operator, joinPr join.AddJoinPredicate(ctx, pred) } ctx.SemTable.QuerySignature.HashJoin = true - return join, rewrite.NewTree("use a hash join because we have LIMIT on the LHS"), nil + return join, Rewrote("use a hash join because we have LIMIT on the LHS") } join := NewApplyJoin(Clone(rhs), Clone(lhs), nil, !inner) - newOp, err := pushJoinPredicates(ctx, joinPredicates, join) - if err != nil { - return nil, nil, err - } - return newOp, rewrite.NewTree("logical join to applyJoin, switching side because LIMIT"), nil + newOp := pushJoinPredicates(ctx, joinPredicates, join) + return newOp, 
Rewrote("logical join to applyJoin, switching side because LIMIT") } join := NewApplyJoin(Clone(lhs), Clone(rhs), nil, !inner) - newOp, err := pushJoinPredicates(ctx, joinPredicates, join) - if err != nil { - return nil, nil, err - } - return newOp, rewrite.NewTree("logical join to applyJoin "), nil + newOp := pushJoinPredicates(ctx, joinPredicates, join) + return newOp, Rewrote("logical join to applyJoin ") } -func operatorsToRoutes(a, b ops.Operator) (*Route, *Route) { +func operatorsToRoutes(a, b Operator) (*Route, *Route) { aRoute, ok := a.(*Route) if !ok { return nil, nil @@ -433,7 +387,7 @@ func canMergeOnFilter(ctx *plancontext.PlanningContext, a, b *Route, predicate s return rVindex == lVindex } -func findColumnVindex(ctx *plancontext.PlanningContext, a ops.Operator, exp sqlparser.Expr) vindexes.SingleColumn { +func findColumnVindex(ctx *plancontext.PlanningContext, a Operator, exp sqlparser.Expr) vindexes.SingleColumn { _, isCol := exp.(*sqlparser.ColName) if !isCol { return nil @@ -458,7 +412,7 @@ func findColumnVindex(ctx *plancontext.PlanningContext, a ops.Operator, exp sqlp deps := ctx.SemTable.RecursiveDeps(expr) - _ = rewrite.Visit(a, func(rel ops.Operator) error { + _ = Visit(a, func(rel Operator) error { to, isTableOp := rel.(tableIDIntroducer) if !isTableOp { return nil @@ -612,14 +566,14 @@ func hexEqual(a, b *sqlparser.Literal) bool { return false } -func pushJoinPredicates(ctx *plancontext.PlanningContext, exprs []sqlparser.Expr, op *ApplyJoin) (ops.Operator, error) { +func pushJoinPredicates(ctx *plancontext.PlanningContext, exprs []sqlparser.Expr, op *ApplyJoin) Operator { if len(exprs) == 0 { - return op, nil + return op } for _, expr := range exprs { AddPredicate(ctx, op, expr, true, newFilter) } - return op, nil + return op } diff --git a/go/vt/vtgate/planbuilder/operators/sequential.go b/go/vt/vtgate/planbuilder/operators/sequential.go index 2b333c6270a..2db376a97bb 100644 --- a/go/vt/vtgate/planbuilder/operators/sequential.go +++ b/go/vt/vtgate/planbuilder/operators/sequential.go @@ -17,35 +17,34 @@ limitations under the License. package operators import ( - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) type Sequential struct { - Sources []ops.Operator + Sources []Operator noPredicates noColumns } // Clone implements the Operator interface -func (s *Sequential) Clone(inputs []ops.Operator) ops.Operator { +func (s *Sequential) Clone(inputs []Operator) Operator { newOp := *s newOp.Sources = inputs return &newOp } -func (s *Sequential) GetOrdering(*plancontext.PlanningContext) []ops.OrderBy { +func (s *Sequential) GetOrdering(*plancontext.PlanningContext) []OrderBy { return nil } // Inputs implements the Operator interface -func (s *Sequential) Inputs() []ops.Operator { +func (s *Sequential) Inputs() []Operator { return s.Sources } // SetInputs implements the Operator interface -func (s *Sequential) SetInputs(ops []ops.Operator) { +func (s *Sequential) SetInputs(ops []Operator) { s.Sources = ops } diff --git a/go/vt/vtgate/planbuilder/operators/sharded_routing.go b/go/vt/vtgate/planbuilder/operators/sharded_routing.go index d54db071d46..239ae9ce419 100644 --- a/go/vt/vtgate/planbuilder/operators/sharded_routing.go +++ b/go/vt/vtgate/planbuilder/operators/sharded_routing.go @@ -97,28 +97,23 @@ func (tr *ShardedRouting) isScatter() bool { // This can sometimes push a predicate to the top, so it's not hiding inside an OR // 2. 
If that is not enough, an additional rewrite pass is performed where we try to // turn ORs into IN, which is easier for the planner to plan -func (tr *ShardedRouting) tryImprove(ctx *plancontext.PlanningContext, queryTable *QueryTable) (Routing, error) { +func (tr *ShardedRouting) tryImprove(ctx *plancontext.PlanningContext, queryTable *QueryTable) Routing { oldPredicates := queryTable.Predicates queryTable.Predicates = nil tr.SeenPredicates = nil var routing Routing = tr - var err error for _, pred := range oldPredicates { rewritten := sqlparser.RewritePredicate(pred) predicates := sqlparser.SplitAndExpression(nil, rewritten.(sqlparser.Expr)) for _, predicate := range predicates { queryTable.Predicates = append(queryTable.Predicates, predicate) - - routing, err = UpdateRoutingLogic(ctx, predicate, routing) - if err != nil { - return nil, err - } + routing = UpdateRoutingLogic(ctx, predicate, routing) } } // If we have something other than a sharded routing with scatter, we are done if sr, ok := routing.(*ShardedRouting); !ok || !sr.isScatter() { - return routing, nil + return routing } // if we _still_ haven't found a better route, we can run this additional rewrite on any ORs we have @@ -128,23 +123,19 @@ func (tr *ShardedRouting) tryImprove(ctx *plancontext.PlanningContext, queryTabl continue } for _, predicate := range sqlparser.ExtractINFromOR(or) { - routing, err = UpdateRoutingLogic(ctx, predicate, routing) - if err != nil { - return nil, err - } + routing = UpdateRoutingLogic(ctx, predicate, routing) } } - return routing, nil + return routing } -func (tr *ShardedRouting) UpdateRoutingParams(_ *plancontext.PlanningContext, rp *engine.RoutingParameters) error { +func (tr *ShardedRouting) UpdateRoutingParams(_ *plancontext.PlanningContext, rp *engine.RoutingParameters) { rp.Keyspace = tr.keyspace if tr.Selected != nil { rp.Vindex = tr.Selected.FoundVindex rp.Values = tr.Selected.Values } - return nil } func (tr *ShardedRouting) Clone() Routing { @@ -166,17 +157,13 @@ func (tr *ShardedRouting) Clone() Routing { } } -func (tr *ShardedRouting) updateRoutingLogic(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (Routing, error) { +func (tr *ShardedRouting) updateRoutingLogic(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Routing { tr.SeenPredicates = append(tr.SeenPredicates, expr) - newRouting, newVindexFound, err := tr.searchForNewVindexes(ctx, expr) - if err != nil { - return nil, err - } - + newRouting, newVindexFound := tr.searchForNewVindexes(ctx, expr) if newRouting != nil { // we found something that we can route with something other than ShardedRouting - return newRouting, nil + return newRouting } // if we didn't open up any new vindex Options, no need to enter here @@ -184,10 +171,10 @@ func (tr *ShardedRouting) updateRoutingLogic(ctx *plancontext.PlanningContext, e tr.PickBestAvailableVindex() } - return tr, nil + return tr } -func (tr *ShardedRouting) resetRoutingLogic(ctx *plancontext.PlanningContext) (Routing, error) { +func (tr *ShardedRouting) resetRoutingLogic(ctx *plancontext.PlanningContext) Routing { tr.RouteOpCode = engine.Scatter tr.Selected = nil for i, vp := range tr.VindexPreds { @@ -196,16 +183,12 @@ func (tr *ShardedRouting) resetRoutingLogic(ctx *plancontext.PlanningContext) (R var routing Routing = tr for _, predicate := range tr.SeenPredicates { - var err error - routing, err = UpdateRoutingLogic(ctx, predicate, routing) - if err != nil { - return nil, err - } + routing = UpdateRoutingLogic(ctx, predicate, routing) } - return routing, nil + 
return routing } -func (tr *ShardedRouting) searchForNewVindexes(ctx *plancontext.PlanningContext, predicate sqlparser.Expr) (Routing, bool, error) { +func (tr *ShardedRouting) searchForNewVindexes(ctx *plancontext.PlanningContext, predicate sqlparser.Expr) (Routing, bool) { newVindexFound := false switch node := predicate.(type) { case *sqlparser.ComparisonExpr: @@ -216,23 +199,23 @@ func (tr *ShardedRouting) searchForNewVindexes(ctx *plancontext.PlanningContext, newVindexFound = newVindexFound || found } - return nil, newVindexFound, nil + return nil, newVindexFound } -func (tr *ShardedRouting) planComparison(ctx *plancontext.PlanningContext, cmp *sqlparser.ComparisonExpr) (routing Routing, foundNew bool, err error) { +func (tr *ShardedRouting) planComparison(ctx *plancontext.PlanningContext, cmp *sqlparser.ComparisonExpr) (routing Routing, foundNew bool) { switch cmp.Operator { case sqlparser.EqualOp: found := tr.planEqualOp(ctx, cmp) - return nil, found, nil + return nil, found case sqlparser.InOp: found := tr.planInOp(ctx, cmp) - return nil, found, nil + return nil, found case sqlparser.LikeOp: found := tr.planLikeOp(ctx, cmp) - return nil, found, nil + return nil, found } - return nil, false, nil + return nil, false } func (tr *ShardedRouting) planIsExpr(ctx *plancontext.PlanningContext, node *sqlparser.IsExpr) bool { diff --git a/go/vt/vtgate/planbuilder/operators/subquery.go b/go/vt/vtgate/planbuilder/operators/subquery.go index a401b29074d..279669ade38 100644 --- a/go/vt/vtgate/planbuilder/operators/subquery.go +++ b/go/vt/vtgate/planbuilder/operators/subquery.go @@ -25,7 +25,6 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine/opcode" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) @@ -34,8 +33,8 @@ import ( // outer query through a join. type SubQuery struct { // Fields filled in at the time of construction: - Outer ops.Operator // Outer query operator. - Subquery ops.Operator // Subquery operator. + Outer Operator // Outer query operator. + Subquery Operator // Subquery operator. FilterType opcode.PulloutOpcode // Type of subquery filter. Original sqlparser.Expr // This is the expression we should use if we can merge the inner to the outer originalSubquery *sqlparser.Subquery // Subquery representation, e.g., (SELECT foo from user LIMIT 1). 
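Aside on the sharded_routing.go hunks above: tryImprove and updateRoutingLogic now return a bare Routing, and each predicate is rewritten, split on AND, and every conjunct folded through the routing-update step, with failures raised as panics further down rather than threaded back as errors. A minimal, self-contained sketch of that fold shape; routing, splitAnd and updateRouting are invented stand-ins for illustration, not the real Vitess types.

package main

import (
	"fmt"
	"strings"
)

// routing is a simplified stand-in for the planner's Routing interface.
type routing interface{ String() string }

type scatter struct{}              // nothing usable found yet
type byColumn struct{ col string } // a column we can route on

func (scatter) String() string    { return "scatter" }
func (b byColumn) String() string { return "route on " + b.col }

// splitAnd stands in for sqlparser.SplitAndExpression: break a conjunction
// into its individual predicates.
func splitAnd(where string) []string {
	return strings.Split(where, " AND ")
}

// updateRouting stands in for UpdateRoutingLogic: inspect one predicate and
// either keep the current routing or return a better one. In the real code,
// invalid input panics instead of returning an error.
func updateRouting(pred string, current routing) routing {
	if col, ok := strings.CutSuffix(pred, " = 42"); ok {
		return byColumn{col: col}
	}
	return current
}

func main() {
	var r routing = scatter{}
	for _, pred := range splitAnd("region = 'eu' AND id = 42") {
		r = updateRouting(pred, r)
	}
	fmt.Println(r) // route on id
}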
@@ -54,7 +53,7 @@ type SubQuery struct { IsProjection bool } -func (sq *SubQuery) planOffsets(ctx *plancontext.PlanningContext) ops.Operator { +func (sq *SubQuery) planOffsets(ctx *plancontext.PlanningContext) Operator { sq.Vars = make(map[string]int) columns, err := sq.GetJoinColumns(ctx, sq.Outer) if err != nil { @@ -69,24 +68,24 @@ func (sq *SubQuery) planOffsets(ctx *plancontext.PlanningContext) ops.Operator { return nil } -func (sq *SubQuery) OuterExpressionsNeeded(ctx *plancontext.PlanningContext, outer ops.Operator) (result []*sqlparser.ColName, err error) { +func (sq *SubQuery) OuterExpressionsNeeded(ctx *plancontext.PlanningContext, outer Operator) (result []*sqlparser.ColName) { joinColumns, err := sq.GetJoinColumns(ctx, outer) if err != nil { - return nil, err + return nil } for _, jc := range joinColumns { for _, lhsExpr := range jc.LHSExprs { col, ok := lhsExpr.Expr.(*sqlparser.ColName) if !ok { - return nil, vterrors.VT13001("joins can only compare columns: %s", sqlparser.String(lhsExpr.Expr)) + panic(vterrors.VT13001("joins can only compare columns: %s", sqlparser.String(lhsExpr.Expr))) } result = append(result, col) } } - return result, nil + return result } -func (sq *SubQuery) GetJoinColumns(ctx *plancontext.PlanningContext, outer ops.Operator) ([]JoinColumn, error) { +func (sq *SubQuery) GetJoinColumns(ctx *plancontext.PlanningContext, outer Operator) ([]JoinColumn, error) { if outer == nil { return nil, vterrors.VT13001("outer operator cannot be nil") } @@ -98,7 +97,7 @@ func (sq *SubQuery) GetJoinColumns(ctx *plancontext.PlanningContext, outer ops.O } sq.outerID = outerID mapper := func(in sqlparser.Expr) (JoinColumn, error) { - return breakExpressionInLHSandRHSForApplyJoin(ctx, in, outerID) + return breakExpressionInLHSandRHSForApplyJoin(ctx, in, outerID), nil } joinPredicates, err := slice.MapWithError(sq.Predicates, mapper) if err != nil { @@ -109,7 +108,7 @@ func (sq *SubQuery) GetJoinColumns(ctx *plancontext.PlanningContext, outer ops.O } // Clone implements the Operator interface -func (sq *SubQuery) Clone(inputs []ops.Operator) ops.Operator { +func (sq *SubQuery) Clone(inputs []Operator) Operator { klone := *sq switch len(inputs) { case 1: @@ -126,21 +125,21 @@ func (sq *SubQuery) Clone(inputs []ops.Operator) ops.Operator { return &klone } -func (sq *SubQuery) GetOrdering(ctx *plancontext.PlanningContext) []ops.OrderBy { +func (sq *SubQuery) GetOrdering(ctx *plancontext.PlanningContext) []OrderBy { return sq.Outer.GetOrdering(ctx) } // Inputs implements the Operator interface -func (sq *SubQuery) Inputs() []ops.Operator { +func (sq *SubQuery) Inputs() []Operator { if sq.Outer == nil { - return []ops.Operator{sq.Subquery} + return []Operator{sq.Subquery} } - return []ops.Operator{sq.Outer, sq.Subquery} + return []Operator{sq.Outer, sq.Subquery} } // SetInputs implements the Operator interface -func (sq *SubQuery) SetInputs(inputs []ops.Operator) { +func (sq *SubQuery) SetInputs(inputs []Operator) { switch len(inputs) { case 1: sq.Subquery = inputs[0] @@ -168,7 +167,7 @@ func (sq *SubQuery) ShortDescription() string { return fmt.Sprintf("%s %v%s", typ, sq.FilterType.String(), pred) } -func (sq *SubQuery) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (sq *SubQuery) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { sq.Outer = sq.Outer.AddPredicate(ctx, expr) return sq } @@ -197,7 +196,7 @@ func (sq *SubQuery) GetMergePredicates() []sqlparser.Expr { return sq.Predicates } -func (sq *SubQuery) 
settle(ctx *plancontext.PlanningContext, outer ops.Operator) (ops.Operator, error) { +func (sq *SubQuery) settle(ctx *plancontext.PlanningContext, outer Operator) (Operator, error) { if !sq.TopLevel { return nil, subqueryNotAtTopErr } @@ -215,7 +214,7 @@ func (sq *SubQuery) settle(ctx *plancontext.PlanningContext, outer ops.Operator) var correlatedSubqueryErr = vterrors.VT12001("correlated subquery is only supported for EXISTS") var subqueryNotAtTopErr = vterrors.VT12001("unmergable subquery can not be inside complex expression") -func (sq *SubQuery) settleFilter(ctx *plancontext.PlanningContext, outer ops.Operator) (ops.Operator, error) { +func (sq *SubQuery) settleFilter(ctx *plancontext.PlanningContext, outer Operator) (Operator, error) { if len(sq.Predicates) > 0 { if sq.FilterType != opcode.PulloutExists { return nil, correlatedSubqueryErr @@ -282,22 +281,8 @@ func (sq *SubQuery) isMerged(ctx *plancontext.PlanningContext) bool { } // mapExpr rewrites all expressions according to the provided function -func (sq *SubQuery) mapExpr(f func(expr sqlparser.Expr) (sqlparser.Expr, error)) error { - newPredicates, err := slice.MapWithError(sq.Predicates, f) - if err != nil { - return err - } - sq.Predicates = newPredicates - - sq.Original, err = f(sq.Original) - if err != nil { - return err - } - - originalSubquery, err := f(sq.originalSubquery) - if err != nil { - return err - } - sq.originalSubquery = originalSubquery.(*sqlparser.Subquery) - return nil +func (sq *SubQuery) mapExpr(f func(expr sqlparser.Expr) sqlparser.Expr) { + sq.Predicates = slice.Map(sq.Predicates, f) + sq.Original = f(sq.Original) + sq.originalSubquery = f(sq.originalSubquery).(*sqlparser.Subquery) } diff --git a/go/vt/vtgate/planbuilder/operators/subquery_builder.go b/go/vt/vtgate/planbuilder/operators/subquery_builder.go index 1d1d12bbfe3..b2de19408b4 100644 --- a/go/vt/vtgate/planbuilder/operators/subquery_builder.go +++ b/go/vt/vtgate/planbuilder/operators/subquery_builder.go @@ -19,7 +19,6 @@ package operators import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/engine/opcode" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) @@ -32,7 +31,7 @@ type SubQueryBuilder struct { outerID semantics.TableSet } -func (sqb *SubQueryBuilder) getRootOperator(op ops.Operator, decorator func(operator ops.Operator) ops.Operator) ops.Operator { +func (sqb *SubQueryBuilder) getRootOperator(op Operator, decorator func(operator Operator) Operator) Operator { if len(sqb.Inner) == 0 { return op } @@ -53,19 +52,16 @@ func (sqb *SubQueryBuilder) handleSubquery( ctx *plancontext.PlanningContext, expr sqlparser.Expr, outerID semantics.TableSet, -) (*SubQuery, error) { +) *SubQuery { subq, parentExpr := getSubQuery(expr) if subq == nil { - return nil, nil + return nil } argName := ctx.GetReservedArgumentFor(subq) - sqInner, err := createSubqueryOp(ctx, parentExpr, expr, subq, outerID, argName) - if err != nil { - return nil, err - } + sqInner := createSubqueryOp(ctx, parentExpr, expr, subq, outerID, argName) sqb.Inner = append(sqb.Inner, sqInner) - return sqInner, nil + return sqInner } func getSubQuery(expr sqlparser.Expr) (subqueryExprExists *sqlparser.Subquery, parentExpr sqlparser.Expr) { @@ -99,7 +95,7 @@ func createSubqueryOp( subq *sqlparser.Subquery, outerID semantics.TableSet, name string, -) (*SubQuery, error) { +) *SubQuery { switch parent := parent.(type) { case *sqlparser.NotExpr: switch 
parent.Expr.(type) { @@ -120,20 +116,14 @@ func createSubqueryOp( // and extracts subqueries into operators func (sqb *SubQueryBuilder) inspectStatement(ctx *plancontext.PlanningContext, stmt sqlparser.SelectStatement, -) (sqlparser.Exprs, []JoinColumn, error) { +) (sqlparser.Exprs, []JoinColumn) { switch stmt := stmt.(type) { case *sqlparser.Select: return sqb.inspectSelect(ctx, stmt) case *sqlparser.Union: - exprs1, cols1, err := sqb.inspectStatement(ctx, stmt.Left) - if err != nil { - return nil, nil, err - } - exprs2, cols2, err := sqb.inspectStatement(ctx, stmt.Right) - if err != nil { - return nil, nil, err - } - return append(exprs1, exprs2...), append(cols1, cols2...), nil + exprs1, cols1 := sqb.inspectStatement(ctx, stmt.Left) + exprs2, cols2 := sqb.inspectStatement(ctx, stmt.Right) + return append(exprs1, exprs2...), append(cols1, cols2...) } panic("unknown type") } @@ -144,22 +134,12 @@ func (sqb *SubQueryBuilder) inspectStatement(ctx *plancontext.PlanningContext, func (sqb *SubQueryBuilder) inspectSelect( ctx *plancontext.PlanningContext, sel *sqlparser.Select, -) (sqlparser.Exprs, []JoinColumn, error) { +) (sqlparser.Exprs, []JoinColumn) { // first we need to go through all the places where one can find predicates // and search for subqueries - newWhere, wherePreds, whereJoinCols, err := sqb.inspectWhere(ctx, sel.Where) - if err != nil { - return nil, nil, err - } - newHaving, havingPreds, havingJoinCols, err := sqb.inspectWhere(ctx, sel.Having) - if err != nil { - return nil, nil, err - } - - newFrom, onPreds, onJoinCols, err := sqb.inspectOnExpr(ctx, sel.From) - if err != nil { - return nil, nil, err - } + newWhere, wherePreds, whereJoinCols := sqb.inspectWhere(ctx, sel.Where) + newHaving, havingPreds, havingJoinCols := sqb.inspectWhere(ctx, sel.Having) + newFrom, onPreds, onJoinCols := sqb.inspectOnExpr(ctx, sel.From) // then we use the updated AST structs to build the operator // these AST elements have any subqueries replace by arguments @@ -168,8 +148,7 @@ func (sqb *SubQueryBuilder) inspectSelect( sel.From = newFrom return append(append(wherePreds, havingPreds...), onPreds...), - append(append(whereJoinCols, havingJoinCols...), onJoinCols...), - nil + append(append(whereJoinCols, havingJoinCols...), onJoinCols...) } func createSubquery( @@ -181,7 +160,7 @@ func createSubquery( argName string, filterType opcode.PulloutOpcode, isProjection bool, -) (*SubQuery, error) { +) *SubQuery { topLevel := ctx.SemTable.EqualsExpr(original, parent) original = cloneASTAndSemState(ctx, original) originalSq := cloneASTAndSemState(ctx, subq) @@ -189,20 +168,13 @@ func createSubquery( totalID := subqID.Merge(outerID) sqc := &SubQueryBuilder{totalID: totalID, subqID: subqID, outerID: outerID} - predicates, joinCols, err := sqc.inspectStatement(ctx, subq.Select) - if err != nil { - return nil, err - } - + predicates, joinCols := sqc.inspectStatement(ctx, subq.Select) stmt := rewriteRemainingColumns(ctx, subq.Select, subqID) // TODO: this should not be needed. 
We are using CopyOnRewrite above, but somehow this is not getting copied ctx.SemTable.CopySemanticInfo(subq.Select, stmt) - opInner, err := translateQueryToOp(ctx, stmt) - if err != nil { - return nil, err - } + opInner := translateQueryToOp(ctx, stmt) opInner = sqc.getRootOperator(opInner, nil) return &SubQuery{ @@ -215,15 +187,15 @@ func createSubquery( IsProjection: isProjection, TopLevel: topLevel, JoinColumns: joinCols, - }, nil + } } func (sqb *SubQueryBuilder) inspectWhere( ctx *plancontext.PlanningContext, in *sqlparser.Where, -) (*sqlparser.Where, sqlparser.Exprs, []JoinColumn, error) { +) (*sqlparser.Where, sqlparser.Exprs, []JoinColumn) { if in == nil { - return nil, nil, nil, nil + return nil, nil, nil } jpc := &joinPredicateCollector{ totalID: sqb.totalID, @@ -232,16 +204,11 @@ func (sqb *SubQueryBuilder) inspectWhere( } for _, predicate := range sqlparser.SplitAndExpression(nil, in.Expr) { sqlparser.RemoveKeyspaceFromColName(predicate) - subq, err := sqb.handleSubquery(ctx, predicate, sqb.totalID) - if err != nil { - return nil, nil, nil, err - } + subq := sqb.handleSubquery(ctx, predicate, sqb.totalID) if subq != nil { continue } - if err = jpc.inspectPredicate(ctx, predicate); err != nil { - return nil, nil, nil, err - } + jpc.inspectPredicate(ctx, predicate) } if len(jpc.remainingPredicates) == 0 { @@ -250,13 +217,13 @@ func (sqb *SubQueryBuilder) inspectWhere( in.Expr = sqlparser.AndExpressions(jpc.remainingPredicates...) } - return in, jpc.predicates, jpc.joinColumns, nil + return in, jpc.predicates, jpc.joinColumns } func (sqb *SubQueryBuilder) inspectOnExpr( ctx *plancontext.PlanningContext, from []sqlparser.TableExpr, -) (newFrom []sqlparser.TableExpr, onPreds sqlparser.Exprs, onJoinCols []JoinColumn, err error) { +) (newFrom []sqlparser.TableExpr, onPreds sqlparser.Exprs, onJoinCols []JoinColumn) { for _, tbl := range from { tbl := sqlparser.CopyOnRewrite(tbl, dontEnterSubqueries, func(cursor *sqlparser.CopyOnWriteCursor) { cond, ok := cursor.Node().(*sqlparser.JoinCondition) @@ -271,20 +238,11 @@ func (sqb *SubQueryBuilder) inspectOnExpr( } for _, pred := range sqlparser.SplitAndExpression(nil, cond.On) { - subq, innerErr := sqb.handleSubquery(ctx, pred, sqb.totalID) - if err != nil { - err = innerErr - cursor.StopTreeWalk() - return - } + subq := sqb.handleSubquery(ctx, pred, sqb.totalID) if subq != nil { continue } - if err = jpc.inspectPredicate(ctx, pred); err != nil { - err = innerErr - cursor.StopTreeWalk() - return - } + jpc.inspectPredicate(ctx, pred) } if len(jpc.remainingPredicates) == 0 { cond.On = nil @@ -294,9 +252,6 @@ func (sqb *SubQueryBuilder) inspectOnExpr( onPreds = append(onPreds, jpc.predicates...) onJoinCols = append(onJoinCols, jpc.joinColumns...) 
}, ctx.SemTable.CopySemanticInfo) - if err != nil { - return - } newFrom = append(newFrom, tbl.(sqlparser.TableExpr)) } return @@ -309,7 +264,7 @@ func createComparisonSubQuery( subFromOutside *sqlparser.Subquery, outerID semantics.TableSet, name string, -) (*SubQuery, error) { +) *SubQuery { subq, outside := semantics.GetSubqueryAndOtherSide(parent) if outside == nil || subq != subFromOutside { panic("uh oh") @@ -323,10 +278,7 @@ func createComparisonSubQuery( filterType = opcode.PulloutNotIn } - subquery, err := createSubquery(ctx, original, subq, outerID, parent, name, filterType, false) - if err != nil { - return nil, err - } + subquery := createSubquery(ctx, original, subq, outerID, parent, name, filterType, false) // if we are comparing with a column from the inner subquery, // we add this extra predicate to check if the two sides are mergable or not @@ -338,7 +290,7 @@ func createComparisonSubQuery( } } - return subquery, err + return subquery } func (sqb *SubQueryBuilder) pullOutValueSubqueries( @@ -346,25 +298,22 @@ func (sqb *SubQueryBuilder) pullOutValueSubqueries( expr sqlparser.Expr, outerID semantics.TableSet, isDML bool, -) (sqlparser.Expr, []*SubQuery, error) { +) (sqlparser.Expr, []*SubQuery) { original := sqlparser.CloneExpr(expr) sqe := extractSubQueries(ctx, expr, isDML) if sqe == nil { - return nil, nil, nil + return nil, nil } var newSubqs []*SubQuery for idx, subq := range sqe.subq { - sqInner, err := createSubquery(ctx, original, subq, outerID, original, sqe.cols[idx], sqe.pullOutCode[idx], true) - if err != nil { - return nil, nil, err - } + sqInner := createSubquery(ctx, original, subq, outerID, original, sqe.cols[idx], sqe.pullOutCode[idx], true) newSubqs = append(newSubqs, sqInner) } sqb.Inner = append(sqb.Inner, newSubqs...) 
- return sqe.new, newSubqs, nil + return sqe.new, newSubqs } type subqueryExtraction struct { diff --git a/go/vt/vtgate/planbuilder/operators/subquery_container.go b/go/vt/vtgate/planbuilder/operators/subquery_container.go index ab8d1104623..e4feeab49d8 100644 --- a/go/vt/vtgate/planbuilder/operators/subquery_container.go +++ b/go/vt/vtgate/planbuilder/operators/subquery_container.go @@ -18,7 +18,6 @@ package operators import ( "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) @@ -27,15 +26,15 @@ type ( // The inner subqueries can be executed in any order, so we store them like this so we can see more opportunities // for merging SubQueryContainer struct { - Outer ops.Operator + Outer Operator Inner []*SubQuery } ) -var _ ops.Operator = (*SubQueryContainer)(nil) +var _ Operator = (*SubQueryContainer)(nil) // Clone implements the Operator interface -func (sqc *SubQueryContainer) Clone(inputs []ops.Operator) ops.Operator { +func (sqc *SubQueryContainer) Clone(inputs []Operator) Operator { result := &SubQueryContainer{ Outer: inputs[0], } @@ -49,13 +48,13 @@ func (sqc *SubQueryContainer) Clone(inputs []ops.Operator) ops.Operator { return result } -func (sqc *SubQueryContainer) GetOrdering(ctx *plancontext.PlanningContext) []ops.OrderBy { +func (sqc *SubQueryContainer) GetOrdering(ctx *plancontext.PlanningContext) []OrderBy { return sqc.Outer.GetOrdering(ctx) } // Inputs implements the Operator interface -func (sqc *SubQueryContainer) Inputs() []ops.Operator { - operators := []ops.Operator{sqc.Outer} +func (sqc *SubQueryContainer) Inputs() []Operator { + operators := []Operator{sqc.Outer} for _, inner := range sqc.Inner { operators = append(operators, inner) } @@ -63,7 +62,7 @@ func (sqc *SubQueryContainer) Inputs() []ops.Operator { } // SetInputs implements the Operator interface -func (sqc *SubQueryContainer) SetInputs(ops []ops.Operator) { +func (sqc *SubQueryContainer) SetInputs(ops []Operator) { sqc.Outer = ops[0] } @@ -71,7 +70,7 @@ func (sqc *SubQueryContainer) ShortDescription() string { return "" } -func (sqc *SubQueryContainer) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (sqc *SubQueryContainer) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { sqc.Outer = sqc.Outer.AddPredicate(ctx, expr) return sqc } diff --git a/go/vt/vtgate/planbuilder/operators/subquery_planning.go b/go/vt/vtgate/planbuilder/operators/subquery_planning.go index 74761aef5c5..ed0c6bde941 100644 --- a/go/vt/vtgate/planbuilder/operators/subquery_planning.go +++ b/go/vt/vtgate/planbuilder/operators/subquery_planning.go @@ -26,13 +26,11 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine/opcode" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) -func isMergeable(ctx *plancontext.PlanningContext, query sqlparser.SelectStatement, op ops.Operator) bool { +func isMergeable(ctx *plancontext.PlanningContext, query sqlparser.SelectStatement, op Operator) bool { validVindex := func(expr sqlparser.Expr) bool { sc := findColumnVindex(ctx, op, expr) return sc != nil && sc.IsUnique() @@ -74,24 +72,24 @@ func isMergeable(ctx *plancontext.PlanningContext, query sqlparser.SelectStateme } } -func settleSubqueries(ctx 
*plancontext.PlanningContext, op ops.Operator) ops.Operator { - visit := func(op ops.Operator, lhsTables semantics.TableSet, isRoot bool) (ops.Operator, *rewrite.ApplyResult, error) { +func settleSubqueries(ctx *plancontext.PlanningContext, op Operator) Operator { + visit := func(op Operator, lhsTables semantics.TableSet, isRoot bool) (Operator, *ApplyResult) { switch op := op.(type) { case *SubQueryContainer: outer := op.Outer for _, subq := range op.Inner { newOuter, err := subq.settle(ctx, outer) if err != nil { - return nil, nil, err + panic(err) } subq.Outer = newOuter outer = subq } - return outer, rewrite.NewTree("extracted subqueries from subquery container"), nil + return outer, Rewrote("extracted subqueries from subquery container") case *Projection: ap, err := op.GetAliasedProjections() if err != nil { - return nil, nil, err + panic(err) } for _, pe := range ap { @@ -102,13 +100,10 @@ func settleSubqueries(ctx *plancontext.PlanningContext, op ops.Operator) ops.Ope mergeSubqueryExpr(ctx, setExpr.Expr) } } - return op, rewrite.SameTree, nil + return op, NoRewrite } - op, err := rewrite.BottomUp(op, TableID, visit, nil) - if err != nil { - panic(err) - } - return op + + return BottomUp(op, TableID, visit, nil) } func mergeSubqueryExpr(ctx *plancontext.PlanningContext, pe *ProjExpr) { @@ -194,7 +189,7 @@ func tryPushSubQueryInJoin( ctx *plancontext.PlanningContext, inner *SubQuery, outer *ApplyJoin, -) (ops.Operator, *rewrite.ApplyResult, error) { +) (Operator, *ApplyResult) { lhs := TableID(outer.LHS) rhs := TableID(outer.RHS) joinID := TableID(outer) @@ -213,12 +208,9 @@ func tryPushSubQueryInJoin( // in general, we don't want to push down uncorrelated subqueries into the RHS of a join, // since this side is executed once per row from the LHS, so we would unnecessarily execute // the subquery multiple times. The exception is if we can merge the subquery with the RHS of the join. - merged, result, err := tryMergeWithRHS(ctx, inner, outer) - if err != nil { - return nil, nil, err - } + merged, result := tryMergeWithRHS(ctx, inner, outer) if merged != nil { - return merged, result, nil + return merged, result } _, ok := inner.Subquery.(*Projection) @@ -227,41 +219,37 @@ func tryPushSubQueryInJoin( // Projections are easy to push down, so if this is still at the top, // it means we have not tried pushing it yet. // Let's give it a chance to push down before we push it on the left - return nil, rewrite.SameTree, nil + return nil, NoRewrite } if deps.IsSolvedBy(lhs) { // we can safely push down the subquery on the LHS outer.LHS = addSubQuery(outer.LHS, inner) - return outer, rewrite.NewTree("push subquery into LHS of join"), nil + return outer, Rewrote("push subquery into LHS of join") } if outer.LeftJoin || len(inner.Predicates) == 0 { // we can't push any filters on the RHS of an outer join, and // we don't want to push uncorrelated subqueries to the RHS of a join - return nil, rewrite.SameTree, nil + return nil, NoRewrite } if deps.IsSolvedBy(rhs) { // we can push down the subquery filter on RHS of the join outer.RHS = addSubQuery(outer.RHS, inner) - return outer, rewrite.NewTree("push subquery into RHS of join"), nil + return outer, Rewrote("push subquery into RHS of join") } if deps.IsSolvedBy(joinID) { // we can rewrite the predicate to not use the values from the lhs, // and instead use arguments for these dependencies. 
// this way we can push the subquery into the RHS of this join - err := inner.mapExpr(extractLHSExpr(ctx, outer, lhs)) - if err != nil { - return nil, nil, err - } - + inner.mapExpr(extractLHSExpr(ctx, outer, lhs)) outer.RHS = addSubQuery(outer.RHS, inner) - return outer, rewrite.NewTree("push subquery into RHS of join rewriting predicates"), nil + return outer, Rewrote("push subquery into RHS of join rewriting predicates") } - return nil, rewrite.SameTree, nil + return nil, NoRewrite } // extractLHSExpr will return a function that extracts any ColName coming from the LHS table, @@ -270,37 +258,34 @@ func extractLHSExpr( ctx *plancontext.PlanningContext, outer *ApplyJoin, lhs semantics.TableSet, -) func(expr sqlparser.Expr) (sqlparser.Expr, error) { - return func(expr sqlparser.Expr) (sqlparser.Expr, error) { - col, err := breakExpressionInLHSandRHSForApplyJoin(ctx, expr, lhs) - if err != nil { - return nil, err - } +) func(expr sqlparser.Expr) sqlparser.Expr { + return func(expr sqlparser.Expr) sqlparser.Expr { + col := breakExpressionInLHSandRHSForApplyJoin(ctx, expr, lhs) if col.IsPureLeft() { - return nil, vterrors.VT13001("did not expect to find any predicates that do not need data from the inner here") + panic(vterrors.VT13001("did not expect to find any predicates that do not need data from the inner here")) } for _, bve := range col.LHSExprs { if !outer.isColNameMovedFromL2R(bve.Name) { outer.ExtraLHSVars = append(outer.ExtraLHSVars, bve) } } - return col.RHSExpr, nil + return col.RHSExpr } } // tryMergeWithRHS attempts to merge a subquery with the RHS of a join -func tryMergeWithRHS(ctx *plancontext.PlanningContext, inner *SubQuery, outer *ApplyJoin) (ops.Operator, *rewrite.ApplyResult, error) { +func tryMergeWithRHS(ctx *plancontext.PlanningContext, inner *SubQuery, outer *ApplyJoin) (Operator, *ApplyResult) { if outer.LeftJoin { - return nil, nil, nil + return nil, nil } // both sides need to be routes outerRoute, ok := outer.RHS.(*Route) if !ok { - return nil, nil, nil + return nil, nil } innerRoute, ok := inner.Subquery.(*Route) if !ok { - return nil, nil, nil + return nil, nil } newExpr := rewriteOriginalPushedToRHS(ctx, inner.Original, outer) @@ -311,18 +296,18 @@ func tryMergeWithRHS(ctx *plancontext.PlanningContext, inner *SubQuery, outer *A } newOp := mergeSubqueryInputs(ctx, innerRoute, outerRoute, inner.GetMergePredicates(), sqm) if newOp == nil { - return nil, nil, nil + return nil, nil } outer.RHS = newOp ctx.MergedSubqueries = append(ctx.MergedSubqueries, inner.originalSubquery) - return outer, rewrite.NewTree("merged subquery with rhs of join"), nil + return outer, Rewrote("merged subquery with rhs of join") } // addSubQuery adds a SubQuery to the given operator. If the operator is a SubQueryContainer, // it will add the SubQuery to the SubQueryContainer. If the operator is something else, it will // create a new SubQueryContainer with the given operator as the outer and the SubQuery as the inner. 
-func addSubQuery(in ops.Operator, inner *SubQuery) ops.Operator { +func addSubQuery(in Operator, inner *SubQuery) Operator { sql, ok := in.(*SubQueryContainer) if !ok { return &SubQueryContainer{ @@ -364,10 +349,10 @@ func rewriteOriginalPushedToRHS(ctx *plancontext.PlanningContext, expression sql return result.(sqlparser.Expr) } -func pushProjectionToOuterContainer(ctx *plancontext.PlanningContext, p *Projection, src *SubQueryContainer) (ops.Operator, *rewrite.ApplyResult, error) { +func pushProjectionToOuterContainer(ctx *plancontext.PlanningContext, p *Projection, src *SubQueryContainer) (Operator, *ApplyResult) { ap, err := p.GetAliasedProjections() if err != nil { - return p, rewrite.SameTree, nil + return p, NoRewrite } outer := TableID(src.Outer) @@ -378,7 +363,7 @@ func pushProjectionToOuterContainer(ctx *plancontext.PlanningContext, p *Project } if !ctx.SemTable.RecursiveDeps(pe.EvalExpr).IsSolvedBy(outer) { - return p, rewrite.SameTree, nil + return p, NoRewrite } if se, ok := pe.Info.(SubQueryExpression); ok { @@ -387,7 +372,7 @@ func pushProjectionToOuterContainer(ctx *plancontext.PlanningContext, p *Project } // all projections can be pushed to the outer src.Outer, p.Source = p, src.Outer - return src, rewrite.NewTree("push projection into outer side of subquery container"), nil + return src, Rewrote("push projection into outer side of subquery container") } func rewriteColNameToArgument(ctx *plancontext.PlanningContext, in sqlparser.Expr, se SubQueryExpression, subqueries ...*SubQuery) sqlparser.Expr { @@ -433,19 +418,16 @@ func rewriteColNameToArgument(ctx *plancontext.PlanningContext, in sqlparser.Exp return result.(sqlparser.Expr) } -func pushOrMergeSubQueryContainer(ctx *plancontext.PlanningContext, in *SubQueryContainer) (ops.Operator, *rewrite.ApplyResult, error) { +func pushOrMergeSubQueryContainer(ctx *plancontext.PlanningContext, in *SubQueryContainer) (Operator, *ApplyResult) { if !reachedPhase(ctx, initialPlanning) { - return in, rewrite.SameTree, nil + return in, NoRewrite } var remaining []*SubQuery - var result *rewrite.ApplyResult + var result *ApplyResult for _, inner := range in.Inner { - newOuter, _result, err := pushOrMerge(ctx, in.Outer, inner) - if err != nil { - return nil, nil, err - } - if _result == rewrite.SameTree { + newOuter, _result := pushOrMerge(ctx, in.Outer, inner) + if _result == NoRewrite { remaining = append(remaining, inner) continue } @@ -455,26 +437,26 @@ func pushOrMergeSubQueryContainer(ctx *plancontext.PlanningContext, in *SubQuery } if len(remaining) == 0 { - return in.Outer, result, nil + return in.Outer, result } in.Inner = remaining - return in, result, nil + return in, result } func tryMergeSubQuery( ctx *plancontext.PlanningContext, subQuery *SubQuery, outer *Route, -) (newOuter ops.Operator, result *rewrite.ApplyResult, err error) { +) (newOuter Operator, result *ApplyResult) { switch inner := subQuery.Subquery.(type) { case *Route: return tryMergeSubqueryWithOuter(ctx, subQuery, outer, inner) case *SubQueryContainer: return tryMergeSubqueriesRecursively(ctx, subQuery, outer, inner) } - return outer, rewrite.SameTree, nil + return outer, NoRewrite } // tryMergeSubqueriesRecursively attempts to merge a SubQueryContainer with the outer Route. 
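The subquery-planning hunks above all move to the (Operator, *ApplyResult) convention: a visitor returns NoRewrite when it left the tree alone and Rewrote("...") when it changed something, and impossible states panic instead of returning an error. A toy sketch of that protocol; operator, applyResult, noRewrite, rewrote and tryMerge are simplified stand-ins invented for this example, not the planner's real types.

package main

import "fmt"

type operator interface{ name() string }

// applyResult records whether (and why) a rewrite happened.
type applyResult struct{ reason string }

// noRewrite reports that the visitor left the tree untouched.
var noRewrite *applyResult

// rewrote records why the tree changed, mirroring Rewrote(...).
func rewrote(reason string) *applyResult { return &applyResult{reason: reason} }

type route struct{ merged bool }

func (r *route) name() string { return "route" }

// tryMerge has the shape of the visitors in this diff: it returns the
// (possibly replaced) operator together with noRewrite or a rewrote result.
func tryMerge(op operator) (operator, *applyResult) {
	r, ok := op.(*route)
	if !ok || r.merged {
		return op, noRewrite
	}
	r.merged = true
	return r, rewrote("merged routes into a single operator")
}

func main() {
	op, res := tryMerge(&route{})
	if res == noRewrite {
		fmt.Println("no change")
		return
	}
	fmt.Printf("%s: %s\n", op.name(), res.reason)
}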
@@ -483,7 +465,7 @@ func tryMergeSubqueriesRecursively( subQuery *SubQuery, outer *Route, inner *SubQueryContainer, -) (ops.Operator, *rewrite.ApplyResult, error) { +) (Operator, *ApplyResult) { exprs := subQuery.GetMergePredicates() merger := &subqueryRouteMerger{ outer: outer, @@ -492,32 +474,29 @@ func tryMergeSubqueriesRecursively( } op := mergeSubqueryInputs(ctx, inner.Outer, outer, exprs, merger) if op == nil { - return outer, rewrite.SameTree, nil + return outer, NoRewrite } op = Clone(op).(*Route) op.Source = outer.Source - var finalResult *rewrite.ApplyResult + var finalResult *ApplyResult for _, subq := range inner.Inner { - newOuter, res, err := tryMergeSubQuery(ctx, subq, op) - if err != nil { - return nil, nil, err - } - if res == rewrite.SameTree { + newOuter, res := tryMergeSubQuery(ctx, subq, op) + if res == NoRewrite { // we failed to merge one of the inners - we need to abort - return nil, rewrite.SameTree, nil + return nil, NoRewrite } op = newOuter.(*Route) finalResult = finalResult.Merge(res) } op.Source = &Filter{Source: outer.Source, Predicates: []sqlparser.Expr{subQuery.Original}} - return op, finalResult.Merge(rewrite.NewTree("merge outer of two subqueries")), nil + return op, finalResult.Merge(Rewrote("merge outer of two subqueries")) } -func tryMergeSubqueryWithOuter(ctx *plancontext.PlanningContext, subQuery *SubQuery, outer *Route, inner ops.Operator) (ops.Operator, *rewrite.ApplyResult, error) { +func tryMergeSubqueryWithOuter(ctx *plancontext.PlanningContext, subQuery *SubQuery, outer *Route, inner Operator) (Operator, *ApplyResult) { if updOp, ok := outer.Source.(*Update); ok && mergingIsBlocked(subQuery, updOp) { - return outer, rewrite.SameTree, nil + return outer, NoRewrite } exprs := subQuery.GetMergePredicates() merger := &subqueryRouteMerger{ @@ -527,13 +506,13 @@ func tryMergeSubqueryWithOuter(ctx *plancontext.PlanningContext, subQuery *SubQu } op := mergeSubqueryInputs(ctx, inner, outer, exprs, merger) if op == nil { - return outer, rewrite.SameTree, nil + return outer, NoRewrite } if !subQuery.IsProjection { op.Source = &Filter{Source: outer.Source, Predicates: []sqlparser.Expr{subQuery.Original}} } ctx.MergedSubqueries = append(ctx.MergedSubqueries, subQuery.originalSubquery) - return op, rewrite.NewTree("merged subquery with outer"), nil + return op, Rewrote("merged subquery with outer") } // This checked if subquery is part of the changed vindex values. Subquery cannot be merged with the outer route. 
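A recurring pattern in these hunks is that failures which used to be returned as errors are now raised with panic (for example the panic(err) in settleSubqueries and the panic(vterrors.VT13001(...)) in extractLHSExpr above); presumably a deferred recover somewhere above the operator-building code turns such panics back into ordinary planning errors. A hedged, self-contained sketch of what such a boundary could look like; buildOperator and plan are hypothetical names for this illustration, not the real vtgate entry points.

package main

import (
	"errors"
	"fmt"
)

// buildOperator stands in for the operator-building code after this refactor:
// unsupported constructs panic with an error value instead of returning
// (Operator, error).
func buildOperator(unsupported bool) string {
	if unsupported {
		panic(errors.New("unsupported construct"))
	}
	return "operator tree"
}

// plan is the boundary where panics are converted back into errors.
func plan(unsupported bool) (p string, err error) {
	defer func() {
		if r := recover(); r != nil {
			if e, ok := r.(error); ok {
				err = e
				return
			}
			err = fmt.Errorf("unexpected panic: %v", r)
		}
	}()
	return buildOperator(unsupported), nil
}

func main() {
	if _, err := plan(true); err != nil {
		fmt.Println("planning failed:", err)
	}
}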
@@ -546,21 +525,18 @@ func mergingIsBlocked(subQuery *SubQuery, updOp *Update) bool { return false } -func pushOrMerge(ctx *plancontext.PlanningContext, outer ops.Operator, inner *SubQuery) (ops.Operator, *rewrite.ApplyResult, error) { +func pushOrMerge(ctx *plancontext.PlanningContext, outer Operator, inner *SubQuery) (Operator, *ApplyResult) { switch o := outer.(type) { case *Route: return tryMergeSubQuery(ctx, inner, o) case *ApplyJoin: - join, applyResult, err := tryPushSubQueryInJoin(ctx, inner, o) - if err != nil { - return nil, nil, err - } + join, applyResult := tryPushSubQueryInJoin(ctx, inner, o) if join == nil { - return outer, rewrite.SameTree, nil + return outer, NoRewrite } - return join, applyResult, nil + return join, applyResult default: - return outer, rewrite.SameTree, nil + return outer, NoRewrite } } @@ -618,10 +594,7 @@ func (s *subqueryRouteMerger) mergeShardedRouting(ctx *plancontext.PlanningConte }) } - routing, err := tr.resetRoutingLogic(ctx) - if err != nil { - panic(err) - } + routing := tr.resetRoutingLogic(ctx) return s.merge(ctx, old1, old2, routing) } @@ -637,7 +610,7 @@ func (s *subqueryRouteMerger) merge(ctx *plancontext.PlanningContext, inner, out } } _, isSharded := r.(*ShardedRouting) - var src ops.Operator + var src Operator if isSharded { src = s.outer.Source if !s.subq.IsProjection { @@ -665,7 +638,7 @@ func (s *subqueryRouteMerger) merge(ctx *plancontext.PlanningContext, inner, out // we should be able to use this method for all plan types, // but using this method for sharded queries introduces bugs // We really need to figure out why this is not working as expected -func (s *subqueryRouteMerger) rewriteASTExpression(ctx *plancontext.PlanningContext, inner *Route) ops.Operator { +func (s *subqueryRouteMerger) rewriteASTExpression(ctx *plancontext.PlanningContext, inner *Route) Operator { src := s.outer.Source stmt, _, err := ToSQL(ctx, inner.Source) if err != nil { @@ -726,7 +699,7 @@ func (s *subqueryRouteMerger) rewriteASTExpression(ctx *plancontext.PlanningCont // If they can be merged, a new operator with the merged routing is returned // If they cannot be merged, nil is returned. 
// These rules are similar but different from join merging -func mergeSubqueryInputs(ctx *plancontext.PlanningContext, in, out ops.Operator, joinPredicates []sqlparser.Expr, m *subqueryRouteMerger) *Route { +func mergeSubqueryInputs(ctx *plancontext.PlanningContext, in, out Operator, joinPredicates []sqlparser.Expr, m *subqueryRouteMerger) *Route { inRoute, outRoute := operatorsToRoutes(in, out) if inRoute == nil || outRoute == nil { return nil diff --git a/go/vt/vtgate/planbuilder/operators/table.go b/go/vt/vtgate/planbuilder/operators/table.go index e731ec54201..93b406232b2 100644 --- a/go/vt/vtgate/planbuilder/operators/table.go +++ b/go/vt/vtgate/planbuilder/operators/table.go @@ -22,7 +22,6 @@ import ( "vitess.io/vitess/go/slice" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" @@ -43,7 +42,7 @@ type ( ) // Clone implements the Operator interface -func (to *Table) Clone([]ops.Operator) ops.Operator { +func (to *Table) Clone([]Operator) Operator { var columns []*sqlparser.ColName for _, name := range to.Columns { columns = append(columns, sqlparser.CloneRefOfColName(name)) @@ -61,7 +60,7 @@ func (to *Table) introducesTableID() semantics.TableSet { } // AddPredicate implements the PhysicalOperator interface -func (to *Table) AddPredicate(_ *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (to *Table) AddPredicate(_ *plancontext.PlanningContext, expr sqlparser.Expr) Operator { return newFilter(to, expr) } @@ -92,7 +91,7 @@ func (to *Table) GetSelectExprs(ctx *plancontext.PlanningContext) sqlparser.Sele return transformColumnsToSelectExprs(ctx, to) } -func (to *Table) GetOrdering(*plancontext.PlanningContext) []ops.OrderBy { +func (to *Table) GetOrdering(*plancontext.PlanningContext) []OrderBy { return nil } diff --git a/go/vt/vtgate/planbuilder/operators/ops/to_json.go b/go/vt/vtgate/planbuilder/operators/to_json.go similarity index 98% rename from go/vt/vtgate/planbuilder/operators/ops/to_json.go rename to go/vt/vtgate/planbuilder/operators/to_json.go index 2b8b747f433..48b7fa9a247 100644 --- a/go/vt/vtgate/planbuilder/operators/ops/to_json.go +++ b/go/vt/vtgate/planbuilder/operators/to_json.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package ops +package operators import ( "fmt" diff --git a/go/vt/vtgate/planbuilder/operators/union.go b/go/vt/vtgate/planbuilder/operators/union.go index b3d866a00a3..454a6370c2f 100644 --- a/go/vt/vtgate/planbuilder/operators/union.go +++ b/go/vt/vtgate/planbuilder/operators/union.go @@ -23,12 +23,11 @@ import ( "vitess.io/vitess/go/slice" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) type Union struct { - Sources []ops.Operator + Sources []Operator // These are the select expressions coming from each source Selects []sqlparser.SelectExprs @@ -38,7 +37,7 @@ type Union struct { unionColumnsAsAlisedExprs []*sqlparser.AliasedExpr } -func newUnion(srcs []ops.Operator, sourceSelects []sqlparser.SelectExprs, columns sqlparser.SelectExprs, distinct bool) *Union { +func newUnion(srcs []Operator, sourceSelects []sqlparser.SelectExprs, columns sqlparser.SelectExprs, distinct bool) *Union { if columns == nil { panic("rt") } @@ -51,24 +50,24 @@ func newUnion(srcs []ops.Operator, sourceSelects []sqlparser.SelectExprs, column } // Clone implements the Operator interface -func (u *Union) Clone(inputs []ops.Operator) ops.Operator { +func (u *Union) Clone(inputs []Operator) Operator { newOp := *u newOp.Sources = inputs newOp.Selects = slices.Clone(u.Selects) return &newOp } -func (u *Union) GetOrdering(*plancontext.PlanningContext) []ops.OrderBy { +func (u *Union) GetOrdering(*plancontext.PlanningContext) []OrderBy { return nil } // Inputs implements the Operator interface -func (u *Union) Inputs() []ops.Operator { +func (u *Union) Inputs() []Operator { return u.Sources } // SetInputs implements the Operator interface -func (u *Union) SetInputs(ops []ops.Operator) { +func (u *Union) SetInputs(ops []Operator) { u.Sources = ops } @@ -93,7 +92,7 @@ Notice how `X.col = 42` has been translated to `foo = 42` and `id = 42` on respe The first SELECT of the union dictates the column names, and the second is whatever expression can be found on the same offset. The names of the RHS are discarded. 
*/ -func (u *Union) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (u *Union) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { offsets := make(map[string]int) sel, err := u.GetSelectFor(0) if err != nil { diff --git a/go/vt/vtgate/planbuilder/operators/union_merging.go b/go/vt/vtgate/planbuilder/operators/union_merging.go index 03b7a212893..953d779c6a1 100644 --- a/go/vt/vtgate/planbuilder/operators/union_merging.go +++ b/go/vt/vtgate/planbuilder/operators/union_merging.go @@ -19,14 +19,12 @@ package operators import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) // mergeUnionInputInAnyOrder merges sources the sources of the union in any order // can be used for UNION DISTINCT -func mergeUnionInputInAnyOrder(ctx *plancontext.PlanningContext, op *Union) ([]ops.Operator, []sqlparser.SelectExprs, error) { +func mergeUnionInputInAnyOrder(ctx *plancontext.PlanningContext, op *Union) ([]Operator, []sqlparser.SelectExprs) { sources := op.Sources selects := op.Selects @@ -43,10 +41,7 @@ func mergeUnionInputInAnyOrder(ctx *plancontext.PlanningContext, op *Union) ([]o } selA := selects[idx] selB := selects[j] - newPlan, sel, err := mergeUnionInputs(ctx, srcA, srcB, selA, selB, op.distinct) - if err != nil { - return nil, nil, err - } + newPlan, sel := mergeUnionInputs(ctx, srcA, srcB, selA, selB, op.distinct) if newPlan != nil { sources[idx] = newPlan selects[idx] = sel @@ -57,10 +52,10 @@ func mergeUnionInputInAnyOrder(ctx *plancontext.PlanningContext, op *Union) ([]o } } if !merged { - return sources, selects, nil + return sources, selects } - var newSources []ops.Operator + var newSources []Operator var newSelects []sqlparser.SelectExprs for i, source := range sources { if keep[i] || i <= idx { @@ -73,10 +68,10 @@ func mergeUnionInputInAnyOrder(ctx *plancontext.PlanningContext, op *Union) ([]o selects = newSelects } - return sources, selects, nil + return sources, selects } -func mergeUnionInputsInOrder(ctx *plancontext.PlanningContext, op *Union) ([]ops.Operator, []sqlparser.SelectExprs, error) { +func mergeUnionInputsInOrder(ctx *plancontext.PlanningContext, op *Union) ([]Operator, []sqlparser.SelectExprs) { sources := op.Sources selects := op.Selects for { @@ -85,10 +80,7 @@ func mergeUnionInputsInOrder(ctx *plancontext.PlanningContext, op *Union) ([]ops j := i + 1 srcA, selA := sources[i], selects[i] srcB, selB := sources[j], selects[j] - newPlan, sel, err := mergeUnionInputs(ctx, srcA, srcB, selA, selB, op.distinct) - if err != nil { - return nil, nil, err - } + newPlan, sel := mergeUnionInputs(ctx, srcA, srcB, selA, selB, op.distinct) if newPlan != nil { sources[i] = newPlan selects[i] = sel @@ -102,7 +94,7 @@ func mergeUnionInputsInOrder(ctx *plancontext.PlanningContext, op *Union) ([]ops } } - return sources, selects, nil + return sources, selects } // mergeUnionInputs checks whether two operators can be merged into a single one. 
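mergeUnionInputInAnyOrder above keeps trying to merge any pair of union sources and restarts until no pair can be merged, now without threading an error value through every call. A self-contained sketch of that pairwise loop; source and tryMergePair are invented placeholders, and "same keyspace" is only a stand-in for the real merge conditions.

package main

import "fmt"

type source struct{ keyspace string }

// tryMergePair is a placeholder merge test: here two sources merge when they
// target the same keyspace.
func tryMergePair(a, b source) (source, bool) {
	if a.keyspace == b.keyspace {
		return a, true
	}
	return source{}, false
}

// mergeAnyOrder mirrors the shape of mergeUnionInputInAnyOrder: keep merging
// pairs in any order until no further merge succeeds.
func mergeAnyOrder(sources []source) []source {
	for {
		merged := false
		for i := 0; i < len(sources) && !merged; i++ {
			for j := i + 1; j < len(sources); j++ {
				if m, ok := tryMergePair(sources[i], sources[j]); ok {
					sources[i] = m
					sources = append(sources[:j], sources[j+1:]...)
					merged = true
					break
				}
			}
		}
		if !merged {
			return sources
		}
	}
}

func main() {
	out := mergeAnyOrder([]source{{"ks1"}, {"ks2"}, {"ks1"}})
	fmt.Println(len(out)) // 2
}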
@@ -111,13 +103,13 @@ func mergeUnionInputsInOrder(ctx *plancontext.PlanningContext, op *Union) ([]ops // this function is very similar to mergeJoinInputs func mergeUnionInputs( ctx *plancontext.PlanningContext, - lhs, rhs ops.Operator, + lhs, rhs Operator, lhsExprs, rhsExprs sqlparser.SelectExprs, distinct bool, -) (ops.Operator, sqlparser.SelectExprs, error) { +) (Operator, sqlparser.SelectExprs) { lhsRoute, rhsRoute, routingA, routingB, a, b, sameKeyspace := prepareInputRoutes(lhs, rhs) if lhsRoute == nil { - return nil, nil, nil + return nil, nil } switch { @@ -134,12 +126,12 @@ func mergeUnionInputs( return createMergedUnion(ctx, lhsRoute, rhsRoute, lhsExprs, rhsExprs, distinct, routingA) case a == sharded && b == sharded && sameKeyspace: - res, exprs, err := tryMergeUnionShardedRouting(ctx, lhsRoute, rhsRoute, lhsExprs, rhsExprs, distinct) - if err != nil || res != nil { - return res, exprs, err + res, exprs := tryMergeUnionShardedRouting(ctx, lhsRoute, rhsRoute, lhsExprs, rhsExprs, distinct) + if res != nil { + return res, exprs } } - return nil, nil, nil + return nil, nil } func tryMergeUnionShardedRouting( @@ -147,7 +139,7 @@ func tryMergeUnionShardedRouting( routeA, routeB *Route, exprsA, exprsB sqlparser.SelectExprs, distinct bool, -) (ops.Operator, sqlparser.SelectExprs, error) { +) (Operator, sqlparser.SelectExprs) { tblA := routeA.Routing.(*ShardedRouting) tblB := routeB.Routing.(*ShardedRouting) @@ -173,7 +165,7 @@ func tryMergeUnionShardedRouting( } } - return nil, nil, nil + return nil, nil } func createMergedUnion( @@ -181,7 +173,7 @@ func createMergedUnion( lhsRoute, rhsRoute *Route, lhsExprs, rhsExprs sqlparser.SelectExprs, distinct bool, - routing Routing) (ops.Operator, sqlparser.SelectExprs, error) { + routing Routing) (Operator, sqlparser.SelectExprs) { // if there are `*` on either side, or a different number of SelectExpr items, // we give up aligning the expressions and trust that we can push everything down @@ -210,16 +202,16 @@ func createMergedUnion( ctx.SemTable.Recursive[col] = deps } - union := newUnion([]ops.Operator{lhsRoute.Source, rhsRoute.Source}, []sqlparser.SelectExprs{lhsExprs, rhsExprs}, cols, distinct) + union := newUnion([]Operator{lhsRoute.Source, rhsRoute.Source}, []sqlparser.SelectExprs{lhsExprs, rhsExprs}, cols, distinct) selectExprs := unionSelects(lhsExprs) return &Route{ Source: union, MergedWith: []*Route{rhsRoute}, Routing: routing, - }, selectExprs, nil + }, selectExprs } -func compactUnion(u *Union) *rewrite.ApplyResult { +func compactUnion(u *Union) *ApplyResult { if u.distinct { // first we remove unnecessary DISTINCTs for idx, source := range u.Sources { @@ -231,7 +223,7 @@ func compactUnion(u *Union) *rewrite.ApplyResult { } } - var newSources []ops.Operator + var newSources []Operator var newSelects []sqlparser.SelectExprs merged := false @@ -250,10 +242,10 @@ func compactUnion(u *Union) *rewrite.ApplyResult { } if !merged { - return rewrite.SameTree + return NoRewrite } u.Sources = newSources u.Selects = newSelects - return rewrite.NewTree("merged UNIONs") + return Rewrote("merged UNIONs") } diff --git a/go/vt/vtgate/planbuilder/operators/update.go b/go/vt/vtgate/planbuilder/operators/update.go index 8868e83c247..ccfdddc3ea9 100644 --- a/go/vt/vtgate/planbuilder/operators/update.go +++ b/go/vt/vtgate/planbuilder/operators/update.go @@ -28,7 +28,6 @@ import ( "vitess.io/vitess/go/vt/sysvars" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" 
"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" @@ -71,14 +70,14 @@ func (u *Update) introducesTableID() semantics.TableSet { } // Clone implements the Operator interface -func (u *Update) Clone([]ops.Operator) ops.Operator { +func (u *Update) Clone([]Operator) Operator { upd := *u upd.Assignments = slices.Clone(u.Assignments) upd.ChangedVindexValues = maps.Clone(u.ChangedVindexValues) return &upd } -func (u *Update) GetOrdering(*plancontext.PlanningContext) []ops.OrderBy { +func (u *Update) GetOrdering(*plancontext.PlanningContext) []OrderBy { return nil } @@ -100,51 +99,39 @@ func (u *Update) ShortDescription() string { return strings.Join(s, " ") } -func createOperatorFromUpdate(ctx *plancontext.PlanningContext, updStmt *sqlparser.Update) (ops.Operator, error) { - tableInfo, qt, err := createQueryTableForDML(ctx, updStmt.TableExprs[0], updStmt.Where) - if err != nil { - return nil, err - } +func createOperatorFromUpdate(ctx *plancontext.PlanningContext, updStmt *sqlparser.Update) Operator { + tableInfo, qt := createQueryTableForDML(ctx, updStmt.TableExprs[0], updStmt.Where) - vindexTable, routing, err := buildVindexTableForDML(ctx, tableInfo, qt, "update") - if err != nil { - return nil, err - } + vindexTable, routing := buildVindexTableForDML(ctx, tableInfo, qt, "update") updClone := sqlparser.CloneRefOfUpdate(updStmt) - updOp, err := createUpdateOperator(ctx, updStmt, vindexTable, qt, routing) - if err != nil { - return nil, err - } + updOp := createUpdateOperator(ctx, updStmt, vindexTable, qt, routing) parentFks := ctx.SemTable.GetParentForeignKeysList() childFks := ctx.SemTable.GetChildForeignKeysList() if len(childFks) == 0 && len(parentFks) == 0 { - return updOp, nil + return updOp } // If the delete statement has a limit, we don't support it yet. if updStmt.Limit != nil { - return nil, vterrors.VT12001("update with limit with foreign key constraints") + panic(vterrors.VT12001("update with limit with foreign key constraints")) } // Now we check if any of the foreign key columns that are being udpated have dependencies on other updated columns. // This is unsafe, and we currently don't support this in Vitess. 
-	if err = ctx.SemTable.ErrIfFkDependentColumnUpdated(updStmt.Exprs); err != nil {
-		return nil, err
+	if err := ctx.SemTable.ErrIfFkDependentColumnUpdated(updStmt.Exprs); err != nil {
+		panic(err)
 	}
 
 	return buildFkOperator(ctx, updOp, updClone, parentFks, childFks, vindexTable)
 }
 
-func createUpdateOperator(ctx *plancontext.PlanningContext, updStmt *sqlparser.Update, vindexTable *vindexes.Table, qt *QueryTable, routing Routing) (ops.Operator, error) {
+func createUpdateOperator(ctx *plancontext.PlanningContext, updStmt *sqlparser.Update, vindexTable *vindexes.Table, qt *QueryTable, routing Routing) Operator {
 	sqc := &SubQueryBuilder{}
 	assignments := make([]SetExpr, len(updStmt.Exprs))
 	for idx, updExpr := range updStmt.Exprs {
-		expr, subqs, err := sqc.pullOutValueSubqueries(ctx, updExpr.Expr, qt.ID, true)
-		if err != nil {
-			return nil, err
-		}
+		expr, subqs := sqc.pullOutValueSubqueries(ctx, updExpr.Expr, qt.ID, true)
 		if len(subqs) == 0 {
 			expr = updExpr.Expr
 		}
@@ -158,10 +145,7 @@ func createUpdateOperator(ctx *plancontext.PlanningContext, updStmt *sqlparser.U
 		}
 	}
 
-	vp, cvv, ovq, subQueriesArgOnChangedVindex, err := getUpdateVindexInformation(ctx, updStmt, vindexTable, qt.ID, assignments)
-	if err != nil {
-		return nil, err
-	}
+	vp, cvv, ovq, subQueriesArgOnChangedVindex := getUpdateVindexInformation(ctx, updStmt, vindexTable, qt.ID, assignments)
 
 	tr, ok := routing.(*ShardedRouting)
 	if ok {
@@ -169,20 +153,15 @@ func createUpdateOperator(ctx *plancontext.PlanningContext, updStmt *sqlparser.U
 	}
 
 	for _, predicate := range qt.Predicates {
-		if subq, err := sqc.handleSubquery(ctx, predicate, qt.ID); err != nil {
-			return nil, err
-		} else if subq != nil {
+		if subq := sqc.handleSubquery(ctx, predicate, qt.ID); subq != nil {
 			continue
 		}
 
-		routing, err = UpdateRoutingLogic(ctx, predicate, routing)
-		if err != nil {
-			return nil, err
-		}
+		routing = UpdateRoutingLogic(ctx, predicate, routing)
 	}
 
 	if routing.OpCode() == engine.Scatter && updStmt.Limit != nil {
 		// TODO systay: we should probably check for other op code types - IN could also hit multiple shards (2022-04-07)
-		return nil, vterrors.VT12001("multi shard UPDATE with LIMIT")
+		panic(vterrors.VT12001("multi shard UPDATE with LIMIT"))
 	}
 
 	route := &Route{
@@ -201,23 +180,20 @@ func createUpdateOperator(ctx *plancontext.PlanningContext, updStmt *sqlparser.U
 		Comments: updStmt.Comments,
 	}
 
-	decorator := func(op ops.Operator) ops.Operator {
+	decorator := func(op Operator) Operator {
 		return &LockAndComment{
 			Source: op,
 			Lock:   sqlparser.ShareModeLock,
 		}
 	}
 
-	return sqc.getRootOperator(route, decorator), nil
+	return sqc.getRootOperator(route, decorator)
 }
 
-func buildFkOperator(ctx *plancontext.PlanningContext, updOp ops.Operator, updClone *sqlparser.Update, parentFks []vindexes.ParentFKInfo, childFks []vindexes.ChildFKInfo, updatedTable *vindexes.Table) (ops.Operator, error) {
+func buildFkOperator(ctx *plancontext.PlanningContext, updOp Operator, updClone *sqlparser.Update, parentFks []vindexes.ParentFKInfo, childFks []vindexes.ChildFKInfo, updatedTable *vindexes.Table) Operator {
 	restrictChildFks, cascadeChildFks := splitChildFks(childFks)
-	op, err := createFKCascadeOp(ctx, updOp, updClone, cascadeChildFks, updatedTable)
-	if err != nil {
-		return nil, err
-	}
+	op := createFKCascadeOp(ctx, updOp, updClone, cascadeChildFks, updatedTable)
 	return createFKVerifyOp(ctx, op, updClone, parentFks, restrictChildFks, updatedTable)
 }
 
@@ -241,9 +217,9 @@ func splitChildFks(fks []vindexes.ChildFKInfo) (restrictChildFks, cascadeChildFk
 	return
 }
 
-func createFKCascadeOp(ctx *plancontext.PlanningContext, parentOp ops.Operator, updStmt *sqlparser.Update, childFks []vindexes.ChildFKInfo, updatedTable *vindexes.Table) (ops.Operator, error) {
+func createFKCascadeOp(ctx *plancontext.PlanningContext, parentOp Operator, updStmt *sqlparser.Update, childFks []vindexes.ChildFKInfo, updatedTable *vindexes.Table) Operator {
 	if len(childFks) == 0 {
-		return parentOp, nil
+		return parentOp
 	}
 
 	var fkChildren []*FkChild
@@ -252,7 +228,7 @@ func createFKCascadeOp(ctx *plancontext.PlanningContext, parentOp ops.Operator,
 	for _, fk := range childFks {
 		// We should have already filtered out update restrict foreign keys.
 		if fk.OnUpdate.IsRestrict() {
-			return nil, vterrors.VT13001("ON UPDATE RESTRICT foreign keys should already be filtered")
+			panic(vterrors.VT13001("ON UPDATE RESTRICT foreign keys should already be filtered"))
 		}
 
 		// We need to select all the parent columns for the foreign key constraint, to use in the update of the child table.
@@ -275,23 +251,17 @@ func createFKCascadeOp(ctx *plancontext.PlanningContext, parentOp ops.Operator,
 			}
 		}
 
-		fkChild, err := createFkChildForUpdate(ctx, fk, selectOffsets, nonLiteralUpdateInfo, updatedTable)
-		if err != nil {
-			return nil, err
-		}
+		fkChild := createFkChildForUpdate(ctx, fk, selectOffsets, nonLiteralUpdateInfo, updatedTable)
 		fkChildren = append(fkChildren, fkChild)
 	}
 
-	selectionOp, err := createSelectionOp(ctx, selectExprs, updStmt.TableExprs, updStmt.Where, updStmt.OrderBy, nil, sqlparser.ForUpdateLockNoWait)
-	if err != nil {
-		return nil, err
-	}
+	selectionOp := createSelectionOp(ctx, selectExprs, updStmt.TableExprs, updStmt.Where, updStmt.OrderBy, nil, sqlparser.ForUpdateLockNoWait)
 
 	return &FkCascade{
 		Selection: selectionOp,
 		Children:  fkChildren,
 		Parent:    parentOp,
-	}, nil
+	}
 }
 
 // hasNonLiteralUpdate checks if any of the update expressions have a non-literal update.
@@ -404,7 +374,7 @@ func getCastTypeForColumn(updatedTable *vindexes.Table, updExpr *sqlparser.Updat
 }
 
 // createFkChildForUpdate creates the update query operator for the child table based on the foreign key constraints.
-func createFkChildForUpdate(ctx *plancontext.PlanningContext, fk vindexes.ChildFKInfo, selectOffsets []int, nonLiteralUpdateInfo []engine.NonLiteralUpdateInfo, updatedTable *vindexes.Table) (*FkChild, error) {
+func createFkChildForUpdate(ctx *plancontext.PlanningContext, fk vindexes.ChildFKInfo, selectOffsets []int, nonLiteralUpdateInfo []engine.NonLiteralUpdateInfo, updatedTable *vindexes.Table) *FkChild {
 	// Create a ValTuple of child column names
 	var valTuple sqlparser.ValTuple
 	for _, column := range fk.ChildColumns {
@@ -426,18 +396,14 @@ func createFkChildForUpdate(ctx *plancontext.PlanningContext, fk vindexes.ChildF
 		}
 	}
 
-	var childOp ops.Operator
-	var err error
+	var childOp Operator
 	switch fk.OnUpdate {
 	case sqlparser.Cascade:
-		childOp, err = buildChildUpdOpForCascade(ctx, fk, childWhereExpr, nonLiteralUpdateInfo, updatedTable)
+		childOp = buildChildUpdOpForCascade(ctx, fk, childWhereExpr, nonLiteralUpdateInfo, updatedTable)
 	case sqlparser.SetNull:
-		childOp, err = buildChildUpdOpForSetNull(ctx, fk, childWhereExpr, nonLiteralUpdateInfo, updatedTable)
+		childOp = buildChildUpdOpForSetNull(ctx, fk, childWhereExpr, nonLiteralUpdateInfo, updatedTable)
 	case sqlparser.SetDefault:
-		return nil, vterrors.VT09016()
-	}
-	if err != nil {
-		return nil, err
+		panic(vterrors.VT09016())
 	}
 
 	return &FkChild{
@@ -445,14 +411,14 @@ func createFkChildForUpdate(ctx *plancontext.PlanningContext, fk vindexes.ChildF
 		Cols:           selectOffsets,
 		Op:             childOp,
 		NonLiteralInfo: nonLiteralUpdateInfo,
-	}, nil
+	}
 }
 
 // buildChildUpdOpForCascade builds the child update statement operator for the CASCADE type foreign key constraint.
 // The query looks like this -
 //
 // `UPDATE SET WHERE IN ()`
-func buildChildUpdOpForCascade(ctx *plancontext.PlanningContext, fk vindexes.ChildFKInfo, childWhereExpr sqlparser.Expr, nonLiteralUpdateInfo []engine.NonLiteralUpdateInfo, updatedTable *vindexes.Table) (ops.Operator, error) {
+func buildChildUpdOpForCascade(ctx *plancontext.PlanningContext, fk vindexes.ChildFKInfo, childWhereExpr sqlparser.Expr, nonLiteralUpdateInfo []engine.NonLiteralUpdateInfo, updatedTable *vindexes.Table) Operator {
 	// The update expressions are the same as the update expressions in the parent update query
 	// with the column names replaced with the child column names.
 	var childUpdateExprs sqlparser.UpdateExprs
@@ -501,7 +467,7 @@ func buildChildUpdOpForSetNull(
 	childWhereExpr sqlparser.Expr,
 	nonLiteralUpdateInfo []engine.NonLiteralUpdateInfo,
 	updatedTable *vindexes.Table,
-) (ops.Operator, error) {
+) Operator {
 	// For the SET NULL type constraint, we need to set all the child columns to NULL.
 	var childUpdateExprs sqlparser.UpdateExprs
 	for _, column := range fk.ChildColumns {
@@ -557,23 +523,20 @@ func getParsedCommentsForFkChecks(ctx *plancontext.PlanningContext) (parsedComme
 // createFKVerifyOp creates the verify operator for the parent foreign key constraints.
 func createFKVerifyOp(
 	ctx *plancontext.PlanningContext,
-	childOp ops.Operator,
+	childOp Operator,
 	updStmt *sqlparser.Update,
 	parentFks []vindexes.ParentFKInfo,
 	restrictChildFks []vindexes.ChildFKInfo,
 	updatedTable *vindexes.Table,
-) (ops.Operator, error) {
+) Operator {
 	if len(parentFks) == 0 && len(restrictChildFks) == 0 {
-		return childOp, nil
+		return childOp
 	}
 	var Verify []*VerifyOp
 
 	// This validates that new values exists on the parent table.
 	for _, fk := range parentFks {
-		op, err := createFkVerifyOpForParentFKForUpdate(ctx, updatedTable, updStmt, fk)
-		if err != nil {
-			return nil, err
-		}
+		op := createFkVerifyOpForParentFKForUpdate(ctx, updatedTable, updStmt, fk)
 		Verify = append(Verify, &VerifyOp{
 			Op:  op,
 			Typ: engine.ParentVerify,
@@ -581,10 +544,8 @@ func createFKVerifyOp(
 	}
 	// This validates that the old values don't exist on the child table.
 	for _, fk := range restrictChildFks {
-		op, err := createFkVerifyOpForChildFKForUpdate(ctx, updatedTable, updStmt, fk)
-		if err != nil {
-			return nil, err
-		}
+		op := createFkVerifyOpForChildFKForUpdate(ctx, updatedTable, updStmt, fk)
+
 		Verify = append(Verify, &VerifyOp{
 			Op:  op,
 			Typ: engine.ChildVerify,
@@ -594,7 +555,7 @@ func createFKVerifyOp(
 	return &FkVerify{
 		Verify: Verify,
 		Input:  childOp,
-	}, nil
+	}
 }
 
 // Each parent foreign key constraint is verified by an anti join query of the form:
@@ -608,11 +569,11 @@ func createFKVerifyOp(
 // where Parent.p1 is null and Parent.p2 is null and Child.id = 1 and Child.c2 + 1 is not null
 // and Child.c2 is not null and not ((Child.c1) <=> (Child.c2 + 1))
 // limit 1
-func createFkVerifyOpForParentFKForUpdate(ctx *plancontext.PlanningContext, updatedTable *vindexes.Table, updStmt *sqlparser.Update, pFK vindexes.ParentFKInfo) (ops.Operator, error) {
+func createFkVerifyOpForParentFKForUpdate(ctx *plancontext.PlanningContext, updatedTable *vindexes.Table, updStmt *sqlparser.Update, pFK vindexes.ParentFKInfo) Operator {
 	childTblExpr := updStmt.TableExprs[0].(*sqlparser.AliasedTableExpr)
 	childTbl, err := childTblExpr.TableName()
 	if err != nil {
-		return nil, err
+		panic(err)
 	}
 	parentTbl := pFK.Table.GetTableName()
 	var whereCond sqlparser.Expr
@@ -708,16 +669,16 @@ func createFkVerifyOpForParentFKForUpdate(ctx *plancontext.PlanningContext, upda
 // verify query:
 // select 1 from Child join Parent on Parent.p1 = Child.c1 and Parent.p2 = Child.c2
 // where Parent.id = 1 and ((Parent.col + 1) IS NULL OR (child.c1) NOT IN ((Parent.col + 1))) limit 1
-func createFkVerifyOpForChildFKForUpdate(ctx *plancontext.PlanningContext, updatedTable *vindexes.Table, updStmt *sqlparser.Update, cFk vindexes.ChildFKInfo) (ops.Operator, error) {
+func createFkVerifyOpForChildFKForUpdate(ctx *plancontext.PlanningContext, updatedTable *vindexes.Table, updStmt *sqlparser.Update, cFk vindexes.ChildFKInfo) Operator {
 	// ON UPDATE RESTRICT foreign keys that require validation, should only be allowed in the case where we
 	// are verifying all the FKs on vtgate level.
 	if !ctx.VerifyAllFKs {
-		return nil, vterrors.VT12002()
+		panic(vterrors.VT12002())
 	}
 	parentTblExpr := updStmt.TableExprs[0].(*sqlparser.AliasedTableExpr)
 	parentTbl, err := parentTblExpr.TableName()
 	if err != nil {
-		return nil, err
+		panic(err)
 	}
 	childTbl := cFk.Table.GetTableName()
 	var joinCond sqlparser.Expr
diff --git a/go/vt/vtgate/planbuilder/operators/utils_test.go b/go/vt/vtgate/planbuilder/operators/utils_test.go
index a7e25c1337c..596489150da 100644
--- a/go/vt/vtgate/planbuilder/operators/utils_test.go
+++ b/go/vt/vtgate/planbuilder/operators/utils_test.go
@@ -20,33 +20,32 @@ import (
 	"slices"
 
 	"vitess.io/vitess/go/vt/sqlparser"
-	"vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops"
 	"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
 	"vitess.io/vitess/go/vt/vtgate/semantics"
 )
 
 type fakeOp struct {
 	id     semantics.TableSet
-	inputs []ops.Operator
+	inputs []Operator
 	cols   []*sqlparser.AliasedExpr
 }
 
-var _ ops.Operator = (*fakeOp)(nil)
+var _ Operator = (*fakeOp)(nil)
 
-func (f *fakeOp) Clone(inputs []ops.Operator) ops.Operator {
+func (f *fakeOp) Clone(inputs []Operator) Operator {
 	return f
 }
 
-func (f *fakeOp) Inputs() []ops.Operator {
+func (f *fakeOp) Inputs() []Operator {
 	return f.inputs
 }
 
-func (f *fakeOp) SetInputs(operators []ops.Operator) {
+func (f *fakeOp) SetInputs(operators []Operator) {
 	// TODO implement me
 	panic("implement me")
 }
 
-func (f *fakeOp) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator {
+func (f *fakeOp) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator {
 	// TODO implement me
 	panic("implement me")
 }
@@ -80,7 +79,7 @@ func (f *fakeOp) ShortDescription() string {
 	panic("implement me")
 }
 
-func (f *fakeOp) GetOrdering(ctx *plancontext.PlanningContext) []ops.OrderBy {
+func (f *fakeOp) GetOrdering(ctx *plancontext.PlanningContext) []OrderBy {
 	// TODO implement me
 	panic("implement me")
 }
diff --git a/go/vt/vtgate/planbuilder/operators/vindex.go b/go/vt/vtgate/planbuilder/operators/vindex.go
index 2fe2bf4d3e5..f8667b45aba 100644
--- a/go/vt/vtgate/planbuilder/operators/vindex.go
+++ b/go/vt/vtgate/planbuilder/operators/vindex.go
@@ -21,7 +21,6 @@ import (
 	"vitess.io/vitess/go/vt/sqlparser"
 	"vitess.io/vitess/go/vt/vterrors"
 	"vitess.io/vitess/go/vt/vtgate/engine"
-	"vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops"
 	"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
 	"vitess.io/vitess/go/vt/vtgate/semantics"
 	"vitess.io/vitess/go/vt/vtgate/vindexes"
@@ -57,7 +56,7 @@ func (v *Vindex) introducesTableID() semantics.TableSet {
 }
 
 // Clone implements the Operator interface
-func (v *Vindex) Clone([]ops.Operator) ops.Operator {
+func (v *Vindex) Clone([]Operator) Operator {
 	clone := *v
 	return &clone
 }
@@ -101,7 +100,7 @@ func (v *Vindex) GetSelectExprs(ctx *plancontext.PlanningContext) sqlparser.Sele
 	return transformColumnsToSelectExprs(ctx, v)
 }
 
-func (v *Vindex) GetOrdering(*plancontext.PlanningContext) []ops.OrderBy {
+func (v *Vindex) GetOrdering(*plancontext.PlanningContext) []OrderBy {
 	return nil
 }
 
@@ -121,7 +120,7 @@ func (v *Vindex) CheckValid() error {
 	return nil
 }
 
-func (v *Vindex) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator {
+func (v *Vindex) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator {
 	for _, e := range sqlparser.SplitAndExpression(nil, expr) {
 		deps := ctx.SemTable.RecursiveDeps(e)
 		if deps.NumberOfTables() > 1 {
diff --git a/go/vt/vtgate/planbuilder/plan_test.go b/go/vt/vtgate/planbuilder/plan_test.go
index a488dd1e470..b5c814b2ea6 100644
--- a/go/vt/vtgate/planbuilder/plan_test.go
+++ b/go/vt/vtgate/planbuilder/plan_test.go
@@ -41,7 +41,7 @@ import (
 	"vitess.io/vitess/go/vt/sqlparser"
 	"vitess.io/vitess/go/vt/topo/memorytopo"
 	"vitess.io/vitess/go/vt/vtgate/engine"
-	oprewriters "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite"
+	"vitess.io/vitess/go/vt/vtgate/planbuilder/operators"
 	"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
 	"vitess.io/vitess/go/vt/vtgate/semantics"
 	"vitess.io/vitess/go/vt/vtgate/vindexes"
@@ -261,7 +261,7 @@ func TestViews(t *testing.T) {
 }
 
 func TestOne(t *testing.T) {
-	reset := oprewriters.EnableDebugPrinting()
+	reset := operators.EnableDebugPrinting()
 	defer reset()
 
 	lv := loadSchema(t, "vschemas/schema.json", true)
@@ -319,7 +319,7 @@ func TestOneWithUserAsDefault(t *testing.T) {
 }
 
 func TestOneWithTPCHVSchema(t *testing.T) {
-	reset := oprewriters.EnableDebugPrinting()
+	reset := operators.EnableDebugPrinting()
 	defer reset()
 
 	vschema := &vschemawrapper.VSchemaWrapper{
 		V: loadSchema(t, "vschemas/tpch_schema.json", true),
diff --git a/go/vt/vtgate/planbuilder/select.go b/go/vt/vtgate/planbuilder/select.go
index 44976815bd2..77c883325c5 100644
--- a/go/vt/vtgate/planbuilder/select.go
+++ b/go/vt/vtgate/planbuilder/select.go
@@ -26,7 +26,6 @@ import (
 	"vitess.io/vitess/go/vt/vtgate/engine"
 	"vitess.io/vitess/go/vt/vtgate/evalengine"
 	"vitess.io/vitess/go/vt/vtgate/planbuilder/operators"
-	"vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops"
 	"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
 	"vitess.io/vitess/go/vt/vtgate/semantics"
 )
@@ -225,7 +224,7 @@ func newBuildSelectPlan(
 	return plan, operators.TablesUsed(op), nil
 }
 
-func createSelectOperator(ctx *plancontext.PlanningContext, selStmt sqlparser.SelectStatement, reservedVars *sqlparser.ReservedVars) (ops.Operator, error) {
+func createSelectOperator(ctx *plancontext.PlanningContext, selStmt sqlparser.SelectStatement, reservedVars *sqlparser.ReservedVars) (operators.Operator, error) {
 	err := queryRewrite(ctx.SemTable, reservedVars, selStmt)
 	if err != nil {
 		return nil, err
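
Note on the recurring pattern in the hunks above: the operator-building helpers (createOperatorFromUpdate, createUpdateOperator, the FK cascade and verify builders) no longer return an error; they panic with a vterrors value instead, while their callers keep plain return values. Below is a minimal, self-contained sketch of how such panics can be converted back into an ordinary error at a planner entry point. It assumes a recover-based handler; the names panicToErr, buildOperator, and plan are illustrative only and are not identifiers from this patch.

// Sketch only (assumed pattern, not part of the patch): panics carrying an error
// are recovered at the boundary and turned back into a normal error return.
package main

import (
	"errors"
	"fmt"
)

// errUnsupported stands in for a vterrors value such as VT12001; illustrative only.
var errUnsupported = errors.New("VT12001: update with limit with foreign key constraints")

// buildOperator mimics the rewritten helpers: it panics instead of returning an error.
func buildOperator(hasLimit bool) string {
	if hasLimit {
		panic(errUnsupported)
	}
	return "route"
}

// panicToErr recovers an error panic and writes it to the caller's named return.
// Non-error panics are re-raised untouched.
func panicToErr(err *error) {
	if r := recover(); r != nil {
		e, ok := r.(error)
		if !ok {
			panic(r)
		}
		*err = e
	}
}

// plan is the entry point: callers still see a conventional (result, error) signature.
func plan(hasLimit bool) (op string, err error) {
	defer panicToErr(&err)
	op = buildOperator(hasLimit)
	return op, nil
}

func main() {
	if _, err := plan(true); err != nil {
		fmt.Println("planning failed:", err) // prints the VT12001 message instead of crashing
	}
}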