diff --git a/component/cli/command/verify.go b/component/cli/command/verify.go
index de8ec11..b2d9bec 100644
--- a/component/cli/command/verify.go
+++ b/component/cli/command/verify.go
@@ -181,6 +181,7 @@ type AppVerifyGen struct {
 	*AppVerify
 	task      string
 	outputDir string
+	force     bool
 }
 
 func (a *AppVerify) AppVerifyGen() component.Cmder {
@@ -198,6 +199,7 @@ func (a *AppVerifyGen) Cmd() *cobra.Command {
 	}
 	cmd.Flags().StringVarP(&a.task, "task", "t", "", "the data compare task")
 	cmd.Flags().StringVarP(&a.outputDir, "outputDir", "o", "/tmp", "the data compare task output file dir")
+	cmd.Flags().BoolVarP(&a.force, "force", "f", false, "force the data compare task to ignore the task status success check and output the fixed file")
 	return cmd
 }
 
@@ -235,7 +237,7 @@ func (a *AppVerifyGen) RunE(cmd *cobra.Command, args []string) error {
 		}
 	}
 
-	err := service.GenDataCompareTask(context.Background(), a.Server, a.task, a.outputDir)
+	err := service.GenDataCompareTask(context.Background(), a.Server, a.task, a.outputDir, a.force)
 	if err != nil {
 		if errors.Is(err, errors.New(constant.TaskDatabaseStatusEqual)) {
 			fmt.Printf("Status: %s\n", cyan.Sprint("success"))
diff --git a/component/cli/migrate/sql.go b/component/cli/migrate/sql.go
index 1e4ebab..0c2e336 100644
--- a/component/cli/migrate/sql.go
+++ b/component/cli/migrate/sql.go
@@ -45,6 +45,7 @@ type SqlMigrateParam struct {
 	SqlThreadT           uint64 `toml:"sql-thread-t" json:"sqlThreadT"`
 	SqlHintT             string `toml:"sql-hint-t" json:"sqlHintT"`
 	CallTimeout          uint64 `toml:"call-timeout" json:"callTimeout"`
+	EnableCheckpoint     bool   `toml:"enable-checkpoint" json:"enableCheckpoint"`
 	EnableConsistentRead bool   `toml:"enable-consistent-read" json:"enableConsistentRead"`
 	EnableSafeMode       bool   `toml:"enable-safe-mode" json:"enableSafeMode"`
 }
diff --git a/database/data_compare.go b/database/data_compare.go
index a52a59b..180aca3 100644
--- a/database/data_compare.go
+++ b/database/data_compare.go
@@ -27,7 +27,7 @@ type IDatabaseDataCompare interface {
 	GetDatabaseTableStatisticsBucket(schemeNameS, tableNameS string, consColumns map[string]string) (map[string][]structure.Bucket, error)
 	GetDatabaseTableStatisticsHistogram(schemeNameS, tableNameS string, consColumns map[string]string) (map[string]structure.Histogram, error)
 	GetDatabaseTableColumnProperties(schemaNameS, tableNameS string, columnNameSli []string) ([]map[string]string, error)
-	GetDatabaseTableHighestSelectivityIndex(schemaNameS, tableNameS string, compareCondField string, ignoreCondFields []string) (*structure.HighestBucket, error)
+	GetDatabaseTableHighestSelectivityIndex(schemaNameS, tableNameS string, compareCondField string, ignoreCondFields []string) (*structure.Selectivity, error)
 	GetDatabaseTableRandomValues(schemaNameS, tableNameS string, columns []string, conditions string, condArgs []interface{}, limit int, collations []string) ([][]string, error)
 	GetDatabaseTableCompareData(querySQL string, callTimeout int, dbCharsetS, dbCharsetT string, queryArgs []interface{}) ([]string, uint32, map[string]int64, error)
 }
diff --git a/database/data_migrate.go b/database/data_migrate.go
index ecbaf69..933f730 100644
--- a/database/data_migrate.go
+++ b/database/data_migrate.go
@@ -31,7 +31,7 @@ type IDatabaseDataMigrate interface {
 	GetDatabaseTableColumnNameSqlDimensions(sqlStr string) ([]string, map[string]string, map[string]string, error)
 	GetDatabaseTableRows(schemaName, tableName string) (uint64, error)
 	GetDatabaseTableSize(schemaName, tableName string) (float64, error)
-	GetDatabaseTableChunkTask(taskName, schemaName, tableName string, chunkSize uint64, callTimeout uint64) ([]map[string]string, error)
+	GetDatabaseTableChunkTask(taskName, schemaName, tableName string, chunkSize uint64, callTimeout uint64, batchSize int, dataChan chan []map[string]string) error
 	GetDatabaseTableChunkData(querySQL string, queryArgs []interface{}, batchSize, callTimeout int, dbCharsetS, dbCharsetT, columnDetailO string, dataChan chan []interface{}) error
 	GetDatabaseTableCsvData(querySQL string, queryArgs []interface{}, callTimeout int, taskFlow, dbCharsetS, dbCharsetT, columnDetailO string, escapeBackslash bool, nullValue, separator, delimiter string, dataChan chan []string) error
 }
diff --git a/database/database.go b/database/database.go
index 30a7d0d..71ef197 100644
--- a/database/database.go
+++ b/database/database.go
@@ -20,6 +20,7 @@ import (
 	"database/sql"
 	"github.com/wentaojin/dbms/database/postgresql"
 	"github.com/wentaojin/dbms/utils/structure"
+	"golang.org/x/sync/errgroup"
 	"strings"
 
 	"github.com/wentaojin/dbms/database/mysql"
@@ -57,6 +58,45 @@ type IDatabaseSchemaTableRule interface {
 	GenSchemaTableColumnSelectRule() (string, string, string, string, error)
 }
 
+// IDatabaseRunner is used to run a database table migrate task
+type IDatabaseRunner interface {
+	Init() error
+	Run() error
+	Resume() error
+}
+
+func IDatabaseRun(ctx context.Context, i IDatabaseRunner) error {
+	g, ctx := errgroup.WithContext(ctx)
+	g.Go(func() error {
+		err := i.Init()
+		if err != nil {
+			return err
+		}
+		return nil
+	})
+
+	g.Go(func() error {
+		err := i.Run()
+		if err != nil {
+			return err
+		}
+		return nil
+	})
+
+	g.Go(func() error {
+		err := i.Resume()
+		if err != nil {
+			return err
+		}
+		return nil
+	})
+
+	if err := g.Wait(); err != nil {
+		return err
+	}
+	return nil
+}
+
 func NewDatabase(ctx context.Context, datasource *datasource.Datasource, migrateOracleSchema string, callTimeout int64) (IDatabase, error) {
 	var (
 		database IDatabase
diff --git a/database/mysql/data_compare.go b/database/mysql/data_compare.go
index 012de35..b1102c7 100644
--- a/database/mysql/data_compare.go
+++ b/database/mysql/data_compare.go
@@ -211,7 +211,7 @@ func (d *Database) GetDatabaseTableStatisticsHistogram(schemaNameS, tableNameS s
 	return nil, fmt.Errorf("the database table statistics histograms doesn't supported, only support tidb database, version: [%v]", res[0]["VERSION"])
 }
 
-func (d *Database) GetDatabaseTableHighestSelectivityIndex(schemaNameS, tableNameS string, compareCondField string, ignoreCondFields []string) (*structure.HighestBucket, error) {
+func (d *Database) GetDatabaseTableHighestSelectivityIndex(schemaNameS, tableNameS string, compareCondField string, ignoreCondFields []string) (*structure.Selectivity, error) {
 	consColumns, err := d.GetDatabaseTableConstraintIndexColumn(schemaNameS, tableNameS)
 	if err != nil {
 		return nil, err
@@ -259,12 +259,12 @@ func (d *Database) GetDatabaseTableHighestSelectivityIndex(schemaNameS, tableNam
 		return nil, nil
 	}
 
-	highestBucket, err := structure.FindMatchDistinctCountBucket(sortHists, buckets, consColumns)
+	selectivity, err := structure.FindMatchDistinctCountBucket(sortHists, buckets, consColumns)
 	if err != nil {
 		return nil, err
 	}
 
-	properties, err := d.GetDatabaseTableColumnProperties(schemaNameS, tableNameS, highestBucket.IndexColumn)
+	properties, err := d.GetDatabaseTableColumnProperties(schemaNameS, tableNameS, selectivity.IndexColumn)
 	if err != nil {
 		return nil, err
 	}
@@ -273,7 +273,7 @@ func (d *Database) GetDatabaseTableHighestSelectivityIndex(schemaNameS, tableNam
 		columnCollations  []string
 		datetimePrecision []string
 	)
-	for _, c := range highestBucket.IndexColumn {
+	for _, c := range selectivity.IndexColumn {
 		for _, p := range properties {
 			if strings.EqualFold(p["COLUMN_NAME"], c) {
 				columnProps = append(columnProps, p["DATA_TYPE"])
@@ -294,10 +294,10 @@ func (d *Database) GetDatabaseTableHighestSelectivityIndex(schemaNameS, tableNam
 		}
 	}
 
-	highestBucket.ColumnDatatype = columnProps
-	highestBucket.ColumnCollation = columnCollations
-	highestBucket.DatetimePrecision = datetimePrecision
-	return highestBucket, nil
+	selectivity.ColumnDatatype = columnProps
+	selectivity.ColumnCollation = columnCollations
+	selectivity.DatetimePrecision = datetimePrecision
+	return selectivity, nil
 }
 
 func (d *Database) GetDatabaseTableRandomValues(schemaNameS, tableNameS string, columns []string, conditions string, condArgs []interface{}, limit int, collations []string) ([][]string, error) {
diff --git a/database/mysql/data_migrate.go b/database/mysql/data_migrate.go
index d22234c..e10cc96 100644
--- a/database/mysql/data_migrate.go
+++ b/database/mysql/data_migrate.go
@@ -119,7 +119,7 @@ WHERE
 	return sizeMB, nil
 }
 
-func (d *Database) GetDatabaseTableChunkTask(taskName, schemaName, tableName string, chunkSize uint64, callTimeout uint64) ([]map[string]string, error) {
+func (d *Database) GetDatabaseTableChunkTask(taskName, schemaName, tableName string, chunkSize uint64, callTimeout uint64, batchSize int, dataChan chan []map[string]string) error {
 	//TODO implement me
 	panic("implement me")
 }
diff --git a/database/oracle/data_compare.go b/database/oracle/data_compare.go
index 4c7d79b..948b5dc 100644
--- a/database/oracle/data_compare.go
+++ b/database/oracle/data_compare.go
@@ -177,7 +177,7 @@ WHERE TABLE_OWNER = '%s'
 	return hist, nil
 }
 
-func (d *Database) GetDatabaseTableHighestSelectivityIndex(schemaNameS, tableNameS string, compareCondField string, ignoreCondFields []string) (*structure.HighestBucket, error) {
+func (d *Database) GetDatabaseTableHighestSelectivityIndex(schemaNameS, tableNameS string, compareCondField string, ignoreCondFields []string) (*structure.Selectivity, error) {
 	consColumns, err := d.GetDatabaseTableConstraintIndexColumn(schemaNameS, tableNameS)
 	if err != nil {
 		return nil, err
@@ -225,12 +225,12 @@ func (d *Database) GetDatabaseTableHighestSelectivityIndex(schemaNameS, tableNam
 		return nil, nil
 	}
 
-	highestBucket, err := structure.FindMatchDistinctCountBucket(sortHists, buckets, consColumns)
+	selectivity, err := structure.FindMatchDistinctCountBucket(sortHists, buckets, consColumns)
 	if err != nil {
 		return nil, err
 	}
 
-	properties, err := d.GetDatabaseTableColumnProperties(schemaNameS, tableNameS, highestBucket.IndexColumn)
+	properties, err := d.GetDatabaseTableColumnProperties(schemaNameS, tableNameS, selectivity.IndexColumn)
 	if err != nil {
 		return nil, err
 	}
@@ -239,7 +239,7 @@ func (d *Database) GetDatabaseTableHighestSelectivityIndex(schemaNameS, tableNam
 		columnCollations  []string
 		datetimePrecision []string
 	)
-	for _, c := range highestBucket.IndexColumn {
+	for _, c := range selectivity.IndexColumn {
 		for _, p := range properties {
 			if strings.EqualFold(p["COLUMN_NAME"], c) {
 				columnProps = append(columnProps, p["DATA_TYPE"])
@@ -261,10 +261,10 @@ func (d *Database) GetDatabaseTableHighestSelectivityIndex(schemaNameS, tableNam
 		}
 	}
 
-	highestBucket.ColumnDatatype = columnProps
-	highestBucket.ColumnCollation = columnCollations
-	highestBucket.DatetimePrecision = datetimePrecision
-	return highestBucket, nil
+	selectivity.ColumnDatatype = columnProps
+	selectivity.ColumnCollation = columnCollations
+	selectivity.DatetimePrecision = datetimePrecision
+	return selectivity, nil
 }
 
 func (d *Database) GetDatabaseTableRandomValues(schemaNameS, tableNameS string, columns []string, conditions string, condArgs []interface{}, limit int, collations []string) ([][]string, error) {
diff --git a/database/oracle/data_migrate.go b/database/oracle/data_migrate.go
index 29f3483..b7058e8 100644
--- a/database/oracle/data_migrate.go
+++ b/database/oracle/data_migrate.go
@@ -152,13 +152,13 @@ func (d *Database) GetDatabaseDirectoryName(directory string) (string, error) {
 	return res[0]["DIRECTORY_PATH"], nil
 }
 
-func (d *Database) GetDatabaseTableChunkTask(taskName, schemaName, tableName string, chunkSize uint64, callTimeout uint64) ([]map[string]string, error) {
+func (d *Database) GetDatabaseTableChunkTask(taskName, schemaName, tableName string, chunkSize uint64, callTimeout uint64, batchSize int, dataChan chan []map[string]string) error {
 	sqlStr00 := fmt.Sprintf(`BEGIN
   DBMS_PARALLEL_EXECUTE.CREATE_TASK (TASK_NAME => '%s');
 END;`, taskName)
 	_, err := d.ExecContext(d.Ctx, sqlStr00)
 	if err != nil {
-		return nil, fmt.Errorf("oracle DBMS_PARALLEL_EXECUTE create task failed: %v, sql: %v", err, sqlStr00)
+		return fmt.Errorf("oracle DBMS_PARALLEL_EXECUTE create task failed: %v, sql: %v", err, sqlStr00)
 	}
 
 	deadline := time.Now().Add(time.Duration(callTimeout) * time.Second)
@@ -181,21 +181,91 @@ END;`, taskName)
 	if err != nil {
 		_, err = d.ExecContext(d.Ctx, sqlStr02)
 		if err != nil {
-			return nil, fmt.Errorf("oracle DBMS_PARALLEL_EXECUTE create_chunks_by_rowid drop task failed: %v, sql: %v", err, sqlStr02)
+			return fmt.Errorf("oracle DBMS_PARALLEL_EXECUTE create_chunks_by_rowid drop task failed: %v, sql: %v", err, sqlStr02)
 		}
-		return nil, fmt.Errorf("oracle DBMS_PARALLEL_EXECUTE create_chunks_by_rowid task failed: %v, sql: %v", err, sqlStr01)
+		return fmt.Errorf("oracle DBMS_PARALLEL_EXECUTE create_chunks_by_rowid task failed: %v, sql: %v", err, sqlStr01)
 	}
 
 	sqlStr03 := fmt.Sprintf(`SELECT 'ROWID BETWEEN ''' || START_ROWID || ''' AND ''' || END_ROWID || '''' CMD FROM DBA_PARALLEL_EXECUTE_CHUNKS WHERE TASK_NAME = '%s' ORDER BY CHUNK_ID`, taskName)
-	_, res, err := d.GeneralQuery(sqlStr03)
-	if err != nil {
-		_, err = d.ExecContext(d.Ctx, sqlStr02)
-		if err != nil {
-			return nil, fmt.Errorf("oracle DBMS_PARALLEL_EXECUTE query_chunks_rowid drop task failed: %v, sql: %v", err, sqlStr02)
-		}
-		return nil, fmt.Errorf("oracle DBMS_PARALLEL_EXECUTE query_chunks_rowid task failed: %v, sql: %v", err, sqlStr03)
-	}
-	return res, nil
+
+	batchRowsData := make([]map[string]string, 0, batchSize)
+
+	qdeadline := time.Now().Add(time.Duration(d.CallTimeout) * time.Second)
+
+	qctx, qcancel := context.WithDeadline(d.Ctx, qdeadline)
+	defer qcancel()
+
+	rows, err := d.QueryContext(qctx, sqlStr03)
+	if err != nil {
+		if _, dropErr := d.ExecContext(d.Ctx, sqlStr02); dropErr != nil {
+			return fmt.Errorf("oracle DBMS_PARALLEL_EXECUTE query_chunks_rowid drop task failed: %v, sql: %v", dropErr, sqlStr02)
+		}
+		return fmt.Errorf("oracle DBMS_PARALLEL_EXECUTE query_chunks_rowid task failed: %v, sql: %v", err, sqlStr03)
+	}
+	defer rows.Close()
+
+	// general query, automatically resolve the column names
+	columns, err := rows.Columns()
+	if err != nil {
+		if _, dropErr := d.ExecContext(d.Ctx, sqlStr02); dropErr != nil {
+			return fmt.Errorf("oracle DBMS_PARALLEL_EXECUTE query_chunks_rowid drop task failed: %v, sql: %v", dropErr, sqlStr02)
+		}
+		return fmt.Errorf("query rows.Columns failed, sql: [%v], error: [%v]", sqlStr03, err)
+	}
+
+	values := make([][]byte, len(columns))
+	scans := make([]interface{}, len(columns))
+	for i := range values {
+		scans[i] = &values[i]
+	}
+
+	for rows.Next() {
+		err = rows.Scan(scans...)
+		if err != nil {
+			return fmt.Errorf("query rows.Scan failed, sql: [%v], error: [%v]", sqlStr03, err)
+		}
+
+		row := make(map[string]string)
+		for k, v := range values {
+			// Notes: oracle database NULL and ""
+			// 1, if the return value is NULLABLE, the value is NULL; the oracle sql query statement is required to apply the NULL judgement to the field, and when the field is NULL it returns the value NULLABLE
+			// 2, if the return value is nil, the value is NULL
+			// 3, if the return value is "", the value is the empty string ""
+			// 4, if the return value is 'NULL' or 'null', the value is the literal string NULL or null
+			if v == nil {
+				row[columns[k]] = "NULLABLE"
+			} else {
+				// handle empty strings and other values, outputting the value as a string
+				row[columns[k]] = stringutil.BytesToString(v)
+			}
+		}
+
+		// buffer the row
+		batchRowsData = append(batchRowsData, row)
+
+		// send one full batch
+		if len(batchRowsData) == batchSize {
+			dataChan <- batchRowsData
+			// reset the buffer
+			batchRowsData = make([]map[string]string, 0, batchSize)
+		}
+	}
+
+	if err = rows.Err(); err != nil {
+		if _, dropErr := d.ExecContext(d.Ctx, sqlStr02); dropErr != nil {
+			return fmt.Errorf("oracle DBMS_PARALLEL_EXECUTE query_chunks_rowid drop task failed: %v, sql: %v", dropErr, sqlStr02)
+		}
+		return fmt.Errorf("query rows.Next failed, sql: [%v], error: [%v]", sqlStr03, err)
+	}
+
+	// send the final partial batch
+	if len(batchRowsData) > 0 {
+		dataChan <- batchRowsData
+	}
+	return nil
 }
 
 func (d *Database) GetDatabaseTableChunkData(querySQL string, queryArgs []interface{}, batchSize, callTimeout int, dbCharsetS, dbCharsetT, columnDetailO string, dataChan chan []interface{}) error {
diff --git a/database/oracle/taskflow/csv_migrate.go b/database/oracle/taskflow/csv_migrate.go
deleted file mode 100644
index 8077ed1..0000000
--- a/database/oracle/taskflow/csv_migrate.go
+++ /dev/null
@@ -1,1055 +0,0 @@
-/*
-Copyright © 2020 Marvin
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-	http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/ -package taskflow - -import ( - "context" - "fmt" - "github.com/wentaojin/dbms/database/processor" - "path/filepath" - "strconv" - "strings" - "time" - - "github.com/golang/snappy" - - "github.com/google/uuid" - "github.com/wentaojin/dbms/database" - "github.com/wentaojin/dbms/errconcurrent" - "github.com/wentaojin/dbms/logger" - "github.com/wentaojin/dbms/model" - "github.com/wentaojin/dbms/model/datasource" - "github.com/wentaojin/dbms/model/rule" - "github.com/wentaojin/dbms/model/task" - "github.com/wentaojin/dbms/proto/pb" - "github.com/wentaojin/dbms/utils/constant" - "github.com/wentaojin/dbms/utils/stringutil" - "go.uber.org/zap" - "golang.org/x/sync/errgroup" -) - -type CsvMigrateTask struct { - Ctx context.Context - Task *task.Task - DatasourceS *datasource.Datasource - DatasourceT *datasource.Datasource - TaskParams *pb.CsvMigrateParam -} - -func (cmt *CsvMigrateTask) Start() error { - schemaTaskTime := time.Now() - logger.Info("csv migrate task get schema route", - zap.String("task_name", cmt.Task.TaskName), zap.String("task_mode", cmt.Task.TaskMode), zap.String("task_flow", cmt.Task.TaskFlow)) - schemaRoute, err := model.GetIMigrateSchemaRouteRW().GetSchemaRouteRule(cmt.Ctx, &rule.SchemaRouteRule{TaskName: cmt.Task.TaskName}) - if err != nil { - return err - } - - logger.Info("csv migrate task init database connection", - zap.String("task_name", cmt.Task.TaskName), zap.String("task_mode", cmt.Task.TaskMode), zap.String("task_flow", cmt.Task.TaskFlow)) - - sourceDatasource, err := model.GetIDatasourceRW().GetDatasource(cmt.Ctx, cmt.Task.DatasourceNameS) - if err != nil { - return err - } - databaseS, err := database.NewDatabase(cmt.Ctx, sourceDatasource, schemaRoute.SchemaNameS, int64(cmt.TaskParams.CallTimeout)) - if err != nil { - return err - } - defer databaseS.Close() - databaseT, err := database.NewDatabase(cmt.Ctx, cmt.DatasourceT, "", int64(cmt.TaskParams.CallTimeout)) - if err != nil { - return err - } - defer databaseT.Close() - - logger.Info("csv migrate task inspect migrate task", - zap.String("task_name", cmt.Task.TaskName), zap.String("task_mode", cmt.Task.TaskMode), zap.String("task_flow", cmt.Task.TaskFlow)) - _, err = processor.InspectOracleMigrateTask(cmt.Task.TaskName, cmt.Task.TaskFlow, cmt.Task.TaskMode, databaseS, stringutil.StringUpper(cmt.DatasourceS.ConnectCharset), stringutil.StringUpper(cmt.DatasourceT.ConnectCharset)) - if err != nil { - return err - } - - logger.Info("csv migrate task init task", - zap.String("task_name", cmt.Task.TaskName), zap.String("task_mode", cmt.Task.TaskMode), zap.String("task_flow", cmt.Task.TaskFlow)) - dbVersionS, err := databaseS.GetDatabaseVersion() - if err != nil { - return err - } - err = cmt.InitCsvMigrateTask(databaseS, dbVersionS, schemaRoute) - if err != nil { - return err - } - - logger.Info("csv migrate task get tables", - zap.String("task_name", cmt.Task.TaskName), zap.String("task_mode", cmt.Task.TaskMode), zap.String("task_flow", cmt.Task.TaskFlow)) - - summaries, err := model.GetIDataMigrateSummaryRW().FindDataMigrateSummary(cmt.Ctx, &task.DataMigrateSummary{ - TaskName: cmt.Task.TaskName, - SchemaNameS: schemaRoute.SchemaNameS, - }) - if err != nil { - return err - } - - for _, s := range summaries { - startTableTime := time.Now() - - statfs, err := stringutil.GetDiskUsage(cmt.TaskParams.OutputDir) - if err != nil { - return err - } - // MB - diskFactor, err := stringutil.StrconvFloatBitSize(cmt.TaskParams.DiskUsageFactor, 64) - if err != nil { - return err - } - estmTableSizeMB := s.TableSizeS * 
diskFactor - - totalSpace := statfs.Blocks * uint64(statfs.Bsize) / 1024 / 1024 - freeSpace := statfs.Bfree * uint64(statfs.Bsize) / 1024 / 1024 - usedSpace := totalSpace - freeSpace - - if freeSpace < uint64(estmTableSizeMB) { - logger.Warn("csv migrate task disk usage", - zap.String("task_name", cmt.Task.TaskName), - zap.String("task_mode", cmt.Task.TaskMode), - zap.String("task_flow", cmt.Task.TaskFlow), - zap.String("schema_name_s", s.SchemaNameS), - zap.String("table_name_s", s.TableNameS), - zap.String("output_dir", cmt.TaskParams.OutputDir), - zap.Uint64("disk total space(MB)", totalSpace), - zap.Uint64("disk used space(MB)", usedSpace), - zap.Uint64("disk free space(MB)", freeSpace), - zap.Uint64("estimate table space(MB)", uint64(estmTableSizeMB))) - _, err = model.GetIDataMigrateSummaryRW().UpdateDataMigrateSummary(cmt.Ctx, &task.DataMigrateSummary{ - TaskName: s.TaskName, - SchemaNameS: s.SchemaNameS, - TableNameS: s.TableNameS, - }, map[string]interface{}{ - "Refused": fmt.Sprintf("the output [%s] current disk quota isn't enough, total space(MB): [%v], used space(MB): [%v], free space(MB): [%v], estimate space(MB): [%v]", cmt.TaskParams.OutputDir, totalSpace, usedSpace, freeSpace, estmTableSizeMB), - }) - if err != nil { - return err - } - // skip - continue - } - - err = stringutil.PathNotExistOrCreate(filepath.Join( - cmt.TaskParams.OutputDir, - s.SchemaNameS, - s.TableNameS, - )) - if err != nil { - return err - } - - logger.Info("csv migrate task process table", - zap.String("task_name", cmt.Task.TaskName), - zap.String("task_mode", cmt.Task.TaskMode), - zap.String("task_flow", cmt.Task.TaskFlow), - zap.String("schema_name_s", s.SchemaNameS), - zap.String("table_name_s", s.TableNameS), - zap.String("output_dir", cmt.TaskParams.OutputDir), - zap.Uint64("disk total space(MB)", totalSpace), - zap.Uint64("disk used space(MB)", usedSpace), - zap.Uint64("disk free space(MB)", freeSpace), - zap.Uint64("estimate table space(MB)", uint64(estmTableSizeMB))) - - var migrateTasks []*task.DataMigrateTask - err = model.Transaction(cmt.Ctx, func(txnCtx context.Context) error { - // get migrate task tables - migrateTasks, err = model.GetIDataMigrateTaskRW().FindDataMigrateTask(txnCtx, - &task.DataMigrateTask{ - TaskName: s.TaskName, - SchemaNameS: s.SchemaNameS, - TableNameS: s.TableNameS, - TaskStatus: constant.TaskDatabaseStatusWaiting, - }) - if err != nil { - return err - } - migrateFailedTasks, err := model.GetIDataMigrateTaskRW().FindDataMigrateTask(txnCtx, - &task.DataMigrateTask{ - TaskName: s.TaskName, - SchemaNameS: s.SchemaNameS, - TableNameS: s.TableNameS, - TaskStatus: constant.TaskDatabaseStatusFailed}) - if err != nil { - return err - } - migrateRunningTasks, err := model.GetIDataMigrateTaskRW().FindDataMigrateTask(txnCtx, - &task.DataMigrateTask{ - TaskName: s.TaskName, - SchemaNameS: s.SchemaNameS, - TableNameS: s.TableNameS, - TaskStatus: constant.TaskDatabaseStatusRunning}) - if err != nil { - return err - } - migrateStopTasks, err := model.GetIDataMigrateTaskRW().FindDataMigrateTask(txnCtx, - &task.DataMigrateTask{ - TaskName: s.TaskName, - SchemaNameS: s.SchemaNameS, - TableNameS: s.TableNameS, - TaskStatus: constant.TaskDatabaseStatusStopped}) - if err != nil { - return err - } - migrateTasks = append(migrateTasks, migrateFailedTasks...) - migrateTasks = append(migrateTasks, migrateRunningTasks...) - migrateTasks = append(migrateTasks, migrateStopTasks...) 
- return nil - }) - if err != nil { - return err - } - - logger.Info("csv migrate task process chunks", - zap.String("task_name", cmt.Task.TaskName), - zap.String("task_mode", cmt.Task.TaskMode), - zap.String("task_flow", cmt.Task.TaskFlow), - zap.String("schema_name_s", s.SchemaNameS), - zap.String("table_name_s", s.TableNameS)) - g := errconcurrent.NewGroup() - g.SetLimit(int(cmt.TaskParams.SqlThreadS)) - for _, j := range migrateTasks { - gTime := time.Now() - g.Go(j, gTime, func(j interface{}) error { - dt := j.(*task.DataMigrateTask) - errW := model.Transaction(cmt.Ctx, func(txnCtx context.Context) error { - _, err = model.GetIDataMigrateTaskRW().UpdateDataMigrateTask(txnCtx, - &task.DataMigrateTask{TaskName: dt.TaskName, SchemaNameS: dt.SchemaNameS, TableNameS: dt.TableNameS, ChunkID: dt.ChunkID}, - map[string]interface{}{ - "TaskStatus": constant.TaskDatabaseStatusRunning, - }) - if err != nil { - return err - } - _, err = model.GetITaskLogRW().CreateLog(txnCtx, &task.Log{ - TaskName: dt.TaskName, - SchemaNameS: dt.SchemaNameS, - TableNameS: dt.TableNameS, - LogDetail: fmt.Sprintf("%v [%v] csv migrate task [%v] taskflow [%v] source table [%v.%v] chunk [%s] start", - stringutil.CurrentTimeFormatString(), - stringutil.StringLower(constant.TaskModeCSVMigrate), - dt.TaskName, - cmt.Task.TaskMode, - dt.SchemaNameS, - dt.TableNameS, - dt.ChunkDetailS), - }) - if err != nil { - return err - } - return nil - }) - if errW != nil { - return errW - } - - err = database.IDataMigrateProcess(&processor.CsvMigrateRow{ - Ctx: cmt.Ctx, - TaskMode: cmt.Task.TaskMode, - TaskFlow: cmt.Task.TaskFlow, - BufioSize: constant.DefaultMigrateTaskBufferIOSize, - Dmt: dt, - DatabaseS: databaseS, - DBCharsetS: constant.MigrateOracleCharsetStringConvertMapping[stringutil.StringUpper(cmt.DatasourceS.ConnectCharset)], - DBCharsetT: stringutil.StringUpper(cmt.DatasourceT.ConnectCharset), - TaskParams: cmt.TaskParams, - ReadChan: make(chan []string, constant.DefaultMigrateTaskQueueSize), - WriteChan: make(chan string, constant.DefaultMigrateTaskQueueSize), - }) - if err != nil { - return err - } - - errW = model.Transaction(cmt.Ctx, func(txnCtx context.Context) error { - _, err = model.GetIDataMigrateTaskRW().UpdateDataMigrateTask(txnCtx, - &task.DataMigrateTask{TaskName: dt.TaskName, SchemaNameS: dt.SchemaNameS, TableNameS: dt.TableNameS, ChunkID: dt.ChunkID}, - map[string]interface{}{ - "TaskStatus": constant.TaskDatabaseStatusSuccess, - "Duration": fmt.Sprintf("%f", time.Now().Sub(gTime).Seconds()), - }) - if err != nil { - return err - } - _, err = model.GetITaskLogRW().CreateLog(txnCtx, &task.Log{ - TaskName: dt.TaskName, - SchemaNameS: dt.SchemaNameS, - TableNameS: dt.TableNameS, - LogDetail: fmt.Sprintf("%v [%v] csv migrate task [%v] taskflow [%v] source table [%v.%v] chunk [%s] success", - stringutil.CurrentTimeFormatString(), - stringutil.StringLower(constant.TaskModeCSVMigrate), - dt.TaskName, - cmt.Task.TaskMode, - dt.SchemaNameS, - dt.TableNameS, - dt.ChunkDetailS), - }) - if err != nil { - return err - } - return nil - }) - if errW != nil { - return errW - } - return nil - }) - } - - for _, r := range g.Wait() { - if r.Err != nil { - smt := r.Task.(*task.DataMigrateTask) - logger.Warn("csv migrate task process tables", - zap.String("task_name", cmt.Task.TaskName), zap.String("task_mode", cmt.Task.TaskMode), zap.String("task_flow", cmt.Task.TaskFlow), - zap.String("schema_name_s", smt.SchemaNameS), - zap.String("table_name_s", smt.TableNameS), - zap.Error(r.Err)) - - errW := model.Transaction(cmt.Ctx, 
func(txnCtx context.Context) error { - _, err = model.GetIDataMigrateTaskRW().UpdateDataMigrateTask(txnCtx, - &task.DataMigrateTask{TaskName: smt.TaskName, SchemaNameS: smt.SchemaNameS, TableNameS: smt.TableNameS, ChunkID: smt.ChunkID}, - map[string]interface{}{ - "TaskStatus": constant.TaskDatabaseStatusFailed, - "Duration": fmt.Sprintf("%f", time.Now().Sub(r.Time).Seconds()), - "ErrorDetail": r.Err.Error(), - }) - if err != nil { - return err - } - _, err = model.GetITaskLogRW().CreateLog(txnCtx, &task.Log{ - TaskName: smt.TaskName, - SchemaNameS: smt.SchemaNameS, - TableNameS: smt.TableNameS, - LogDetail: fmt.Sprintf("%v [%v] csv migrate task [%v] taskflow [%v] source table [%v.%v] failed, please see [data_migrate_task] detail", - stringutil.CurrentTimeFormatString(), - stringutil.StringLower(constant.TaskModeStmtMigrate), - smt.TaskName, - cmt.Task.TaskMode, - smt.SchemaNameS, - smt.TableNameS), - }) - if err != nil { - return err - } - return nil - }) - if errW != nil { - return errW - } - } - } - - endTableTime := time.Now() - err = model.Transaction(cmt.Ctx, func(txnCtx context.Context) error { - tableStatusRecs, err := model.GetIDataMigrateTaskRW().FindDataMigrateTaskBySchemaTableChunkStatus(txnCtx, &task.DataMigrateTask{ - TaskName: s.TaskName, - SchemaNameS: s.SchemaNameS, - TableNameS: s.TableNameS, - }) - if err != nil { - return err - } - for _, rec := range tableStatusRecs { - switch rec.TaskStatus { - case constant.TaskDatabaseStatusSuccess: - _, err = model.GetIDataMigrateSummaryRW().UpdateDataMigrateSummary(txnCtx, &task.DataMigrateSummary{ - TaskName: rec.TaskName, - SchemaNameS: rec.SchemaNameS, - TableNameS: rec.TableNameS, - }, map[string]interface{}{ - "ChunkSuccess": rec.StatusTotals, - }) - if err != nil { - return err - } - case constant.TaskDatabaseStatusFailed: - _, err = model.GetIDataMigrateSummaryRW().UpdateDataMigrateSummary(txnCtx, &task.DataMigrateSummary{ - TaskName: rec.TaskName, - SchemaNameS: rec.SchemaNameS, - TableNameS: rec.TableNameS, - }, map[string]interface{}{ - "ChunkFails": rec.StatusTotals, - }) - if err != nil { - return err - } - case constant.TaskDatabaseStatusWaiting: - _, err = model.GetIDataMigrateSummaryRW().UpdateDataMigrateSummary(txnCtx, &task.DataMigrateSummary{ - TaskName: rec.TaskName, - SchemaNameS: rec.SchemaNameS, - TableNameS: rec.TableNameS, - }, map[string]interface{}{ - "ChunkWaits": rec.StatusTotals, - }) - if err != nil { - return err - } - case constant.TaskDatabaseStatusRunning: - _, err = model.GetIDataMigrateSummaryRW().UpdateDataMigrateSummary(txnCtx, &task.DataMigrateSummary{ - TaskName: rec.TaskName, - SchemaNameS: rec.SchemaNameS, - TableNameS: rec.TableNameS, - }, map[string]interface{}{ - "ChunkRuns": rec.StatusTotals, - }) - if err != nil { - return err - } - case constant.TaskDatabaseStatusStopped: - _, err = model.GetIDataMigrateSummaryRW().UpdateDataMigrateSummary(txnCtx, &task.DataMigrateSummary{ - TaskName: rec.TaskName, - SchemaNameS: rec.SchemaNameS, - TableNameS: rec.TableNameS, - }, map[string]interface{}{ - "ChunkStops": rec.StatusTotals, - }) - if err != nil { - return err - } - default: - return fmt.Errorf("the task [%v] task_mode [%s] task_flow [%v] schema_name_s [%v] table_name_s [%v] task_status [%v] panic, please contact auhtor or reselect", s.TaskName, cmt.Task.TaskMode, cmt.Task.TaskFlow, rec.SchemaNameS, rec.TableNameS, rec.TaskStatus) - } - } - - _, err = model.GetIDataMigrateSummaryRW().UpdateDataMigrateSummary(txnCtx, &task.DataMigrateSummary{ - TaskName: s.TaskName, - SchemaNameS: 
s.SchemaNameS, - TableNameS: s.TableNameS, - }, map[string]interface{}{ - "Refused": "", // reset - "Duration": fmt.Sprintf("%f", time.Now().Sub(startTableTime).Seconds()), - }) - if err != nil { - return err - } - return nil - }) - if err != nil { - return err - } - - logger.Info("csv migrate task process table", - zap.String("task_name", cmt.Task.TaskName), - zap.String("task_mode", cmt.Task.TaskMode), - zap.String("task_flow", cmt.Task.TaskFlow), - zap.String("schema_name_s", s.SchemaNameS), - zap.String("table_name_s", s.TableNameS), - zap.String("cost", endTableTime.Sub(startTableTime).String())) - } - logger.Info("csv migrate task", - zap.String("task_name", cmt.Task.TaskName), zap.String("task_mode", cmt.Task.TaskMode), zap.String("task_flow", cmt.Task.TaskFlow), - zap.String("cost", time.Now().Sub(schemaTaskTime).String())) - return nil -} - -func (cmt *CsvMigrateTask) InitCsvMigrateTask(databaseS database.IDatabase, dbVersion string, schemaRoute *rule.SchemaRouteRule) error { - // delete checkpoint - initFlags, err := model.GetITaskRW().GetTask(cmt.Ctx, &task.Task{TaskName: cmt.Task.TaskName}) - if err != nil { - return err - } - if !cmt.TaskParams.EnableCheckpoint || strings.EqualFold(initFlags.TaskInit, constant.TaskInitStatusNotFinished) { - err := model.GetIDataMigrateSummaryRW().DeleteDataMigrateSummaryName(cmt.Ctx, []string{schemaRoute.TaskName}) - if err != nil { - return err - } - err = model.GetIDataMigrateTaskRW().DeleteDataMigrateTaskName(cmt.Ctx, []string{schemaRoute.TaskName}) - if err != nil { - return err - } - } else if cmt.TaskParams.EnableCheckpoint && strings.EqualFold(initFlags.TaskInit, constant.TaskInitStatusFinished) { - logger.Warn("csv migrate task init skip", - zap.String("task_name", cmt.Task.TaskName), - zap.String("task_mode", cmt.Task.TaskMode), - zap.String("task_flow", cmt.Task.TaskFlow), - zap.String("task_init", constant.TaskInitStatusFinished)) - return nil - } - - dbRole, err := databaseS.GetDatabaseRole() - if err != nil { - return err - } - - // filter database table - schemaTaskTables, err := model.GetIMigrateTaskTableRW().FindMigrateTaskTable(cmt.Ctx, &rule.MigrateTaskTable{ - TaskName: schemaRoute.TaskName, - SchemaNameS: schemaRoute.SchemaNameS, - }) - if err != nil { - return err - } - var ( - includeTables []string - excludeTables []string - databaseTaskTables []string // task tables - globalScn string - ) - databaseTableTypeMap := make(map[string]string) - - for _, t := range schemaTaskTables { - if strings.EqualFold(t.IsExclude, constant.MigrateTaskTableIsExclude) { - excludeTables = append(excludeTables, t.TableNameS) - } - if strings.EqualFold(t.IsExclude, constant.MigrateTaskTableIsNotExclude) { - includeTables = append(includeTables, t.TableNameS) - } - } - - tableObjs, err := databaseS.FilterDatabaseTable(schemaRoute.SchemaNameS, includeTables, excludeTables) - if err != nil { - return err - } - - // rule case field - for _, t := range tableObjs.TaskTables { - var tabName string - // the according target case field rule convert - if strings.EqualFold(cmt.Task.CaseFieldRuleS, constant.ParamValueStructMigrateCaseFieldRuleLower) { - tabName = stringutil.StringLower(t) - } - if strings.EqualFold(cmt.Task.CaseFieldRuleS, constant.ParamValueStructMigrateCaseFieldRuleUpper) { - tabName = stringutil.StringUpper(t) - } - if strings.EqualFold(cmt.Task.CaseFieldRuleS, constant.ParamValueStructMigrateCaseFieldRuleOrigin) { - tabName = t - } - databaseTaskTables = append(databaseTaskTables, tabName) - } - - // clear the csv migrate task table 
- migrateGroupTasks, err := model.GetIDataMigrateTaskRW().FindDataMigrateTaskGroupByTaskSchemaTable(cmt.Ctx, cmt.Task.TaskName) - if err != nil { - return err - } - repeatInitTableMap := make(map[string]struct{}) - - if len(migrateGroupTasks) > 0 { - taskTablesMap := make(map[string]struct{}) - for _, t := range databaseTaskTables { - taskTablesMap[t] = struct{}{} - } - for _, smt := range migrateGroupTasks { - if smt.SchemaNameS == schemaRoute.SchemaNameS { - if _, ok := taskTablesMap[smt.TableNameS]; !ok { - err = model.Transaction(cmt.Ctx, func(txnCtx context.Context) error { - err = model.GetIDataMigrateSummaryRW().DeleteDataMigrateSummary(txnCtx, &task.DataMigrateSummary{ - TaskName: smt.TaskName, - SchemaNameS: smt.SchemaNameS, - TableNameS: smt.TableNameS, - }) - if err != nil { - return err - } - err = model.GetIDataMigrateTaskRW().DeleteDataMigrateTask(txnCtx, &task.DataMigrateTask{ - TaskName: smt.TaskName, - SchemaNameS: smt.SchemaNameS, - TableNameS: smt.TableNameS, - }) - if err != nil { - return err - } - return nil - }) - if err != nil { - return err - } - - continue - } - var summary *task.DataMigrateSummary - - summary, err = model.GetIDataMigrateSummaryRW().GetDataMigrateSummary(cmt.Ctx, &task.DataMigrateSummary{ - TaskName: smt.TaskName, - SchemaNameS: smt.SchemaNameS, - TableNameS: smt.TableNameS, - }) - if err != nil { - return err - } - - if int64(summary.ChunkTotals) != smt.ChunkTotals { - err = model.Transaction(cmt.Ctx, func(txnCtx context.Context) error { - err = model.GetIDataMigrateSummaryRW().DeleteDataMigrateSummary(txnCtx, &task.DataMigrateSummary{ - TaskName: smt.TaskName, - SchemaNameS: smt.SchemaNameS, - TableNameS: smt.TableNameS, - }) - if err != nil { - return err - } - err = model.GetIDataMigrateTaskRW().DeleteDataMigrateTask(txnCtx, &task.DataMigrateTask{ - TaskName: smt.TaskName, - SchemaNameS: smt.SchemaNameS, - TableNameS: smt.TableNameS, - }) - if err != nil { - return err - } - return nil - }) - if err != nil { - return err - } - - continue - } - - repeatInitTableMap[smt.TableNameS] = struct{}{} - } - } - } - - databaseTableTypeMap, err = databaseS.GetDatabaseTableType(schemaRoute.SchemaNameS) - if err != nil { - return err - } - - globalScnS, err := databaseS.GetDatabaseConsistentPos() - if err != nil { - return err - } - globalScn = strconv.FormatUint(globalScnS, 10) - - // database tables - // init database table - dbTypeSli := stringutil.StringSplit(cmt.Task.TaskFlow, constant.StringSeparatorAite) - dbTypeS := dbTypeSli[0] - - logger.Info("csv migrate task init", - zap.String("task_name", cmt.Task.TaskName), zap.String("task_mode", cmt.Task.TaskMode), zap.String("task_flow", cmt.Task.TaskFlow)) - - g, gCtx := errgroup.WithContext(cmt.Ctx) - g.SetLimit(int(cmt.TaskParams.TableThread)) - - for _, taskJob := range databaseTaskTables { - sourceTable := taskJob - g.Go(func() error { - select { - case <-gCtx.Done(): - return gCtx.Err() - default: - startTime := time.Now() - if _, ok := repeatInitTableMap[sourceTable]; ok { - // skip - return nil - } - - tableRows, err := databaseS.GetDatabaseTableRows(schemaRoute.SchemaNameS, sourceTable) - if err != nil { - return err - } - tableSize, err := databaseS.GetDatabaseTableSize(schemaRoute.SchemaNameS, sourceTable) - if err != nil { - return err - } - - dataRule := &processor.DataMigrateRule{ - Ctx: gCtx, - TaskMode: cmt.Task.TaskMode, - TaskName: cmt.Task.TaskName, - TaskFlow: cmt.Task.TaskFlow, - DatabaseS: databaseS, - SchemaNameS: schemaRoute.SchemaNameS, - TableNameS: sourceTable, - TableTypeS: 
databaseTableTypeMap, - DBCharsetS: cmt.DatasourceS.ConnectCharset, - CaseFieldRuleS: cmt.Task.CaseFieldRuleS, - CaseFieldRuleT: cmt.Task.CaseFieldRuleT, - GlobalSqlHintS: cmt.TaskParams.SqlHintS, - } - - attsRule, err := database.IDataMigrateAttributesRule(dataRule) - if err != nil { - return err - } - - // only where range - if !attsRule.EnableChunkStrategy && !strings.EqualFold(attsRule.WhereRange, "") { - encChunkS := snappy.Encode(nil, []byte(attsRule.WhereRange)) - - encryptChunkS, err := stringutil.Encrypt(stringutil.BytesToString(encChunkS), []byte(constant.DefaultDataEncryptDecryptKey)) - if err != nil { - return err - } - err = model.Transaction(gCtx, func(txnCtx context.Context) error { - _, err = model.GetIDataMigrateTaskRW().CreateDataMigrateTask(txnCtx, &task.DataMigrateTask{ - TaskName: cmt.Task.TaskName, - SchemaNameS: attsRule.SchemaNameS, - TableNameS: attsRule.TableNameS, - SchemaNameT: attsRule.SchemaNameT, - TableNameT: attsRule.TableNameT, - TableTypeS: attsRule.TableTypeS, - SnapshotPointS: globalScn, - ColumnDetailO: attsRule.ColumnDetailO, - ColumnDetailS: attsRule.ColumnDetailS, - ColumnDetailT: attsRule.ColumnDetailT, - SqlHintS: attsRule.SqlHintS, - ChunkID: uuid.New().String(), - ChunkDetailS: encryptChunkS, - ChunkDetailArgS: "", - ConsistentReadS: strconv.FormatBool(cmt.TaskParams.EnableConsistentRead), - TaskStatus: constant.TaskDatabaseStatusWaiting, - CsvFile: filepath.Join(cmt.TaskParams.OutputDir, attsRule.SchemaNameS, attsRule.TableNameS, - stringutil.StringBuilder(attsRule.SchemaNameT, `.`, attsRule.TableNameT, `.0.csv`)), - }) - if err != nil { - return err - } - _, err = model.GetIDataMigrateSummaryRW().CreateDataMigrateSummary(txnCtx, &task.DataMigrateSummary{ - TaskName: cmt.Task.TaskName, - SchemaNameS: attsRule.SchemaNameS, - TableNameS: attsRule.TableNameS, - SchemaNameT: attsRule.SchemaNameT, - TableNameT: attsRule.TableNameT, - SnapshotPointS: globalScn, - TableRowsS: tableRows, - TableSizeS: tableSize, - ChunkTotals: 1, - }) - if err != nil { - return err - } - return nil - }) - if err != nil { - return err - } - return nil - } - - var whereRange string - // statistic - if !strings.EqualFold(dbRole, constant.OracleDatabasePrimaryRole) || (strings.EqualFold(dbRole, constant.OracleDatabasePrimaryRole) && stringutil.VersionOrdinal(dbVersion) < stringutil.VersionOrdinal(constant.OracleDatabaseTableMigrateRowidRequireVersion)) { - upstreamConsIndexColumns, err := databaseS.GetDatabaseTableHighestSelectivityIndex(attsRule.SchemaNameS, attsRule.TableNameS, "", nil) - if err != nil { - return err - } - // upstream bucket ranges - _, upstreamBuckets, err := processor.ProcessUpstreamDatabaseTableColumnStatisticsBucket( - dbTypeS, - stringutil.StringUpper(cmt.DatasourceS.ConnectCharset), - cmt.Task.CaseFieldRuleS, databaseS, attsRule.SchemaNameS, - attsRule.TableNameS, - upstreamConsIndexColumns, - int64(cmt.TaskParams.ChunkSize), - false) - if err != nil { - return err - } - if len(upstreamBuckets) == 0 { - logger.Warn("csv migrate task table", - zap.String("task_name", cmt.Task.TaskName), - zap.String("task_mode", cmt.Task.TaskMode), - zap.String("task_flow", cmt.Task.TaskFlow), - zap.String("schema_name_s", attsRule.SchemaNameS), - zap.String("table_name_s", attsRule.TableNameS), - zap.String("database_version", dbVersion), - zap.String("database_role", dbRole), - zap.String("migrate_method", "scan")) - switch { - case attsRule.EnableChunkStrategy && !strings.EqualFold(attsRule.WhereRange, ""): - whereRange = stringutil.StringBuilder(`1 = 1 AND `, 
attsRule.WhereRange) - default: - whereRange = `1 = 1` - } - - encChunkS := snappy.Encode(nil, []byte(whereRange)) - - encryptChunkS, err := stringutil.Encrypt(stringutil.BytesToString(encChunkS), []byte(constant.DefaultDataEncryptDecryptKey)) - if err != nil { - return err - } - - err = model.Transaction(gCtx, func(txnCtx context.Context) error { - _, err = model.GetIDataMigrateTaskRW().CreateDataMigrateTask(txnCtx, &task.DataMigrateTask{ - TaskName: cmt.Task.TaskName, - SchemaNameS: attsRule.SchemaNameS, - TableNameS: attsRule.TableNameS, - SchemaNameT: attsRule.SchemaNameT, - TableNameT: attsRule.TableNameT, - TableTypeS: attsRule.TableTypeS, - SnapshotPointS: globalScn, - ColumnDetailO: attsRule.ColumnDetailO, - ColumnDetailS: attsRule.ColumnDetailS, - ColumnDetailT: attsRule.ColumnDetailT, - SqlHintS: attsRule.SqlHintS, - ChunkID: uuid.New().String(), - ChunkDetailS: encryptChunkS, - ChunkDetailArgS: "", - ConsistentReadS: strconv.FormatBool(cmt.TaskParams.EnableConsistentRead), - TaskStatus: constant.TaskDatabaseStatusWaiting, - CsvFile: filepath.Join(cmt.TaskParams.OutputDir, attsRule.SchemaNameS, attsRule.TableNameS, - stringutil.StringBuilder(attsRule.SchemaNameT, `.`, attsRule.TableNameT, `.0.csv`)), - }) - if err != nil { - return err - } - _, err = model.GetIDataMigrateSummaryRW().CreateDataMigrateSummary(txnCtx, &task.DataMigrateSummary{ - TaskName: cmt.Task.TaskName, - SchemaNameS: attsRule.SchemaNameS, - TableNameS: attsRule.TableNameS, - SchemaNameT: attsRule.SchemaNameT, - TableNameT: attsRule.TableNameT, - SnapshotPointS: globalScn, - TableRowsS: tableRows, - TableSizeS: tableSize, - ChunkTotals: 1, - }) - if err != nil { - return err - } - return nil - }) - if err != nil { - return err - } - return nil - } - - logger.Warn("csv migrate task table", - zap.String("task_name", cmt.Task.TaskName), - zap.String("task_mode", cmt.Task.TaskMode), - zap.String("task_flow", cmt.Task.TaskFlow), - zap.String("schema_name_s", attsRule.SchemaNameS), - zap.String("table_name_s", attsRule.TableNameS), - zap.String("database_version", dbVersion), - zap.String("database_role", dbRole), - zap.String("migrate_method", "statistic")) - - var metas []*task.DataMigrateTask - for idx, r := range upstreamBuckets { - toStringS, toStringSArgs := r.ToString() - var argsS string - if toStringSArgs != nil { - argsS, err = stringutil.MarshalJSON(toStringSArgs) - if err != nil { - return err - } - } - switch { - case attsRule.EnableChunkStrategy && !strings.EqualFold(attsRule.WhereRange, ""): - whereRange = stringutil.StringBuilder(`((`, toStringS, `) AND (`, attsRule.WhereRange, `))`) - default: - whereRange = toStringS - } - - encChunkS := snappy.Encode(nil, []byte(whereRange)) - - encryptChunkS, err := stringutil.Encrypt(stringutil.BytesToString(encChunkS), []byte(constant.DefaultDataEncryptDecryptKey)) - if err != nil { - return err - } - metas = append(metas, &task.DataMigrateTask{ - TaskName: cmt.Task.TaskName, - SchemaNameS: attsRule.SchemaNameS, - TableNameS: attsRule.TableNameS, - SchemaNameT: attsRule.SchemaNameT, - TableNameT: attsRule.TableNameT, - TableTypeS: attsRule.TableTypeS, - SnapshotPointS: globalScn, - ColumnDetailO: attsRule.ColumnDetailO, - ColumnDetailS: attsRule.ColumnDetailS, - ColumnDetailT: attsRule.ColumnDetailT, - SqlHintS: attsRule.SqlHintS, - ChunkID: uuid.New().String(), - ChunkDetailS: encryptChunkS, - ChunkDetailArgS: argsS, - ConsistentReadS: strconv.FormatBool(cmt.TaskParams.EnableConsistentRead), - TaskStatus: constant.TaskDatabaseStatusWaiting, - CsvFile: 
filepath.Join(cmt.TaskParams.OutputDir, attsRule.SchemaNameS, attsRule.TableNameS, - stringutil.StringBuilder(attsRule.SchemaNameT, `.`, attsRule.TableNameT, `.`, strconv.Itoa(idx), `.csv`)), - }) - } - - err = model.GetIDataMigrateTaskRW().CreateInBatchDataMigrateTask(gCtx, metas, int(cmt.TaskParams.WriteThread), int(cmt.TaskParams.BatchSize)) - if err != nil { - return err - } - _, err = model.GetIDataMigrateSummaryRW().CreateDataMigrateSummary(gCtx, &task.DataMigrateSummary{ - TaskName: cmt.Task.TaskName, - SchemaNameS: attsRule.SchemaNameS, - TableNameS: attsRule.TableNameS, - SchemaNameT: attsRule.SchemaNameT, - TableNameT: attsRule.TableNameT, - SnapshotPointS: globalScn, - TableRowsS: tableRows, - TableSizeS: tableSize, - ChunkTotals: uint64(len(upstreamBuckets)), - }) - if err != nil { - return err - } - return nil - } - - chunkTask := uuid.New().String() - chunks, err := databaseS.GetDatabaseTableChunkTask(chunkTask, schemaRoute.SchemaNameS, sourceTable, cmt.TaskParams.ChunkSize, cmt.TaskParams.CallTimeout) - if err != nil { - return err - } - - if len(chunks) == 0 { - switch { - case attsRule.EnableChunkStrategy && !strings.EqualFold(attsRule.WhereRange, ""): - whereRange = stringutil.StringBuilder(`1 = 1 AND `, attsRule.WhereRange) - default: - whereRange = `1 = 1` - } - - encChunkS := snappy.Encode(nil, []byte(whereRange)) - - encryptChunkS, err := stringutil.Encrypt(stringutil.BytesToString(encChunkS), []byte(constant.DefaultDataEncryptDecryptKey)) - if err != nil { - return err - } - - err = model.Transaction(gCtx, func(txnCtx context.Context) error { - _, err = model.GetIDataMigrateTaskRW().CreateDataMigrateTask(txnCtx, &task.DataMigrateTask{ - TaskName: cmt.Task.TaskName, - SchemaNameS: attsRule.SchemaNameS, - TableNameS: attsRule.TableNameS, - SchemaNameT: attsRule.SchemaNameT, - TableNameT: attsRule.TableNameT, - TableTypeS: attsRule.TableTypeS, - SnapshotPointS: globalScn, - ColumnDetailO: attsRule.ColumnDetailO, - ColumnDetailS: attsRule.ColumnDetailS, - ColumnDetailT: attsRule.ColumnDetailT, - SqlHintS: attsRule.SqlHintS, - ChunkID: uuid.New().String(), - ChunkDetailS: encryptChunkS, - ChunkDetailArgS: "", - ConsistentReadS: strconv.FormatBool(cmt.TaskParams.EnableConsistentRead), - TaskStatus: constant.TaskDatabaseStatusWaiting, - CsvFile: filepath.Join(cmt.TaskParams.OutputDir, attsRule.SchemaNameS, attsRule.TableNameS, - stringutil.StringBuilder(attsRule.SchemaNameT, `.`, attsRule.TableNameT, `.0.csv`)), - }) - if err != nil { - return err - } - _, err = model.GetIDataMigrateSummaryRW().CreateDataMigrateSummary(txnCtx, &task.DataMigrateSummary{ - TaskName: cmt.Task.TaskName, - SchemaNameS: attsRule.SchemaNameS, - TableNameS: attsRule.TableNameS, - SchemaNameT: attsRule.SchemaNameT, - TableNameT: attsRule.TableNameT, - SnapshotPointS: globalScn, - TableRowsS: tableRows, - TableSizeS: tableSize, - ChunkTotals: 1, - }) - if err != nil { - return err - } - return nil - }) - if err != nil { - return err - } - - return nil - } - - var metas []*task.DataMigrateTask - for i, r := range chunks { - csvFile := filepath.Join(cmt.TaskParams.OutputDir, attsRule.SchemaNameS, attsRule.TableNameS, - stringutil.StringBuilder(attsRule.SchemaNameT, `.`, attsRule.TableNameT, `.`, strconv.Itoa(i), `.csv`)) - switch { - case attsRule.EnableChunkStrategy && !strings.EqualFold(attsRule.WhereRange, ""): - whereRange = stringutil.StringBuilder(r["CMD"], ` AND `, attsRule.WhereRange) - default: - whereRange = r["CMD"] - } - - encChunkS := snappy.Encode(nil, []byte(whereRange)) - - 
encryptChunkS, err := stringutil.Encrypt(stringutil.BytesToString(encChunkS), []byte(constant.DefaultDataEncryptDecryptKey)) - if err != nil { - return err - } - - metas = append(metas, &task.DataMigrateTask{ - TaskName: cmt.Task.TaskName, - SchemaNameS: attsRule.SchemaNameS, - TableNameS: attsRule.TableNameS, - SchemaNameT: attsRule.SchemaNameT, - TableNameT: attsRule.TableNameT, - TableTypeS: attsRule.TableTypeS, - SnapshotPointS: globalScn, - ColumnDetailO: attsRule.ColumnDetailO, - ColumnDetailS: attsRule.ColumnDetailS, - ColumnDetailT: attsRule.ColumnDetailT, - SqlHintS: attsRule.SqlHintS, - ChunkID: uuid.New().String(), - ChunkDetailS: encryptChunkS, - ChunkDetailArgS: "", - ConsistentReadS: strconv.FormatBool(cmt.TaskParams.EnableConsistentRead), - TaskStatus: constant.TaskDatabaseStatusWaiting, - CsvFile: csvFile, - }) - } - - err = model.GetIDataMigrateTaskRW().CreateInBatchDataMigrateTask(gCtx, metas, int(cmt.TaskParams.WriteThread), int(cmt.TaskParams.BatchSize)) - if err != nil { - return err - } - _, err = model.GetIDataMigrateSummaryRW().CreateDataMigrateSummary(gCtx, &task.DataMigrateSummary{ - TaskName: cmt.Task.TaskName, - SchemaNameS: attsRule.SchemaNameS, - TableNameS: attsRule.TableNameS, - SchemaNameT: attsRule.SchemaNameT, - TableNameT: attsRule.TableNameT, - SnapshotPointS: globalScn, - TableRowsS: tableRows, - TableSizeS: tableSize, - ChunkTotals: uint64(len(chunks)), - }) - if err != nil { - return err - } - - logger.Info("csv migrate task init", - zap.String("task_name", cmt.Task.TaskName), - zap.String("task_mode", cmt.Task.TaskMode), - zap.String("task_flow", cmt.Task.TaskFlow), - zap.String("schema_name_s", attsRule.SchemaNameS), - zap.String("table_name_s", attsRule.TableNameS), - zap.String("cost", time.Now().Sub(startTime).String())) - return nil - } - }) - } - - if err = g.Wait(); err != nil { - logger.Error("csv migrate task init", - zap.String("task_name", cmt.Task.TaskName), zap.String("task_mode", cmt.Task.TaskMode), zap.String("task_flow", cmt.Task.TaskFlow), - zap.String("schema_name_s", schemaRoute.SchemaNameS), - zap.Error(err)) - return err - } - - _, err = model.GetITaskRW().UpdateTask(cmt.Ctx, &task.Task{TaskName: cmt.Task.TaskName}, map[string]interface{}{"TaskInit": constant.TaskInitStatusFinished}) - if err != nil { - return err - } - return nil -} diff --git a/database/oracle/taskflow/data_scan.go b/database/oracle/taskflow/data_scan.go deleted file mode 100644 index 94bfb1d..0000000 --- a/database/oracle/taskflow/data_scan.go +++ /dev/null @@ -1,939 +0,0 @@ -/* -Copyright © 2020 Marvin - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ -package taskflow - -import ( - "context" - "fmt" - "github.com/golang/snappy" - "github.com/google/uuid" - "github.com/wentaojin/dbms/database" - "github.com/wentaojin/dbms/database/processor" - "github.com/wentaojin/dbms/errconcurrent" - "github.com/wentaojin/dbms/logger" - "github.com/wentaojin/dbms/model" - "github.com/wentaojin/dbms/model/datasource" - "github.com/wentaojin/dbms/model/rule" - "github.com/wentaojin/dbms/model/task" - "github.com/wentaojin/dbms/proto/pb" - "github.com/wentaojin/dbms/utils/constant" - "github.com/wentaojin/dbms/utils/stringutil" - "go.uber.org/zap" - "golang.org/x/sync/errgroup" - "strconv" - "strings" - "time" -) - -type DataScanTask struct { - Ctx context.Context - Task *task.Task - DatasourceS *datasource.Datasource - DatasourceT *datasource.Datasource - TaskParams *pb.DataScanParam -} - -func (dst *DataScanTask) Start() error { - schemaTaskTime := time.Now() - logger.Info("data scan task get schema route", - zap.String("task_name", dst.Task.TaskName), zap.String("task_mode", dst.Task.TaskMode), zap.String("task_flow", dst.Task.TaskFlow)) - schemaNameRoute, err := model.GetIMigrateSchemaRouteRW().GetSchemaRouteRule(dst.Ctx, &rule.SchemaRouteRule{TaskName: dst.Task.TaskName}) - if err != nil { - return err - } - schemaNameS := schemaNameRoute.SchemaNameS - - logger.Info("data scan task init database connection", - zap.String("task_name", dst.Task.TaskName), zap.String("task_mode", dst.Task.TaskMode), zap.String("task_flow", dst.Task.TaskFlow)) - - datasourceS, err := model.GetIDatasourceRW().GetDatasource(dst.Ctx, dst.Task.DatasourceNameS) - if err != nil { - return err - } - databaseS, err := database.NewDatabase(dst.Ctx, datasourceS, schemaNameS, int64(dst.TaskParams.CallTimeout)) - if err != nil { - return err - } - defer databaseS.Close() - - logger.Info("data scan task inspect migrate task", - zap.String("task_name", dst.Task.TaskName), zap.String("task_mode", dst.Task.TaskMode), zap.String("task_flow", dst.Task.TaskFlow)) - - _, err = processor.InspectOracleMigrateTask(dst.Task.TaskName, dst.Task.TaskFlow, dst.Task.TaskMode, databaseS, stringutil.StringUpper(dst.DatasourceS.ConnectCharset), stringutil.StringUpper(dst.DatasourceT.ConnectCharset)) - if err != nil { - return err - } - - logger.Info("data scan task init task", - zap.String("task_name", dst.Task.TaskName), zap.String("task_mode", dst.Task.TaskMode), zap.String("task_flow", dst.Task.TaskFlow)) - dbVersionS, err := databaseS.GetDatabaseVersion() - if err != nil { - return err - } - err = dst.initDataScanTask(databaseS, dbVersionS, schemaNameS) - if err != nil { - return err - } - - logger.Info("data scan task get tables", - zap.String("task_name", dst.Task.TaskName), zap.String("task_mode", dst.Task.TaskMode), zap.String("task_flow", dst.Task.TaskFlow)) - - summaries, err := model.GetIDataScanSummaryRW().FindDataScanSummary(dst.Ctx, &task.DataScanSummary{ - TaskName: dst.Task.TaskName, - SchemaNameS: schemaNameS, - }) - if err != nil { - return err - } - - for _, s := range summaries { - startTableTime := time.Now() - logger.Info("data scan task process table", - zap.String("task_name", dst.Task.TaskName), - zap.String("task_mode", dst.Task.TaskMode), - zap.String("task_flow", dst.Task.TaskFlow), - zap.String("schema_name_s", s.SchemaNameS), - zap.String("table_name_s", s.TableNameS)) - - var migrateTasks []*task.DataScanTask - err = model.Transaction(dst.Ctx, func(txnCtx context.Context) error { - // get migrate task tables - migrateTasks, err = 
model.GetIDataScanTaskRW().FindDataScanTask(txnCtx, - &task.DataScanTask{ - TaskName: s.TaskName, - SchemaNameS: s.SchemaNameS, - TableNameS: s.TableNameS, - TaskStatus: constant.TaskDatabaseStatusWaiting, - }) - if err != nil { - return err - } - migrateFailedTasks, err := model.GetIDataScanTaskRW().FindDataScanTask(txnCtx, - &task.DataScanTask{ - TaskName: s.TaskName, - SchemaNameS: s.SchemaNameS, - TableNameS: s.TableNameS, - TaskStatus: constant.TaskDatabaseStatusFailed}) - if err != nil { - return err - } - migrateRunningTasks, err := model.GetIDataScanTaskRW().FindDataScanTask(txnCtx, - &task.DataScanTask{ - TaskName: s.TaskName, - SchemaNameS: s.SchemaNameS, - TableNameS: s.TableNameS, - TaskStatus: constant.TaskDatabaseStatusRunning}) - if err != nil { - return err - } - migrateStopTasks, err := model.GetIDataScanTaskRW().FindDataScanTask(txnCtx, - &task.DataScanTask{ - TaskName: s.TaskName, - SchemaNameS: s.SchemaNameS, - TableNameS: s.TableNameS, - TaskStatus: constant.TaskDatabaseStatusStopped}) - if err != nil { - return err - } - migrateTasks = append(migrateTasks, migrateFailedTasks...) - migrateTasks = append(migrateTasks, migrateRunningTasks...) - migrateTasks = append(migrateTasks, migrateStopTasks...) - return nil - }) - if err != nil { - return err - } - - logger.Info("data scan task process chunks", - zap.String("task_name", dst.Task.TaskName), - zap.String("task_mode", dst.Task.TaskMode), - zap.String("task_flow", dst.Task.TaskFlow), - zap.String("schema_name_s", s.SchemaNameS), - zap.String("table_name_s", s.TableNameS)) - - g := errconcurrent.NewGroup() - g.SetLimit(int(dst.TaskParams.SqlThreadS)) - for _, j := range migrateTasks { - gTime := time.Now() - g.Go(j, gTime, func(j interface{}) error { - dt := j.(*task.DataScanTask) - errW := model.Transaction(dst.Ctx, func(txnCtx context.Context) error { - _, err = model.GetIDataScanTaskRW().UpdateDataScanTask(txnCtx, - &task.DataScanTask{TaskName: dt.TaskName, SchemaNameS: dt.SchemaNameS, TableNameS: dt.TableNameS, ChunkID: dt.ChunkID}, - map[string]interface{}{ - "TaskStatus": constant.TaskDatabaseStatusRunning, - }) - if err != nil { - return err - } - _, err = model.GetITaskLogRW().CreateLog(txnCtx, &task.Log{ - TaskName: dt.TaskName, - SchemaNameS: dt.SchemaNameS, - TableNameS: dt.TableNameS, - LogDetail: fmt.Sprintf("%v [%v] data scan task [%v] taskflow [%v] source table [%v.%v] chunk [%s] start", - stringutil.CurrentTimeFormatString(), - stringutil.StringLower(constant.TaskModeDataScan), - dt.TaskName, - dst.Task.TaskMode, - dt.SchemaNameS, - dt.TableNameS, - dt.ChunkDetailS), - }) - if err != nil { - return err - } - return nil - }) - if errW != nil { - return errW - } - - err = database.IDataScanProcess(&processor.DataScanRow{ - Ctx: dst.Ctx, - StartTime: gTime, - TaskName: dt.TaskName, - TaskMode: dst.Task.TaskMode, - TaskFlow: dst.Task.TaskFlow, - Dst: dt, - DatabaseS: databaseS, - DBCharsetS: stringutil.StringUpper(dst.DatasourceS.ConnectCharset), - }) - if err != nil { - return err - } - return nil - }) - } - - for _, r := range g.Wait() { - if r.Err != nil { - mt := r.Task.(*task.DataScanTask) - logger.Warn("data scan task process tables", - zap.String("task_name", mt.TaskName), zap.String("task_mode", dst.Task.TaskMode), zap.String("task_flow", dst.Task.TaskFlow), - zap.String("schema_name_s", mt.SchemaNameS), - zap.String("table_name_s", mt.TableNameS), - zap.Error(r.Err)) - - errW := model.Transaction(dst.Ctx, func(txnCtx context.Context) error { - _, err = 
model.GetIDataScanTaskRW().UpdateDataScanTask(txnCtx, - &task.DataScanTask{TaskName: mt.TaskName, SchemaNameS: mt.SchemaNameS, TableNameS: mt.TableNameS, ChunkID: mt.ChunkID}, - map[string]interface{}{ - "TaskStatus": constant.TaskDatabaseStatusFailed, - "Duration": fmt.Sprintf("%f", time.Now().Sub(r.Time).Seconds()), - "ErrorDetail": r.Err.Error(), - }) - if err != nil { - return err - } - _, err = model.GetITaskLogRW().CreateLog(txnCtx, &task.Log{ - TaskName: mt.TaskName, - SchemaNameS: mt.SchemaNameS, - TableNameS: mt.TableNameS, - LogDetail: fmt.Sprintf("%v [%v] data scan task [%v] taskflow [%v] source table [%v.%v] failed, please see [data_migrate_task] detail", - stringutil.CurrentTimeFormatString(), - stringutil.StringLower(constant.TaskModeDataScan), - mt.TaskName, - dst.Task.TaskMode, - mt.SchemaNameS, - mt.TableNameS), - }) - if err != nil { - return err - } - return nil - }) - if errW != nil { - return errW - } - } - } - - endTableTime := time.Now() - err = model.Transaction(dst.Ctx, func(txnCtx context.Context) error { - tableStatusRecs, err := model.GetIDataScanTaskRW().FindDataScanTaskBySchemaTableChunkStatus(txnCtx, &task.DataScanTask{ - TaskName: s.TaskName, - SchemaNameS: s.SchemaNameS, - TableNameS: s.TableNameS, - }) - if err != nil { - return err - } - for _, rec := range tableStatusRecs { - switch rec.TaskStatus { - case constant.TaskDatabaseStatusSuccess: - _, err = model.GetIDataScanSummaryRW().UpdateDataScanSummary(txnCtx, &task.DataScanSummary{ - TaskName: rec.TaskName, - SchemaNameS: rec.SchemaNameS, - TableNameS: rec.TableNameS, - }, map[string]interface{}{ - "ChunkSuccess": rec.StatusTotals, - }) - if err != nil { - return err - } - case constant.TaskDatabaseStatusFailed: - _, err = model.GetIDataScanSummaryRW().UpdateDataScanSummary(txnCtx, &task.DataScanSummary{ - TaskName: rec.TaskName, - SchemaNameS: rec.SchemaNameS, - TableNameS: rec.TableNameS, - }, map[string]interface{}{ - "ChunkFails": rec.StatusTotals, - }) - if err != nil { - return err - } - case constant.TaskDatabaseStatusWaiting: - _, err = model.GetIDataScanSummaryRW().UpdateDataScanSummary(txnCtx, &task.DataScanSummary{ - TaskName: rec.TaskName, - SchemaNameS: rec.SchemaNameS, - TableNameS: rec.TableNameS, - }, map[string]interface{}{ - "ChunkWaits": rec.StatusTotals, - }) - if err != nil { - return err - } - case constant.TaskDatabaseStatusRunning: - _, err = model.GetIDataScanSummaryRW().UpdateDataScanSummary(txnCtx, &task.DataScanSummary{ - TaskName: rec.TaskName, - SchemaNameS: rec.SchemaNameS, - TableNameS: rec.TableNameS, - }, map[string]interface{}{ - "ChunkRuns": rec.StatusTotals, - }) - if err != nil { - return err - } - case constant.TaskDatabaseStatusStopped: - _, err = model.GetIDataScanSummaryRW().UpdateDataScanSummary(txnCtx, &task.DataScanSummary{ - TaskName: rec.TaskName, - SchemaNameS: rec.SchemaNameS, - TableNameS: rec.TableNameS, - }, map[string]interface{}{ - "ChunkStops": rec.StatusTotals, - }) - if err != nil { - return err - } - default: - return fmt.Errorf("the task [%v] task_mode [%s] task_flow [%v] schema_name_s [%v] table_name_s [%v] task_status [%v] panic, please contact author or reselect", s.TaskName, dst.Task.TaskMode, dst.Task.TaskFlow, rec.SchemaNameS, rec.TableNameS, rec.TaskStatus) - } - } - - _, err = model.GetIDataScanSummaryRW().UpdateDataScanSummary(txnCtx, &task.DataScanSummary{ - TaskName: s.TaskName, - SchemaNameS: s.SchemaNameS, - TableNameS: s.TableNameS, - }, map[string]interface{}{ - "Duration": fmt.Sprintf("%f", 
time.Now().Sub(startTableTime).Seconds()), - }) - if err != nil { - return err - } - return nil - }) - if err != nil { - return err - } - - logger.Info("data scan task process table", - zap.String("task_name", dst.Task.TaskName), - zap.String("task_mode", dst.Task.TaskMode), - zap.String("task_flow", dst.Task.TaskFlow), - zap.String("schema_name_s", s.SchemaNameS), - zap.String("table_name_s", s.TableNameS), - zap.String("cost", endTableTime.Sub(startTableTime).String())) - } - logger.Info("data scan task", - zap.String("task_name", dst.Task.TaskName), - zap.String("task_mode", dst.Task.TaskMode), - zap.String("task_flow", dst.Task.TaskFlow), - zap.String("cost", time.Now().Sub(schemaTaskTime).String())) - return nil -} - -func (dst *DataScanTask) initDataScanTask(databaseS database.IDatabase, dbVersion string, schemaNameS string) error { - // delete checkpoint - initFlags, err := model.GetITaskRW().GetTask(dst.Ctx, &task.Task{TaskName: dst.Task.TaskName}) - if err != nil { - return err - } - if !dst.TaskParams.EnableCheckpoint || strings.EqualFold(initFlags.TaskInit, constant.TaskInitStatusNotFinished) { - err := model.GetIDataScanTaskRW().DeleteDataScanTaskName(dst.Ctx, []string{dst.Task.TaskName}) - if err != nil { - return err - } - err = model.GetIDataScanSummaryRW().DeleteDataScanSummaryName(dst.Ctx, []string{dst.Task.TaskName}) - if err != nil { - return err - } - } else if dst.TaskParams.EnableCheckpoint && strings.EqualFold(initFlags.TaskInit, constant.TaskInitStatusFinished) { - logger.Warn("data scan task init skip", - zap.String("task_name", dst.Task.TaskName), - zap.String("task_mode", dst.Task.TaskMode), - zap.String("task_flow", dst.Task.TaskFlow), - zap.String("task_init", constant.TaskInitStatusFinished)) - return nil - } - - dbRole, err := databaseS.GetDatabaseRole() - if err != nil { - return err - } - // filter database table - schemaTaskTables, err := model.GetIMigrateTaskTableRW().FindMigrateTaskTable(dst.Ctx, &rule.MigrateTaskTable{ - TaskName: dst.Task.TaskName, - SchemaNameS: schemaNameS, - }) - if err != nil { - return err - } - var ( - includeTables []string - excludeTables []string - databaseTaskTables []string // task tables - globalScn string - ) - databaseTableTypeMap := make(map[string]string) - - for _, t := range schemaTaskTables { - if strings.EqualFold(t.IsExclude, constant.MigrateTaskTableIsExclude) { - excludeTables = append(excludeTables, t.TableNameS) - } - if strings.EqualFold(t.IsExclude, constant.MigrateTaskTableIsNotExclude) { - includeTables = append(includeTables, t.TableNameS) - } - } - - tableObjs, err := databaseS.FilterDatabaseTable(schemaNameS, includeTables, excludeTables) - if err != nil { - return err - } - - // rule case field - for _, t := range tableObjs.TaskTables { - var tabName string - // the according target case field rule convert - if strings.EqualFold(dst.Task.CaseFieldRuleS, constant.ParamValueStructMigrateCaseFieldRuleLower) { - tabName = stringutil.StringLower(t) - } - if strings.EqualFold(dst.Task.CaseFieldRuleS, constant.ParamValueStructMigrateCaseFieldRuleUpper) { - tabName = stringutil.StringUpper(t) - } - if strings.EqualFold(dst.Task.CaseFieldRuleS, constant.ParamValueStructMigrateCaseFieldRuleOrigin) { - tabName = t - } - databaseTaskTables = append(databaseTaskTables, tabName) - } - - // clear the data scan task table - // repeatInitTableMap used for store the data_scan_task table name has be finished, avoid repeated initialization - migrateGroupTasks, err := 
model.GetIDataScanTaskRW().FindDataScanTaskGroupByTaskSchemaTable(dst.Ctx, dst.Task.TaskName) - if err != nil { - return err - } - repeatInitTableMap := make(map[string]struct{}) - - if len(migrateGroupTasks) > 0 { - taskTablesMap := make(map[string]struct{}) - for _, t := range databaseTaskTables { - taskTablesMap[t] = struct{}{} - } - for _, mt := range migrateGroupTasks { - if mt.SchemaNameS == schemaNameS { - if _, ok := taskTablesMap[mt.TableNameS]; !ok { - err = model.Transaction(dst.Ctx, func(txnCtx context.Context) error { - err = model.GetIDataScanSummaryRW().DeleteDataScanSummary(txnCtx, &task.DataScanSummary{ - TaskName: mt.TaskName, - SchemaNameS: mt.SchemaNameS, - TableNameS: mt.TableNameS, - }) - if err != nil { - return err - } - err = model.GetIDataScanTaskRW().DeleteDataScanTask(txnCtx, &task.DataScanTask{ - TaskName: mt.TaskName, - SchemaNameS: mt.SchemaNameS, - TableNameS: mt.TableNameS, - }) - if err != nil { - return err - } - return nil - }) - if err != nil { - return err - } - - continue - } - var summary *task.DataScanSummary - - summary, err = model.GetIDataScanSummaryRW().GetDataScanSummary(dst.Ctx, &task.DataScanSummary{ - TaskName: mt.TaskName, - SchemaNameS: mt.SchemaNameS, - TableNameS: mt.TableNameS, - }) - if err != nil { - return err - } - - if int64(summary.ChunkTotals) != mt.ChunkTotals { - err = model.Transaction(dst.Ctx, func(txnCtx context.Context) error { - err = model.GetIDataScanSummaryRW().DeleteDataScanSummary(txnCtx, &task.DataScanSummary{ - TaskName: mt.TaskName, - SchemaNameS: mt.SchemaNameS, - TableNameS: mt.TableNameS, - }) - if err != nil { - return err - } - err = model.GetIDataScanTaskRW().DeleteDataScanTask(txnCtx, &task.DataScanTask{ - TaskName: mt.TaskName, - SchemaNameS: mt.SchemaNameS, - TableNameS: mt.TableNameS, - }) - if err != nil { - return err - } - return nil - }) - if err != nil { - return err - } - - continue - } - - repeatInitTableMap[mt.TableNameS] = struct{}{} - } - } - } - - databaseTableTypeMap, err = databaseS.GetDatabaseTableType(schemaNameS) - if err != nil { - return err - } - - globalScnS, err := databaseS.GetDatabaseConsistentPos() - if err != nil { - return err - } - - globalScn = strconv.FormatUint(globalScnS, 10) - - // database tables - // init database table - dbTypeSli := stringutil.StringSplit(dst.Task.TaskFlow, constant.StringSeparatorAite) - dbTypeS := dbTypeSli[0] - - logger.Info("data scan task start init", - zap.String("task_name", dst.Task.TaskName), zap.String("task_mode", dst.Task.TaskMode), zap.String("task_flow", dst.Task.TaskFlow)) - - g, gCtx := errgroup.WithContext(dst.Ctx) - g.SetLimit(int(dst.TaskParams.TableThread)) - - for _, taskJob := range databaseTaskTables { - sourceTable := taskJob - g.Go(func() error { - select { - case <-gCtx.Done(): - return gCtx.Err() - default: - startTime := time.Now() - if _, ok := repeatInitTableMap[sourceTable]; ok { - // skip - return nil - } - - tableRows, err := databaseS.GetDatabaseTableRows(schemaNameS, sourceTable) - if err != nil { - return err - } - tableSize, err := databaseS.GetDatabaseTableSize(schemaNameS, sourceTable) - if err != nil { - return err - } - - dataRule := &processor.DataScanRule{ - Ctx: gCtx, - TaskName: dst.Task.TaskName, - TaskMode: dst.Task.TaskMode, - TaskFlow: dst.Task.TaskFlow, - SchemaNameS: schemaNameS, - TableNameS: sourceTable, - TableTypeS: databaseTableTypeMap, - DatabaseS: databaseS, - DBCharsetS: dst.DatasourceS.ConnectCharset, - GlobalSqlHintS: dst.TaskParams.SqlHintS, - GlobalSamplerateS: 
strconv.FormatUint(dst.TaskParams.TableSamplerateS, 10), - } - - attsRule, err := database.IDataScanAttributesRule(dataRule) - if err != nil { - return err - } - - // If the database table ColumnDetailS and GroupColumnS return "" - // it means that the database table does not have a number data type field, ignore and skip init - if strings.EqualFold(attsRule.ColumnDetailS, "") && strings.EqualFold(attsRule.GroupColumnS, "") { - return nil - } - - var whereRange string - size, err := stringutil.StrconvFloatBitSize(attsRule.TableSamplerateS, 64) - if err != nil { - return err - } - - if size > 0.000001 && size < 100 { - logger.Warn("data scan task table", - zap.String("task_name", dst.Task.TaskName), - zap.String("task_mode", dst.Task.TaskMode), - zap.String("task_flow", dst.Task.TaskFlow), - zap.String("schema_name_s", attsRule.SchemaNameS), - zap.String("table_name_s", attsRule.TableNameS), - zap.String("database_version", dbVersion), - zap.String("database_role", dbRole), - zap.String("migrate_method", "scan")) - - whereRange = `sample_scan` - encChunkS := snappy.Encode(nil, []byte(whereRange)) - encryptChunkS, err := stringutil.Encrypt(stringutil.BytesToString(encChunkS), []byte(constant.DefaultDataEncryptDecryptKey)) - if err != nil { - return err - } - - err = model.Transaction(gCtx, func(txnCtx context.Context) error { - _, err = model.GetIDataScanTaskRW().CreateDataScanTask(txnCtx, &task.DataScanTask{ - TaskName: dst.Task.TaskName, - SchemaNameS: attsRule.SchemaNameS, - TableNameS: attsRule.TableNameS, - TableTypeS: attsRule.TableTypeS, - SnapshotPointS: globalScn, - ColumnDetailS: attsRule.ColumnDetailS, - GroupColumnS: attsRule.GroupColumnS, - SqlHintS: attsRule.SqlHintS, - ChunkID: uuid.New().String(), - ChunkDetailS: encryptChunkS, - ChunkDetailArgS: "", - Samplerate: attsRule.TableSamplerateS, - ConsistentReadS: strconv.FormatBool(dst.TaskParams.EnableConsistentRead), - TaskStatus: constant.TaskDatabaseStatusWaiting, - }) - if err != nil { - return err - } - _, err = model.GetIDataScanSummaryRW().CreateDataScanSummary(txnCtx, &task.DataScanSummary{ - TaskName: dst.Task.TaskName, - SchemaNameS: attsRule.SchemaNameS, - TableNameS: attsRule.TableNameS, - SnapshotPointS: globalScn, - TableRowsS: tableRows, - TableSizeS: tableSize, - ChunkTotals: 1, - }) - if err != nil { - return err - } - return nil - }) - if err != nil { - return err - } - - return nil - } - - // statistic - if !strings.EqualFold(dbRole, constant.OracleDatabasePrimaryRole) || (strings.EqualFold(dbRole, constant.OracleDatabasePrimaryRole) && stringutil.VersionOrdinal(dbVersion) < stringutil.VersionOrdinal(constant.OracleDatabaseTableMigrateRowidRequireVersion)) { - upstreamConsIndexColumns, err := databaseS.GetDatabaseTableHighestSelectivityIndex(attsRule.SchemaNameS, attsRule.TableNameS, "", nil) - if err != nil { - return err - } - // upstream bucket ranges - _, upstreamBuckets, err := processor.ProcessUpstreamDatabaseTableColumnStatisticsBucket( - dbTypeS, - stringutil.StringUpper(dst.DatasourceS.ConnectCharset), - dst.Task.CaseFieldRuleS, databaseS, attsRule.SchemaNameS, - attsRule.TableNameS, - upstreamConsIndexColumns, - int64(dst.TaskParams.ChunkSize), - false) - if err != nil { - return err - } - if len(upstreamBuckets) == 0 { - logger.Warn("data scan task table", - zap.String("task_name", dst.Task.TaskName), - zap.String("task_mode", dst.Task.TaskMode), - zap.String("task_flow", dst.Task.TaskFlow), - zap.String("schema_name_s", attsRule.SchemaNameS), - zap.String("table_name_s", attsRule.TableNameS), - 
zap.String("database_version", dbVersion), - zap.String("database_role", dbRole), - zap.String("migrate_method", "scan")) - - whereRange = `1 = 1` - - encChunkS := snappy.Encode(nil, []byte(whereRange)) - - encryptChunkS, err := stringutil.Encrypt(stringutil.BytesToString(encChunkS), []byte(constant.DefaultDataEncryptDecryptKey)) - if err != nil { - return err - } - - err = model.Transaction(gCtx, func(txnCtx context.Context) error { - _, err = model.GetIDataScanTaskRW().CreateDataScanTask(txnCtx, &task.DataScanTask{ - TaskName: dst.Task.TaskName, - SchemaNameS: attsRule.SchemaNameS, - TableNameS: attsRule.TableNameS, - TableTypeS: attsRule.TableTypeS, - SnapshotPointS: globalScn, - ColumnDetailS: attsRule.ColumnDetailS, - GroupColumnS: attsRule.GroupColumnS, - SqlHintS: attsRule.SqlHintS, - ChunkID: uuid.New().String(), - ChunkDetailS: encryptChunkS, - ChunkDetailArgS: "", - Samplerate: attsRule.TableSamplerateS, - ConsistentReadS: strconv.FormatBool(dst.TaskParams.EnableConsistentRead), - TaskStatus: constant.TaskDatabaseStatusWaiting, - }) - if err != nil { - return err - } - _, err = model.GetIDataScanSummaryRW().CreateDataScanSummary(txnCtx, &task.DataScanSummary{ - TaskName: dst.Task.TaskName, - SchemaNameS: attsRule.SchemaNameS, - TableNameS: attsRule.TableNameS, - SnapshotPointS: globalScn, - TableRowsS: tableRows, - TableSizeS: tableSize, - ChunkTotals: 1, - }) - if err != nil { - return err - } - return nil - }) - if err != nil { - return err - } - return nil - } - - logger.Warn("data scan task table", - zap.String("task_name", dst.Task.TaskName), - zap.String("task_mode", dst.Task.TaskMode), - zap.String("task_flow", dst.Task.TaskFlow), - zap.String("schema_name_s", attsRule.SchemaNameS), - zap.String("table_name_s", attsRule.TableNameS), - zap.String("database_version", dbVersion), - zap.String("database_role", dbRole), - zap.String("migrate_method", "statistic")) - - var metas []*task.DataScanTask - for _, r := range upstreamBuckets { - toStringS, toStringSArgs := r.ToString() - var argsS string - if toStringSArgs != nil { - argsS, err = stringutil.MarshalJSON(toStringSArgs) - if err != nil { - return err - } - } - - whereRange = toStringS - encChunkS := snappy.Encode(nil, []byte(whereRange)) - - encryptChunkS, err := stringutil.Encrypt(stringutil.BytesToString(encChunkS), []byte(constant.DefaultDataEncryptDecryptKey)) - if err != nil { - return err - } - metas = append(metas, &task.DataScanTask{ - TaskName: dst.Task.TaskName, - SchemaNameS: attsRule.SchemaNameS, - TableNameS: attsRule.TableNameS, - TableTypeS: attsRule.TableTypeS, - SnapshotPointS: globalScn, - ColumnDetailS: attsRule.ColumnDetailS, - GroupColumnS: attsRule.GroupColumnS, - SqlHintS: attsRule.SqlHintS, - ChunkID: uuid.New().String(), - ChunkDetailS: encryptChunkS, - ChunkDetailArgS: argsS, - Samplerate: attsRule.TableSamplerateS, - ConsistentReadS: strconv.FormatBool(dst.TaskParams.EnableConsistentRead), - TaskStatus: constant.TaskDatabaseStatusWaiting, - }) - } - - err = model.GetIDataScanTaskRW().CreateInBatchDataScanTask(gCtx, metas, int(dst.TaskParams.WriteThread), int(dst.TaskParams.BatchSize)) - if err != nil { - return err - } - _, err = model.GetIDataScanSummaryRW().CreateDataScanSummary(gCtx, &task.DataScanSummary{ - TaskName: dst.Task.TaskName, - SchemaNameS: attsRule.SchemaNameS, - TableNameS: attsRule.TableNameS, - SnapshotPointS: globalScn, - TableRowsS: tableRows, - TableSizeS: tableSize, - ChunkTotals: uint64(len(upstreamBuckets)), - }) - if err != nil { - return err - } - return nil - } - - 
chunkTask := uuid.New().String() - - chunks, err := databaseS.GetDatabaseTableChunkTask(chunkTask, schemaNameS, sourceTable, dst.TaskParams.ChunkSize, dst.TaskParams.CallTimeout) - if err != nil { - return err - } - - if len(chunks) == 0 { - - whereRange = `1 = 1` - - encChunkS := snappy.Encode(nil, []byte(whereRange)) - - encryptChunkS, err := stringutil.Encrypt(stringutil.BytesToString(encChunkS), []byte(constant.DefaultDataEncryptDecryptKey)) - if err != nil { - return err - } - - err = model.Transaction(gCtx, func(txnCtx context.Context) error { - _, err = model.GetIDataScanTaskRW().CreateDataScanTask(txnCtx, &task.DataScanTask{ - TaskName: dst.Task.TaskName, - SchemaNameS: attsRule.SchemaNameS, - TableNameS: attsRule.TableNameS, - TableTypeS: attsRule.TableTypeS, - SnapshotPointS: globalScn, - ColumnDetailS: attsRule.ColumnDetailS, - GroupColumnS: attsRule.GroupColumnS, - SqlHintS: attsRule.SqlHintS, - ChunkID: uuid.New().String(), - ChunkDetailS: encryptChunkS, - ChunkDetailArgS: "", - Samplerate: attsRule.TableSamplerateS, - ConsistentReadS: strconv.FormatBool(dst.TaskParams.EnableConsistentRead), - TaskStatus: constant.TaskDatabaseStatusWaiting, - }) - if err != nil { - return err - } - _, err = model.GetIDataScanSummaryRW().CreateDataScanSummary(txnCtx, &task.DataScanSummary{ - TaskName: dst.Task.TaskName, - SchemaNameS: attsRule.SchemaNameS, - TableNameS: attsRule.TableNameS, - SnapshotPointS: globalScn, - TableRowsS: tableRows, - TableSizeS: tableSize, - ChunkTotals: 1, - }) - if err != nil { - return err - } - return nil - }) - if err != nil { - return err - } - - return nil - } - - var metas []*task.DataScanTask - for _, r := range chunks { - whereRange = r["CMD"] - - encChunkS := snappy.Encode(nil, []byte(whereRange)) - - encryptChunkS, err := stringutil.Encrypt(stringutil.BytesToString(encChunkS), []byte(constant.DefaultDataEncryptDecryptKey)) - if err != nil { - return err - } - - metas = append(metas, &task.DataScanTask{ - TaskName: dst.Task.TaskName, - SchemaNameS: attsRule.SchemaNameS, - TableNameS: attsRule.TableNameS, - TableTypeS: attsRule.TableTypeS, - SnapshotPointS: globalScn, - ColumnDetailS: attsRule.ColumnDetailS, - GroupColumnS: attsRule.GroupColumnS, - SqlHintS: attsRule.SqlHintS, - ChunkDetailS: encryptChunkS, - Samplerate: attsRule.TableSamplerateS, - ConsistentReadS: strconv.FormatBool(dst.TaskParams.EnableConsistentRead), - TaskStatus: constant.TaskDatabaseStatusWaiting, - }) - } - - err = model.Transaction(gCtx, func(txnCtx context.Context) error { - err = model.GetIDataScanTaskRW().CreateInBatchDataScanTask(txnCtx, metas, int(dst.TaskParams.WriteThread), int(dst.TaskParams.BatchSize)) - if err != nil { - return err - } - _, err = model.GetIDataScanSummaryRW().CreateDataScanSummary(txnCtx, &task.DataScanSummary{ - TaskName: dst.Task.TaskName, - SchemaNameS: attsRule.SchemaNameS, - TableNameS: attsRule.TableNameS, - SnapshotPointS: globalScn, - TableRowsS: tableRows, - TableSizeS: tableSize, - ChunkTotals: uint64(len(chunks)), - }) - if err != nil { - return err - } - return nil - }) - if err != nil { - return err - } - - logger.Info("data scan task init success", - zap.String("task_name", dst.Task.TaskName), - zap.String("task_mode", dst.Task.TaskMode), - zap.String("task_flow", dst.Task.TaskFlow), - zap.String("schema_name_s", attsRule.SchemaNameS), - zap.String("table_name_s", attsRule.TableNameS), - zap.String("cost", time.Now().Sub(startTime).String())) - return nil - } - }) - } - - if err = g.Wait(); err != nil { - logger.Error("data scan task init 
failed", - zap.String("task_name", dst.Task.TaskName), - zap.String("task_mode", dst.Task.TaskMode), - zap.String("task_flow", dst.Task.TaskFlow), - zap.String("schema_name_s", schemaNameS), - zap.Error(err)) - return err - } - _, err = model.GetITaskRW().UpdateTask(dst.Ctx, &task.Task{TaskName: dst.Task.TaskName}, map[string]interface{}{"TaskInit": constant.TaskInitStatusFinished}) - if err != nil { - return err - } - return nil -} diff --git a/database/oracle/taskflow/stmt_migrate.go b/database/oracle/taskflow/stmt_migrate.go deleted file mode 100644 index fa951de..0000000 --- a/database/oracle/taskflow/stmt_migrate.go +++ /dev/null @@ -1,1030 +0,0 @@ -/* -Copyright © 2020 Marvin - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package taskflow - -import ( - "context" - "database/sql" - "fmt" - "github.com/wentaojin/dbms/database/processor" - "strconv" - "strings" - "time" - - "github.com/golang/snappy" - - "github.com/wentaojin/dbms/errconcurrent" - "golang.org/x/sync/errgroup" - - "github.com/google/uuid" - "github.com/wentaojin/dbms/database" - "github.com/wentaojin/dbms/logger" - "github.com/wentaojin/dbms/model" - "github.com/wentaojin/dbms/model/datasource" - "github.com/wentaojin/dbms/model/rule" - "github.com/wentaojin/dbms/model/task" - "github.com/wentaojin/dbms/proto/pb" - "github.com/wentaojin/dbms/utils/constant" - "github.com/wentaojin/dbms/utils/stringutil" - "go.uber.org/zap" -) - -type StmtMigrateTask struct { - Ctx context.Context - Task *task.Task - DatasourceS *datasource.Datasource - DatasourceT *datasource.Datasource - TaskParams *pb.StatementMigrateParam -} - -func (stm *StmtMigrateTask) Start() error { - schemaTaskTime := time.Now() - logger.Info("stmt migrate task get schema route", - zap.String("task_name", stm.Task.TaskName), zap.String("task_mode", stm.Task.TaskMode), zap.String("task_flow", stm.Task.TaskFlow)) - schemaRoute, err := model.GetIMigrateSchemaRouteRW().GetSchemaRouteRule(stm.Ctx, &rule.SchemaRouteRule{TaskName: stm.Task.TaskName}) - if err != nil { - return err - } - - logger.Info("stmt migrate task init database connection", - zap.String("task_name", stm.Task.TaskName), zap.String("task_mode", stm.Task.TaskMode), zap.String("task_flow", stm.Task.TaskFlow)) - - sourceDatasource, err := model.GetIDatasourceRW().GetDatasource(stm.Ctx, stm.Task.DatasourceNameS) - if err != nil { - return err - } - databaseS, err := database.NewDatabase(stm.Ctx, sourceDatasource, schemaRoute.SchemaNameS, int64(stm.TaskParams.CallTimeout)) - if err != nil { - return err - } - defer databaseS.Close() - databaseT, err := database.NewDatabase(stm.Ctx, stm.DatasourceT, "", int64(stm.TaskParams.CallTimeout)) - if err != nil { - return err - } - defer databaseT.Close() - - logger.Info("stmt migrate task inspect migrate task", - zap.String("task_name", stm.Task.TaskName), zap.String("task_mode", stm.Task.TaskMode), zap.String("task_flow", stm.Task.TaskFlow)) - - _, err = processor.InspectOracleMigrateTask(stm.Task.TaskName, stm.Task.TaskFlow, stm.Task.TaskMode, databaseS, 
stringutil.StringUpper(stm.DatasourceS.ConnectCharset), stringutil.StringUpper(stm.DatasourceT.ConnectCharset)) - if err != nil { - return err - } - - logger.Info("stmt migrate task init task", - zap.String("task_name", stm.Task.TaskName), zap.String("task_mode", stm.Task.TaskMode), zap.String("task_flow", stm.Task.TaskFlow)) - dbVersionS, err := databaseS.GetDatabaseVersion() - if err != nil { - return err - } - err = stm.initStmtMigrateTask(databaseS, dbVersionS, schemaRoute) - if err != nil { - return err - } - - logger.Info("stmt migrate task get tables", - zap.String("task_name", stm.Task.TaskName), zap.String("task_mode", stm.Task.TaskMode), zap.String("task_flow", stm.Task.TaskFlow)) - - summaries, err := model.GetIDataMigrateSummaryRW().FindDataMigrateSummary(stm.Ctx, &task.DataMigrateSummary{ - TaskName: stm.Task.TaskName, - SchemaNameS: schemaRoute.SchemaNameS, - }) - if err != nil { - return err - } - - for _, s := range summaries { - startTableTime := time.Now() - logger.Info("stmt migrate task process table", - zap.String("task_name", stm.Task.TaskName), - zap.String("task_mode", stm.Task.TaskMode), - zap.String("task_flow", stm.Task.TaskFlow), - zap.String("schema_name_s", s.SchemaNameS), - zap.String("table_name_s", s.TableNameS)) - - var migrateTasks []*task.DataMigrateTask - err = model.Transaction(stm.Ctx, func(txnCtx context.Context) error { - // get migrate task tables - migrateTasks, err = model.GetIDataMigrateTaskRW().FindDataMigrateTask(txnCtx, - &task.DataMigrateTask{ - TaskName: s.TaskName, - SchemaNameS: s.SchemaNameS, - TableNameS: s.TableNameS, - TaskStatus: constant.TaskDatabaseStatusWaiting, - }) - if err != nil { - return err - } - migrateFailedTasks, err := model.GetIDataMigrateTaskRW().FindDataMigrateTask(txnCtx, - &task.DataMigrateTask{ - TaskName: s.TaskName, - SchemaNameS: s.SchemaNameS, - TableNameS: s.TableNameS, - TaskStatus: constant.TaskDatabaseStatusFailed}) - if err != nil { - return err - } - migrateRunningTasks, err := model.GetIDataMigrateTaskRW().FindDataMigrateTask(txnCtx, - &task.DataMigrateTask{ - TaskName: s.TaskName, - SchemaNameS: s.SchemaNameS, - TableNameS: s.TableNameS, - TaskStatus: constant.TaskDatabaseStatusRunning}) - if err != nil { - return err - } - migrateStopTasks, err := model.GetIDataMigrateTaskRW().FindDataMigrateTask(txnCtx, - &task.DataMigrateTask{ - TaskName: s.TaskName, - SchemaNameS: s.SchemaNameS, - TableNameS: s.TableNameS, - TaskStatus: constant.TaskDatabaseStatusStopped}) - if err != nil { - return err - } - migrateTasks = append(migrateTasks, migrateFailedTasks...) - migrateTasks = append(migrateTasks, migrateRunningTasks...) - migrateTasks = append(migrateTasks, migrateStopTasks...) 
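The four queries above implement chunk-level resume: on restart, every chunk still in the waiting, failed, running, or stopped state is re-collected and re-executed, while chunks already marked success are skipped. A hedged, self-contained sketch of that status-union pattern, with a hypothetical finder callback standing in for the model layer and plain strings standing in for the constant.TaskDatabaseStatus* values:

package main

import "fmt"

// collectResumable re-queues every chunk whose status is not "success",
// which is what makes a restarted task resume at chunk granularity.
func collectResumable(find func(status string) []string) []string {
	var chunks []string
	for _, status := range []string{"WAITING", "FAILED", "RUNNING", "STOPPED"} {
		chunks = append(chunks, find(status)...)
	}
	return chunks
}

func main() {
	byStatus := map[string][]string{
		"WAITING": {"chunk-1", "chunk-2"},
		"FAILED":  {"chunk-3"},
	}
	chunks := collectResumable(func(status string) []string { return byStatus[status] })
	fmt.Println(chunks) // [chunk-1 chunk-2 chunk-3]
}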
- return nil - }) - if err != nil { - return err - } - - logger.Info("stmt migrate task process chunks", - zap.String("task_name", stm.Task.TaskName), - zap.String("task_mode", stm.Task.TaskMode), - zap.String("task_flow", stm.Task.TaskFlow), - zap.String("schema_name_s", s.SchemaNameS), - zap.String("table_name_s", s.TableNameS)) - - var ( - sqlTSmt *sql.Stmt - ) - switch { - case strings.EqualFold(stm.Task.TaskFlow, constant.TaskFlowOracleToTiDB) || strings.EqualFold(stm.Task.TaskFlow, constant.TaskFlowOracleToMySQL): - limitOne, err := model.GetIDataMigrateTaskRW().GetDataMigrateTask(stm.Ctx, &task.DataMigrateTask{ - TaskName: s.TaskName, - SchemaNameS: s.SchemaNameS, - TableNameS: s.TableNameS}) - if err != nil { - return err - } - sqlStr := processor.GenMYSQLCompatibleDatabasePrepareStmt(s.SchemaNameT, s.TableNameT, stm.TaskParams.SqlHintT, limitOne.ColumnDetailT, int(stm.TaskParams.BatchSize), true) - sqlTSmt, err = databaseT.PrepareContext(stm.Ctx, sqlStr) - if err != nil { - return err - } - default: - return fmt.Errorf("oracle current task [%s] schema [%s] task_mode [%s] task_flow [%s] prepare isn't support, please contact author", stm.Task.TaskName, s.SchemaNameS, stm.Task.TaskMode, stm.Task.TaskFlow) - } - - g := errconcurrent.NewGroup() - g.SetLimit(int(stm.TaskParams.SqlThreadS)) - for _, j := range migrateTasks { - gTime := time.Now() - g.Go(j, gTime, func(j interface{}) error { - dt := j.(*task.DataMigrateTask) - errW := model.Transaction(stm.Ctx, func(txnCtx context.Context) error { - _, err = model.GetIDataMigrateTaskRW().UpdateDataMigrateTask(txnCtx, - &task.DataMigrateTask{TaskName: dt.TaskName, SchemaNameS: dt.SchemaNameS, TableNameS: dt.TableNameS, ChunkID: dt.ChunkID}, - map[string]interface{}{ - "TaskStatus": constant.TaskDatabaseStatusRunning, - }) - if err != nil { - return err - } - _, err = model.GetITaskLogRW().CreateLog(txnCtx, &task.Log{ - TaskName: dt.TaskName, - SchemaNameS: dt.SchemaNameS, - TableNameS: dt.TableNameS, - LogDetail: fmt.Sprintf("%v [%v] stmt migrate task [%v] taskflow [%v] source table [%v.%v] chunk [%s] start", - stringutil.CurrentTimeFormatString(), - stringutil.StringLower(constant.TaskModeStmtMigrate), - dt.TaskName, - stm.Task.TaskMode, - dt.SchemaNameS, - dt.TableNameS, - dt.ChunkDetailS), - }) - if err != nil { - return err - } - return nil - }) - if errW != nil { - return errW - } - - err = database.IDataMigrateProcess(&processor.StmtMigrateRow{ - Ctx: stm.Ctx, - TaskMode: stm.Task.TaskMode, - TaskFlow: stm.Task.TaskFlow, - Dmt: dt, - DatabaseS: databaseS, - DatabaseT: databaseT, - DatabaseTStmt: sqlTSmt, - DBCharsetS: constant.MigrateOracleCharsetStringConvertMapping[stringutil.StringUpper(stm.DatasourceS.ConnectCharset)], - DBCharsetT: stringutil.StringUpper(stm.DatasourceT.ConnectCharset), - SqlThreadT: int(stm.TaskParams.SqlThreadT), - BatchSize: int(stm.TaskParams.BatchSize), - CallTimeout: int(stm.TaskParams.CallTimeout), - SafeMode: stm.TaskParams.EnableSafeMode, - ReadChan: make(chan []interface{}, constant.DefaultMigrateTaskQueueSize), - WriteChan: make(chan []interface{}, constant.DefaultMigrateTaskQueueSize), - }) - if err != nil { - return err - } - - errW = model.Transaction(stm.Ctx, func(txnCtx context.Context) error { - _, err = model.GetIDataMigrateTaskRW().UpdateDataMigrateTask(txnCtx, - &task.DataMigrateTask{TaskName: dt.TaskName, SchemaNameS: dt.SchemaNameS, TableNameS: dt.TableNameS, ChunkID: dt.ChunkID}, - map[string]interface{}{ - "TaskStatus": constant.TaskDatabaseStatusSuccess, - "Duration": fmt.Sprintf("%f", 
time.Now().Sub(gTime).Seconds()), - }) - if err != nil { - return err - } - _, err = model.GetITaskLogRW().CreateLog(txnCtx, &task.Log{ - TaskName: dt.TaskName, - SchemaNameS: dt.SchemaNameS, - TableNameS: dt.TableNameS, - LogDetail: fmt.Sprintf("%v [%v] stmt migrate task [%v] taskflow [%v] source table [%v.%v] chunk [%s] success", - stringutil.CurrentTimeFormatString(), - stringutil.StringLower(constant.TaskModeStmtMigrate), - dt.TaskName, - stm.Task.TaskMode, - dt.SchemaNameS, - dt.TableNameS, - dt.ChunkDetailS), - }) - if err != nil { - return err - } - return nil - }) - if errW != nil { - return errW - } - return nil - }) - } - - for _, r := range g.Wait() { - if r.Err != nil { - mt := r.Task.(*task.DataMigrateTask) - logger.Warn("stmt migrate task process tables", - zap.String("task_name", mt.TaskName), zap.String("task_mode", stm.Task.TaskMode), zap.String("task_flow", stm.Task.TaskFlow), - zap.String("schema_name_s", mt.SchemaNameS), - zap.String("table_name_s", mt.TableNameS), - zap.Error(r.Err)) - - errW := model.Transaction(stm.Ctx, func(txnCtx context.Context) error { - _, err = model.GetIDataMigrateTaskRW().UpdateDataMigrateTask(txnCtx, - &task.DataMigrateTask{TaskName: mt.TaskName, SchemaNameS: mt.SchemaNameS, TableNameS: mt.TableNameS, ChunkID: mt.ChunkID}, - map[string]interface{}{ - "TaskStatus": constant.TaskDatabaseStatusFailed, - "Duration": fmt.Sprintf("%f", time.Now().Sub(r.Time).Seconds()), - "ErrorDetail": r.Err.Error(), - }) - if err != nil { - return err - } - _, err = model.GetITaskLogRW().CreateLog(txnCtx, &task.Log{ - TaskName: mt.TaskName, - SchemaNameS: mt.SchemaNameS, - TableNameS: mt.TableNameS, - LogDetail: fmt.Sprintf("%v [%v] stmt migrate task [%v] taskflow [%v] source table [%v.%v] failed, please see [data_migrate_task] detail", - stringutil.CurrentTimeFormatString(), - stringutil.StringLower(constant.TaskModeStmtMigrate), - mt.TaskName, - stm.Task.TaskMode, - mt.SchemaNameS, - mt.TableNameS), - }) - if err != nil { - return err - } - return nil - }) - if errW != nil { - return errW - } - } - } - - err = sqlTSmt.Close() - if err != nil { - return err - } - - endTableTime := time.Now() - err = model.Transaction(stm.Ctx, func(txnCtx context.Context) error { - tableStatusRecs, err := model.GetIDataMigrateTaskRW().FindDataMigrateTaskBySchemaTableChunkStatus(txnCtx, &task.DataMigrateTask{ - TaskName: s.TaskName, - SchemaNameS: s.SchemaNameS, - TableNameS: s.TableNameS, - }) - if err != nil { - return err - } - for _, rec := range tableStatusRecs { - switch rec.TaskStatus { - case constant.TaskDatabaseStatusSuccess: - _, err = model.GetIDataMigrateSummaryRW().UpdateDataMigrateSummary(txnCtx, &task.DataMigrateSummary{ - TaskName: rec.TaskName, - SchemaNameS: rec.SchemaNameS, - TableNameS: rec.TableNameS, - }, map[string]interface{}{ - "ChunkSuccess": rec.StatusTotals, - }) - if err != nil { - return err - } - case constant.TaskDatabaseStatusFailed: - _, err = model.GetIDataMigrateSummaryRW().UpdateDataMigrateSummary(txnCtx, &task.DataMigrateSummary{ - TaskName: rec.TaskName, - SchemaNameS: rec.SchemaNameS, - TableNameS: rec.TableNameS, - }, map[string]interface{}{ - "ChunkFails": rec.StatusTotals, - }) - if err != nil { - return err - } - case constant.TaskDatabaseStatusWaiting: - _, err = model.GetIDataMigrateSummaryRW().UpdateDataMigrateSummary(txnCtx, &task.DataMigrateSummary{ - TaskName: rec.TaskName, - SchemaNameS: rec.SchemaNameS, - TableNameS: rec.TableNameS, - }, map[string]interface{}{ - "ChunkWaits": rec.StatusTotals, - }) - if err != nil { - return 
err - } - case constant.TaskDatabaseStatusRunning: - _, err = model.GetIDataMigrateSummaryRW().UpdateDataMigrateSummary(txnCtx, &task.DataMigrateSummary{ - TaskName: rec.TaskName, - SchemaNameS: rec.SchemaNameS, - TableNameS: rec.TableNameS, - }, map[string]interface{}{ - "ChunkRuns": rec.StatusTotals, - }) - if err != nil { - return err - } - case constant.TaskDatabaseStatusStopped: - _, err = model.GetIDataMigrateSummaryRW().UpdateDataMigrateSummary(txnCtx, &task.DataMigrateSummary{ - TaskName: rec.TaskName, - SchemaNameS: rec.SchemaNameS, - TableNameS: rec.TableNameS, - }, map[string]interface{}{ - "ChunkStops": rec.StatusTotals, - }) - if err != nil { - return err - } - default: - return fmt.Errorf("the task [%v] task_mode [%s] task_flow [%v] schema_name_s [%v] table_name_s [%v] task_status [%v] panic, please contact author or reselect", s.TaskName, stm.Task.TaskMode, stm.Task.TaskFlow, rec.SchemaNameS, rec.TableNameS, rec.TaskStatus) - } - } - - _, err = model.GetIDataMigrateSummaryRW().UpdateDataMigrateSummary(txnCtx, &task.DataMigrateSummary{ - TaskName: s.TaskName, - SchemaNameS: s.SchemaNameS, - TableNameS: s.TableNameS, - }, map[string]interface{}{ - "Duration": fmt.Sprintf("%f", time.Now().Sub(startTableTime).Seconds()), - }) - if err != nil { - return err - } - return nil - }) - if err != nil { - return err - } - - logger.Info("stmt migrate task process table", - zap.String("task_name", stm.Task.TaskName), - zap.String("task_mode", stm.Task.TaskMode), - zap.String("task_flow", stm.Task.TaskFlow), - zap.String("schema_name_s", s.SchemaNameS), - zap.String("table_name_s", s.TableNameS), - zap.String("cost", endTableTime.Sub(startTableTime).String())) - } - logger.Info("stmt migrate task", - zap.String("task_name", stm.Task.TaskName), zap.String("task_mode", stm.Task.TaskMode), zap.String("task_flow", stm.Task.TaskFlow), - zap.String("cost", time.Now().Sub(schemaTaskTime).String())) - return nil -} - -func (stm *StmtMigrateTask) initStmtMigrateTask(databaseS database.IDatabase, dbVersion string, schemaRoute *rule.SchemaRouteRule) error { - // delete checkpoint - initFlags, err := model.GetITaskRW().GetTask(stm.Ctx, &task.Task{TaskName: stm.Task.TaskName}) - if err != nil { - return err - } - if !stm.TaskParams.EnableCheckpoint || strings.EqualFold(initFlags.TaskInit, constant.TaskInitStatusNotFinished) { - err := model.GetIDataMigrateSummaryRW().DeleteDataMigrateSummaryName(stm.Ctx, []string{schemaRoute.TaskName}) - if err != nil { - return err - } - err = model.GetIDataMigrateTaskRW().DeleteDataMigrateTaskName(stm.Ctx, []string{schemaRoute.TaskName}) - if err != nil { - return err - } - } else if stm.TaskParams.EnableCheckpoint && strings.EqualFold(initFlags.TaskInit, constant.TaskInitStatusFinished) { - logger.Warn("stmt migrate task init skip", - zap.String("task_name", stm.Task.TaskName), - zap.String("task_mode", stm.Task.TaskMode), - zap.String("task_flow", stm.Task.TaskFlow), - zap.String("task_init", constant.TaskInitStatusFinished)) - return nil - } - - dbRole, err := databaseS.GetDatabaseRole() - if err != nil { - return err - } - // filter database table - schemaTaskTables, err := model.GetIMigrateTaskTableRW().FindMigrateTaskTable(stm.Ctx, &rule.MigrateTaskTable{ - TaskName: schemaRoute.TaskName, - SchemaNameS: schemaRoute.SchemaNameS, - }) - if err != nil { - return err - } - var ( - includeTables []string - excludeTables []string - databaseTaskTables []string // task tables - globalScn string - ) - databaseTableTypeMap := make(map[string]string) - - for _, t := range 
schemaTaskTables { - if strings.EqualFold(t.IsExclude, constant.MigrateTaskTableIsExclude) { - excludeTables = append(excludeTables, t.TableNameS) - } - if strings.EqualFold(t.IsExclude, constant.MigrateTaskTableIsNotExclude) { - includeTables = append(includeTables, t.TableNameS) - } - } - - tableObjs, err := databaseS.FilterDatabaseTable(schemaRoute.SchemaNameS, includeTables, excludeTables) - if err != nil { - return err - } - - // rule case field - for _, t := range tableObjs.TaskTables { - var tabName string - // the according target case field rule convert - if strings.EqualFold(stm.Task.CaseFieldRuleS, constant.ParamValueStructMigrateCaseFieldRuleLower) { - tabName = stringutil.StringLower(t) - } - if strings.EqualFold(stm.Task.CaseFieldRuleS, constant.ParamValueStructMigrateCaseFieldRuleUpper) { - tabName = stringutil.StringUpper(t) - } - if strings.EqualFold(stm.Task.CaseFieldRuleS, constant.ParamValueStructMigrateCaseFieldRuleOrigin) { - tabName = t - } - databaseTaskTables = append(databaseTaskTables, tabName) - } - - // clear the stmt migrate task table - // repeatInitTableMap used for store the struct_migrate_task table name has be finished, avoid repeated initialization - migrateGroupTasks, err := model.GetIDataMigrateTaskRW().FindDataMigrateTaskGroupByTaskSchemaTable(stm.Ctx, stm.Task.TaskName) - if err != nil { - return err - } - repeatInitTableMap := make(map[string]struct{}) - - if len(migrateGroupTasks) > 0 { - taskTablesMap := make(map[string]struct{}) - for _, t := range databaseTaskTables { - taskTablesMap[t] = struct{}{} - } - for _, mt := range migrateGroupTasks { - if mt.SchemaNameS == schemaRoute.SchemaNameS { - if _, ok := taskTablesMap[mt.TableNameS]; !ok { - err = model.Transaction(stm.Ctx, func(txnCtx context.Context) error { - err = model.GetIDataMigrateSummaryRW().DeleteDataMigrateSummary(txnCtx, &task.DataMigrateSummary{ - TaskName: mt.TaskName, - SchemaNameS: mt.SchemaNameS, - TableNameS: mt.TableNameS, - }) - if err != nil { - return err - } - err = model.GetIDataMigrateTaskRW().DeleteDataMigrateTask(txnCtx, &task.DataMigrateTask{ - TaskName: mt.TaskName, - SchemaNameS: mt.SchemaNameS, - TableNameS: mt.TableNameS, - }) - if err != nil { - return err - } - return nil - }) - if err != nil { - return err - } - - continue - } - var summary *task.DataMigrateSummary - - summary, err = model.GetIDataMigrateSummaryRW().GetDataMigrateSummary(stm.Ctx, &task.DataMigrateSummary{ - TaskName: mt.TaskName, - SchemaNameS: mt.SchemaNameS, - TableNameS: mt.TableNameS, - }) - if err != nil { - return err - } - - if int64(summary.ChunkTotals) != mt.ChunkTotals { - err = model.Transaction(stm.Ctx, func(txnCtx context.Context) error { - err = model.GetIDataMigrateSummaryRW().DeleteDataMigrateSummary(txnCtx, &task.DataMigrateSummary{ - TaskName: mt.TaskName, - SchemaNameS: mt.SchemaNameS, - TableNameS: mt.TableNameS, - }) - if err != nil { - return err - } - err = model.GetIDataMigrateTaskRW().DeleteDataMigrateTask(txnCtx, &task.DataMigrateTask{ - TaskName: mt.TaskName, - SchemaNameS: mt.SchemaNameS, - TableNameS: mt.TableNameS, - }) - if err != nil { - return err - } - return nil - }) - if err != nil { - return err - } - - continue - } - - repeatInitTableMap[mt.TableNameS] = struct{}{} - } - } - } - - databaseTableTypeMap, err = databaseS.GetDatabaseTableType(schemaRoute.SchemaNameS) - if err != nil { - return err - } - - globalScnS, err := databaseS.GetDatabaseConsistentPos() - if err != nil { - return err - } - - globalScn = strconv.FormatUint(globalScnS, 10) - - // database 
tables - // init database table - dbTypeSli := stringutil.StringSplit(stm.Task.TaskFlow, constant.StringSeparatorAite) - dbTypeS := dbTypeSli[0] - - logger.Info("stmt migrate task init", - zap.String("task_name", stm.Task.TaskName), zap.String("task_mode", stm.Task.TaskMode), zap.String("task_flow", stm.Task.TaskFlow)) - - g, gCtx := errgroup.WithContext(stm.Ctx) - g.SetLimit(int(stm.TaskParams.TableThread)) - - for _, taskJob := range databaseTaskTables { - sourceTable := taskJob - g.Go(func() error { - select { - case <-gCtx.Done(): - return gCtx.Err() - default: - startTime := time.Now() - if _, ok := repeatInitTableMap[sourceTable]; ok { - // skip - return nil - } - - tableRows, err := databaseS.GetDatabaseTableRows(schemaRoute.SchemaNameS, sourceTable) - if err != nil { - return err - } - tableSize, err := databaseS.GetDatabaseTableSize(schemaRoute.SchemaNameS, sourceTable) - if err != nil { - return err - } - - dataRule := &processor.DataMigrateRule{ - Ctx: gCtx, - TaskMode: stm.Task.TaskMode, - TaskName: stm.Task.TaskName, - TaskFlow: stm.Task.TaskFlow, - DatabaseS: databaseS, - SchemaNameS: schemaRoute.SchemaNameS, - TableNameS: sourceTable, - TableTypeS: databaseTableTypeMap, - DBCharsetS: stm.DatasourceS.ConnectCharset, - CaseFieldRuleS: stm.Task.CaseFieldRuleS, - CaseFieldRuleT: stm.Task.CaseFieldRuleT, - GlobalSqlHintS: stm.TaskParams.SqlHintS, - } - - attsRule, err := database.IDataMigrateAttributesRule(dataRule) - if err != nil { - return err - } - - // only where range - if !attsRule.EnableChunkStrategy && !strings.EqualFold(attsRule.WhereRange, "") { - encChunkS := snappy.Encode(nil, []byte(attsRule.WhereRange)) - - encryptChunkS, err := stringutil.Encrypt(stringutil.BytesToString(encChunkS), []byte(constant.DefaultDataEncryptDecryptKey)) - if err != nil { - return err - } - err = model.Transaction(gCtx, func(txnCtx context.Context) error { - _, err = model.GetIDataMigrateTaskRW().CreateDataMigrateTask(txnCtx, &task.DataMigrateTask{ - TaskName: stm.Task.TaskName, - SchemaNameS: attsRule.SchemaNameS, - TableNameS: attsRule.TableNameS, - SchemaNameT: attsRule.SchemaNameT, - TableNameT: attsRule.TableNameT, - TableTypeS: attsRule.TableTypeS, - SnapshotPointS: globalScn, - ColumnDetailO: attsRule.ColumnDetailO, - ColumnDetailS: attsRule.ColumnDetailS, - ColumnDetailT: attsRule.ColumnDetailT, - SqlHintS: attsRule.SqlHintS, - SqlHintT: stm.TaskParams.SqlHintT, - ChunkID: uuid.New().String(), - ChunkDetailS: encryptChunkS, - ChunkDetailArgS: "", - ConsistentReadS: strconv.FormatBool(stm.TaskParams.EnableConsistentRead), - TaskStatus: constant.TaskDatabaseStatusWaiting, - }) - if err != nil { - return err - } - _, err = model.GetIDataMigrateSummaryRW().CreateDataMigrateSummary(txnCtx, &task.DataMigrateSummary{ - TaskName: stm.Task.TaskName, - SchemaNameS: attsRule.SchemaNameS, - TableNameS: attsRule.TableNameS, - SchemaNameT: attsRule.SchemaNameT, - TableNameT: attsRule.TableNameT, - SnapshotPointS: globalScn, - TableRowsS: tableRows, - TableSizeS: tableSize, - ChunkTotals: 1, - }) - if err != nil { - return err - } - return nil - }) - if err != nil { - return err - } - return nil - } - - var whereRange string - - // statistic - if !strings.EqualFold(dbRole, constant.OracleDatabasePrimaryRole) || (strings.EqualFold(dbRole, constant.OracleDatabasePrimaryRole) && stringutil.VersionOrdinal(dbVersion) < stringutil.VersionOrdinal(constant.OracleDatabaseTableMigrateRowidRequireVersion)) { - upstreamConsIndexColumns, err := 
databaseS.GetDatabaseTableHighestSelectivityIndex(attsRule.SchemaNameS, attsRule.TableNameS, "", nil) - if err != nil { - return err - } - // upstream bucket ranges - _, upstreamBuckets, err := processor.ProcessUpstreamDatabaseTableColumnStatisticsBucket( - dbTypeS, - stringutil.StringUpper(stm.DatasourceS.ConnectCharset), - stm.Task.CaseFieldRuleS, databaseS, attsRule.SchemaNameS, - attsRule.TableNameS, - upstreamConsIndexColumns, - int64(stm.TaskParams.ChunkSize), - false) - if err != nil { - return err - } - if len(upstreamBuckets) == 0 { - logger.Warn("stmt migrate task table", - zap.String("task_name", stm.Task.TaskName), - zap.String("task_mode", stm.Task.TaskMode), - zap.String("task_flow", stm.Task.TaskFlow), - zap.String("schema_name_s", attsRule.SchemaNameS), - zap.String("table_name_s", attsRule.TableNameS), - zap.String("database_version", dbVersion), - zap.String("database_role", dbRole), - zap.String("migrate_method", "scan")) - switch { - case attsRule.EnableChunkStrategy && !strings.EqualFold(attsRule.WhereRange, ""): - whereRange = stringutil.StringBuilder(`1 = 1 AND `, attsRule.WhereRange) - default: - whereRange = `1 = 1` - } - - encChunkS := snappy.Encode(nil, []byte(whereRange)) - - encryptChunkS, err := stringutil.Encrypt(stringutil.BytesToString(encChunkS), []byte(constant.DefaultDataEncryptDecryptKey)) - if err != nil { - return err - } - - err = model.Transaction(gCtx, func(txnCtx context.Context) error { - _, err = model.GetIDataMigrateTaskRW().CreateDataMigrateTask(txnCtx, &task.DataMigrateTask{ - TaskName: stm.Task.TaskName, - SchemaNameS: attsRule.SchemaNameS, - TableNameS: attsRule.TableNameS, - SchemaNameT: attsRule.SchemaNameT, - TableNameT: attsRule.TableNameT, - TableTypeS: attsRule.TableTypeS, - SnapshotPointS: globalScn, - ColumnDetailO: attsRule.ColumnDetailO, - ColumnDetailS: attsRule.ColumnDetailS, - ColumnDetailT: attsRule.ColumnDetailT, - SqlHintS: attsRule.SqlHintS, - SqlHintT: stm.TaskParams.SqlHintT, - ChunkID: uuid.New().String(), - ChunkDetailS: encryptChunkS, - ChunkDetailArgS: "", - ConsistentReadS: strconv.FormatBool(stm.TaskParams.EnableConsistentRead), - TaskStatus: constant.TaskDatabaseStatusWaiting, - }) - if err != nil { - return err - } - _, err = model.GetIDataMigrateSummaryRW().CreateDataMigrateSummary(txnCtx, &task.DataMigrateSummary{ - TaskName: stm.Task.TaskName, - SchemaNameS: attsRule.SchemaNameS, - TableNameS: attsRule.TableNameS, - SchemaNameT: attsRule.SchemaNameT, - TableNameT: attsRule.TableNameT, - SnapshotPointS: globalScn, - TableRowsS: tableRows, - TableSizeS: tableSize, - ChunkTotals: 1, - }) - if err != nil { - return err - } - return nil - }) - if err != nil { - return err - } - return nil - } - - logger.Warn("stmt migrate task table", - zap.String("task_name", stm.Task.TaskName), - zap.String("task_mode", stm.Task.TaskMode), - zap.String("task_flow", stm.Task.TaskFlow), - zap.String("schema_name_s", attsRule.SchemaNameS), - zap.String("table_name_s", attsRule.TableNameS), - zap.String("database_version", dbVersion), - zap.String("database_role", dbRole), - zap.String("migrate_method", "statistic")) - var metas []*task.DataMigrateTask - for _, r := range upstreamBuckets { - toStringS, toStringSArg := r.ToString() - if err != nil { - return err - } - var argsS string - if toStringSArg != nil { - argsS, err = stringutil.MarshalJSON(toStringSArg) - if err != nil { - return err - } - } - switch { - case attsRule.EnableChunkStrategy && !strings.EqualFold(attsRule.WhereRange, ""): - whereRange = 
stringutil.StringBuilder(`((`, toStringS, `) AND (`, attsRule.WhereRange, `))`) - default: - whereRange = toStringS - } - - encChunkS := snappy.Encode(nil, []byte(whereRange)) - - encryptChunkS, err := stringutil.Encrypt(stringutil.BytesToString(encChunkS), []byte(constant.DefaultDataEncryptDecryptKey)) - if err != nil { - return err - } - metas = append(metas, &task.DataMigrateTask{ - TaskName: stm.Task.TaskName, - SchemaNameS: attsRule.SchemaNameS, - TableNameS: attsRule.TableNameS, - SchemaNameT: attsRule.SchemaNameT, - TableNameT: attsRule.TableNameT, - TableTypeS: attsRule.TableTypeS, - SnapshotPointS: globalScn, - ColumnDetailO: attsRule.ColumnDetailO, - ColumnDetailS: attsRule.ColumnDetailS, - ColumnDetailT: attsRule.ColumnDetailT, - SqlHintS: attsRule.SqlHintS, - SqlHintT: stm.TaskParams.SqlHintT, - ChunkID: uuid.New().String(), - ChunkDetailS: encryptChunkS, - ChunkDetailArgS: argsS, - ConsistentReadS: strconv.FormatBool(stm.TaskParams.EnableConsistentRead), - TaskStatus: constant.TaskDatabaseStatusWaiting, - }) - } - - err = model.GetIDataMigrateTaskRW().CreateInBatchDataMigrateTask(gCtx, metas, int(stm.TaskParams.WriteThread), int(stm.TaskParams.BatchSize)) - if err != nil { - return err - } - _, err = model.GetIDataMigrateSummaryRW().CreateDataMigrateSummary(gCtx, &task.DataMigrateSummary{ - TaskName: stm.Task.TaskName, - SchemaNameS: attsRule.SchemaNameS, - TableNameS: attsRule.TableNameS, - SchemaNameT: attsRule.SchemaNameT, - TableNameT: attsRule.TableNameT, - SnapshotPointS: globalScn, - TableRowsS: tableRows, - TableSizeS: tableSize, - ChunkTotals: uint64(len(upstreamBuckets)), - }) - if err != nil { - return err - } - return nil - } - - chunkTask := uuid.New().String() - - chunks, err := databaseS.GetDatabaseTableChunkTask(chunkTask, schemaRoute.SchemaNameS, sourceTable, stm.TaskParams.ChunkSize, stm.TaskParams.CallTimeout) - if err != nil { - return err - } - - if len(chunks) == 0 { - switch { - case attsRule.EnableChunkStrategy && !strings.EqualFold(attsRule.WhereRange, ""): - whereRange = stringutil.StringBuilder(`1 = 1 AND `, attsRule.WhereRange) - default: - whereRange = `1 = 1` - } - - encChunkS := snappy.Encode(nil, []byte(whereRange)) - - encryptChunkS, err := stringutil.Encrypt(stringutil.BytesToString(encChunkS), []byte(constant.DefaultDataEncryptDecryptKey)) - if err != nil { - return err - } - - err = model.Transaction(gCtx, func(txnCtx context.Context) error { - _, err = model.GetIDataMigrateTaskRW().CreateDataMigrateTask(txnCtx, &task.DataMigrateTask{ - TaskName: stm.Task.TaskName, - SchemaNameS: attsRule.SchemaNameS, - TableNameS: attsRule.TableNameS, - SchemaNameT: attsRule.SchemaNameT, - TableNameT: attsRule.TableNameT, - TableTypeS: attsRule.TableTypeS, - SnapshotPointS: globalScn, - ColumnDetailO: attsRule.ColumnDetailO, - ColumnDetailS: attsRule.ColumnDetailS, - ColumnDetailT: attsRule.ColumnDetailT, - SqlHintS: attsRule.SqlHintS, - SqlHintT: stm.TaskParams.SqlHintT, - ChunkID: uuid.New().String(), - ChunkDetailS: encryptChunkS, - ChunkDetailArgS: "", - ConsistentReadS: strconv.FormatBool(stm.TaskParams.EnableConsistentRead), - TaskStatus: constant.TaskDatabaseStatusWaiting, - }) - if err != nil { - return err - } - _, err = model.GetIDataMigrateSummaryRW().CreateDataMigrateSummary(txnCtx, &task.DataMigrateSummary{ - TaskName: stm.Task.TaskName, - SchemaNameS: attsRule.SchemaNameS, - TableNameS: attsRule.TableNameS, - SchemaNameT: attsRule.SchemaNameT, - TableNameT: attsRule.TableNameT, - SnapshotPointS: globalScn, - TableRowsS: tableRows, - 
TableSizeS: tableSize, - ChunkTotals: 1, - }) - if err != nil { - return err - } - return nil - }) - if err != nil { - return err - } - - return nil - } - - var metas []*task.DataMigrateTask - for _, r := range chunks { - switch { - case attsRule.EnableChunkStrategy && !strings.EqualFold(attsRule.WhereRange, ""): - whereRange = stringutil.StringBuilder(r["CMD"], ` AND `, attsRule.WhereRange) - default: - whereRange = r["CMD"] - } - - encChunkS := snappy.Encode(nil, []byte(whereRange)) - - encryptChunkS, err := stringutil.Encrypt(stringutil.BytesToString(encChunkS), []byte(constant.DefaultDataEncryptDecryptKey)) - if err != nil { - return err - } - - metas = append(metas, &task.DataMigrateTask{ - TaskName: stm.Task.TaskName, - SchemaNameS: attsRule.SchemaNameS, - TableNameS: attsRule.TableNameS, - SchemaNameT: attsRule.SchemaNameT, - TableNameT: attsRule.TableNameT, - TableTypeS: attsRule.TableTypeS, - SnapshotPointS: globalScn, - ColumnDetailO: attsRule.ColumnDetailO, - ColumnDetailS: attsRule.ColumnDetailS, - ColumnDetailT: attsRule.ColumnDetailT, - SqlHintS: attsRule.SqlHintS, - SqlHintT: stm.TaskParams.SqlHintT, - ChunkID: uuid.New().String(), - ChunkDetailS: encryptChunkS, - ChunkDetailArgS: "", - ConsistentReadS: strconv.FormatBool(stm.TaskParams.EnableConsistentRead), - TaskStatus: constant.TaskDatabaseStatusWaiting, - }) - } - - err = model.GetIDataMigrateTaskRW().CreateInBatchDataMigrateTask(gCtx, metas, int(stm.TaskParams.WriteThread), int(stm.TaskParams.BatchSize)) - if err != nil { - return err - } - _, err = model.GetIDataMigrateSummaryRW().CreateDataMigrateSummary(gCtx, &task.DataMigrateSummary{ - TaskName: stm.Task.TaskName, - SchemaNameS: attsRule.SchemaNameS, - TableNameS: attsRule.TableNameS, - SchemaNameT: attsRule.SchemaNameT, - TableNameT: attsRule.TableNameT, - SnapshotPointS: globalScn, - TableRowsS: tableRows, - TableSizeS: tableSize, - ChunkTotals: uint64(len(chunks)), - }) - if err != nil { - return err - } - - logger.Info("stmt migrate task init", - zap.String("task_name", stm.Task.TaskName), - zap.String("task_mode", stm.Task.TaskMode), - zap.String("task_flow", stm.Task.TaskFlow), - zap.String("schema_name_s", attsRule.SchemaNameS), - zap.String("table_name_s", attsRule.TableNameS), - zap.String("cost", time.Now().Sub(startTime).String())) - return nil - } - }) - } - - if err = g.Wait(); err != nil { - logger.Error("stmt migrate task init", - zap.String("task_name", stm.Task.TaskName), zap.String("task_mode", stm.Task.TaskMode), zap.String("task_flow", stm.Task.TaskFlow), - zap.String("schema_name_s", schemaRoute.SchemaNameS), - zap.Error(err)) - return err - } - - _, err = model.GetITaskRW().UpdateTask(stm.Ctx, &task.Task{TaskName: stm.Task.TaskName}, map[string]interface{}{"TaskInit": constant.TaskInitStatusFinished}) - if err != nil { - return err - } - return nil -} diff --git a/database/postgresql/data_compare.go b/database/postgresql/data_compare.go index 6e71967..7e183f6 100644 --- a/database/postgresql/data_compare.go +++ b/database/postgresql/data_compare.go @@ -32,7 +32,7 @@ func (d *Database) GetDatabaseTableStatisticsHistogram(schemeNameS, tableNameS s panic("implement me") } -func (d *Database) GetDatabaseTableHighestSelectivityIndex(schemaNameS, tableNameS string, compareCondField string, ignoreCondFields []string) (*structure.HighestBucket, error) { +func (d *Database) GetDatabaseTableHighestSelectivityIndex(schemaNameS, tableNameS string, compareCondField string, ignoreCondFields []string) (*structure.Selectivity, error) { //TODO implement me 
panic("implement me") } diff --git a/database/postgresql/struct_migrate.go b/database/postgresql/struct_migrate.go index 808863a..b60e64b 100644 --- a/database/postgresql/struct_migrate.go +++ b/database/postgresql/struct_migrate.go @@ -938,7 +938,7 @@ func (d *Database) GetDatabaseTableSize(schemaName, tableName string) (float64, return size, nil } -func (d *Database) GetDatabaseTableChunkTask(taskName, schemaName, tableName string, chunkSize uint64, callTimeout uint64) ([]map[string]string, error) { +func (d *Database) GetDatabaseTableChunkTask(taskName, schemaName, tableName string, chunkSize uint64, callTimeout uint64, batchSize int, dataChan chan []map[string]string) error { //TODO implement me panic("implement me") } diff --git a/database/processor/csv_migrate_row.go b/database/processor/csv_migrate_row.go index 14f935d..d8ffd68 100644 --- a/database/processor/csv_migrate_row.go +++ b/database/processor/csv_migrate_row.go @@ -95,7 +95,7 @@ func (r *CsvMigrateRow) MigrateRead() error { return fmt.Errorf("the task_flow [%s] task_mode [%s] isn't support, please contact author or reselect", r.TaskFlow, r.TaskMode) } - logger.Info("csv migrate task chunk rows extractor starting", + logger.Info("data migrate task chunk rows extractor starting", zap.String("task_name", r.Dmt.TaskName), zap.String("task_mode", r.TaskMode), zap.String("task_flow", r.TaskFlow), @@ -123,7 +123,7 @@ func (r *CsvMigrateRow) MigrateRead() error { } endTime := time.Now() - logger.Info("csv migrate task chunk rows extractor finished", + logger.Info("data migrate task chunk rows extractor finished", zap.String("task_name", r.Dmt.TaskName), zap.String("task_mode", r.TaskMode), zap.String("task_flow", r.TaskFlow), @@ -148,7 +148,7 @@ func (r *CsvMigrateRow) MigrateProcess() error { func (r *CsvMigrateRow) MigrateApply() error { startTime := time.Now() - logger.Info("csv migrate task chunk rows applier starting", + logger.Info("data migrate task chunk rows applier starting", zap.String("task_name", r.Dmt.TaskName), zap.String("task_mode", r.TaskMode), zap.String("task_flow", r.TaskFlow), @@ -183,7 +183,7 @@ func (r *CsvMigrateRow) MigrateApply() error { return fmt.Errorf("failed to write data row to csv: %v", err) } } - logger.Info("csv migrate task chunk rows applier finished", + logger.Info("data migrate task chunk rows applier finished", zap.String("task_name", r.Dmt.TaskName), zap.String("task_mode", r.TaskMode), zap.String("task_flow", r.TaskFlow), diff --git a/database/processor/data_compare_divider.go b/database/processor/data_compare_divider.go index 6ead7fc..a65cbab 100644 --- a/database/processor/data_compare_divider.go +++ b/database/processor/data_compare_divider.go @@ -26,94 +26,18 @@ import ( "strings" ) -func ProcessUpstreamDatabaseTableColumnStatisticsBucket(dbTypeS, dbCharsetS string, caseFieldRule string, - database database.IDatabase, schemaName, tableName string, cons *structure.HighestBucket, chunkSize int64, enableCollation bool) (*structure.HighestBucket, []*structure.Range, error) { - if cons == nil { - return nil, nil, nil - } - - var chunkRanges []*structure.Range - - // column name charset transform - var newColumns []string - for _, col := range cons.IndexColumn { - var columnName string - switch stringutil.StringUpper(dbTypeS) { - case constant.DatabaseTypeOracle: - convertUtf8Raws, err := stringutil.CharsetConvert([]byte(col), constant.MigrateOracleCharsetStringConvertMapping[stringutil.StringUpper(dbCharsetS)], constant.CharsetUTF8MB4) - if err != nil { - return nil, nil, nil - } - 
columnName = stringutil.BytesToString(convertUtf8Raws) - - if strings.EqualFold(caseFieldRule, constant.ParamValueDataCompareCaseFieldRuleLower) { - columnName = strings.ToLower(stringutil.BytesToString(convertUtf8Raws)) - } - if strings.EqualFold(caseFieldRule, constant.ParamValueDataCompareCaseFieldRuleUpper) { - columnName = strings.ToUpper(stringutil.BytesToString(convertUtf8Raws)) - } - - case constant.DatabaseTypeMySQL, constant.DatabaseTypeTiDB: - convertUtf8Raws, err := stringutil.CharsetConvert([]byte(col), constant.MigrateMySQLCompatibleCharsetStringConvertMapping[stringutil.StringUpper(dbCharsetS)], constant.CharsetUTF8MB4) - if err != nil { - return nil, nil, nil - } - columnName = stringutil.BytesToString(convertUtf8Raws) - - if strings.EqualFold(caseFieldRule, constant.ParamValueDataCompareCaseFieldRuleLower) { - columnName = strings.ToLower(stringutil.BytesToString(convertUtf8Raws)) - } - if strings.EqualFold(caseFieldRule, constant.ParamValueDataCompareCaseFieldRuleUpper) { - columnName = strings.ToUpper(stringutil.BytesToString(convertUtf8Raws)) - } - default: - return nil, nil, fmt.Errorf("the database type [%s] is not supported, please contact author or reselect", dbTypeS) - } - - newColumns = append(newColumns, columnName) - } - - cons.IndexColumn = newColumns - - for _, b := range cons.Buckets { - switch stringutil.StringUpper(dbTypeS) { - case constant.DatabaseTypeOracle: - convertUtf8Raws, err := stringutil.CharsetConvert([]byte(b.LowerBound), constant.MigrateOracleCharsetStringConvertMapping[stringutil.StringUpper(dbCharsetS)], constant.CharsetUTF8MB4) - if err != nil { - return nil, nil, err - } - b.LowerBound = stringutil.BytesToString(convertUtf8Raws) - - convertUtf8Raws, err = stringutil.CharsetConvert([]byte(b.UpperBound), constant.MigrateOracleCharsetStringConvertMapping[stringutil.StringUpper(dbCharsetS)], constant.CharsetUTF8MB4) - if err != nil { - return nil, nil, err - } - b.UpperBound = stringutil.BytesToString(convertUtf8Raws) - case constant.DatabaseTypeMySQL, constant.DatabaseTypeTiDB: - convertUtf8Raws, err := stringutil.CharsetConvert([]byte(b.LowerBound), constant.MigrateMySQLCompatibleCharsetStringConvertMapping[stringutil.StringUpper(dbCharsetS)], constant.CharsetUTF8MB4) - if err != nil { - return nil, nil, err - } - b.LowerBound = stringutil.BytesToString(convertUtf8Raws) - - convertUtf8Raws, err = stringutil.CharsetConvert([]byte(b.UpperBound), constant.MigrateMySQLCompatibleCharsetStringConvertMapping[stringutil.StringUpper(dbCharsetS)], constant.CharsetUTF8MB4) - if err != nil { - return nil, nil, err - } - b.UpperBound = stringutil.BytesToString(convertUtf8Raws) - default: - return nil, nil, fmt.Errorf("the database type [%s] is not supported, please contact author or reselect", dbTypeS) - } - } - - // collation enable setting - for i, _ := range cons.ColumnCollation { - if !enableCollation { - // ignore collation setting, fill "" - cons.ColumnCollation[i] = constant.DataCompareDisabledCollationSettingFillEmptyString - } - } +type Divide struct { + DBTypeS string + DBCharsetS string + SchemaNameS string + TableNameS string + ChunkSize int64 + DatabaseS database.IDatabase + Cons *structure.Selectivity + RangeC chan []*structure.Range +} +func (d *Divide) ProcessUpstreamStatisticsBucket() error { // divide buckets var ( lowerValues, upperValues []string @@ -124,51 +48,51 @@ func ProcessUpstreamDatabaseTableColumnStatisticsBucket(dbTypeS, dbCharsetS stri // `bucketID` is the bucket id of one chunk. 
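Index column names and bucket bounds are first converted to UTF-8 from the source charset and then folded per the case-field rule, as shown above. A tiny sketch of the case-folding half, assuming the lower/upper/origin convention; the `CharsetConvert` step is project-specific and omitted:

```go
package main

import (
	"fmt"
	"strings"
)

// applyCaseFieldRule folds an already-UTF-8 column name according to the
// lower/upper/origin rule; "origin" keeps the name exactly as extracted.
func applyCaseFieldRule(name, rule string) string {
	switch strings.ToUpper(rule) {
	case "LOWER":
		return strings.ToLower(name)
	case "UPPER":
		return strings.ToUpper(name)
	default:
		return name
	}
}

func main() {
	fmt.Println(applyCaseFieldRule("OrderID", "lower")) // orderid
	fmt.Println(applyCaseFieldRule("OrderID", "upper")) // ORDERID
}
```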
bucketID := 0 - halfChunkSize := chunkSize >> 1 + halfChunkSize := d.ChunkSize >> 1 // `firstBucket` is the first bucket of one chunk. - for i := bucketID; i < len(cons.Buckets); i++ { - count := cons.Buckets[i].Count - latestCount - if count < chunkSize { + for i := bucketID; i < len(d.Cons.Buckets); i++ { + count := d.Cons.Buckets[i].Count - latestCount + if count < d.ChunkSize { // merge more buckets into one chunk logger.Info("divide database bucket value", - zap.String("schema_name_s", schemaName), - zap.String("table_name_s", tableName), + zap.String("schema_name_s", d.SchemaNameS), + zap.String("table_name_s", d.TableNameS), zap.Int("current_bucket_id", bucketID), zap.Int64("bucket_counts", count), - zap.Int64("chunk_size", chunkSize), - zap.String("origin_lower_value", cons.Buckets[i].LowerBound), - zap.String("origin_upper_value", cons.Buckets[i].UpperBound), + zap.Int64("chunk_size", d.ChunkSize), + zap.String("origin_lower_value", d.Cons.Buckets[i].LowerBound), + zap.String("origin_upper_value", d.Cons.Buckets[i].UpperBound), zap.Any("new_lower_values", lowerValues), zap.Any("new_upper_values", upperValues), zap.String("chunk_action", "skip")) continue } - upperValues, err = ExtractDatabaseTableStatisticsValuesFromBuckets(dbTypeS, cons.Buckets[i].UpperBound, cons.IndexColumn) + upperValues, err = ExtractDatabaseTableStatisticsValuesFromBuckets(d.DBTypeS, d.Cons.Buckets[i].UpperBound, d.Cons.IndexColumn) if err != nil { - return nil, nil, err + return err } logger.Debug("divide database bucket value", - zap.String("schema_name_s", schemaName), - zap.String("table_name_s", tableName), + zap.String("schema_name_s", d.SchemaNameS), + zap.String("table_name_s", d.TableNameS), zap.Int("current_bucket_id", bucketID), zap.Int64("bucket_counts", count), - zap.Int64("chunk_size", chunkSize), - zap.String("origin_lower_value", cons.Buckets[i].LowerBound), - zap.String("origin_upper_value", cons.Buckets[i].UpperBound), + zap.Int64("chunk_size", d.ChunkSize), + zap.String("origin_lower_value", d.Cons.Buckets[i].LowerBound), + zap.String("origin_upper_value", d.Cons.Buckets[i].UpperBound), zap.String("chunk_action", "divide")) var chunkRange *structure.Range - switch stringutil.StringUpper(dbTypeS) { + switch stringutil.StringUpper(d.DBTypeS) { case constant.DatabaseTypeOracle: - chunkRange = structure.NewChunkRange(dbTypeS, dbCharsetS, constant.BuildInOracleCharsetAL32UTF8) + chunkRange = structure.NewChunkRange(d.DBTypeS, d.DBCharsetS, constant.BuildInOracleCharsetAL32UTF8) case constant.DatabaseTypeMySQL, constant.DatabaseTypeTiDB: - chunkRange = structure.NewChunkRange(dbTypeS, dbCharsetS, constant.BuildInMYSQLCharsetUTF8MB4) + chunkRange = structure.NewChunkRange(d.DBTypeS, d.DBCharsetS, constant.BuildInMYSQLCharsetUTF8MB4) default: - return nil, nil, fmt.Errorf("the database type [%s] is not supported, please contact author or reselect", dbTypeS) + return fmt.Errorf("the database type [%s] is not supported, please contact author or reselect", d.DBTypeS) } - for j, columnName := range cons.IndexColumn { + for j, columnName := range d.Cons.IndexColumn { var lowerValue, upperValue string if len(lowerValues) > 0 { lowerValue = lowerValues[j] @@ -176,29 +100,29 @@ func ProcessUpstreamDatabaseTableColumnStatisticsBucket(dbTypeS, dbCharsetS stri if len(upperValues) > 0 { upperValue = upperValues[j] } - err = chunkRange.Update(columnName, cons.ColumnCollation[j], cons.ColumnDatatype[j], cons.DatetimePrecision[j], lowerValue, upperValue, len(lowerValues) > 0, len(upperValues) > 0) + err = 
chunkRange.Update(columnName, d.Cons.ColumnCollation[j], d.Cons.ColumnDatatype[j], d.Cons.DatetimePrecision[j], lowerValue, upperValue, len(lowerValues) > 0, len(upperValues) > 0) if err != nil { - return nil, nil, err + return err } } // count >= chunkSize if i == bucketID { - chunkCnt := int((count + halfChunkSize) / chunkSize) - buckets, err := DivideDatabaseTableColumnStatisticsBucket(database, schemaName, tableName, cons, chunkRange, chunkCnt) + chunkCnt := int((count + halfChunkSize) / d.ChunkSize) + ranges, err := DivideDatabaseTableColumnStatisticsBucket(d.DatabaseS, d.SchemaNameS, d.TableNameS, d.Cons, chunkRange, chunkCnt) if err != nil { - return nil, nil, err + return err } - chunkRanges = append(chunkRanges, buckets...) + d.RangeC <- ranges logger.Debug("divide database bucket value", - zap.String("schema_name_s", schemaName), - zap.String("table_name_s", tableName), + zap.String("schema_name_s", d.SchemaNameS), + zap.String("table_name_s", d.TableNameS), zap.Int("current_bucket_id", bucketID), zap.Int64("bucket_counts", count), - zap.Int64("chunk_size", chunkSize), - zap.String("origin_lower_value", cons.Buckets[i].LowerBound), - zap.String("origin_upper_value", cons.Buckets[i].UpperBound), + zap.Int64("chunk_size", d.ChunkSize), + zap.String("origin_lower_value", d.Cons.Buckets[i].LowerBound), + zap.String("origin_upper_value", d.Cons.Buckets[i].UpperBound), zap.Any("new_lower_values", lowerValues), zap.Any("new_upper_values", upperValues), zap.Any("chunk_ranges", chunkRange), @@ -206,131 +130,70 @@ func ProcessUpstreamDatabaseTableColumnStatisticsBucket(dbTypeS, dbCharsetS stri } else { // merge bucket // use multi-buckets so chunkCnt = 1 - buckets, err := DivideDatabaseTableColumnStatisticsBucket(database, schemaName, tableName, cons, chunkRange, 1) + ranges, err := DivideDatabaseTableColumnStatisticsBucket(d.DatabaseS, d.SchemaNameS, d.TableNameS, d.Cons, chunkRange, 1) if err != nil { - return nil, nil, err + return err } - chunkRanges = append(chunkRanges, buckets...) 
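When a bucket run holds at least `chunkSize` rows, the divider derives the chunk count with round-to-nearest integer division: half a chunk is added before dividing. A worked sketch of just that arithmetic, taken directly from the `halfChunkSize` expression above:

```go
package main

import "fmt"

// chunkCount reproduces `int((count + halfChunkSize) / chunkSize)` from the
// divider: adding chunkSize>>1 before dividing rounds to the nearest count.
func chunkCount(count, chunkSize int64) int {
	halfChunkSize := chunkSize >> 1
	return int((count + halfChunkSize) / chunkSize)
}

func main() {
	fmt.Println(chunkCount(2400, 1000)) // 2 -> two chunks of ~1200 rows
	fmt.Println(chunkCount(2600, 1000)) // 3 -> three chunks of ~867 rows
}
```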
+ d.RangeC <- ranges + logger.Debug("divide database bucket value", - zap.String("schema_name_s", schemaName), - zap.String("table_name_s", tableName), + zap.String("schema_name_s", d.SchemaNameS), + zap.String("table_name_s", d.TableNameS), zap.Int("current_bucket_id", bucketID), zap.Int64("bucket_counts", count), - zap.Int64("chunk_size", chunkSize), - zap.String("origin_lower_value", cons.Buckets[i].LowerBound), - zap.String("origin_upper_value", cons.Buckets[i].UpperBound), + zap.Int64("chunk_size", d.ChunkSize), + zap.String("origin_lower_value", d.Cons.Buckets[i].LowerBound), + zap.String("origin_upper_value", d.Cons.Buckets[i].UpperBound), zap.Any("new_lower_values", lowerValues), zap.Any("new_upper_values", upperValues), zap.Any("chunk_ranges", chunkRange), zap.String("chunk_action", "random")) } - latestCount = cons.Buckets[i].Count + latestCount = d.Cons.Buckets[i].Count lowerValues = upperValues bucketID = i + 1 } // merge the rest keys into one chunk var chunkRange *structure.Range - switch stringutil.StringUpper(dbTypeS) { + switch stringutil.StringUpper(d.DBTypeS) { case constant.DatabaseTypeOracle: - chunkRange = structure.NewChunkRange(dbTypeS, dbCharsetS, constant.BuildInOracleCharsetAL32UTF8) + chunkRange = structure.NewChunkRange(d.DBTypeS, d.DBCharsetS, constant.BuildInOracleCharsetAL32UTF8) case constant.DatabaseTypeMySQL, constant.DatabaseTypeTiDB: - chunkRange = structure.NewChunkRange(dbTypeS, dbCharsetS, constant.BuildInMYSQLCharsetUTF8MB4) + chunkRange = structure.NewChunkRange(d.DBTypeS, d.DBCharsetS, constant.BuildInMYSQLCharsetUTF8MB4) default: - return nil, nil, fmt.Errorf("the database type [%s] is not supported, please contact author or reselect", dbTypeS) + return fmt.Errorf("the database type [%s] is not supported, please contact author or reselect", d.DBTypeS) } if len(lowerValues) > 0 { - for j, columnName := range cons.IndexColumn { - err = chunkRange.Update(columnName, cons.ColumnCollation[j], cons.ColumnDatatype[j], cons.DatetimePrecision[j], lowerValues[j], "", true, false) + for j, columnName := range d.Cons.IndexColumn { + err = chunkRange.Update(columnName, d.Cons.ColumnCollation[j], d.Cons.ColumnDatatype[j], d.Cons.DatetimePrecision[j], lowerValues[j], "", true, false) if err != nil { - return nil, nil, err + return err } } } // When the table is much less than chunkSize, // it will return a chunk include the whole table. - buckets, err := DivideDatabaseTableColumnStatisticsBucket(database, schemaName, tableName, cons, chunkRange, 1) + ranges, err := DivideDatabaseTableColumnStatisticsBucket(d.DatabaseS, d.SchemaNameS, d.TableNameS, d.Cons, chunkRange, 1) if err != nil { - return nil, nil, err + return err } - chunkRanges = append(chunkRanges, buckets...) 
+ d.RangeC <- ranges logger.Debug("divide database bucket value", - zap.String("schema_name_s", schemaName), - zap.String("table_name_s", tableName), + zap.String("schema_name_s", d.SchemaNameS), + zap.String("table_name_s", d.TableNameS), zap.Int("last_bucket_id", bucketID), - zap.Int64("chunk_size", chunkSize), + zap.Int64("chunk_size", d.ChunkSize), zap.Any("new_lower_values", lowerValues), zap.Any("new_upper_values", upperValues), zap.Any("chunk_ranges", chunkRange), zap.String("chunk_action", "merge")) - return cons, chunkRanges, nil + return nil } -func ReverseUpstreamHighestBucketDownstreamRule(taskFlow, dbTypeT, dbCharsetS string, columnDatatypeT []string, cons *structure.HighestBucket, columnRouteRule map[string]string) (*structure.Rule, error) { - cons.ColumnDatatype = columnDatatypeT - - var columnCollationDownStreams []string - - switch stringutil.StringUpper(dbTypeT) { - case constant.DatabaseTypeOracle: - for _, c := range cons.ColumnCollation { - if !strings.EqualFold(c, constant.DataCompareDisabledCollationSettingFillEmptyString) { - collationTStr := constant.MigrateTableStructureDatabaseCollationMap[taskFlow][stringutil.StringUpper(c)][constant.MigrateTableStructureDatabaseCharsetMap[taskFlow][dbCharsetS]] - collationTSli := stringutil.StringSplit(collationTStr, constant.StringSeparatorSlash) - // get first collation - columnCollationDownStreams = append(columnCollationDownStreams, collationTSli[0]) - } else { - columnCollationDownStreams = append(columnCollationDownStreams, c) - } - } - case constant.DatabaseTypeMySQL, constant.DatabaseTypeTiDB: - for _, c := range cons.ColumnCollation { - if !strings.EqualFold(c, constant.DataCompareDisabledCollationSettingFillEmptyString) { - collationTStr := constant.MigrateTableStructureDatabaseCollationMap[taskFlow][stringutil.StringUpper(c)][constant.MigrateTableStructureDatabaseCharsetMap[taskFlow][dbCharsetS]] - collationTSli := stringutil.StringSplit(collationTStr, constant.StringSeparatorSlash) - // get first collation - columnCollationDownStreams = append(columnCollationDownStreams, collationTSli[0]) - } else { - columnCollationDownStreams = append(columnCollationDownStreams, c) - } - } - default: - return nil, fmt.Errorf("unsupported the downstream database type: %s", dbTypeT) - } - - if len(columnCollationDownStreams) > 0 { - cons.ColumnCollation = columnCollationDownStreams - } - - columnDatatypeM := make(map[string]string) - columnCollationM := make(map[string]string) - columnDatePrecisionM := make(map[string]string) - - for i, c := range cons.IndexColumn { - columnDatatypeM[c] = cons.ColumnDatatype[i] - columnCollationM[c] = cons.ColumnCollation[i] - columnDatePrecisionM[c] = cons.DatetimePrecision[i] - } - - logger.Info("reverse data compare task init table chunk", - zap.Any("upstreamConsIndexColumns", &structure.Rule{ - IndexColumnRule: columnRouteRule, - ColumnDatatypeRule: columnDatatypeM, - ColumnCollationRule: columnCollationM, - DatetimePrecisionRule: columnDatePrecisionM, - })) - - return &structure.Rule{ - IndexColumnRule: columnRouteRule, - ColumnDatatypeRule: columnDatatypeM, - ColumnCollationRule: columnCollationM, - DatetimePrecisionRule: columnDatePrecisionM, - }, nil -} - -func ProcessDownstreamDatabaseTableColumnStatisticsBucket(dbTypeT, dbCharsetT string, bs []*structure.Range, r *structure.Rule) ([]*structure.Range, error) { +func ProcessDownstreamStatisticsBucket(dbTypeT, dbCharsetT string, bs []*structure.Range, r *structure.Rule) ([]*structure.Range, error) { var ranges []*structure.Range for _, b := 
range bs { var bounds []*structure.Bound @@ -401,7 +264,7 @@ func ProcessDownstreamDatabaseTableColumnStatisticsBucket(dbTypeT, dbCharsetT st return ranges, nil } -func GetDownstreamDatabaseTableColumnDatatype(schemaNameT, tableNameT string, databaseT database.IDatabase, originColumnNameSli []string, columnNameRouteRule map[string]string) ([]string, error) { +func GetDownstreamTableColumnDatatype(schemaNameT, tableNameT string, databaseT database.IDatabase, originColumnNameSli []string, columnNameRouteRule map[string]string) ([]string, error) { var ( columnNameSliT []string columnTypeSliT []string @@ -436,98 +299,3 @@ func GetDownstreamDatabaseTableColumnDatatype(schemaNameT, tableNameT string, da } return columnTypeSliT, nil } - -// ExtractDatabaseTableStatisticsValuesFromBuckets analyze upperBound or lowerBound to string for each column. -// upperBound and lowerBound are looks like '(123, abc)' for multiple fields, or '123' for one field. -func ExtractDatabaseTableStatisticsValuesFromBuckets(divideDbType, valueString string, columnNames []string) ([]string, error) { - switch stringutil.StringUpper(divideDbType) { - case constant.DatabaseTypeTiDB: - // FIXME: maybe some values contains '(', ')' or ', ' - vStr := strings.Trim(valueString, "()") - values := strings.Split(vStr, ", ") - if len(values) != len(columnNames) { - return nil, fmt.Errorf("extract database type [%s] value %s failed, values %v not match columnNames %v", divideDbType, valueString, values, columnNames) - } - return values, nil - case constant.DatabaseTypeOracle: - values := strings.Split(valueString, constant.StringSeparatorComma) - if len(values) != len(columnNames) { - return nil, fmt.Errorf("extract database type [%s] value %s failed, values %v not match columnNames %v", divideDbType, valueString, values, columnNames) - } - return values, nil - default: - return nil, fmt.Errorf("extract database type [%s] value %s is not supported, please contact author or reselect", divideDbType, valueString) - } -} - -// DivideDatabaseTableColumnStatisticsBucket splits a chunk to multiple chunks by random -// Notice: If the `count <= 1`, it will skip splitting and return `chunk` as a slice directly. 
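`ExtractDatabaseTableStatisticsValuesFromBuckets`, removed here along with the other divider helpers, parses a statistics bucket bound back into per-column values: TiDB composite bounds print as `(123, abc)`, Oracle bounds as a comma-joined list. A self-contained sketch of that parsing, with the same caveat the original FIXME notes, namely that values containing `(`, `)` or `, ` would confuse the split:

```go
package main

import (
	"fmt"
	"strings"
)

// extractBoundValues splits a statistics bucket bound into one value per
// index column, mirroring the TiDB "(v1, v2)" and Oracle "v1,v2" formats.
func extractBoundValues(dbType, bound string, columns []string) ([]string, error) {
	var values []string
	switch strings.ToUpper(dbType) {
	case "TIDB":
		values = strings.Split(strings.Trim(bound, "()"), ", ")
	case "ORACLE":
		values = strings.Split(bound, ",")
	default:
		return nil, fmt.Errorf("database type [%s] is not supported", dbType)
	}
	if len(values) != len(columns) {
		return nil, fmt.Errorf("bound %q: %d values do not match %d columns", bound, len(values), len(columns))
	}
	return values, nil
}

func main() {
	vals, err := extractBoundValues("tidb", "(123, abc)", []string{"ID", "NAME"})
	if err != nil {
		panic(err)
	}
	fmt.Println(vals) // [123 abc]
}
```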
-func DivideDatabaseTableColumnStatisticsBucket(database database.IDatabase, schemaName, tableName string, cons *structure.HighestBucket, chunkRange *structure.Range, divideCountCnt int) ([]*structure.Range, error) { - var chunkRanges []*structure.Range - - if divideCountCnt <= 1 { - chunkRanges = append(chunkRanges, chunkRange) - return chunkRanges, nil - } - - chunkConds, chunkArgs := chunkRange.ToString() - - randomValueSli, err := database.GetDatabaseTableRandomValues(schemaName, tableName, cons.IndexColumn, chunkConds, chunkArgs, divideCountCnt-1, cons.ColumnCollation) - if err != nil { - return nil, err - } - - logger.Debug("divide database bucket value by random", zap.Stringer("chunk", chunkRange), zap.Int("random values num", len(randomValueSli)), zap.Any("random values", randomValueSli)) - - for i := 0; i <= len(randomValueSli); i++ { - newChunk := chunkRange.Copy() - - for j, columnName := range cons.IndexColumn { - if i == 0 { - if len(randomValueSli) == 0 { - // randomValues is empty, so chunks will append chunk itself - break - } - err = newChunk.Update( - columnName, - cons.ColumnCollation[j], - cons.ColumnDatatype[j], - cons.DatetimePrecision[j], - "", randomValueSli[i][j], false, true) - if err != nil { - return chunkRanges, err - } - } else if i == len(randomValueSli) { - // bucket upper newChunk.Bounds[j].Upper - err = newChunk.Update( - columnName, - cons.ColumnCollation[j], - cons.ColumnDatatype[j], - cons.DatetimePrecision[j], - randomValueSli[i-1][j], newChunk.Bounds[j].Upper, true, true) - if err != nil { - return chunkRanges, err - } - } else { - err = newChunk.Update( - columnName, - cons.ColumnCollation[j], - cons.ColumnDatatype[j], - cons.DatetimePrecision[j], - randomValueSli[i-1][j], - randomValueSli[i][j], true, true) - if err != nil { - return chunkRanges, err - } - } - } - chunkRanges = append(chunkRanges, newChunk) - } - - logger.Debug("divide database bucket value by random", - zap.Int("divide chunk range num", len(chunkRanges)), - zap.Stringer("origin chunk range", chunkRange), - zap.Any("new chunk range", chunkRanges)) - - return chunkRanges, nil -} diff --git a/database/processor/data_compare_task.go b/database/processor/data_compare_task.go new file mode 100644 index 0000000..b747b3a --- /dev/null +++ b/database/processor/data_compare_task.go @@ -0,0 +1,986 @@ +/* +Copyright © 2020 Marvin + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
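The removed `DivideDatabaseTableColumnStatisticsBucket` splits one oversized range by querying `divideCountCnt-1` random index values and using them as sorted pivots. A minimal single-column sketch of how k pivots cut one range into k+1 sub-ranges; `bound` and `splitByPivots` are illustrative names, not repo types:

```go
package main

import "fmt"

type bound struct{ Lower, Upper string }

// splitByPivots cuts [lower, upper] at k sorted pivot values, producing k+1
// sub-ranges; with no pivots the original range is returned unchanged, which
// matches the divideCountCnt <= 1 fast path in the removed function.
func splitByPivots(lower, upper string, pivots []string) []bound {
	if len(pivots) == 0 {
		return []bound{{lower, upper}}
	}
	ranges := make([]bound, 0, len(pivots)+1)
	ranges = append(ranges, bound{lower, pivots[0]})
	for i := 1; i < len(pivots); i++ {
		ranges = append(ranges, bound{pivots[i-1], pivots[i]})
	}
	ranges = append(ranges, bound{pivots[len(pivots)-1], upper})
	return ranges
}

func main() {
	for _, r := range splitByPivots("a", "z", []string{"g", "p"}) {
		fmt.Printf("[%s, %s] ", r.Lower, r.Upper)
	}
	// prints: [a, g] [g, p] [p, z]
}
```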
+*/ +package processor + +import ( + "context" + "fmt" + "github.com/google/uuid" + "github.com/wentaojin/dbms/errconcurrent" + "github.com/wentaojin/dbms/utils/structure" + "strconv" + "strings" + "time" + + "github.com/golang/snappy" + + "github.com/wentaojin/dbms/database" + "github.com/wentaojin/dbms/logger" + "github.com/wentaojin/dbms/model" + "github.com/wentaojin/dbms/model/rule" + "github.com/wentaojin/dbms/model/task" + "github.com/wentaojin/dbms/proto/pb" + "github.com/wentaojin/dbms/utils/constant" + "github.com/wentaojin/dbms/utils/stringutil" + "go.uber.org/zap" + "golang.org/x/sync/errgroup" +) + +type DataCompareTask struct { + Ctx context.Context + Task *task.Task + DatabaseS database.IDatabase + DatabaseT database.IDatabase + SchemaNameS string + + DBCharsetS string + DBCharsetT string + TaskParams *pb.DataCompareParam + + WaiterC chan *WaitingRecs + ResumeC chan *WaitingRecs +} + +func (dmt *DataCompareTask) Init() error { + defer func() { + close(dmt.WaiterC) + close(dmt.ResumeC) + }() + logger.Info("data compare task init table", + zap.String("task_name", dmt.Task.TaskName), zap.String("task_mode", dmt.Task.TaskMode), zap.String("task_flow", dmt.Task.TaskFlow)) + + if !dmt.TaskParams.EnableCheckpoint { + err := model.GetIDataCompareSummaryRW().DeleteDataCompareSummaryName(dmt.Ctx, []string{dmt.Task.TaskName}) + if err != nil { + return err + } + err = model.GetIDataCompareTaskRW().DeleteDataCompareTaskName(dmt.Ctx, []string{dmt.Task.TaskName}) + if err != nil { + return err + } + } + logger.Warn("data compare task checkpoint skip", + zap.String("task_name", dmt.Task.TaskName), + zap.String("task_mode", dmt.Task.TaskMode), + zap.String("task_flow", dmt.Task.TaskFlow), + zap.Bool("enable_checkpoint", dmt.TaskParams.EnableCheckpoint)) + + // filter database table + schemaTaskTables, err := model.GetIMigrateTaskTableRW().FindMigrateTaskTable(dmt.Ctx, &rule.MigrateTaskTable{ + TaskName: dmt.Task.TaskName, + SchemaNameS: dmt.SchemaNameS, + }) + if err != nil { + return err + } + var ( + includeTables []string + excludeTables []string + databaseTaskTables []string // task tables + globalScnS, globalScnT string + ) + databaseTableTypeMap := make(map[string]string) + databaseTaskTablesMap := make(map[string]struct{}) + + for _, t := range schemaTaskTables { + if strings.EqualFold(t.IsExclude, constant.MigrateTaskTableIsExclude) { + excludeTables = append(excludeTables, t.TableNameS) + } + if strings.EqualFold(t.IsExclude, constant.MigrateTaskTableIsNotExclude) { + includeTables = append(includeTables, t.TableNameS) + } + } + + tableObjs, err := dmt.DatabaseS.FilterDatabaseTable(dmt.SchemaNameS, includeTables, excludeTables) + if err != nil { + return err + } + + // rule case field + for _, t := range tableObjs.TaskTables { + var tabName string + // the according target case field rule convert + if strings.EqualFold(dmt.Task.CaseFieldRuleS, constant.ParamValueStructMigrateCaseFieldRuleLower) { + tabName = stringutil.StringLower(t) + } + if strings.EqualFold(dmt.Task.CaseFieldRuleS, constant.ParamValueStructMigrateCaseFieldRuleUpper) { + tabName = stringutil.StringUpper(t) + } + if strings.EqualFold(dmt.Task.CaseFieldRuleS, constant.ParamValueStructMigrateCaseFieldRuleOrigin) { + tabName = t + } + databaseTaskTables = append(databaseTaskTables, tabName) + databaseTaskTablesMap[tabName] = struct{}{} + } + + // compare the task table + // the database task table is exist, and the config task table isn't exist, the clear the database task table + summaries, err := 
model.GetIDataCompareSummaryRW().FindDataCompareSummary(dmt.Ctx, &task.DataCompareSummary{TaskName: dmt.Task.TaskName, SchemaNameS: dmt.SchemaNameS}) + if err != nil { + return err + } + for _, s := range summaries { + _, ok := databaseTaskTablesMap[s.TableNameS] + + if !ok || strings.EqualFold(s.InitFlag, constant.TaskInitStatusNotFinished) { + err = model.Transaction(dmt.Ctx, func(txnCtx context.Context) error { + err = model.GetIDataCompareSummaryRW().DeleteDataCompareSummary(txnCtx, &task.DataCompareSummary{ + TaskName: s.TaskName, + SchemaNameS: s.SchemaNameS, + TableNameS: s.TableNameS, + }) + if err != nil { + return err + } + err = model.GetIDataCompareTaskRW().DeleteDataCompareTask(txnCtx, &task.DataCompareTask{ + TaskName: s.TaskName, + SchemaNameS: s.SchemaNameS, + TableNameS: s.TableNameS, + }) + if err != nil { + return err + } + return nil + }) + if err != nil { + return err + } + } + } + + databaseTableTypeMap, err = dmt.DatabaseS.GetDatabaseTableType(dmt.SchemaNameS) + if err != nil { + return err + } + + switch dmt.Task.TaskFlow { + case constant.TaskFlowOracleToTiDB: + globalScn, err := dmt.DatabaseS.GetDatabaseConsistentPos() + if err != nil { + return err + } + + if dmt.TaskParams.EnableConsistentRead { + globalScnS = strconv.FormatUint(globalScn, 10) + } + if !dmt.TaskParams.EnableConsistentRead && !strings.EqualFold(dmt.TaskParams.ConsistentReadPointS, "") { + globalScnS = dmt.TaskParams.ConsistentReadPointS + } + if !strings.EqualFold(dmt.TaskParams.ConsistentReadPointT, "") { + globalScnT = dmt.TaskParams.ConsistentReadPointT + } + case constant.TaskFlowOracleToMySQL: + globalScn, err := dmt.DatabaseS.GetDatabaseConsistentPos() + if err != nil { + return err + } + + if dmt.TaskParams.EnableConsistentRead { + globalScnS = strconv.FormatUint(globalScn, 10) + } + if !dmt.TaskParams.EnableConsistentRead && !strings.EqualFold(dmt.TaskParams.ConsistentReadPointS, "") { + globalScnS = dmt.TaskParams.ConsistentReadPointS + } + // ignore params dmt.TaskParams.ConsistentReadPointT, mysql database is not support + case constant.TaskFlowTiDBToOracle: + if !strings.EqualFold(dmt.TaskParams.ConsistentReadPointS, "") { + globalScnS = dmt.TaskParams.ConsistentReadPointS + } + + if !strings.EqualFold(dmt.TaskParams.ConsistentReadPointT, "") { + globalScnT = dmt.TaskParams.ConsistentReadPointT + } + case constant.TaskFlowMySQLToOracle: + // ignore params dmt.TaskParams.ConsistentReadPointS, mysql database is not support + + if !strings.EqualFold(dmt.TaskParams.ConsistentReadPointT, "") { + globalScnT = dmt.TaskParams.ConsistentReadPointT + } + } + + // database tables + // init database table + dbTypeSli := stringutil.StringSplit(dmt.Task.TaskFlow, constant.StringSeparatorAite) + dbTypeS := dbTypeSli[0] + dbTypeT := dbTypeSli[1] + + logger.Info("data compare task init", + zap.String("task_name", dmt.Task.TaskName), zap.String("task_mode", dmt.Task.TaskMode), zap.String("task_flow", dmt.Task.TaskFlow)) + + g, gCtx := errgroup.WithContext(dmt.Ctx) + g.SetLimit(int(dmt.TaskParams.TableThread)) + + for _, taskJob := range databaseTaskTables { + sourceTable := taskJob + g.Go(func() error { + select { + case <-gCtx.Done(): + return gCtx.Err() + default: + startTime := time.Now() + s, err := model.GetIDataCompareSummaryRW().GetDataCompareSummary(gCtx, &task.DataCompareSummary{ + TaskName: dmt.Task.TaskName, + SchemaNameS: dmt.SchemaNameS, + TableNameS: sourceTable, + }) + if err != nil { + return err + } + if strings.EqualFold(s.InitFlag, constant.TaskInitStatusFinished) { + // the database 
task has init flag,skip + dmt.ResumeC <- &WaitingRecs{ + TaskName: s.TaskName, + SchemaNameS: s.SchemaNameS, + TableNameS: s.TableNameS, + } + return nil + } + + tableRows, err := dmt.DatabaseS.GetDatabaseTableRows(dmt.SchemaNameS, sourceTable) + if err != nil { + return err + } + tableSize, err := dmt.DatabaseS.GetDatabaseTableSize(dmt.SchemaNameS, sourceTable) + if err != nil { + return err + } + + dataRule := &DataCompareRule{ + Ctx: gCtx, + TaskMode: dmt.Task.TaskMode, + TaskName: dmt.Task.TaskName, + TaskFlow: dmt.Task.TaskFlow, + DatabaseS: dmt.DatabaseS, + DatabaseT: dmt.DatabaseT, + SchemaNameS: dmt.SchemaNameS, + TableNameS: sourceTable, + TableTypeS: databaseTableTypeMap, + OnlyDatabaseCompareRow: dmt.TaskParams.OnlyCompareRow, + DisableDatabaseCompareMd5: dmt.TaskParams.DisableMd5Checksum, + DBCharsetS: dmt.DBCharsetS, + DBCharsetT: dmt.DBCharsetT, + CaseFieldRuleS: dmt.Task.CaseFieldRuleS, + CaseFieldRuleT: dmt.Task.CaseFieldRuleT, + GlobalSqlHintS: dmt.TaskParams.SqlHintS, + GlobalSqlHintT: dmt.TaskParams.SqlHintT, + GlobalIgnoreConditionFields: dmt.TaskParams.IgnoreConditionFields, + } + + attsRule, err := database.IDataCompareAttributesRule(dataRule) + if err != nil { + return err + } + + logger.Info("data compare task init table start", + zap.String("task_name", dmt.Task.TaskName), + zap.String("task_mode", dmt.Task.TaskMode), + zap.String("task_flow", dmt.Task.TaskFlow), + zap.String("schema_name_s", attsRule.SchemaNameS), + zap.String("table_name_s", attsRule.TableNameS)) + + err = dmt.ProcessStatisticsScan( + gCtx, + dbTypeS, + dbTypeT, + globalScnS, + globalScnT, + tableRows, + tableSize, + attsRule) + if err != nil { + return err + } + + logger.Info("data compare task init table finished", + zap.String("task_name", dmt.Task.TaskName), + zap.String("task_mode", dmt.Task.TaskMode), + zap.String("task_flow", dmt.Task.TaskFlow), + zap.String("schema_name_s", attsRule.SchemaNameS), + zap.String("table_name_s", attsRule.TableNameS), + zap.String("cost", time.Now().Sub(startTime).String())) + return nil + } + }) + } + + if err = g.Wait(); err != nil { + logger.Error("data compare task init", + zap.String("task_name", dmt.Task.TaskName), + zap.String("task_mode", dmt.Task.TaskMode), + zap.String("task_flow", dmt.Task.TaskFlow), + zap.String("schema_name_s", dmt.SchemaNameS), + zap.Error(err)) + return err + } + _, err = model.GetITaskRW().UpdateTask(dmt.Ctx, &task.Task{TaskName: dmt.Task.TaskName}, map[string]interface{}{"TaskInit": constant.TaskInitStatusFinished}) + if err != nil { + return err + } + return nil +} + +func (dmt *DataCompareTask) Run() error { + logger.Info("data compare task run table", + zap.String("task_name", dmt.Task.TaskName), zap.String("task_mode", dmt.Task.TaskMode), zap.String("task_flow", dmt.Task.TaskFlow)) + + for s := range dmt.WaiterC { + err := dmt.Process(s) + if err != nil { + return err + } + } + return nil +} + +func (dmt *DataCompareTask) Resume() error { + logger.Info("data compare task resume table", + zap.String("task_name", dmt.Task.TaskName), zap.String("task_mode", dmt.Task.TaskMode), zap.String("task_flow", dmt.Task.TaskFlow)) + + for s := range dmt.ResumeC { + err := dmt.Process(s) + if err != nil { + return err + } + } + return nil +} + +func (dmt *DataCompareTask) Process(s *WaitingRecs) error { + startTime := time.Now() + logger.Info("data compare task process table", + zap.String("task_name", dmt.Task.TaskName), + zap.String("task_mode", dmt.Task.TaskMode), + zap.String("task_flow", dmt.Task.TaskFlow), + 
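`Init` fans out one goroutine per task table, capped at `TableThread` via errgroup's `SetLimit`, and each worker checks the group context before doing work, as in the loop above. A stripped-down sketch of that pattern; `initOneTable` is a stand-in for the per-table body:

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func initOneTable(ctx context.Context, table string) error {
	fmt.Println("init", table)
	return nil
}

func initTables(ctx context.Context, tables []string, tableThread int) error {
	g, gCtx := errgroup.WithContext(ctx)
	g.SetLimit(tableThread) // at most tableThread tables in flight
	for _, t := range tables {
		sourceTable := t // capture per iteration (pre-Go 1.22 loop semantics)
		g.Go(func() error {
			select {
			case <-gCtx.Done():
				return gCtx.Err() // a sibling failed; stop early
			default:
				return initOneTable(gCtx, sourceTable)
			}
		})
	}
	return g.Wait()
}

func main() {
	if err := initTables(context.Background(), []string{"T1", "T2", "T3"}, 2); err != nil {
		panic(err)
	}
}
```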
zap.String("schema_name_s", s.SchemaNameS), + zap.String("table_name_s", s.TableNameS)) + + var ( + migrateTasks []*task.DataCompareTask + err error + ) + err = model.Transaction(dmt.Ctx, func(txnCtx context.Context) error { + // get migrate task tables + migrateTasks, err = model.GetIDataCompareTaskRW().FindDataCompareTask(txnCtx, + &task.DataCompareTask{ + TaskName: s.TaskName, + SchemaNameS: s.SchemaNameS, + TableNameS: s.TableNameS, + TaskStatus: constant.TaskDatabaseStatusWaiting, + }) + if err != nil { + return err + } + migrateFailedTasks, err := model.GetIDataCompareTaskRW().FindDataCompareTask(txnCtx, + &task.DataCompareTask{ + TaskName: s.TaskName, + SchemaNameS: s.SchemaNameS, + TableNameS: s.TableNameS, + TaskStatus: constant.TaskDatabaseStatusFailed}) + if err != nil { + return err + } + migrateRunningTasks, err := model.GetIDataCompareTaskRW().FindDataCompareTask(txnCtx, + &task.DataCompareTask{ + TaskName: s.TaskName, + SchemaNameS: s.SchemaNameS, + TableNameS: s.TableNameS, + TaskStatus: constant.TaskDatabaseStatusRunning}) + if err != nil { + return err + } + migrateStopTasks, err := model.GetIDataCompareTaskRW().FindDataCompareTask(txnCtx, + &task.DataCompareTask{ + TaskName: s.TaskName, + SchemaNameS: s.SchemaNameS, + TableNameS: s.TableNameS, + TaskStatus: constant.TaskDatabaseStatusStopped}) + if err != nil { + return err + } + migrateTasks = append(migrateTasks, migrateFailedTasks...) + migrateTasks = append(migrateTasks, migrateRunningTasks...) + migrateTasks = append(migrateTasks, migrateStopTasks...) + return nil + }) + if err != nil { + return err + } + + logger.Info("data compare task process chunks", + zap.String("task_name", dmt.Task.TaskName), + zap.String("task_mode", dmt.Task.TaskMode), + zap.String("task_flow", dmt.Task.TaskFlow), + zap.String("schema_name_s", s.SchemaNameS), + zap.String("table_name_s", s.TableNameS)) + + g := errconcurrent.NewGroup() + g.SetLimit(int(dmt.TaskParams.SqlThread)) + for _, j := range migrateTasks { + gTime := time.Now() + g.Go(j, gTime, func(j interface{}) error { + dt := j.(*task.DataCompareTask) + errW := model.Transaction(dmt.Ctx, func(txnCtx context.Context) error { + _, err = model.GetIDataCompareTaskRW().UpdateDataCompareTask(txnCtx, + &task.DataCompareTask{TaskName: dt.TaskName, SchemaNameS: dt.SchemaNameS, TableNameS: dt.TableNameS, ChunkID: dt.ChunkID}, + map[string]interface{}{ + "TaskStatus": constant.TaskDatabaseStatusRunning, + }) + if err != nil { + return err + } + // clear data compare chunk result + err = model.GetIDataCompareResultRW().DeleteDataCompareResult(txnCtx, &task.DataCompareResult{ + TaskName: dt.TaskName, + SchemaNameS: dt.SchemaNameS, + TableNameS: dt.TableNameS, + ChunkID: dt.ChunkID, + }) + if err != nil { + return err + } + _, err = model.GetITaskLogRW().CreateLog(txnCtx, &task.Log{ + TaskName: dt.TaskName, + SchemaNameS: dt.SchemaNameS, + TableNameS: dt.TableNameS, + LogDetail: fmt.Sprintf("%v [%v] data compare task [%v] taskflow [%v] source table [%v.%v] chunk [%s] start", + stringutil.CurrentTimeFormatString(), + stringutil.StringLower(dmt.Task.TaskMode), + dt.TaskName, + dmt.Task.TaskFlow, + dt.SchemaNameS, + dt.TableNameS, + dt.ChunkDetailS), + }) + if err != nil { + return err + } + return nil + }) + if errW != nil { + return errW + } + + var dbCharsetS, dbCharsetT string + switch { + case strings.EqualFold(dmt.Task.TaskFlow, constant.TaskFlowOracleToTiDB) || strings.EqualFold(dmt.Task.TaskFlow, constant.TaskFlowOracleToMySQL): + dbCharsetS = 
constant.MigrateOracleCharsetStringConvertMapping[stringutil.StringUpper(dmt.DBCharsetS)] + dbCharsetT = constant.MigrateMySQLCompatibleCharsetStringConvertMapping[stringutil.StringUpper(dmt.DBCharsetT)] + case strings.EqualFold(dmt.Task.TaskFlow, constant.TaskFlowTiDBToOracle) || strings.EqualFold(dmt.Task.TaskFlow, constant.TaskFlowMySQLToOracle): + dbCharsetS = constant.MigrateMySQLCompatibleCharsetStringConvertMapping[stringutil.StringUpper(dmt.DBCharsetS)] + dbCharsetT = constant.MigrateOracleCharsetStringConvertMapping[stringutil.StringUpper(dmt.DBCharsetT)] + default: + return fmt.Errorf("the task [%s] schema [%s] taskflow [%s] column rule isn't support, please contact author", dmt.Task.TaskName, dt.SchemaNameS, dmt.Task.TaskFlow) + } + + err = database.IDataCompareProcess(&DataCompareRow{ + Ctx: dmt.Ctx, + TaskMode: dmt.Task.TaskMode, + TaskFlow: dmt.Task.TaskFlow, + StartTime: gTime, + Dmt: dt, + DatabaseS: dmt.DatabaseS, + DatabaseT: dmt.DatabaseT, + BatchSize: int(dmt.TaskParams.BatchSize), + WriteThread: int(dmt.TaskParams.WriteThread), + CallTimeout: int(dmt.TaskParams.CallTimeout), + DBCharsetS: dbCharsetS, + DBCharsetT: dbCharsetT, + RepairStmtFlow: dmt.TaskParams.RepairStmtFlow, + }) + if err != nil { + return err + } + return nil + }) + } + + for _, r := range g.Wait() { + if r.Err != nil { + smt := r.Task.(*task.DataCompareTask) + logger.Warn("data compare task process tables", + zap.String("task_name", dmt.Task.TaskName), zap.String("task_mode", dmt.Task.TaskMode), zap.String("task_flow", dmt.Task.TaskFlow), + zap.String("schema_name_s", smt.SchemaNameS), + zap.String("table_name_s", smt.TableNameS), + zap.Error(r.Err)) + + errW := model.Transaction(dmt.Ctx, func(txnCtx context.Context) error { + _, err = model.GetIDataCompareTaskRW().UpdateDataCompareTask(txnCtx, + &task.DataCompareTask{TaskName: smt.TaskName, SchemaNameS: smt.SchemaNameS, TableNameS: smt.TableNameS, ChunkID: smt.ChunkID}, + map[string]interface{}{ + "TaskStatus": constant.TaskDatabaseStatusFailed, + "Duration": fmt.Sprintf("%f", time.Now().Sub(r.Time).Seconds()), + "ErrorDetail": r.Err.Error(), + }) + if err != nil { + return err + } + _, err = model.GetITaskLogRW().CreateLog(txnCtx, &task.Log{ + TaskName: smt.TaskName, + SchemaNameS: smt.SchemaNameS, + TableNameS: smt.TableNameS, + LogDetail: fmt.Sprintf("%v [%v] data compare task [%v] taskflow [%v] source table [%v.%v] failed, please see [data_compare_task] detail", + stringutil.CurrentTimeFormatString(), + stringutil.StringLower(dmt.Task.TaskMode), + smt.TaskName, + dmt.Task.TaskFlow, + smt.SchemaNameS, + smt.TableNameS), + }) + if err != nil { + return err + } + return nil + }) + if errW != nil { + return errW + } + } + } + + endTableTime := time.Now() + err = model.Transaction(dmt.Ctx, func(txnCtx context.Context) error { + var equalChunks uint64 + tableStatusRecs, err := model.GetIDataCompareTaskRW().FindDataCompareTaskBySchemaTableChunkStatus(txnCtx, &task.DataCompareTask{ + TaskName: s.TaskName, + SchemaNameS: s.SchemaNameS, + TableNameS: s.TableNameS, + }) + if err != nil { + return err + } + for _, rec := range tableStatusRecs { + switch rec.TaskStatus { + case constant.TaskDatabaseStatusEqual: + _, err = model.GetIDataCompareSummaryRW().UpdateDataCompareSummary(txnCtx, &task.DataCompareSummary{ + TaskName: rec.TaskName, + SchemaNameS: rec.SchemaNameS, + TableNameS: rec.TableNameS, + }, map[string]interface{}{ + "ChunkEquals": rec.StatusTotals, + }) + if err != nil { + return err + } + equalChunks = equalChunks + uint64(rec.StatusTotals) + 
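After a table's chunks finish, the per-status totals are folded back into the summary and the compare flag flips to finished only when every chunk compared equal, as the switch here does case by case. A compact sketch of that bookkeeping; the status strings stand in for the `constant.TaskDatabaseStatus*` values:

```go
package main

import "fmt"

type statusRec struct {
	TaskStatus   string
	StatusTotals uint64
}

// compareFinished mirrors the end-of-table check: the table is done only when
// the equal-chunk total matches the chunk total recorded at init time.
func compareFinished(chunkTotals uint64, recs []statusRec) bool {
	var equal uint64
	for _, r := range recs {
		if r.TaskStatus == "EQUAL" {
			equal += r.StatusTotals
		}
	}
	return equal == chunkTotals
}

func main() {
	recs := []statusRec{{"EQUAL", 7}, {"NOT_EQUAL", 1}}
	fmt.Println(compareFinished(8, recs))    // false: one chunk differs
	fmt.Println(compareFinished(7, recs[:1])) // true: all 7 chunks equal
}
```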
case constant.TaskDatabaseStatusNotEqual: + _, err = model.GetIDataCompareSummaryRW().UpdateDataCompareSummary(txnCtx, &task.DataCompareSummary{ + TaskName: rec.TaskName, + SchemaNameS: rec.SchemaNameS, + TableNameS: rec.TableNameS, + }, map[string]interface{}{ + "ChunkNotEquals": rec.StatusTotals, + }) + if err != nil { + return err + } + case constant.TaskDatabaseStatusFailed: + _, err = model.GetIDataCompareSummaryRW().UpdateDataCompareSummary(txnCtx, &task.DataCompareSummary{ + TaskName: rec.TaskName, + SchemaNameS: rec.SchemaNameS, + TableNameS: rec.TableNameS, + }, map[string]interface{}{ + "ChunkFails": rec.StatusTotals, + }) + if err != nil { + return err + } + case constant.TaskDatabaseStatusWaiting: + _, err = model.GetIDataCompareSummaryRW().UpdateDataCompareSummary(txnCtx, &task.DataCompareSummary{ + TaskName: rec.TaskName, + SchemaNameS: rec.SchemaNameS, + TableNameS: rec.TableNameS, + }, map[string]interface{}{ + "ChunkWaits": rec.StatusTotals, + }) + if err != nil { + return err + } + case constant.TaskDatabaseStatusRunning: + _, err = model.GetIDataCompareSummaryRW().UpdateDataCompareSummary(txnCtx, &task.DataCompareSummary{ + TaskName: rec.TaskName, + SchemaNameS: rec.SchemaNameS, + TableNameS: rec.TableNameS, + }, map[string]interface{}{ + "ChunkRuns": rec.StatusTotals, + }) + if err != nil { + return err + } + case constant.TaskDatabaseStatusStopped: + _, err = model.GetIDataCompareSummaryRW().UpdateDataCompareSummary(txnCtx, &task.DataCompareSummary{ + TaskName: rec.TaskName, + SchemaNameS: rec.SchemaNameS, + TableNameS: rec.TableNameS, + }, map[string]interface{}{ + "ChunkStops": rec.StatusTotals, + }) + if err != nil { + return err + } + default: + return fmt.Errorf("the task [%v] task_mode [%s] task_flow [%v] schema_name_s [%v] table_name_s [%v] task_status [%v] panic, please contact auhtor or reselect", s.TaskName, dmt.Task.TaskMode, dmt.Task.TaskFlow, rec.SchemaNameS, rec.TableNameS, rec.TaskStatus) + } + } + + summary, err := model.GetIDataCompareSummaryRW().GetDataCompareSummary(txnCtx, &task.DataCompareSummary{ + TaskName: s.TaskName, + SchemaNameS: s.SchemaNameS, + TableNameS: s.TableNameS, + }) + if err != nil { + return err + } + if summary.ChunkTotals == equalChunks { + _, err = model.GetIDataCompareSummaryRW().UpdateDataCompareSummary(txnCtx, &task.DataCompareSummary{ + TaskName: s.TaskName, + SchemaNameS: s.SchemaNameS, + TableNameS: s.TableNameS, + }, map[string]interface{}{ + "CompareFlag": constant.TaskCompareStatusFinished, + "Duration": fmt.Sprintf("%f", time.Now().Sub(startTime).Seconds()), + }) + if err != nil { + return err + } + } else { + _, err = model.GetIDataCompareSummaryRW().UpdateDataCompareSummary(txnCtx, &task.DataCompareSummary{ + TaskName: s.TaskName, + SchemaNameS: s.SchemaNameS, + TableNameS: s.TableNameS, + }, map[string]interface{}{ + "CompareFlag": constant.TaskCompareStatusNotFinished, + "Duration": fmt.Sprintf("%f", time.Now().Sub(startTime).Seconds()), + }) + if err != nil { + return err + } + } + return nil + }) + if err != nil { + return err + } + + logger.Info("data compare task process table", + zap.String("task_name", dmt.Task.TaskName), + zap.String("task_mode", dmt.Task.TaskMode), + zap.String("task_flow", dmt.Task.TaskFlow), + zap.String("schema_name_s", s.SchemaNameS), + zap.String("table_name_s", s.TableNameS), + zap.String("cost", endTableTime.Sub(startTime).String())) + + return nil +} + +func (dmt *DataCompareTask) ProcessStatisticsScan(ctx context.Context, dbTypeS, dbTypeT, globalScnS, globalScnT string, tableRows 
uint64, tableSize float64, attsRule *database.DataCompareAttributesRule) error { + h, err := dmt.DatabaseS.GetDatabaseTableHighestSelectivityIndex( + attsRule.SchemaNameS, + attsRule.TableNameS, + attsRule.CompareConditionFieldS, + attsRule.IgnoreConditionFields) + if err != nil { + return err + } + + logger.Debug("data compare task init table chunk", + zap.String("task_name", dmt.Task.TaskName), + zap.String("task_mode", dmt.Task.TaskMode), + zap.String("task_flow", dmt.Task.TaskFlow), + zap.String("schema_name_s", attsRule.SchemaNameS), + zap.String("table_name_s", attsRule.TableNameS), + zap.Any("origin upstream bucket", h)) + + if h == nil { + err = dmt.ProcessTableScan(ctx, attsRule.SchemaNameS, attsRule.TableNameS, globalScnS, globalScnT, tableRows, tableSize, attsRule) + if err != nil { + return err + } + return nil + } + + // upstream bucket ranges + err = h.TransSelectivity( + dbTypeS, + dmt.DBCharsetS, + dmt.Task.CaseFieldRuleS, + dmt.TaskParams.EnableCollationSetting) + if err != nil { + return err + } + + columnDatatypeSliT, err := GetDownstreamTableColumnDatatype(attsRule.SchemaNameT, attsRule.TableNameT, dmt.DatabaseT, h.IndexColumn, attsRule.ColumnNameRouteRule) + if err != nil { + return err + } + + // downstream bucket rule + selecRules, err := h.TransSelectivityRule(dmt.Task.TaskFlow, dbTypeT, dmt.DBCharsetS, columnDatatypeSliT, attsRule.ColumnNameRouteRule) + if err != nil { + return err + } + + logger.Debug("data compare task init table chunk", + zap.String("task_name", dmt.Task.TaskName), + zap.String("task_mode", dmt.Task.TaskMode), + zap.String("task_flow", dmt.Task.TaskFlow), + zap.String("schema_name_s", attsRule.SchemaNameS), + zap.String("table_name_s", attsRule.TableNameS), + zap.String("process_method", "statistic"), + zap.Any("downstream selectivity rule", selecRules), + zap.Any("new upstream bucket", h)) + + rangeC := make(chan []*structure.Range, constant.DefaultMigrateTaskQueueSize) + d := &Divide{ + DBTypeS: dbTypeS, + DBCharsetS: dmt.DBCharsetS, + SchemaNameS: attsRule.SchemaNameS, + TableNameS: attsRule.TableNameS, + ChunkSize: int64(dmt.TaskParams.ChunkSize), + DatabaseS: dmt.DatabaseS, + Cons: h, + RangeC: rangeC, + } + g, ctx := errgroup.WithContext(ctx) + + g.Go(func() error { + defer close(rangeC) + err = d.ProcessUpstreamStatisticsBucket() + if err != nil { + return err + } + return nil + }) + + g.Go(func() error { + totalChunks := 0 + for r := range rangeC { + downRanges, err := ProcessDownstreamStatisticsBucket(dbTypeT, dmt.DBCharsetT, r, selecRules) + if err != nil { + return err + } + logger.Debug("data compare task init table chunk", + zap.String("task_name", dmt.Task.TaskName), + zap.String("task_mode", dmt.Task.TaskMode), + zap.String("task_flow", dmt.Task.TaskFlow), + zap.String("schema_name_s", attsRule.SchemaNameS), + zap.String("table_name_s", attsRule.TableNameS), + zap.Any("downstream selectivity rule", selecRules), + zap.Any("new upstream bucket", h), + zap.Any("current upstream range", r), + zap.Any("current downstream range", downRanges)) + statsRanges, err := dmt.PrepareStatisticsRange(globalScnS, globalScnT, attsRule, r, downRanges) + if err != nil { + return err + } + if len(statsRanges) > 0 { + err = model.GetIDataCompareTaskRW().CreateInBatchDataCompareTask(ctx, statsRanges, int(dmt.TaskParams.WriteThread), int(dmt.TaskParams.BatchSize)) + if err != nil { + return err + } + totalChunks = totalChunks + len(statsRanges) + } + return nil + } + + if totalChunks == 0 { + err := dmt.ProcessTableScan(ctx, attsRule.SchemaNameS, 
attsRule.TableNameS, globalScnS, globalScnT, tableRows, tableSize, attsRule) + if err != nil { + return err + } + return nil + } + _, err = model.GetIDataCompareSummaryRW().CreateDataCompareSummary(ctx, &task.DataCompareSummary{ + TaskName: dmt.Task.TaskName, + SchemaNameS: attsRule.SchemaNameS, + TableNameS: attsRule.TableNameS, + SchemaNameT: attsRule.SchemaNameT, + TableNameT: attsRule.TableNameT, + SnapshotPointS: globalScnS, + SnapshotPointT: globalScnT, + TableRowsS: tableRows, + TableSizeS: tableSize, + ChunkTotals: uint64(totalChunks), + InitFlag: constant.TaskInitStatusFinished, + CompareFlag: constant.TaskCompareStatusNotFinished, + }) + if err != nil { + return err + } + return nil + }) + + dmt.WaiterC <- &WaitingRecs{ + TaskName: dmt.Task.TaskName, + SchemaNameS: attsRule.SchemaNameS, + TableNameS: attsRule.TableNameS, + } + return nil +} + +func (dmt *DataCompareTask) ProcessTableScan(ctx context.Context, schemaNameS, tableNameS, globalScnS, globalScnT string, tableRows uint64, tableSize float64, attsRule *database.DataCompareAttributesRule) error { + var encChunkS, encChunkT []byte + if !strings.EqualFold(attsRule.CompareConditionRangeS, "") { + encChunkS = snappy.Encode(nil, []byte(attsRule.CompareConditionRangeS)) + } else { + encChunkS = snappy.Encode(nil, []byte("1 = 1")) + } + if !strings.EqualFold(attsRule.CompareConditionRangeT, "") { + encChunkT = snappy.Encode(nil, []byte(attsRule.CompareConditionRangeT)) + } else { + encChunkT = snappy.Encode(nil, []byte("1 = 1")) + } + + logger.Warn("data compare task init table chunk", + zap.String("task_name", dmt.Task.TaskName), + zap.String("task_mode", dmt.Task.TaskMode), + zap.String("task_flow", dmt.Task.TaskFlow), + zap.String("schema_name_s", attsRule.SchemaNameS), + zap.String("table_name_s", attsRule.TableNameS), + zap.Any("upstream bucket range", string(encChunkS))) + + encryptChunkS, err := stringutil.Encrypt(stringutil.BytesToString(encChunkS), []byte(constant.DefaultDataEncryptDecryptKey)) + if err != nil { + return err + } + encryptChunkT, err := stringutil.Encrypt(stringutil.BytesToString(encChunkT), []byte(constant.DefaultDataEncryptDecryptKey)) + if err != nil { + return err + } + err = model.Transaction(ctx, func(txnCtx context.Context) error { + _, err = model.GetIDataCompareTaskRW().CreateDataCompareTask(txnCtx, &task.DataCompareTask{ + TaskName: dmt.Task.TaskName, + SchemaNameS: attsRule.SchemaNameS, + TableNameS: attsRule.TableNameS, + SchemaNameT: attsRule.SchemaNameT, + TableNameT: attsRule.TableNameT, + TableTypeS: attsRule.TableTypeS, + SnapshotPointS: globalScnS, + SnapshotPointT: globalScnT, + CompareMethod: attsRule.CompareMethod, + ColumnDetailSO: attsRule.ColumnDetailSO, + ColumnDetailS: attsRule.ColumnDetailS, + ColumnDetailTO: attsRule.ColumnDetailTO, + ColumnDetailT: attsRule.ColumnDetailT, + SqlHintS: attsRule.SqlHintS, + SqlHintT: attsRule.SqlHintT, + ChunkID: uuid.New().String(), + ChunkDetailS: encryptChunkS, + ChunkDetailArgS: "", + ChunkDetailT: encryptChunkT, + ChunkDetailArgT: "", + ConsistentReadS: strconv.FormatBool(dmt.TaskParams.EnableConsistentRead), + TaskStatus: constant.TaskDatabaseStatusWaiting, + }) + if err != nil { + return err + } + _, err = model.GetIDataCompareSummaryRW().CreateDataCompareSummary(txnCtx, &task.DataCompareSummary{ + TaskName: dmt.Task.TaskName, + SchemaNameS: attsRule.SchemaNameS, + TableNameS: attsRule.TableNameS, + SchemaNameT: attsRule.SchemaNameT, + TableNameT: attsRule.TableNameT, + SnapshotPointS: globalScnS, + SnapshotPointT: globalScnT, + TableRowsS: 
tableRows, + TableSizeS: tableSize, + ChunkTotals: 1, + InitFlag: constant.TaskInitStatusFinished, + CompareFlag: constant.TaskCompareStatusNotFinished, + }) + if err != nil { + return err + } + return nil + }) + if err != nil { + return err + } + + dmt.WaiterC <- &WaitingRecs{ + TaskName: dmt.Task.TaskName, + SchemaNameS: schemaNameS, + TableNameS: tableNameS, + } + return nil +} + +func (dmt *DataCompareTask) PrepareStatisticsRange(globalScnS, globalScnT string, attsRule *database.DataCompareAttributesRule, upRanges, downRanges []*structure.Range) ([]*task.DataCompareTask, error) { + var metas []*task.DataCompareTask + for i, r := range upRanges { + toStringS, toStringArgsS := r.ToString() + toStringT, toStringArgsT := downRanges[i].ToString() + + if !strings.EqualFold(attsRule.CompareConditionRangeS, "") { + toStringS = fmt.Sprintf("%s AND (%s)", toStringS, attsRule.CompareConditionRangeS) + } + if !strings.EqualFold(attsRule.CompareConditionRangeT, "") { + toStringT = fmt.Sprintf("%s AND (%s)", toStringT, attsRule.CompareConditionRangeT) + } + + encChunkS := snappy.Encode(nil, []byte(toStringS)) + encChunkT := snappy.Encode(nil, []byte(toStringT)) + + encryptChunkS, err := stringutil.Encrypt(stringutil.BytesToString(encChunkS), []byte(constant.DefaultDataEncryptDecryptKey)) + if err != nil { + return metas, err + } + encryptChunkT, err := stringutil.Encrypt(stringutil.BytesToString(encChunkT), []byte(constant.DefaultDataEncryptDecryptKey)) + if err != nil { + return metas, err + } + + var argsS, argsT string + if toStringArgsS != nil { + argsS, err = stringutil.MarshalJSON(toStringArgsS) + if err != nil { + return metas, err + } + } + if toStringArgsT != nil { + argsT, err = stringutil.MarshalJSON(toStringArgsT) + if err != nil { + return metas, err + } + } + + metas = append(metas, &task.DataCompareTask{ + TaskName: dmt.Task.TaskName, + SchemaNameS: attsRule.SchemaNameS, + TableNameS: attsRule.TableNameS, + SchemaNameT: attsRule.SchemaNameT, + TableNameT: attsRule.TableNameT, + TableTypeS: attsRule.TableTypeS, + SnapshotPointS: globalScnS, + SnapshotPointT: globalScnT, + CompareMethod: attsRule.CompareMethod, + ColumnDetailSO: attsRule.ColumnDetailSO, + ColumnDetailS: attsRule.ColumnDetailS, + ColumnDetailTO: attsRule.ColumnDetailTO, + ColumnDetailT: attsRule.ColumnDetailT, + SqlHintS: attsRule.SqlHintS, + SqlHintT: attsRule.SqlHintT, + ChunkID: uuid.New().String(), + ChunkDetailS: encryptChunkS, + ChunkDetailArgS: argsS, + ChunkDetailT: encryptChunkT, + ChunkDetailArgT: argsT, + ConsistentReadS: strconv.FormatBool(dmt.TaskParams.EnableConsistentRead), + TaskStatus: constant.TaskDatabaseStatusWaiting, + }) + } + return metas, nil +} diff --git a/database/processor/data_migrate_task.go b/database/processor/data_migrate_task.go new file mode 100644 index 0000000..865f25c --- /dev/null +++ b/database/processor/data_migrate_task.go @@ -0,0 +1,1233 @@ +/* +Copyright © 2020 Marvin + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
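`PrepareStatisticsRange` above stitches each bucket range's condition together with the task-level compare condition, and `ProcessTableScan` falls back to a single `1 = 1` chunk when no statistics-based split is possible. A sketch of that clause assembly:

```go
package main

import "fmt"

// buildChunkWhere combines a chunk's range condition with an optional
// task-level condition; an empty range degrades to the whole-table "1 = 1".
func buildChunkWhere(rangeCond, extraCond string) string {
	if rangeCond == "" {
		rangeCond = "1 = 1"
	}
	if extraCond == "" {
		return rangeCond
	}
	return fmt.Sprintf("%s AND (%s)", rangeCond, extraCond)
}

func main() {
	fmt.Println(buildChunkWhere(`"ID" >= 1 AND "ID" < 1000`, "DELETED = 0"))
	fmt.Println(buildChunkWhere("", "DELETED = 0")) // 1 = 1 AND (DELETED = 0)
}
```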
+*/ +package processor + +import ( + "context" + "database/sql" + "fmt" + "github.com/golang/snappy" + "github.com/google/uuid" + "github.com/wentaojin/dbms/database" + "github.com/wentaojin/dbms/errconcurrent" + "github.com/wentaojin/dbms/logger" + "github.com/wentaojin/dbms/model" + "github.com/wentaojin/dbms/model/rule" + "github.com/wentaojin/dbms/model/task" + "github.com/wentaojin/dbms/proto/pb" + "github.com/wentaojin/dbms/utils/constant" + "github.com/wentaojin/dbms/utils/stringutil" + "github.com/wentaojin/dbms/utils/structure" + "go.uber.org/zap" + "golang.org/x/sync/errgroup" + "path/filepath" + "strconv" + "strings" + "time" +) + +type DataMigrateTask struct { + Ctx context.Context + Task *task.Task + DBRoleS string + DBVersionS string + DBCharsetS string + DBCharsetT string + DatabaseS database.IDatabase + DatabaseT database.IDatabase + SchemaNameS string + TableThread uint64 + GlobalSqlHintS string + GlobalSqlHintT string + EnableCheckpoint bool + EnableConsistentRead bool + ChunkSize uint64 + BatchSize uint64 + WriteThread uint64 + CallTimeout uint64 + SqlThreadS uint64 + + StmtParams *pb.StatementMigrateParam + CsvParams *pb.CsvMigrateParam + + WaiterC chan *WaitingRecs + ResumeC chan *WaitingRecs +} + +func (cmt *DataMigrateTask) Init() error { + defer func() { + close(cmt.WaiterC) + close(cmt.ResumeC) + }() + startTime := time.Now() + logger.Info("data migrate task init start", + zap.String("task_name", cmt.Task.TaskName), + zap.String("task_mode", cmt.Task.TaskMode), + zap.String("task_flow", cmt.Task.TaskFlow), + zap.String("startTime", startTime.String())) + + if !cmt.EnableCheckpoint { + err := model.GetIDataMigrateSummaryRW().DeleteDataMigrateSummaryName(cmt.Ctx, []string{cmt.Task.TaskName}) + if err != nil { + return err + } + err = model.GetIDataMigrateTaskRW().DeleteDataMigrateTaskName(cmt.Ctx, []string{cmt.Task.TaskName}) + if err != nil { + return err + } + } + logger.Warn("data migrate task checkpoint skip", + zap.String("task_name", cmt.Task.TaskName), + zap.String("task_mode", cmt.Task.TaskMode), + zap.String("task_flow", cmt.Task.TaskFlow), + zap.Bool("enable_checkpoint", cmt.EnableCheckpoint)) + + // filter database table + schemaTaskTables, err := model.GetIMigrateTaskTableRW().FindMigrateTaskTable(cmt.Ctx, &rule.MigrateTaskTable{ + TaskName: cmt.Task.TaskName, + SchemaNameS: cmt.SchemaNameS, + }) + if err != nil { + return err + } + var ( + includeTables []string + excludeTables []string + databaseTaskTables []string // task tables + globalScn string + ) + databaseTableTypeMap := make(map[string]string) + databaseTaskTablesMap := make(map[string]struct{}) + + for _, t := range schemaTaskTables { + if strings.EqualFold(t.IsExclude, constant.MigrateTaskTableIsExclude) { + excludeTables = append(excludeTables, t.TableNameS) + } + if strings.EqualFold(t.IsExclude, constant.MigrateTaskTableIsNotExclude) { + includeTables = append(includeTables, t.TableNameS) + } + } + + tableObjs, err := cmt.DatabaseS.FilterDatabaseTable(cmt.SchemaNameS, includeTables, excludeTables) + if err != nil { + return err + } + + // rule case field + for _, t := range tableObjs.TaskTables { + var tabName string + // the according target case field rule convert + if strings.EqualFold(cmt.Task.CaseFieldRuleS, constant.ParamValueStructMigrateCaseFieldRuleLower) { + tabName = stringutil.StringLower(t) + } + if strings.EqualFold(cmt.Task.CaseFieldRuleS, constant.ParamValueStructMigrateCaseFieldRuleUpper) { + tabName = stringutil.StringUpper(t) + } + if 
strings.EqualFold(cmt.Task.CaseFieldRuleS, constant.ParamValueStructMigrateCaseFieldRuleOrigin) { + tabName = t + } + databaseTaskTables = append(databaseTaskTables, tabName) + databaseTaskTablesMap[tabName] = struct{}{} + } + + logger.Info("data migrate task compare table", + zap.String("task_name", cmt.Task.TaskName), + zap.String("task_mode", cmt.Task.TaskMode), + zap.String("task_flow", cmt.Task.TaskFlow)) + // compare the task table + // the database task table is exist, and the config task table isn't exist, the clear the database task table + summaries, err := model.GetIDataMigrateSummaryRW().FindDataMigrateSummary(cmt.Ctx, &task.DataMigrateSummary{TaskName: cmt.Task.TaskName, SchemaNameS: cmt.SchemaNameS}) + if err != nil { + return err + } + for _, s := range summaries { + _, ok := databaseTaskTablesMap[s.TableNameS] + + if !ok || strings.EqualFold(s.InitFlag, constant.TaskInitStatusNotFinished) { + err = model.Transaction(cmt.Ctx, func(txnCtx context.Context) error { + err = model.GetIDataMigrateSummaryRW().DeleteDataMigrateSummary(txnCtx, &task.DataMigrateSummary{ + TaskName: s.TaskName, + SchemaNameS: s.SchemaNameS, + TableNameS: s.TableNameS, + }) + if err != nil { + return err + } + err = model.GetIDataMigrateTaskRW().DeleteDataMigrateTask(txnCtx, &task.DataMigrateTask{ + TaskName: s.TaskName, + SchemaNameS: s.SchemaNameS, + TableNameS: s.TableNameS, + }) + if err != nil { + return err + } + return nil + }) + if err != nil { + return err + } + } + } + + databaseTableTypeMap, err = cmt.DatabaseS.GetDatabaseTableType(cmt.SchemaNameS) + if err != nil { + return err + } + + globalScnS, err := cmt.DatabaseS.GetDatabaseConsistentPos() + if err != nil { + return err + } + globalScn = strconv.FormatUint(globalScnS, 10) + + // database tables + // init database table + dbTypeSli := stringutil.StringSplit(cmt.Task.TaskFlow, constant.StringSeparatorAite) + dbTypeS := dbTypeSli[0] + + initTable := time.Now() + logger.Info("data migrate task init table", + zap.String("task_name", cmt.Task.TaskName), + zap.String("task_mode", cmt.Task.TaskMode), + zap.String("task_flow", cmt.Task.TaskFlow), + zap.String("startTime", initTable.String())) + g, gCtx := errgroup.WithContext(cmt.Ctx) + g.SetLimit(int(cmt.TableThread)) + + for _, taskJob := range databaseTaskTables { + sourceTable := taskJob + g.Go(func() error { + select { + case <-gCtx.Done(): + return gCtx.Err() + default: + initTableTime := time.Now() + s, err := model.GetIDataMigrateSummaryRW().GetDataMigrateSummary(gCtx, &task.DataMigrateSummary{ + TaskName: cmt.Task.TaskName, + SchemaNameS: cmt.SchemaNameS, + TableNameS: sourceTable, + }) + if err != nil { + return err + } + if strings.EqualFold(s.InitFlag, constant.TaskInitStatusFinished) { + // the database task has init flag,skip + cmt.ResumeC <- &WaitingRecs{ + TaskName: s.TaskName, + SchemaNameS: s.SchemaNameS, + TableNameS: s.TableNameS, + } + return nil + } + + tableRows, err := cmt.DatabaseS.GetDatabaseTableRows(cmt.SchemaNameS, sourceTable) + if err != nil { + return err + } + tableSize, err := cmt.DatabaseS.GetDatabaseTableSize(cmt.SchemaNameS, sourceTable) + if err != nil { + return err + } + + dataRule := &DataMigrateRule{ + Ctx: gCtx, + TaskMode: cmt.Task.TaskMode, + TaskName: cmt.Task.TaskName, + TaskFlow: cmt.Task.TaskFlow, + DatabaseS: cmt.DatabaseS, + SchemaNameS: cmt.SchemaNameS, + TableNameS: sourceTable, + TableTypeS: databaseTableTypeMap, + DBCharsetS: cmt.DBCharsetS, + CaseFieldRuleS: cmt.Task.CaseFieldRuleS, + CaseFieldRuleT: cmt.Task.CaseFieldRuleT, + 
+					GlobalSqlHintS: cmt.GlobalSqlHintS,
+				}
+
+				attsRule, err := database.IDataMigrateAttributesRule(dataRule)
+				if err != nil {
+					return err
+				}
+
+				// no chunk strategy: migrate the table as a single WHERE-range chunk
+				if !attsRule.EnableChunkStrategy && !strings.EqualFold(attsRule.WhereRange, "") {
+					encChunkS := snappy.Encode(nil, []byte(attsRule.WhereRange))
+
+					encryptChunkS, err := stringutil.Encrypt(stringutil.BytesToString(encChunkS), []byte(constant.DefaultDataEncryptDecryptKey))
+					if err != nil {
+						return err
+					}
+
+					var csvFile string
+					if strings.EqualFold(cmt.Task.TaskMode, constant.TaskModeCSVMigrate) {
+						csvDir := filepath.Join(cmt.CsvParams.OutputDir, attsRule.SchemaNameS, attsRule.TableNameS, uuid.NewString())
+						err = stringutil.PathNotExistOrCreate(csvDir)
+						if err != nil {
+							return err
+						}
+						csvFile = filepath.Join(csvDir, stringutil.StringBuilder(attsRule.SchemaNameT, `.`, attsRule.TableNameT, `.0.csv`))
+					}
+					migrateTask := &task.DataMigrateTask{
+						TaskName:        cmt.Task.TaskName,
+						SchemaNameS:     attsRule.SchemaNameS,
+						TableNameS:      attsRule.TableNameS,
+						SchemaNameT:     attsRule.SchemaNameT,
+						TableNameT:      attsRule.TableNameT,
+						TableTypeS:      attsRule.TableTypeS,
+						SnapshotPointS:  globalScn,
+						ColumnDetailO:   attsRule.ColumnDetailO,
+						ColumnDetailS:   attsRule.ColumnDetailS,
+						ColumnDetailT:   attsRule.ColumnDetailT,
+						SqlHintS:        attsRule.SqlHintS,
+						ChunkID:         uuid.New().String(),
+						ChunkDetailS:    encryptChunkS,
+						ChunkDetailArgS: "",
+						ConsistentReadS: strconv.FormatBool(cmt.EnableConsistentRead),
+						TaskStatus:      constant.TaskDatabaseStatusWaiting,
+						CsvFile:         csvFile,
+					}
+					err = model.Transaction(gCtx, func(txnCtx context.Context) error {
+						_, err = model.GetIDataMigrateTaskRW().CreateDataMigrateTask(txnCtx, migrateTask)
+						if err != nil {
+							return err
+						}
+						_, err = model.GetIDataMigrateSummaryRW().CreateDataMigrateSummary(txnCtx, &task.DataMigrateSummary{
+							TaskName:       cmt.Task.TaskName,
+							SchemaNameS:    attsRule.SchemaNameS,
+							TableNameS:     attsRule.TableNameS,
+							SchemaNameT:    attsRule.SchemaNameT,
+							TableNameT:     attsRule.TableNameT,
+							SnapshotPointS: globalScn,
+							TableRowsS:     tableRows,
+							TableSizeS:     tableSize,
+							InitFlag:       constant.TaskInitStatusFinished,
+							MigrateFlag:    constant.TaskMigrateStatusNotFinished,
+							ChunkTotals:    1,
+						})
+						if err != nil {
+							return err
+						}
+						return nil
+					})
+					if err != nil {
+						return err
+					}
+
+					cmt.WaiterC <- &WaitingRecs{
+						TaskName:    cmt.Task.TaskName,
+						SchemaNameS: cmt.SchemaNameS,
+						TableNameS:  sourceTable,
+					}
+					return nil
+				}
+
+				// statistics-based divide path (non-primary role, or versions below the rowid requirement)
+				if !strings.EqualFold(cmt.DBRoleS, constant.OracleDatabasePrimaryRole) || (strings.EqualFold(cmt.DBRoleS, constant.OracleDatabasePrimaryRole) && stringutil.VersionOrdinal(cmt.DBVersionS) < stringutil.VersionOrdinal(constant.OracleDatabaseTableMigrateRowidRequireVersion)) {
+					err = cmt.ProcessStatisticsScan(
+						gCtx,
+						dbTypeS,
+						globalScn,
+						tableRows,
+						tableSize,
+						attsRule)
+					if err != nil {
+						return err
+					}
+					return nil
+				}
+
+				err = cmt.ProcessChunkScan(
+					gCtx,
+					globalScn,
+					tableRows,
+					tableSize,
+					attsRule)
+				if err != nil {
+					return err
+				}
+				logger.Info("data migrate task init table finished",
+					zap.String("task_name", cmt.Task.TaskName),
+					zap.String("task_mode", cmt.Task.TaskMode),
+					zap.String("task_flow", cmt.Task.TaskFlow),
+					zap.String("schema_name_s", attsRule.SchemaNameS),
+					zap.String("table_name_s", attsRule.TableNameS),
+					zap.String("cost", time.Since(initTableTime).String()))
+				return nil
+			}
+		})
+	}
+
+	if err = g.Wait(); err != nil {
+		logger.Error("data migrate task init table failed",
+			zap.String("task_name", cmt.Task.TaskName),
+			zap.String("task_mode", cmt.Task.TaskMode),
+			zap.String("task_flow", cmt.Task.TaskFlow),
+			zap.String("schema_name_s", cmt.SchemaNameS),
+			zap.Error(err))
+		return err
+	}
+	logger.Info("data migrate task init table completed",
+		zap.String("task_name", cmt.Task.TaskName),
+		zap.String("task_mode", cmt.Task.TaskMode),
+		zap.String("task_flow", cmt.Task.TaskFlow),
+		zap.String("cost", time.Since(initTable).String()))
+
+	logger.Info("data migrate task init success",
+		zap.String("task_name", cmt.Task.TaskName),
+		zap.String("task_mode", cmt.Task.TaskMode),
+		zap.String("task_flow", cmt.Task.TaskFlow),
+		zap.String("cost", time.Since(startTime).String()))
+	return nil
+}
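Init, Run, and Resume are executed concurrently by IDatabaseRun (database.go above), so the two channels are the whole contract: Init classifies each table into WaiterC (fresh tables) or ResumeC (already-initialized tables), and its deferred close of both channels is what lets the Run and Resume range loops terminate. A stripped-down sketch of that choreography, with string payloads standing in for WaitingRecs (names simplified, not the project's types):

```go
package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

type runner struct {
	waiterC chan string
	resumeC chan string
}

// init decides, per table, whether it needs a fresh pass (waiterC) or a
// checkpoint resume (resumeC), then closes both channels so the consumers'
// range loops terminate once the backlog is drained.
func (r *runner) init(tables []string, initialized map[string]bool) error {
	defer func() {
		close(r.waiterC)
		close(r.resumeC)
	}()
	for _, t := range tables {
		if initialized[t] {
			r.resumeC <- t
		} else {
			r.waiterC <- t
		}
	}
	return nil
}

func (r *runner) run() error {
	for t := range r.waiterC {
		fmt.Println("migrate (fresh):", t)
	}
	return nil
}

func (r *runner) resume() error {
	for t := range r.resumeC {
		fmt.Println("migrate (resume):", t)
	}
	return nil
}

func main() {
	r := &runner{waiterC: make(chan string, 4), resumeC: make(chan string, 4)}
	g := new(errgroup.Group)
	g.Go(func() error { return r.init([]string{"T1", "T2"}, map[string]bool{"T2": true}) })
	g.Go(r.run)
	g.Go(r.resume)
	if err := g.Wait(); err != nil {
		fmt.Println("error:", err)
	}
}
```

The buffering only affects throughput; correctness comes from the producer owning the close.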
zap.String("task_mode", cmt.Task.TaskMode), zap.String("task_flow", cmt.Task.TaskFlow), + zap.String("schema_name_s", cmt.SchemaNameS), + zap.Error(err)) + return err + } + logger.Info("data migrate task init table", + zap.String("task_name", cmt.Task.TaskName), + zap.String("task_mode", cmt.Task.TaskMode), + zap.String("task_flow", cmt.Task.TaskFlow), + zap.String("cost", time.Now().Sub(initTable).String())) + + logger.Info("data migrate task init success", + zap.String("task_name", cmt.Task.TaskName), + zap.String("task_mode", cmt.Task.TaskMode), + zap.String("task_flow", cmt.Task.TaskFlow), + zap.String("cost", time.Now().Sub(startTime).String())) + return nil +} + +func (cmt *DataMigrateTask) Run() error { + startTime := time.Now() + logger.Info("data migrate task run starting", + zap.String("task_name", cmt.Task.TaskName), zap.String("task_mode", cmt.Task.TaskMode), zap.String("task_flow", cmt.Task.TaskFlow)) + + for s := range cmt.WaiterC { + err := cmt.Process(s) + if err != nil { + return err + } + } + + logger.Info("data migrate task run success", + zap.String("task_name", cmt.Task.TaskName), + zap.String("task_mode", cmt.Task.TaskMode), + zap.String("task_flow", cmt.Task.TaskFlow), + zap.String("cost", time.Now().Sub(startTime).String())) + return nil +} + +func (cmt *DataMigrateTask) Resume() error { + startTime := time.Now() + logger.Info("data migrate task resume starting", + zap.String("task_name", cmt.Task.TaskName), zap.String("task_mode", cmt.Task.TaskMode), zap.String("task_flow", cmt.Task.TaskFlow)) + + for s := range cmt.ResumeC { + err := cmt.Process(s) + if err != nil { + return err + } + } + logger.Info("data migrate task resume success", + zap.String("task_name", cmt.Task.TaskName), + zap.String("task_mode", cmt.Task.TaskMode), + zap.String("task_flow", cmt.Task.TaskFlow), + zap.String("cost", time.Now().Sub(startTime).String())) + return nil +} + +func (cmt *DataMigrateTask) Process(s *WaitingRecs) error { + startTableTime := time.Now() + + summary, err := model.GetIDataMigrateSummaryRW().GetDataMigrateSummary(cmt.Ctx, &task.DataMigrateSummary{ + TaskName: s.TaskName, + SchemaNameS: s.SchemaNameS, + TableNameS: s.TableNameS, + }) + if err != nil { + return err + } + if strings.EqualFold(summary.MigrateFlag, constant.TaskMigrateStatusFinished) { + logger.Warn("data migrate task process", + zap.String("task_name", cmt.Task.TaskName), + zap.String("task_mode", cmt.Task.TaskMode), + zap.String("task_flow", cmt.Task.TaskFlow), + zap.String("schema_name_s", s.SchemaNameS), + zap.String("table_name_s", s.TableNameS), + zap.String("migrate_flag", summary.MigrateFlag), + zap.String("action", "migrate skip")) + return nil + } + + if strings.EqualFold(summary.InitFlag, constant.TaskInitStatusNotFinished) { + return fmt.Errorf("the task_name [%s] task_mode [%s] task_flow [%s] schema_name_s [%s] table_name_s [%s] init status not finished, disabled migrate", s.TableNameS, cmt.Task.TaskMode, cmt.Task.TaskFlow, s.SchemaNameS, s.TableNameS) + } + + var ( + sqlTSmt *sql.Stmt + ) + if strings.EqualFold(cmt.Task.TaskMode, constant.TaskModeCSVMigrate) { + err = stringutil.PathNotExistOrCreate(filepath.Join(cmt.CsvParams.OutputDir, s.SchemaNameS, s.TableNameS)) + if err != nil { + return err + } + statfs, err := stringutil.GetDiskUsage(cmt.CsvParams.OutputDir) + if err != nil { + return err + } + // MB + diskFactor, err := stringutil.StrconvFloatBitSize(cmt.CsvParams.DiskUsageFactor, 64) + if err != nil { + return err + } + estmTableSizeMB := summary.TableSizeS * diskFactor + + 
+
+		logger.Info("data migrate task process table",
+			zap.String("task_name", cmt.Task.TaskName),
+			zap.String("task_mode", cmt.Task.TaskMode),
+			zap.String("task_flow", cmt.Task.TaskFlow),
+			zap.String("schema_name_s", s.SchemaNameS),
+			zap.String("table_name_s", s.TableNameS),
+			zap.String("output_dir", cmt.CsvParams.OutputDir),
+			zap.Uint64("disk total space(MB)", totalSpace),
+			zap.Uint64("disk used space(MB)", usedSpace),
+			zap.Uint64("disk free space(MB)", freeSpace),
+			zap.Uint64("estimate table space(MB)", uint64(estmTableSizeMB)),
+			zap.String("startTime", startTableTime.String()))
+
+	} else if strings.EqualFold(cmt.Task.TaskMode, constant.TaskModeStmtMigrate) {
+		logger.Info("data migrate task process table",
+			zap.String("task_name", cmt.Task.TaskName),
+			zap.String("task_mode", cmt.Task.TaskMode),
+			zap.String("task_flow", cmt.Task.TaskFlow),
+			zap.String("schema_name_s", s.SchemaNameS),
+			zap.String("table_name_s", s.TableNameS),
+			zap.String("startTime", startTableTime.String()))
+
+		switch cmt.Task.TaskFlow {
+		case constant.TaskFlowOracleToTiDB, constant.TaskFlowOracleToMySQL:
+			limitOne, err := model.GetIDataMigrateTaskRW().GetDataMigrateTask(cmt.Ctx, &task.DataMigrateTask{
+				TaskName:    s.TaskName,
+				SchemaNameS: s.SchemaNameS,
+				TableNameS:  s.TableNameS})
+			if err != nil {
+				return err
+			}
+			// NOTE: the waiting record only carries source names, the target names come from the task record
+			sqlStr := GenMYSQLCompatibleDatabasePrepareStmt(limitOne.SchemaNameT, limitOne.TableNameT, cmt.GlobalSqlHintT, limitOne.ColumnDetailT, int(cmt.BatchSize), true)
+			sqlTSmt, err = cmt.DatabaseT.PrepareContext(cmt.Ctx, sqlStr)
+			if err != nil {
+				return err
+			}
+		default:
+			return fmt.Errorf("the task_name [%s] schema [%s] task_mode [%s] task_flow [%s] prepare isn't supported, please contact author", cmt.Task.TaskName, s.SchemaNameS, cmt.Task.TaskMode, cmt.Task.TaskFlow)
+		}
+	}
+
+	var migrateTasks []*task.DataMigrateTask
+	migrateTasks, err = model.GetIDataMigrateTaskRW().FindDataMigrateTaskTableStatus(cmt.Ctx,
+		s.TaskName,
+		s.SchemaNameS,
+		s.TableNameS,
+		[]string{constant.TaskDatabaseStatusWaiting, constant.TaskDatabaseStatusFailed, constant.TaskDatabaseStatusRunning, constant.TaskDatabaseStatusStopped},
+	)
+	if err != nil {
+		return err
+	}
+
+	logger.Info("data migrate task process chunks",
+		zap.String("task_name", cmt.Task.TaskName),
zap.String("task_mode", cmt.Task.TaskMode), + zap.String("task_flow", cmt.Task.TaskFlow), + zap.String("schema_name_s", s.SchemaNameS), + zap.String("table_name_s", s.TableNameS)) + + g := errconcurrent.NewGroup() + g.SetLimit(int(cmt.SqlThreadS)) + + for _, j := range migrateTasks { + gTime := time.Now() + g.Go(j, gTime, func(j interface{}) error { + dt := j.(*task.DataMigrateTask) + errW := model.Transaction(cmt.Ctx, func(txnCtx context.Context) error { + _, err := model.GetIDataMigrateTaskRW().UpdateDataMigrateTask(txnCtx, + &task.DataMigrateTask{TaskName: dt.TaskName, SchemaNameS: dt.SchemaNameS, TableNameS: dt.TableNameS, ChunkID: dt.ChunkID}, + map[string]interface{}{ + "TaskStatus": constant.TaskDatabaseStatusRunning, + }) + if err != nil { + return err + } + _, err = model.GetITaskLogRW().CreateLog(txnCtx, &task.Log{ + TaskName: dt.TaskName, + SchemaNameS: dt.SchemaNameS, + TableNameS: dt.TableNameS, + LogDetail: fmt.Sprintf("%v [%v] data migrate task [%v] taskflow [%v] source table [%v.%v] chunk [%s] start", + stringutil.CurrentTimeFormatString(), + stringutil.StringLower(cmt.Task.TaskMode), + dt.TaskName, + cmt.Task.TaskFlow, + dt.SchemaNameS, + dt.TableNameS, + dt.ChunkDetailS), + }) + if err != nil { + return err + } + return nil + }) + if errW != nil { + return errW + } + + if strings.EqualFold(cmt.Task.TaskMode, constant.TaskModeCSVMigrate) { + err = database.IDataMigrateProcess(&CsvMigrateRow{ + Ctx: cmt.Ctx, + TaskMode: cmt.Task.TaskMode, + TaskFlow: cmt.Task.TaskFlow, + BufioSize: constant.DefaultMigrateTaskBufferIOSize, + Dmt: dt, + DatabaseS: cmt.DatabaseS, + DBCharsetS: constant.MigrateOracleCharsetStringConvertMapping[stringutil.StringUpper(cmt.DBCharsetS)], + DBCharsetT: stringutil.StringUpper(cmt.DBCharsetT), + TaskParams: cmt.CsvParams, + ReadChan: make(chan []string, constant.DefaultMigrateTaskQueueSize), + WriteChan: make(chan string, constant.DefaultMigrateTaskQueueSize), + }) + if err != nil { + return err + } + } else if strings.EqualFold(cmt.Task.TaskMode, constant.TaskModeStmtMigrate) { + err = database.IDataMigrateProcess(&StmtMigrateRow{ + Ctx: cmt.Ctx, + TaskMode: cmt.Task.TaskMode, + TaskFlow: cmt.Task.TaskFlow, + Dmt: dt, + DatabaseS: cmt.DatabaseS, + DatabaseT: cmt.DatabaseT, + DatabaseTStmt: sqlTSmt, + DBCharsetS: constant.MigrateOracleCharsetStringConvertMapping[cmt.DBCharsetS], + DBCharsetT: stringutil.StringUpper(cmt.DBCharsetT), + SqlThreadT: int(cmt.StmtParams.SqlThreadT), + BatchSize: int(cmt.BatchSize), + CallTimeout: int(cmt.CallTimeout), + SafeMode: cmt.StmtParams.EnableSafeMode, + ReadChan: make(chan []interface{}, constant.DefaultMigrateTaskQueueSize), + WriteChan: make(chan []interface{}, constant.DefaultMigrateTaskQueueSize), + }) + if err != nil { + return err + } + } + + errW = model.Transaction(cmt.Ctx, func(txnCtx context.Context) error { + _, err = model.GetIDataMigrateTaskRW().UpdateDataMigrateTask(txnCtx, + &task.DataMigrateTask{TaskName: dt.TaskName, SchemaNameS: dt.SchemaNameS, TableNameS: dt.TableNameS, ChunkID: dt.ChunkID}, + map[string]interface{}{ + "TaskStatus": constant.TaskDatabaseStatusSuccess, + "Duration": fmt.Sprintf("%f", time.Now().Sub(gTime).Seconds()), + }) + if err != nil { + return err + } + _, err = model.GetITaskLogRW().CreateLog(txnCtx, &task.Log{ + TaskName: dt.TaskName, + SchemaNameS: dt.SchemaNameS, + TableNameS: dt.TableNameS, + LogDetail: fmt.Sprintf("%v [%v] data migrate task [%v] taskflow [%v] source table [%v.%v] chunk [%s] success", + stringutil.CurrentTimeFormatString(), + 
stringutil.StringLower(cmt.Task.TaskMode), + dt.TaskName, + cmt.Task.TaskFlow, + dt.SchemaNameS, + dt.TableNameS, + dt.ChunkDetailS), + }) + if err != nil { + return err + } + return nil + }) + if errW != nil { + return errW + } + return nil + }) + } + + for _, r := range g.Wait() { + if r.Err != nil { + smt := r.Task.(*task.DataMigrateTask) + logger.Warn("data migrate task process tables", + zap.String("task_name", cmt.Task.TaskName), zap.String("task_mode", cmt.Task.TaskMode), zap.String("task_flow", cmt.Task.TaskFlow), + zap.String("schema_name_s", smt.SchemaNameS), + zap.String("table_name_s", smt.TableNameS), + zap.Error(r.Err)) + + errW := model.Transaction(cmt.Ctx, func(txnCtx context.Context) error { + _, err = model.GetIDataMigrateTaskRW().UpdateDataMigrateTask(txnCtx, + &task.DataMigrateTask{TaskName: smt.TaskName, SchemaNameS: smt.SchemaNameS, TableNameS: smt.TableNameS, ChunkID: smt.ChunkID}, + map[string]interface{}{ + "TaskStatus": constant.TaskDatabaseStatusFailed, + "Duration": fmt.Sprintf("%f", time.Now().Sub(r.Time).Seconds()), + "ErrorDetail": r.Err.Error(), + }) + if err != nil { + return err + } + _, err = model.GetITaskLogRW().CreateLog(txnCtx, &task.Log{ + TaskName: smt.TaskName, + SchemaNameS: smt.SchemaNameS, + TableNameS: smt.TableNameS, + LogDetail: fmt.Sprintf("%v [%v] data migrate task [%v] taskflow [%v] source table [%v.%v] failed, please see [data_migrate_task] detail", + stringutil.CurrentTimeFormatString(), + stringutil.StringLower(cmt.Task.TaskMode), + smt.TaskName, + cmt.Task.TaskFlow, + smt.SchemaNameS, + smt.TableNameS), + }) + if err != nil { + return err + } + return nil + }) + if errW != nil { + return errW + } + } + } + + endTableTime := time.Now() + err = model.Transaction(cmt.Ctx, func(txnCtx context.Context) error { + var successChunks int64 + tableStatusRecs, err := model.GetIDataMigrateTaskRW().FindDataMigrateTaskBySchemaTableChunkStatus(txnCtx, &task.DataMigrateTask{ + TaskName: s.TaskName, + SchemaNameS: s.SchemaNameS, + TableNameS: s.TableNameS, + }) + if err != nil { + return err + } + for _, rec := range tableStatusRecs { + switch rec.TaskStatus { + case constant.TaskDatabaseStatusSuccess: + _, err = model.GetIDataMigrateSummaryRW().UpdateDataMigrateSummary(txnCtx, &task.DataMigrateSummary{ + TaskName: rec.TaskName, + SchemaNameS: rec.SchemaNameS, + TableNameS: rec.TableNameS, + }, map[string]interface{}{ + "ChunkSuccess": rec.StatusTotals, + }) + if err != nil { + return err + } + successChunks = successChunks + rec.StatusTotals + case constant.TaskDatabaseStatusFailed: + _, err = model.GetIDataMigrateSummaryRW().UpdateDataMigrateSummary(txnCtx, &task.DataMigrateSummary{ + TaskName: rec.TaskName, + SchemaNameS: rec.SchemaNameS, + TableNameS: rec.TableNameS, + }, map[string]interface{}{ + "ChunkFails": rec.StatusTotals, + }) + if err != nil { + return err + } + case constant.TaskDatabaseStatusWaiting: + _, err = model.GetIDataMigrateSummaryRW().UpdateDataMigrateSummary(txnCtx, &task.DataMigrateSummary{ + TaskName: rec.TaskName, + SchemaNameS: rec.SchemaNameS, + TableNameS: rec.TableNameS, + }, map[string]interface{}{ + "ChunkWaits": rec.StatusTotals, + }) + if err != nil { + return err + } + case constant.TaskDatabaseStatusRunning: + _, err = model.GetIDataMigrateSummaryRW().UpdateDataMigrateSummary(txnCtx, &task.DataMigrateSummary{ + TaskName: rec.TaskName, + SchemaNameS: rec.SchemaNameS, + TableNameS: rec.TableNameS, + }, map[string]interface{}{ + "ChunkRuns": rec.StatusTotals, + }) + if err != nil { + return err + } + case 
constant.TaskDatabaseStatusStopped:
+				_, err = model.GetIDataMigrateSummaryRW().UpdateDataMigrateSummary(txnCtx, &task.DataMigrateSummary{
+					TaskName:    rec.TaskName,
+					SchemaNameS: rec.SchemaNameS,
+					TableNameS:  rec.TableNameS,
+				}, map[string]interface{}{
+					"ChunkStops": rec.StatusTotals,
+				})
+				if err != nil {
+					return err
+				}
+			default:
+				return fmt.Errorf("the task [%v] task_mode [%s] task_flow [%v] schema_name_s [%v] table_name_s [%v] task_status [%v] panic, please contact author or reselect", s.TaskName, cmt.Task.TaskMode, cmt.Task.TaskFlow, rec.SchemaNameS, rec.TableNameS, rec.TaskStatus)
+			}
+		}
+
+		summaryRec, err := model.GetIDataMigrateSummaryRW().GetDataMigrateSummary(txnCtx, &task.DataMigrateSummary{
+			TaskName:    s.TaskName,
+			SchemaNameS: s.SchemaNameS,
+			TableNameS:  s.TableNameS,
+		})
+		if err != nil {
+			return err
+		}
+
+		logger.Info("data migrate task process summary",
+			zap.String("task_name", cmt.Task.TaskName),
+			zap.String("task_mode", cmt.Task.TaskMode),
+			zap.String("task_flow", cmt.Task.TaskFlow),
+			zap.String("schema_name_s", s.SchemaNameS),
+			zap.String("table_name_s", s.TableNameS),
+			zap.Uint64("chunk_totals", summaryRec.ChunkTotals),
+			zap.Int64("success_chunks", successChunks))
+
+		if int64(summaryRec.ChunkTotals) == successChunks {
+			_, err = model.GetIDataMigrateSummaryRW().UpdateDataMigrateSummary(txnCtx, &task.DataMigrateSummary{
+				TaskName:    s.TaskName,
+				SchemaNameS: s.SchemaNameS,
+				TableNameS:  s.TableNameS,
+			}, map[string]interface{}{
+				"MigrateFlag": constant.TaskMigrateStatusFinished,
+				"Duration":    fmt.Sprintf("%f", time.Since(startTableTime).Seconds()),
+			})
+			if err != nil {
+				return err
+			}
+		} else {
+			_, err = model.GetIDataMigrateSummaryRW().UpdateDataMigrateSummary(txnCtx, &task.DataMigrateSummary{
+				TaskName:    s.TaskName,
+				SchemaNameS: s.SchemaNameS,
+				TableNameS:  s.TableNameS,
+			}, map[string]interface{}{
+				"MigrateFlag": constant.TaskMigrateStatusNotFinished,
+				"Duration":    fmt.Sprintf("%f", time.Since(startTableTime).Seconds()),
+			})
+			if err != nil {
+				return err
+			}
+		}
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	logger.Info("data migrate task process table",
+		zap.String("task_name", cmt.Task.TaskName),
+		zap.String("task_mode", cmt.Task.TaskMode),
+		zap.String("task_flow", cmt.Task.TaskFlow),
+		zap.String("schema_name_s", s.SchemaNameS),
+		zap.String("table_name_s", s.TableNameS),
+		zap.String("cost", endTableTime.Sub(startTableTime).String()))
+	return nil
+}
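Every chunk predicate round-trips through the same two layers before it reaches the meta database: snappy compression, then stringutil.Encrypt with the default key; readers reverse both steps before splicing the predicate back into a query. The compression half is plain github.com/golang/snappy; the project-specific encryption wrapper is omitted from this sketch:

```go
package main

import (
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	where := `"ID" >= 1000 AND "ID" < 2000`

	// store: compress the predicate before it is encrypted and persisted
	enc := snappy.Encode(nil, []byte(where))

	// load: decrypt first (omitted here), then decompress back to the predicate
	dec, err := snappy.Decode(nil, enc)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(dec) == where) // true
}
```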
+
+func (cmt *DataMigrateTask) ProcessStatisticsScan(ctx context.Context, dbTypeS, globalScn string, tableRows uint64, tableSize float64, attsRule *database.DataMigrateAttributesRule) error {
+	h, err := cmt.DatabaseS.GetDatabaseTableHighestSelectivityIndex(
+		attsRule.SchemaNameS,
+		attsRule.TableNameS,
+		"",
+		nil)
+	if err != nil {
+		return err
+	}
+	if h == nil {
+		err = cmt.ProcessTableScan(ctx, attsRule.SchemaNameS, attsRule.TableNameS, globalScn, tableRows, tableSize, attsRule)
+		if err != nil {
+			return err
+		}
+		return nil
+	}
+
+	// transform the upstream bucket ranges
+	err = h.TransSelectivity(
+		dbTypeS,
+		stringutil.StringUpper(cmt.DBCharsetS),
+		cmt.Task.CaseFieldRuleS,
+		false)
+	if err != nil {
+		return err
+	}
+
+	logger.Warn("data migrate task table",
+		zap.String("task_name", cmt.Task.TaskName),
+		zap.String("task_mode", cmt.Task.TaskMode),
+		zap.String("task_flow", cmt.Task.TaskFlow),
+		zap.String("schema_name_s", attsRule.SchemaNameS),
+		zap.String("table_name_s", attsRule.TableNameS),
+		zap.String("database_version", cmt.DBVersionS),
+		zap.String("database_role", cmt.DBRoleS),
+		zap.String("migrate_method", "statistic"))
+
+	rangeC := make(chan []*structure.Range, constant.DefaultMigrateTaskQueueSize)
+	d := &Divide{
+		DBTypeS:     dbTypeS,
+		DBCharsetS:  stringutil.StringUpper(cmt.DBCharsetS),
+		SchemaNameS: attsRule.SchemaNameS,
+		TableNameS:  attsRule.TableNameS,
+		ChunkSize:   int64(cmt.ChunkSize),
+		DatabaseS:   cmt.DatabaseS,
+		Cons:        h,
+		RangeC:      rangeC,
+	}
+	g, ctx := errgroup.WithContext(ctx)
+
+	g.Go(func() error {
+		defer close(rangeC)
+		// use a local error so the two goroutines don't race on a shared err
+		if err := d.ProcessUpstreamStatisticsBucket(); err != nil {
+			return err
+		}
+		return nil
+	})
+
+	g.Go(func() error {
+		totalChunks := 0
+		chunkID := 0
+		for ranges := range rangeC {
+			var statsRanges []*task.DataMigrateTask
+			for _, r := range ranges {
+				statsRange, err := cmt.PrepareStatisticsRange(globalScn, attsRule, r, chunkID)
+				if err != nil {
+					return err
+				}
+				statsRanges = append(statsRanges, statsRange)
+				chunkID++
+			}
+
+			if len(statsRanges) > 0 {
+				err := model.GetIDataMigrateTaskRW().CreateInBatchDataMigrateTask(ctx, statsRanges, int(cmt.WriteThread), int(cmt.BatchSize))
+				if err != nil {
+					return err
+				}
+				totalChunks = totalChunks + len(statsRanges)
+			}
+			// NOTE: no early return here, keep draining rangeC until the divide goroutine closes it
+		}
+
+		if totalChunks == 0 {
+			err := cmt.ProcessTableScan(ctx, attsRule.SchemaNameS, attsRule.TableNameS, globalScn, tableRows, tableSize, attsRule)
+			if err != nil {
+				return err
+			}
+			return nil
+		}
+		_, err := model.GetIDataMigrateSummaryRW().CreateDataMigrateSummary(ctx, &task.DataMigrateSummary{
+			TaskName:       cmt.Task.TaskName,
+			SchemaNameS:    attsRule.SchemaNameS,
+			TableNameS:     attsRule.TableNameS,
+			SchemaNameT:    attsRule.SchemaNameT,
+			TableNameT:     attsRule.TableNameT,
+			SnapshotPointS: globalScn,
+			TableRowsS:     tableRows,
+			TableSizeS:     tableSize,
+			ChunkTotals:    uint64(totalChunks),
+			InitFlag:       constant.TaskInitStatusFinished,
+			MigrateFlag:    constant.TaskMigrateStatusNotFinished,
+		})
+		if err != nil {
+			return err
+		}
+		return nil
+	})
+
+	// wait for the divide and batch-create goroutines before handing the table to Run
+	if err = g.Wait(); err != nil {
+		return err
+	}
+
+	cmt.WaiterC <- &WaitingRecs{
+		TaskName:    cmt.Task.TaskName,
+		SchemaNameS: attsRule.SchemaNameS,
+		TableNameS:  attsRule.TableNameS,
+	}
+	return nil
+}
+
+func (cmt *DataMigrateTask) ProcessTableScan(ctx context.Context, schemaNameS, tableNameS, globalScn string, tableRows uint64, tableSize float64, attsRule *database.DataMigrateAttributesRule) error {
+	var whereRange string
+	switch {
+	case attsRule.EnableChunkStrategy && !strings.EqualFold(attsRule.WhereRange, ""):
+		whereRange = stringutil.StringBuilder(`1 = 1 AND `, attsRule.WhereRange)
+	default:
+		whereRange = `1 = 1`
+	}
+	logger.Warn("data migrate task table",
+		zap.String("task_name", cmt.Task.TaskName),
+		zap.String("task_mode", cmt.Task.TaskMode),
+		zap.String("task_flow", cmt.Task.TaskFlow),
+		zap.String("schema_name_s", attsRule.SchemaNameS),
+		zap.String("table_name_s", attsRule.TableNameS),
+		zap.String("database_version", cmt.DBVersionS),
+		zap.String("database_role", cmt.DBRoleS),
+		zap.String("where_range", whereRange))
+
+	encChunkS := snappy.Encode(nil, []byte(whereRange))
+
+	encryptChunkS, err := stringutil.Encrypt(stringutil.BytesToString(encChunkS), []byte(constant.DefaultDataEncryptDecryptKey))
+	if err != nil {
+		return err
+	}
+
+	var csvFile string
+	if strings.EqualFold(cmt.Task.TaskMode, constant.TaskModeCSVMigrate) {
+		csvDir := filepath.Join(cmt.CsvParams.OutputDir, attsRule.SchemaNameS, attsRule.TableNameS, uuid.NewString())
+		err = stringutil.PathNotExistOrCreate(csvDir)
+		if err != nil {
+			return err
+		}
+		csvFile = filepath.Join(csvDir, stringutil.StringBuilder(attsRule.SchemaNameT, `.`,
attsRule.TableNameT, `.0.csv`)) + } + migrateTask := &task.DataMigrateTask{ + TaskName: cmt.Task.TaskName, + SchemaNameS: attsRule.SchemaNameS, + TableNameS: attsRule.TableNameS, + SchemaNameT: attsRule.SchemaNameT, + TableNameT: attsRule.TableNameT, + TableTypeS: attsRule.TableTypeS, + SnapshotPointS: globalScn, + ColumnDetailO: attsRule.ColumnDetailO, + ColumnDetailS: attsRule.ColumnDetailS, + ColumnDetailT: attsRule.ColumnDetailT, + SqlHintS: attsRule.SqlHintS, + ChunkID: uuid.New().String(), + ChunkDetailS: encryptChunkS, + ChunkDetailArgS: "", + ConsistentReadS: strconv.FormatBool(cmt.EnableConsistentRead), + TaskStatus: constant.TaskDatabaseStatusWaiting, + CsvFile: csvFile, + } + err = model.Transaction(ctx, func(txnCtx context.Context) error { + _, err = model.GetIDataMigrateTaskRW().CreateDataMigrateTask(txnCtx, migrateTask) + if err != nil { + return err + } + _, err = model.GetIDataMigrateSummaryRW().CreateDataMigrateSummary(txnCtx, &task.DataMigrateSummary{ + TaskName: cmt.Task.TaskName, + SchemaNameS: attsRule.SchemaNameS, + TableNameS: attsRule.TableNameS, + SchemaNameT: attsRule.SchemaNameT, + TableNameT: attsRule.TableNameT, + SnapshotPointS: globalScn, + TableRowsS: tableRows, + TableSizeS: tableSize, + ChunkTotals: 1, + InitFlag: constant.TaskInitStatusFinished, + MigrateFlag: constant.TaskMigrateStatusNotFinished, + }) + if err != nil { + return err + } + return nil + }) + if err != nil { + return err + } + + cmt.WaiterC <- &WaitingRecs{ + TaskName: cmt.Task.TaskName, + SchemaNameS: schemaNameS, + TableNameS: tableNameS, + } + return nil +} + +func (cmt *DataMigrateTask) ProcessChunkScan(ctx context.Context, globalScn string, tableRows uint64, tableSize float64, attsRule *database.DataMigrateAttributesRule) error { + chunkCh := make(chan []map[string]string, constant.DefaultMigrateTaskQueueSize) + + gC := errgroup.Group{} + + gC.Go(func() error { + defer close(chunkCh) + err := cmt.DatabaseS.GetDatabaseTableChunkTask( + uuid.New().String(), attsRule.SchemaNameS, attsRule.TableNameS, cmt.ChunkSize, cmt.CallTimeout, int(cmt.BatchSize), chunkCh) + if err != nil { + return err + } + return nil + }) + + gC.Go(func() error { + var whereRange string + totalChunkRecs := 0 + chunkID := 0 + + for chunks := range chunkCh { + // batch commit + var ( + metas []*task.DataMigrateTask + ) + for _, r := range chunks { + var csvFile string + if strings.EqualFold(cmt.Task.TaskMode, constant.TaskModeCSVMigrate) { + csvFile = filepath.Join(cmt.CsvParams.OutputDir, attsRule.SchemaNameS, attsRule.TableNameS, stringutil.StringBuilder(attsRule.SchemaNameT, `.`, attsRule.TableNameT, `.`, strconv.Itoa(chunkID), `.csv`)) + } + + switch { + case attsRule.EnableChunkStrategy && !strings.EqualFold(attsRule.WhereRange, ""): + whereRange = stringutil.StringBuilder(r["CMD"], ` AND `, attsRule.WhereRange) + default: + whereRange = r["CMD"] + } + + encChunkS := snappy.Encode(nil, []byte(whereRange)) + + encryptChunkS, err := stringutil.Encrypt(stringutil.BytesToString(encChunkS), []byte(constant.DefaultDataEncryptDecryptKey)) + if err != nil { + return err + } + + metas = append(metas, &task.DataMigrateTask{ + TaskName: cmt.Task.TaskName, + SchemaNameS: attsRule.SchemaNameS, + TableNameS: attsRule.TableNameS, + SchemaNameT: attsRule.SchemaNameT, + TableNameT: attsRule.TableNameT, + TableTypeS: attsRule.TableTypeS, + SnapshotPointS: globalScn, + ColumnDetailO: attsRule.ColumnDetailO, + ColumnDetailS: attsRule.ColumnDetailS, + ColumnDetailT: attsRule.ColumnDetailT, + SqlHintS: attsRule.SqlHintS, + ChunkID: 
uuid.New().String(), + ChunkDetailS: encryptChunkS, + ChunkDetailArgS: "", + ConsistentReadS: strconv.FormatBool(cmt.EnableConsistentRead), + TaskStatus: constant.TaskDatabaseStatusWaiting, + CsvFile: csvFile, + }) + + chunkID++ + } + + chunkRecs := len(metas) + if chunkRecs > 0 { + err := model.GetIDataMigrateTaskRW().CreateInBatchDataMigrateTask(ctx, metas, int(cmt.WriteThread), int(cmt.BatchSize)) + if err != nil { + return err + } + totalChunkRecs = totalChunkRecs + chunkRecs + } + } + + if totalChunkRecs == 0 { + err := cmt.ProcessTableScan(ctx, attsRule.SchemaNameS, attsRule.TableNameS, globalScn, tableRows, tableSize, attsRule) + if err != nil { + return err + } + return nil + } + + _, err := model.GetIDataMigrateSummaryRW().CreateDataMigrateSummary(ctx, &task.DataMigrateSummary{ + TaskName: cmt.Task.TaskName, + SchemaNameS: attsRule.SchemaNameS, + TableNameS: attsRule.TableNameS, + SchemaNameT: attsRule.SchemaNameT, + TableNameT: attsRule.TableNameT, + SnapshotPointS: globalScn, + TableRowsS: tableRows, + TableSizeS: tableSize, + ChunkTotals: uint64(totalChunkRecs), + InitFlag: constant.TaskInitStatusFinished, + MigrateFlag: constant.TaskMigrateStatusNotFinished, + }) + if err != nil { + return err + } + return nil + }) + + err := gC.Wait() + if err != nil { + return err + } + + cmt.WaiterC <- &WaitingRecs{ + TaskName: cmt.Task.TaskName, + SchemaNameS: attsRule.SchemaNameS, + TableNameS: attsRule.TableNameS, + } + return nil +} + +func (cmt *DataMigrateTask) PrepareStatisticsRange(globalScn string, attsRule *database.DataMigrateAttributesRule, r *structure.Range, chunkID int) (*task.DataMigrateTask, error) { + toStringS, toStringSArgs := r.ToString() + var ( + argsS string + err error + ) + if toStringSArgs != nil { + argsS, err = stringutil.MarshalJSON(toStringSArgs) + if err != nil { + return nil, err + } + } + var ( + whereRange string + csvFile string + ) + switch { + case attsRule.EnableChunkStrategy && !strings.EqualFold(attsRule.WhereRange, ""): + whereRange = stringutil.StringBuilder(`((`, toStringS, `) AND (`, attsRule.WhereRange, `))`) + default: + whereRange = toStringS + } + + encChunkS := snappy.Encode(nil, []byte(whereRange)) + + encryptChunkS, err := stringutil.Encrypt(stringutil.BytesToString(encChunkS), []byte(constant.DefaultDataEncryptDecryptKey)) + if err != nil { + return nil, err + } + + if strings.EqualFold(cmt.Task.TaskMode, constant.TaskModeCSVMigrate) { + csvFile = filepath.Join(cmt.CsvParams.OutputDir, attsRule.SchemaNameS, attsRule.TableNameS, stringutil.StringBuilder(attsRule.SchemaNameT, `.`, attsRule.TableNameT, `.`, strconv.Itoa(chunkID), `.csv`)) + } + + return &task.DataMigrateTask{ + TaskName: cmt.Task.TaskName, + SchemaNameS: attsRule.SchemaNameS, + TableNameS: attsRule.TableNameS, + SchemaNameT: attsRule.SchemaNameT, + TableNameT: attsRule.TableNameT, + TableTypeS: attsRule.TableTypeS, + SnapshotPointS: globalScn, + ColumnDetailO: attsRule.ColumnDetailO, + ColumnDetailS: attsRule.ColumnDetailS, + ColumnDetailT: attsRule.ColumnDetailT, + SqlHintS: attsRule.SqlHintS, + ChunkID: uuid.New().String(), + ChunkDetailS: encryptChunkS, + ChunkDetailArgS: argsS, + ConsistentReadS: strconv.FormatBool(cmt.EnableConsistentRead), + TaskStatus: constant.TaskDatabaseStatusWaiting, + CsvFile: csvFile, + }, nil +} diff --git a/database/processor/data_scan_task.go b/database/processor/data_scan_task.go new file mode 100644 index 0000000..3299f92 --- /dev/null +++ b/database/processor/data_scan_task.go @@ -0,0 +1,992 @@ +/* +Copyright © 2020 Marvin + +Licensed 
under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package processor
+
+import (
+	"context"
+	"fmt"
+	"github.com/golang/snappy"
+	"github.com/google/uuid"
+	"github.com/wentaojin/dbms/database"
+	"github.com/wentaojin/dbms/errconcurrent"
+	"github.com/wentaojin/dbms/logger"
+	"github.com/wentaojin/dbms/model"
+	"github.com/wentaojin/dbms/model/rule"
+	"github.com/wentaojin/dbms/model/task"
+	"github.com/wentaojin/dbms/proto/pb"
+	"github.com/wentaojin/dbms/utils/constant"
+	"github.com/wentaojin/dbms/utils/stringutil"
+	"github.com/wentaojin/dbms/utils/structure"
+	"go.uber.org/zap"
+	"golang.org/x/sync/errgroup"
+	"strconv"
+	"strings"
+	"time"
+)
+
+type DataScanTask struct {
+	Ctx         context.Context
+	Task        *task.Task
+	DatabaseS   database.IDatabase
+	SchemaNameS string
+	DBRoleS     string
+	DBCharsetS  string
+	DBVersionS  string
+
+	TaskParams *pb.DataScanParam
+	WaiterC    chan *WaitingRecs
+	ResumeC    chan *WaitingRecs
+}
+
+func (dst *DataScanTask) Init() error {
+	defer func() {
+		close(dst.WaiterC)
+		close(dst.ResumeC)
+	}()
+	logger.Info("data scan task init table",
+		zap.String("task_name", dst.Task.TaskName), zap.String("task_mode", dst.Task.TaskMode), zap.String("task_flow", dst.Task.TaskFlow))
+
+	if !dst.TaskParams.EnableCheckpoint {
+		logger.Warn("data scan task checkpoint skip",
+			zap.String("task_name", dst.Task.TaskName),
+			zap.String("task_mode", dst.Task.TaskMode),
+			zap.String("task_flow", dst.Task.TaskFlow),
+			zap.Bool("enable_checkpoint", dst.TaskParams.EnableCheckpoint))
+		err := model.GetIDataScanTaskRW().DeleteDataScanTaskName(dst.Ctx, []string{dst.Task.TaskName})
+		if err != nil {
+			return err
+		}
+		err = model.GetIDataScanSummaryRW().DeleteDataScanSummaryName(dst.Ctx, []string{dst.Task.TaskName})
+		if err != nil {
+			return err
+		}
+	}
+
+	// filter the database task tables
+	schemaTaskTables, err := model.GetIMigrateTaskTableRW().FindMigrateTaskTable(dst.Ctx, &rule.MigrateTaskTable{
+		TaskName:    dst.Task.TaskName,
+		SchemaNameS: dst.SchemaNameS,
+	})
+	if err != nil {
+		return err
+	}
+	var (
+		includeTables      []string
+		excludeTables      []string
+		databaseTaskTables []string // task tables
+		globalScn          string
+	)
+	databaseTableTypeMap := make(map[string]string)
+	databaseTaskTablesMap := make(map[string]struct{})
+
+	for _, t := range schemaTaskTables {
+		if strings.EqualFold(t.IsExclude, constant.MigrateTaskTableIsExclude) {
+			excludeTables = append(excludeTables, t.TableNameS)
+		}
+		if strings.EqualFold(t.IsExclude, constant.MigrateTaskTableIsNotExclude) {
+			includeTables = append(includeTables, t.TableNameS)
+		}
+	}
+
+	tableObjs, err := dst.DatabaseS.FilterDatabaseTable(dst.SchemaNameS, includeTables, excludeTables)
+	if err != nil {
+		return err
+	}
+
+	// apply the case-field rule
+	for _, t := range tableObjs.TaskTables {
+		var tabName string
+		// convert the table name according to the source case-field rule
+		if strings.EqualFold(dst.Task.CaseFieldRuleS, constant.ParamValueStructMigrateCaseFieldRuleLower) {
+			tabName = stringutil.StringLower(t)
+		}
+		if strings.EqualFold(dst.Task.CaseFieldRuleS, constant.ParamValueStructMigrateCaseFieldRuleUpper) {
+			tabName = stringutil.StringUpper(t)
+		}
+		if strings.EqualFold(dst.Task.CaseFieldRuleS, constant.ParamValueStructMigrateCaseFieldRuleOrigin) {
+			tabName = t
+		}
+		databaseTaskTables = append(databaseTaskTables, tabName)
+		databaseTaskTablesMap[tabName] = struct{}{}
+	}
+
+	// compare the task tables:
+	// if a table exists in the database task records but is no longer in the task config, clear its database records
+	summaries, err := model.GetIDataScanSummaryRW().FindDataScanSummary(dst.Ctx, &task.DataScanSummary{TaskName: dst.Task.TaskName, SchemaNameS: dst.SchemaNameS})
+	if err != nil {
+		return err
+	}
+	for _, s := range summaries {
+		_, ok := databaseTaskTablesMap[s.TableNameS]
+
+		if !ok || strings.EqualFold(s.InitFlag, constant.TaskInitStatusNotFinished) {
+			err = model.Transaction(dst.Ctx, func(txnCtx context.Context) error {
+				err := model.GetIDataScanTaskRW().DeleteDataScanTaskName(txnCtx, []string{dst.Task.TaskName})
+				if err != nil {
+					return err
+				}
+				err = model.GetIDataScanSummaryRW().DeleteDataScanSummaryName(txnCtx, []string{dst.Task.TaskName})
+				if err != nil {
+					return err
+				}
+				return nil
+			})
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	databaseTableTypeMap, err = dst.DatabaseS.GetDatabaseTableType(dst.SchemaNameS)
+	if err != nil {
+		return err
+	}
+
+	globalScnS, err := dst.DatabaseS.GetDatabaseConsistentPos()
+	if err != nil {
+		return err
+	}
+
+	globalScn = strconv.FormatUint(globalScnS, 10)
+
+	// init the database task tables
+	dbTypeSli := stringutil.StringSplit(dst.Task.TaskFlow, constant.StringSeparatorAite)
+	dbTypeS := dbTypeSli[0]
+
+	logger.Info("data scan task start init",
+		zap.String("task_name", dst.Task.TaskName), zap.String("task_mode", dst.Task.TaskMode), zap.String("task_flow", dst.Task.TaskFlow))
+
+	g, gCtx := errgroup.WithContext(dst.Ctx)
+	g.SetLimit(int(dst.TaskParams.TableThread))
+
+	for _, taskJob := range databaseTaskTables {
+		sourceTable := taskJob
+		g.Go(func() error {
+			select {
+			case <-gCtx.Done():
+				return gCtx.Err()
+			default:
+				startTime := time.Now()
+				s, err := model.GetIDataScanSummaryRW().GetDataScanSummary(gCtx, &task.DataScanSummary{
+					TaskName:    dst.Task.TaskName,
+					SchemaNameS: dst.SchemaNameS,
+					TableNameS:  sourceTable,
+				})
+				if err != nil {
+					return err
+				}
+				if strings.EqualFold(s.InitFlag, constant.TaskInitStatusFinished) {
+					// the table already carries the init flag, hand it to resume and skip
+					dst.ResumeC <- &WaitingRecs{
+						TaskName:    s.TaskName,
+						SchemaNameS: s.SchemaNameS,
+						TableNameS:  s.TableNameS,
+					}
+					return nil
+				}
+
+				tableRows, err := dst.DatabaseS.GetDatabaseTableRows(dst.SchemaNameS, sourceTable)
+				if err != nil {
+					return err
+				}
+				tableSize, err := dst.DatabaseS.GetDatabaseTableSize(dst.SchemaNameS, sourceTable)
+				if err != nil {
+					return err
+				}
+
+				dataRule := &DataScanRule{
+					Ctx:               gCtx,
+					TaskName:          dst.Task.TaskName,
+					TaskMode:          dst.Task.TaskMode,
+					TaskFlow:          dst.Task.TaskFlow,
+					SchemaNameS:       dst.SchemaNameS,
+					TableNameS:        sourceTable,
+					TableTypeS:        databaseTableTypeMap,
+					DatabaseS:         dst.DatabaseS,
+					DBCharsetS:        dst.DBCharsetS,
+					GlobalSqlHintS:    dst.TaskParams.SqlHintS,
+					GlobalSamplerateS: strconv.FormatUint(dst.TaskParams.TableSamplerateS, 10),
+				}
+
+				attsRule, err := database.IDataScanAttributesRule(dataRule)
+				if err != nil {
+					return err
+				}
+
+				// if the table's ColumnDetailS and GroupColumnS are both empty, the table
+				// has no numeric data type column to scan, so ignore it and skip init
+				if
strings.EqualFold(attsRule.ColumnDetailS, "") && strings.EqualFold(attsRule.GroupColumnS, "") { + return nil + } + + var whereRange string + size, err := stringutil.StrconvFloatBitSize(attsRule.TableSamplerateS, 64) + if err != nil { + return err + } + + if size > 0.000001 && size < 100 { + logger.Warn("data scan task table", + zap.String("task_name", dst.Task.TaskName), + zap.String("task_mode", dst.Task.TaskMode), + zap.String("task_flow", dst.Task.TaskFlow), + zap.String("schema_name_s", attsRule.SchemaNameS), + zap.String("table_name_s", attsRule.TableNameS), + zap.String("database_version", dst.DBVersionS), + zap.String("database_role", dst.DBRoleS), + zap.String("migrate_method", "scan")) + + whereRange = `sample_scan` + encChunkS := snappy.Encode(nil, []byte(whereRange)) + encryptChunkS, err := stringutil.Encrypt(stringutil.BytesToString(encChunkS), []byte(constant.DefaultDataEncryptDecryptKey)) + if err != nil { + return err + } + + err = model.Transaction(gCtx, func(txnCtx context.Context) error { + _, err = model.GetIDataScanTaskRW().CreateDataScanTask(txnCtx, &task.DataScanTask{ + TaskName: dst.Task.TaskName, + SchemaNameS: attsRule.SchemaNameS, + TableNameS: attsRule.TableNameS, + TableTypeS: attsRule.TableTypeS, + SnapshotPointS: globalScn, + ColumnDetailS: attsRule.ColumnDetailS, + GroupColumnS: attsRule.GroupColumnS, + SqlHintS: attsRule.SqlHintS, + ChunkID: uuid.New().String(), + ChunkDetailS: encryptChunkS, + ChunkDetailArgS: "", + Samplerate: attsRule.TableSamplerateS, + ConsistentReadS: strconv.FormatBool(dst.TaskParams.EnableConsistentRead), + TaskStatus: constant.TaskDatabaseStatusWaiting, + }) + if err != nil { + return err + } + _, err = model.GetIDataScanSummaryRW().CreateDataScanSummary(txnCtx, &task.DataScanSummary{ + TaskName: dst.Task.TaskName, + SchemaNameS: attsRule.SchemaNameS, + TableNameS: attsRule.TableNameS, + SnapshotPointS: globalScn, + TableRowsS: tableRows, + TableSizeS: tableSize, + ChunkTotals: 1, + InitFlag: constant.TaskInitStatusFinished, + ScanFlag: constant.TaskScanStatusNotFinished, + }) + if err != nil { + return err + } + return nil + }) + if err != nil { + return err + } + return nil + } + + // statistic + if !strings.EqualFold(dst.DBRoleS, constant.OracleDatabasePrimaryRole) || (strings.EqualFold(dst.DBRoleS, constant.OracleDatabasePrimaryRole) && stringutil.VersionOrdinal(dst.DBVersionS) < stringutil.VersionOrdinal(constant.OracleDatabaseTableMigrateRowidRequireVersion)) { + err = dst.ProcessStatisticsScan( + gCtx, + dbTypeS, + dst.SchemaNameS, + sourceTable, + globalScn, + tableRows, + tableSize, + attsRule) + if err != nil { + return err + } + return nil + } + + err = dst.ProcessChunkScan(gCtx, dst.SchemaNameS, sourceTable, globalScn, tableRows, tableSize, attsRule) + if err != nil { + return err + } + + logger.Info("data scan task init success", + zap.String("task_name", dst.Task.TaskName), + zap.String("task_mode", dst.Task.TaskMode), + zap.String("task_flow", dst.Task.TaskFlow), + zap.String("schema_name_s", attsRule.SchemaNameS), + zap.String("table_name_s", attsRule.TableNameS), + zap.String("cost", time.Now().Sub(startTime).String())) + return nil + } + }) + } + + if err = g.Wait(); err != nil { + logger.Error("data scan task init failed", + zap.String("task_name", dst.Task.TaskName), + zap.String("task_mode", dst.Task.TaskMode), + zap.String("task_flow", dst.Task.TaskFlow), + zap.String("schema_name_s", dst.SchemaNameS), + zap.Error(err)) + return err + } + return nil +} + +func (dst *DataScanTask) Run() error { + 
logger.Info("data scan task run table", + zap.String("task_name", dst.Task.TaskName), zap.String("task_mode", dst.Task.TaskMode), zap.String("task_flow", dst.Task.TaskFlow)) + + for s := range dst.WaiterC { + err := dst.Process(s) + if err != nil { + return err + } + } + return nil +} + +func (dst *DataScanTask) Resume() error { + logger.Info("data scan task resume table", + zap.String("task_name", dst.Task.TaskName), zap.String("task_mode", dst.Task.TaskMode), zap.String("task_flow", dst.Task.TaskFlow)) + + for s := range dst.ResumeC { + err := dst.Process(s) + if err != nil { + return err + } + } + return nil +} + +func (dst *DataScanTask) Process(s *WaitingRecs) error { + startTableTime := time.Now() + summary, err := model.GetIDataScanSummaryRW().GetDataScanSummary(dst.Ctx, &task.DataScanSummary{ + TaskName: s.TaskName, + SchemaNameS: s.SchemaNameS, + TableNameS: s.TableNameS, + }) + if err != nil { + return err + } + if strings.EqualFold(summary.ScanFlag, constant.TaskScanStatusFinished) { + logger.Warn("data migrate task init", + zap.String("task_name", dst.Task.TaskName), + zap.String("task_mode", dst.Task.TaskMode), + zap.String("task_flow", dst.Task.TaskFlow), + zap.String("schema_name_s", s.SchemaNameS), + zap.String("table_name_s", s.TableNameS), + zap.String("scan_flag", summary.ScanFlag), + zap.String("action", "scan skip")) + return nil + } + + if strings.EqualFold(summary.InitFlag, constant.TaskInitStatusNotFinished) { + return fmt.Errorf("the task_name [%s] task_mode [%s] task_flow [%s] schema_name_s [%s] table_name_s [%s] init status not finished, disabled scan", s.TableNameS, dst.Task.TaskMode, dst.Task.TaskFlow, s.SchemaNameS, s.TableNameS) + } + + logger.Info("data scan task process table", + zap.String("task_name", dst.Task.TaskName), + zap.String("task_mode", dst.Task.TaskMode), + zap.String("task_flow", dst.Task.TaskFlow), + zap.String("schema_name_s", s.SchemaNameS), + zap.String("table_name_s", s.TableNameS)) + + var migrateTasks []*task.DataScanTask + err = model.Transaction(dst.Ctx, func(txnCtx context.Context) error { + // get migrate task tables + migrateTasks, err = model.GetIDataScanTaskRW().FindDataScanTask(txnCtx, + &task.DataScanTask{ + TaskName: s.TaskName, + SchemaNameS: s.SchemaNameS, + TableNameS: s.TableNameS, + TaskStatus: constant.TaskDatabaseStatusWaiting, + }) + if err != nil { + return err + } + migrateFailedTasks, err := model.GetIDataScanTaskRW().FindDataScanTask(txnCtx, + &task.DataScanTask{ + TaskName: s.TaskName, + SchemaNameS: s.SchemaNameS, + TableNameS: s.TableNameS, + TaskStatus: constant.TaskDatabaseStatusFailed}) + if err != nil { + return err + } + migrateRunningTasks, err := model.GetIDataScanTaskRW().FindDataScanTask(txnCtx, + &task.DataScanTask{ + TaskName: s.TaskName, + SchemaNameS: s.SchemaNameS, + TableNameS: s.TableNameS, + TaskStatus: constant.TaskDatabaseStatusRunning}) + if err != nil { + return err + } + migrateStopTasks, err := model.GetIDataScanTaskRW().FindDataScanTask(txnCtx, + &task.DataScanTask{ + TaskName: s.TaskName, + SchemaNameS: s.SchemaNameS, + TableNameS: s.TableNameS, + TaskStatus: constant.TaskDatabaseStatusStopped}) + if err != nil { + return err + } + migrateTasks = append(migrateTasks, migrateFailedTasks...) + migrateTasks = append(migrateTasks, migrateRunningTasks...) + migrateTasks = append(migrateTasks, migrateStopTasks...) 
+
+	logger.Info("data scan task process chunks",
+		zap.String("task_name", dst.Task.TaskName),
+		zap.String("task_mode", dst.Task.TaskMode),
+		zap.String("task_flow", dst.Task.TaskFlow),
+		zap.String("schema_name_s", s.SchemaNameS),
+		zap.String("table_name_s", s.TableNameS))
+
+	g := errconcurrent.NewGroup()
+	g.SetLimit(int(dst.TaskParams.SqlThreadS))
+	for _, j := range migrateTasks {
+		gTime := time.Now()
+		g.Go(j, gTime, func(j interface{}) error {
+			dt := j.(*task.DataScanTask)
+			errW := model.Transaction(dst.Ctx, func(txnCtx context.Context) error {
+				_, err = model.GetIDataScanTaskRW().UpdateDataScanTask(txnCtx,
+					&task.DataScanTask{TaskName: dt.TaskName, SchemaNameS: dt.SchemaNameS, TableNameS: dt.TableNameS, ChunkID: dt.ChunkID},
+					map[string]interface{}{
+						"TaskStatus": constant.TaskDatabaseStatusRunning,
+					})
+				if err != nil {
+					return err
+				}
+				_, err = model.GetITaskLogRW().CreateLog(txnCtx, &task.Log{
+					TaskName:    dt.TaskName,
+					SchemaNameS: dt.SchemaNameS,
+					TableNameS:  dt.TableNameS,
+					LogDetail: fmt.Sprintf("%v [%v] data scan task [%v] taskflow [%v] source table [%v.%v] chunk [%s] start",
+						stringutil.CurrentTimeFormatString(),
+						stringutil.StringLower(dst.Task.TaskMode),
+						dt.TaskName,
+						dst.Task.TaskFlow,
+						dt.SchemaNameS,
+						dt.TableNameS,
+						dt.ChunkDetailS),
+				})
+				if err != nil {
+					return err
+				}
+				return nil
+			})
+			if errW != nil {
+				return errW
+			}
+
+			err = database.IDataScanProcess(&DataScanRow{
+				Ctx:        dst.Ctx,
+				StartTime:  gTime,
+				TaskName:   dt.TaskName,
+				TaskMode:   dst.Task.TaskMode,
+				TaskFlow:   dst.Task.TaskFlow,
+				Dst:        dt,
+				DatabaseS:  dst.DatabaseS,
+				DBCharsetS: dst.DBCharsetS,
+			})
+			if err != nil {
+				return err
+			}
+			return nil
+		})
+	}
+
+	for _, r := range g.Wait() {
+		if r.Err != nil {
+			mt := r.Task.(*task.DataScanTask)
+			logger.Warn("data scan task process tables",
+				zap.String("task_name", mt.TaskName), zap.String("task_mode", dst.Task.TaskMode), zap.String("task_flow", dst.Task.TaskFlow),
+				zap.String("schema_name_s", mt.SchemaNameS),
+				zap.String("table_name_s", mt.TableNameS),
+				zap.Error(r.Err))
+
+			errW := model.Transaction(dst.Ctx, func(txnCtx context.Context) error {
+				_, err = model.GetIDataScanTaskRW().UpdateDataScanTask(txnCtx,
+					&task.DataScanTask{TaskName: mt.TaskName, SchemaNameS: mt.SchemaNameS, TableNameS: mt.TableNameS, ChunkID: mt.ChunkID},
+					map[string]interface{}{
+						"TaskStatus":  constant.TaskDatabaseStatusFailed,
+						"Duration":    fmt.Sprintf("%f", time.Since(r.Time).Seconds()),
+						"ErrorDetail": r.Err.Error(),
+					})
+				if err != nil {
+					return err
+				}
+				_, err = model.GetITaskLogRW().CreateLog(txnCtx, &task.Log{
+					TaskName:    mt.TaskName,
+					SchemaNameS: mt.SchemaNameS,
+					TableNameS:  mt.TableNameS,
+					LogDetail: fmt.Sprintf("%v [%v] data scan task [%v] taskflow [%v] source table [%v.%v] failed, please see [data_scan_task] detail",
+						stringutil.CurrentTimeFormatString(),
+						stringutil.StringLower(dst.Task.TaskMode),
+						mt.TaskName,
+						dst.Task.TaskFlow,
+						mt.SchemaNameS,
+						mt.TableNameS),
+				})
+				if err != nil {
+					return err
+				}
+				return nil
+			})
+			if errW != nil {
+				return errW
+			}
+		}
+	}
+
+	endTableTime := time.Now()
+	err = model.Transaction(dst.Ctx, func(txnCtx context.Context) error {
+		var successChunks int64
+
+		tableStatusRecs, err := model.GetIDataScanTaskRW().FindDataScanTaskBySchemaTableChunkStatus(txnCtx, &task.DataScanTask{
+			TaskName:    s.TaskName,
+			SchemaNameS: s.SchemaNameS,
+			TableNameS:  s.TableNameS,
+		})
+		if err != nil {
+			return err
+		}
+		for _, rec := range tableStatusRecs {
+			switch rec.TaskStatus {
+			case constant.TaskDatabaseStatusSuccess:
+				_, err = model.GetIDataScanSummaryRW().UpdateDataScanSummary(txnCtx, &task.DataScanSummary{
+					TaskName:    rec.TaskName,
+					SchemaNameS: rec.SchemaNameS,
+					TableNameS:  rec.TableNameS,
+				}, map[string]interface{}{
+					"ChunkSuccess": rec.StatusTotals,
+				})
+				if err != nil {
+					return err
+				}
+
+				successChunks = successChunks + rec.StatusTotals
+			case constant.TaskDatabaseStatusFailed:
+				_, err = model.GetIDataScanSummaryRW().UpdateDataScanSummary(txnCtx, &task.DataScanSummary{
+					TaskName:    rec.TaskName,
+					SchemaNameS: rec.SchemaNameS,
+					TableNameS:  rec.TableNameS,
+				}, map[string]interface{}{
+					"ChunkFails": rec.StatusTotals,
+				})
+				if err != nil {
+					return err
+				}
+			case constant.TaskDatabaseStatusWaiting:
+				_, err = model.GetIDataScanSummaryRW().UpdateDataScanSummary(txnCtx, &task.DataScanSummary{
+					TaskName:    rec.TaskName,
+					SchemaNameS: rec.SchemaNameS,
+					TableNameS:  rec.TableNameS,
+				}, map[string]interface{}{
+					"ChunkWaits": rec.StatusTotals,
+				})
+				if err != nil {
+					return err
+				}
+			case constant.TaskDatabaseStatusRunning:
+				_, err = model.GetIDataScanSummaryRW().UpdateDataScanSummary(txnCtx, &task.DataScanSummary{
+					TaskName:    rec.TaskName,
+					SchemaNameS: rec.SchemaNameS,
+					TableNameS:  rec.TableNameS,
+				}, map[string]interface{}{
+					"ChunkRuns": rec.StatusTotals,
+				})
+				if err != nil {
+					return err
+				}
+			case constant.TaskDatabaseStatusStopped:
+				_, err = model.GetIDataScanSummaryRW().UpdateDataScanSummary(txnCtx, &task.DataScanSummary{
+					TaskName:    rec.TaskName,
+					SchemaNameS: rec.SchemaNameS,
+					TableNameS:  rec.TableNameS,
+				}, map[string]interface{}{
+					"ChunkStops": rec.StatusTotals,
+				})
+				if err != nil {
+					return err
+				}
+			default:
+				return fmt.Errorf("the task [%v] task_mode [%s] task_flow [%v] schema_name_s [%v] table_name_s [%v] task_status [%v] panic, please contact author or reselect", s.TaskName, dst.Task.TaskMode, dst.Task.TaskFlow, rec.SchemaNameS, rec.TableNameS, rec.TaskStatus)
+			}
+		}
+
+		summaryRec, err := model.GetIDataScanSummaryRW().GetDataScanSummary(txnCtx, &task.DataScanSummary{
+			TaskName:    s.TaskName,
+			SchemaNameS: s.SchemaNameS,
+			TableNameS:  s.TableNameS,
+		})
+		if err != nil {
+			return err
+		}
+
+		if int64(summaryRec.ChunkTotals) == successChunks {
+			_, err = model.GetIDataScanSummaryRW().UpdateDataScanSummary(txnCtx, &task.DataScanSummary{
+				TaskName:    s.TaskName,
+				SchemaNameS: s.SchemaNameS,
+				TableNameS:  s.TableNameS,
+			}, map[string]interface{}{
+				"Duration": fmt.Sprintf("%f", time.Since(startTableTime).Seconds()),
+				"ScanFlag": constant.TaskScanStatusFinished,
+			})
+			if err != nil {
+				return err
+			}
+		} else {
+			_, err = model.GetIDataScanSummaryRW().UpdateDataScanSummary(txnCtx, &task.DataScanSummary{
+				TaskName:    s.TaskName,
+				SchemaNameS: s.SchemaNameS,
+				TableNameS:  s.TableNameS,
+			}, map[string]interface{}{
+				"Duration": fmt.Sprintf("%f", time.Since(startTableTime).Seconds()),
+				"ScanFlag": constant.TaskScanStatusNotFinished,
+			})
+			if err != nil {
+				return err
+			}
+		}
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	logger.Info("data scan task process table",
+		zap.String("task_name", dst.Task.TaskName),
+		zap.String("task_mode", dst.Task.TaskMode),
+		zap.String("task_flow", dst.Task.TaskFlow),
+		zap.String("schema_name_s", s.SchemaNameS),
+		zap.String("table_name_s", s.TableNameS),
+		zap.String("cost", endTableTime.Sub(startTableTime).String()))
+	return nil
+}
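The reconciliation transaction above reduces to one invariant: fold the per-status chunk counts back into the summary, and flip the finished flag only when the success count equals ChunkTotals, so a later run re-enters through Resume and retries the remainder. The decision in isolation (status name shortened; not the project's constants):

```go
package main

import "fmt"

// finished reports whether a table can be flagged done: every chunk recorded
// for it must have landed in the success bucket.
func finished(statusTotals map[string]int64, chunkTotals int64) bool {
	return statusTotals["SUCCESS"] == chunkTotals
}

func main() {
	fmt.Println(finished(map[string]int64{"SUCCESS": 8, "FAILED": 2}, 10)) // false: the 2 failed chunks are retried on the next run
	fmt.Println(finished(map[string]int64{"SUCCESS": 10}, 10))             // true: the flag flips to finished
}
```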
+
+func (dst *DataScanTask) ProcessStatisticsScan(ctx context.Context, dbTypeS, schemaNameS, tableNameS, globalScn string, tableRows uint64, tableSize float64, attsRule *database.DataScanAttributesRule) error {
+	h, err := dst.DatabaseS.GetDatabaseTableHighestSelectivityIndex(
+		attsRule.SchemaNameS,
+		attsRule.TableNameS,
+		"",
+		nil)
+	if err != nil {
+		return err
+	}
+	if h == nil {
+		err = dst.ProcessTableScan(ctx, schemaNameS, tableNameS, globalScn, tableRows, tableSize, attsRule)
+		if err != nil {
+			return err
+		}
+		return nil
+	}
+
+	// transform the upstream bucket ranges
+	err = h.TransSelectivity(
+		dbTypeS,
+		stringutil.StringUpper(dst.DBCharsetS),
+		dst.Task.CaseFieldRuleS,
+		false)
+	if err != nil {
+		return err
+	}
+
+	logger.Warn("data scan task table",
+		zap.String("task_name", dst.Task.TaskName),
+		zap.String("task_mode", dst.Task.TaskMode),
+		zap.String("task_flow", dst.Task.TaskFlow),
+		zap.String("schema_name_s", attsRule.SchemaNameS),
+		zap.String("table_name_s", attsRule.TableNameS),
+		zap.String("database_version", dst.DBVersionS),
+		zap.String("database_role", dst.DBRoleS),
+		zap.String("migrate_method", "statistic"))
+
+	rangeC := make(chan []*structure.Range, constant.DefaultMigrateTaskQueueSize)
+	d := &Divide{
+		DBTypeS:     dbTypeS,
+		DBCharsetS:  stringutil.StringUpper(dst.DBCharsetS),
+		SchemaNameS: schemaNameS,
+		TableNameS:  tableNameS,
+		ChunkSize:   int64(dst.TaskParams.ChunkSize),
+		DatabaseS:   dst.DatabaseS,
+		Cons:        h,
+		RangeC:      rangeC,
+	}
+	g, ctx := errgroup.WithContext(ctx)
+
+	g.Go(func() error {
+		defer close(rangeC)
+		// use a local error so the two goroutines don't race on a shared err
+		if err := d.ProcessUpstreamStatisticsBucket(); err != nil {
+			return err
+		}
+		return nil
+	})
+
+	g.Go(func() error {
+		totalChunks := 0
+		for r := range rangeC {
+			statsRanges, err := dst.PrepareStatisticsRange(globalScn, attsRule, r)
+			if err != nil {
+				return err
+			}
+			if len(statsRanges) > 0 {
+				err = model.GetIDataScanTaskRW().CreateInBatchDataScanTask(ctx, statsRanges, int(dst.TaskParams.WriteThread), int(dst.TaskParams.BatchSize))
+				if err != nil {
+					return err
+				}
+				totalChunks = totalChunks + len(statsRanges)
+			}
+			// NOTE: no early return here, keep draining rangeC until the divide goroutine closes it
+		}
+
+		if totalChunks == 0 {
+			err := dst.ProcessTableScan(ctx, schemaNameS, tableNameS, globalScn, tableRows, tableSize, attsRule)
+			if err != nil {
+				return err
+			}
+			return nil
+		}
+
+		_, err := model.GetIDataScanSummaryRW().CreateDataScanSummary(ctx, &task.DataScanSummary{
+			TaskName:       dst.Task.TaskName,
+			SchemaNameS:    attsRule.SchemaNameS,
+			TableNameS:     attsRule.TableNameS,
+			SnapshotPointS: globalScn,
+			TableRowsS:     tableRows,
+			TableSizeS:     tableSize,
+			ChunkTotals:    uint64(totalChunks),
+			InitFlag:       constant.TaskInitStatusFinished,
+			ScanFlag:       constant.TaskScanStatusNotFinished,
+		})
+		if err != nil {
+			return err
+		}
+		return nil
+	})
+
+	// wait for the divide and batch-create goroutines before handing the table to Run
+	if err = g.Wait(); err != nil {
+		return err
+	}
+
+	dst.WaiterC <- &WaitingRecs{
+		TaskName:    dst.Task.TaskName,
+		SchemaNameS: schemaNameS,
+		TableNameS:  tableNameS,
+	}
+	return nil
+}
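Both statistics-driven scans share the producer/consumer shape just shown: one goroutine divides the table into ranges and streams batches into a channel, the other persists them as waiting chunks; closing the channel ends the consumer's loop, and errgroup surfaces whichever side fails first. The skeleton with payloads reduced to WHERE fragments:

```go
package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	rangeC := make(chan []string, 16)
	var g errgroup.Group

	// producer: divide the table and stream range batches
	g.Go(func() error {
		defer close(rangeC) // unblocks the consumer once division finishes
		for i := 0; i < 3; i++ {
			rangeC <- []string{fmt.Sprintf("id >= %d AND id < %d", i*1000, (i+1)*1000)}
		}
		return nil
	})

	// consumer: persist each batch as waiting chunk rows
	g.Go(func() error {
		total := 0
		for batch := range rangeC {
			total += len(batch) // stand-in for the CreateInBatch...Task call
		}
		fmt.Println("chunks created:", total)
		return nil
	})

	if err := g.Wait(); err != nil {
		fmt.Println("divide failed:", err)
	}
}
```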
+
+func (dst *DataScanTask) ProcessTableScan(ctx context.Context, schemaNameS, tableNameS, globalScn string, tableRows uint64, tableSize float64, attsRule *database.DataScanAttributesRule) error {
+	whereRange := `1 = 1`
+
+	encChunkS := snappy.Encode(nil, []byte(whereRange))
+
+	encryptChunkS, err := stringutil.Encrypt(stringutil.BytesToString(encChunkS), []byte(constant.DefaultDataEncryptDecryptKey))
+	if err != nil {
+		return err
+	}
+
+	err = model.Transaction(ctx, func(txnCtx context.Context) error {
+		_, err = model.GetIDataScanTaskRW().CreateDataScanTask(txnCtx, &task.DataScanTask{
+			TaskName:        dst.Task.TaskName,
+			SchemaNameS:     attsRule.SchemaNameS,
+			TableNameS:      attsRule.TableNameS,
+			TableTypeS:      attsRule.TableTypeS,
+			SnapshotPointS:  globalScn,
+			ColumnDetailS:   attsRule.ColumnDetailS,
+			GroupColumnS:    attsRule.GroupColumnS,
+			SqlHintS:        attsRule.SqlHintS,
+			ChunkID:         uuid.New().String(),
+			ChunkDetailS:    encryptChunkS,
+			ChunkDetailArgS: "",
+			Samplerate:      attsRule.TableSamplerateS,
+			ConsistentReadS: strconv.FormatBool(dst.TaskParams.EnableConsistentRead),
+			TaskStatus:      constant.TaskDatabaseStatusWaiting,
+		})
+		if err != nil {
+			return err
+		}
+		_, err = model.GetIDataScanSummaryRW().CreateDataScanSummary(txnCtx, &task.DataScanSummary{
+			TaskName:       dst.Task.TaskName,
+			SchemaNameS:    attsRule.SchemaNameS,
+			TableNameS:     attsRule.TableNameS,
+			SnapshotPointS: globalScn,
+			TableRowsS:     tableRows,
+			TableSizeS:     tableSize,
+			ChunkTotals:    1,
+			InitFlag:       constant.TaskInitStatusFinished,
+			ScanFlag:       constant.TaskScanStatusNotFinished,
+		})
+		if err != nil {
+			return err
+		}
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	dst.WaiterC <- &WaitingRecs{
+		TaskName:    dst.Task.TaskName,
+		SchemaNameS: schemaNameS,
+		TableNameS:  tableNameS,
+	}
+	return nil
+}
+
+func (dst *DataScanTask) ProcessChunkScan(ctx context.Context, schemaNameS, tableNameS, globalScn string, tableRows uint64, tableSize float64, attsRule *database.DataScanAttributesRule) error {
+	chunkCh := make(chan []map[string]string, constant.DefaultMigrateTaskQueueSize)
+
+	var gC errgroup.Group
+
+	gC.Go(func() error {
+		defer close(chunkCh)
+		err := dst.DatabaseS.GetDatabaseTableChunkTask(
+			uuid.New().String(), schemaNameS, tableNameS, dst.TaskParams.ChunkSize, dst.TaskParams.CallTimeout, int(dst.TaskParams.BatchSize), chunkCh)
+		if err != nil {
+			return err
+		}
+		return nil
+	})
+
+	gC.Go(func() error {
+		var whereRange string
+		totalChunkRecs := 0
+
+		for chunks := range chunkCh {
+			// batch commit
+			var metas []*task.DataScanTask
+
+			for _, r := range chunks {
+				whereRange = r["CMD"]
+
+				encChunkS := snappy.Encode(nil, []byte(whereRange))
+
+				encryptChunkS, err := stringutil.Encrypt(stringutil.BytesToString(encChunkS), []byte(constant.DefaultDataEncryptDecryptKey))
+				if err != nil {
+					return err
+				}
+
+				metas = append(metas, &task.DataScanTask{
+					TaskName:       dst.Task.TaskName,
+					SchemaNameS:    attsRule.SchemaNameS,
+					TableNameS:     attsRule.TableNameS,
+					TableTypeS:     attsRule.TableTypeS,
+					SnapshotPointS: globalScn,
+					ColumnDetailS:  attsRule.ColumnDetailS,
+					GroupColumnS:   attsRule.GroupColumnS,
+					SqlHintS:       attsRule.SqlHintS,
+					// assign a chunk id so the per-chunk status updates can address the row
+					ChunkID:         uuid.New().String(),
+					ChunkDetailS:    encryptChunkS,
+					Samplerate:      attsRule.TableSamplerateS,
+					ConsistentReadS: strconv.FormatBool(dst.TaskParams.EnableConsistentRead),
+					TaskStatus:      constant.TaskDatabaseStatusWaiting,
+				})
+			}
+
+			chunkRecs := len(metas)
+			if chunkRecs > 0 {
+				err := model.GetIDataScanTaskRW().CreateInBatchDataScanTask(ctx, metas, int(dst.TaskParams.WriteThread), int(dst.TaskParams.BatchSize))
+				if err != nil {
+					return err
+				}
+				totalChunkRecs = totalChunkRecs + chunkRecs
+			}
+		}
+
+		if totalChunkRecs == 0 {
+			err := dst.ProcessTableScan(ctx, schemaNameS, tableNameS, globalScn, tableRows, tableSize, attsRule)
+			if err != nil {
+				return err
+			}
+			return nil
+		}
+
+		_, err := model.GetIDataScanSummaryRW().CreateDataScanSummary(ctx, &task.DataScanSummary{
+			TaskName:       dst.Task.TaskName,
+			SchemaNameS:    attsRule.SchemaNameS,
+			TableNameS:     attsRule.TableNameS,
+			SnapshotPointS: globalScn,
+			TableRowsS:     tableRows,
+			TableSizeS:     tableSize,
+			ChunkTotals:    uint64(totalChunkRecs),
+			InitFlag:       constant.TaskInitStatusFinished,
+			ScanFlag:       constant.TaskScanStatusNotFinished,
+		})
+		if err != nil {
+			return err
+		}
+		return nil
+ }) + + err := gC.Wait() + if err != nil { + return err + } + + dst.WaiterC <- &WaitingRecs{ + TaskName: dst.Task.TaskName, + SchemaNameS: schemaNameS, + TableNameS: tableNameS, + } + return nil +} + +func (dst *DataScanTask) PrepareStatisticsRange(globalScn string, attsRule *database.DataScanAttributesRule, ranges []*structure.Range) ([]*task.DataScanTask, error) { + var ( + metas []*task.DataScanTask + err error + ) + + for _, r := range ranges { + toStringS, toStringSArgs := r.ToString() + var argsS string + if toStringSArgs != nil { + argsS, err = stringutil.MarshalJSON(toStringSArgs) + if err != nil { + return nil, err + } + } + + encChunkS := snappy.Encode(nil, []byte(toStringS)) + + encryptChunkS, err := stringutil.Encrypt(stringutil.BytesToString(encChunkS), []byte(constant.DefaultDataEncryptDecryptKey)) + if err != nil { + return nil, err + } + metas = append(metas, &task.DataScanTask{ + TaskName: dst.Task.TaskName, + SchemaNameS: attsRule.SchemaNameS, + TableNameS: attsRule.TableNameS, + TableTypeS: attsRule.TableTypeS, + SnapshotPointS: globalScn, + ColumnDetailS: attsRule.ColumnDetailS, + GroupColumnS: attsRule.GroupColumnS, + SqlHintS: attsRule.SqlHintS, + ChunkID: uuid.New().String(), + ChunkDetailS: encryptChunkS, + ChunkDetailArgS: argsS, + Samplerate: attsRule.TableSamplerateS, + ConsistentReadS: strconv.FormatBool(dst.TaskParams.EnableConsistentRead), + TaskStatus: constant.TaskDatabaseStatusWaiting, + }) + } + return metas, nil +} diff --git a/database/processor/migrate_task_table.go b/database/processor/migrate_task_table.go new file mode 100644 index 0000000..1afaee1 --- /dev/null +++ b/database/processor/migrate_task_table.go @@ -0,0 +1,24 @@ +/* +Copyright © 2020 Marvin + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package processor + +type WaitingRecs struct { + TaskName string + SchemaNameS string + TableNameS string + SchemaNameT string + TableNameT string +} diff --git a/database/processor/processor_migrate_stats.go b/database/processor/processor_migrate_stats.go new file mode 100644 index 0000000..960a058 --- /dev/null +++ b/database/processor/processor_migrate_stats.go @@ -0,0 +1,122 @@ +/* +Copyright © 2020 Marvin + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package processor + +import ( + "fmt" + "github.com/wentaojin/dbms/database" + "github.com/wentaojin/dbms/logger" + "github.com/wentaojin/dbms/utils/constant" + "github.com/wentaojin/dbms/utils/stringutil" + "github.com/wentaojin/dbms/utils/structure" + "go.uber.org/zap" + "strings" +) + +// ExtractDatabaseTableStatisticsValuesFromBuckets analyzes upperBound or lowerBound into a string value for each column. +// upperBound and lowerBound look like '(123, abc)' for multiple fields, or '123' for one field. +func ExtractDatabaseTableStatisticsValuesFromBuckets(divideDbType, valueString string, columnNames []string) ([]string, error) { + switch stringutil.StringUpper(divideDbType) { + case constant.DatabaseTypeTiDB: + // FIXME: maybe some values contain '(', ')' or ', ' + vStr := strings.Trim(valueString, "()") + values := strings.Split(vStr, ", ") + if len(values) != len(columnNames) { + return nil, fmt.Errorf("extract database type [%s] value %s failed, values %v don't match columnNames %v", divideDbType, valueString, values, columnNames) + } + return values, nil + case constant.DatabaseTypeOracle: + values := strings.Split(valueString, constant.StringSeparatorComma) + if len(values) != len(columnNames) { + return nil, fmt.Errorf("extract database type [%s] value %s failed, values %v don't match columnNames %v", divideDbType, valueString, values, columnNames) + } + return values, nil + default: + return nil, fmt.Errorf("extract database type [%s] value %s is not supported, please contact author or reselect", divideDbType, valueString) + } +} + +// DivideDatabaseTableColumnStatisticsBucket splits a chunk into multiple chunks by random values. +// Notice: if `divideCountCnt <= 1`, it skips splitting and returns `chunkRange` as a single-element slice directly. +func DivideDatabaseTableColumnStatisticsBucket(database database.IDatabase, schemaName, tableName string, cons *structure.Selectivity, chunkRange *structure.Range, divideCountCnt int) ([]*structure.Range, error) { + var chunkRanges []*structure.Range + + if divideCountCnt <= 1 { + chunkRanges = append(chunkRanges, chunkRange) + return chunkRanges, nil + } + + chunkConds, chunkArgs := chunkRange.ToString() + + randomValueSli, err := database.GetDatabaseTableRandomValues(schemaName, tableName, cons.IndexColumn, chunkConds, chunkArgs, divideCountCnt-1, cons.ColumnCollation) + if err != nil { + return nil, err + } + + logger.Debug("divide database bucket value by random", zap.Stringer("chunk", chunkRange), zap.Int("random values num", len(randomValueSli)), zap.Any("random values", randomValueSli)) + + for i := 0; i <= len(randomValueSli); i++ { + newChunk := chunkRange.Copy() + + for j, columnName := range cons.IndexColumn { + if i == 0 { + if len(randomValueSli) == 0 { + // randomValueSli is empty, so append the original chunk itself + break + } + err = newChunk.Update( + columnName, + cons.ColumnCollation[j], + cons.ColumnDatatype[j], + cons.DatetimePrecision[j], + "", randomValueSli[i][j], false, true) + if err != nil { + return chunkRanges, err + } + } else if i == len(randomValueSli) { + // the last sub-chunk keeps the bucket upper bound newChunk.Bounds[j].Upper + err = newChunk.Update( + columnName, + cons.ColumnCollation[j], + cons.ColumnDatatype[j], + cons.DatetimePrecision[j], + randomValueSli[i-1][j], newChunk.Bounds[j].Upper, true, true) + if err != nil { + return chunkRanges, err + } + } else { + err = newChunk.Update( + columnName, + cons.ColumnCollation[j], + cons.ColumnDatatype[j], + cons.DatetimePrecision[j], + randomValueSli[i-1][j], + randomValueSli[i][j], true, true) + if err != nil { + return
chunkRanges, err + } + } + } + chunkRanges = append(chunkRanges, newChunk) + } + + logger.Debug("divide database bucket value by random", + zap.Int("divide chunk range num", len(chunkRanges)), + zap.Stringer("origin chunk range", chunkRange), + zap.Any("new chunk range", chunkRanges)) + + return chunkRanges, nil +} diff --git a/database/oracle/taskflow/sql_migrate.go b/database/processor/sql_migrate_task.go similarity index 60% rename from database/oracle/taskflow/sql_migrate.go rename to database/processor/sql_migrate_task.go index f2dc1ab..3e96c29 100644 --- a/database/oracle/taskflow/sql_migrate.go +++ b/database/processor/sql_migrate_task.go @@ -13,75 +13,216 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -package taskflow +package processor import ( "context" "fmt" - "github.com/wentaojin/dbms/database/processor" - "strconv" - "strings" - "time" - - "github.com/wentaojin/dbms/model/rule" - "github.com/wentaojin/dbms/database" "github.com/wentaojin/dbms/errconcurrent" "github.com/wentaojin/dbms/logger" "github.com/wentaojin/dbms/model" - "github.com/wentaojin/dbms/model/datasource" + "github.com/wentaojin/dbms/model/rule" "github.com/wentaojin/dbms/model/task" "github.com/wentaojin/dbms/proto/pb" "github.com/wentaojin/dbms/utils/constant" "github.com/wentaojin/dbms/utils/stringutil" "go.uber.org/zap" + "strconv" + "strings" + "time" ) type SqlMigrateTask struct { - Ctx context.Context - Task *task.Task - DatasourceS *datasource.Datasource - DatasourceT *datasource.Datasource - TaskParams *pb.SqlMigrateParam + Ctx context.Context + Task *task.Task + DatabaseS database.IDatabase + DatabaseT database.IDatabase + DBRoleS string + DBCharsetS string + DBCharsetT string + DBVersionS string + + TaskParams *pb.SqlMigrateParam } -func (smt *SqlMigrateTask) Start() error { - schemaStartTime := time.Now() - logger.Info("sql migrate task init database connection", +func (smt *SqlMigrateTask) Init() error { + logger.Info("sql migrate task init table", + zap.String("task_name", smt.Task.TaskName), zap.String("task_mode", smt.Task.TaskMode), zap.String("task_flow", smt.Task.TaskFlow)) + + if !smt.TaskParams.EnableCheckpoint { + err := model.GetISqlMigrateTaskRW().DeleteSqlMigrateTaskName(smt.Ctx, []string{smt.Task.TaskName}) + if err != nil { + return err + } + err = model.GetISqlMigrateSummaryRW().DeleteSqlMigrateSummaryName(smt.Ctx, []string{smt.Task.TaskName}) + if err != nil { + return err + } + } + logger.Warn("sql migrate task init skip", zap.String("task_name", smt.Task.TaskName), zap.String("task_mode", smt.Task.TaskMode), - zap.String("task_flow", smt.Task.TaskFlow)) - databaseS, err := database.NewDatabase(smt.Ctx, smt.DatasourceS, "", int64(smt.TaskParams.CallTimeout)) + zap.String("task_flow", smt.Task.TaskFlow), + zap.Bool("enable_checkpoint", smt.TaskParams.EnableCheckpoint)) + + // compare the task table + // the database task table is exist, and the config task table isn't exist, the clear the database task table + s, err := model.GetISqlMigrateSummaryRW().GetSqlMigrateSummary(smt.Ctx, &task.SqlMigrateSummary{TaskName: smt.Task.TaskName}) if err != nil { return err } - defer databaseS.Close() - databaseT, err := database.NewDatabase(smt.Ctx, smt.DatasourceT, "", int64(smt.TaskParams.CallTimeout)) + if strings.EqualFold(s.InitFlag, constant.TaskInitStatusNotFinished) { + err = model.Transaction(smt.Ctx, func(txnCtx context.Context) error { + err = 
model.GetISqlMigrateSummaryRW().DeleteSqlMigrateSummary(txnCtx, &task.SqlMigrateSummary{ + TaskName: s.TaskName, + }) + if err != nil { + return err + } + err = model.GetISqlMigrateTaskRW().DeleteSqlMigrateTask(txnCtx, &task.SqlMigrateTask{ + TaskName: s.TaskName, + }) + if err != nil { + return err + } + return nil + }) + if err != nil { + return err + } + } + + var globalScn string + + globalScnS, err := smt.DatabaseS.GetDatabaseConsistentPos() if err != nil { return err } - defer databaseT.Close() - logger.Info("sql migrate task inspect migrate task", + globalScn = strconv.FormatUint(globalScnS, 10) + + logger.Info("sql migrate task init sql", zap.String("task_name", smt.Task.TaskName), zap.String("task_mode", smt.Task.TaskMode), zap.String("task_flow", smt.Task.TaskFlow)) - _, err = processor.InspectOracleMigrateTask(smt.Task.TaskName, smt.Task.TaskFlow, smt.Task.TaskMode, databaseS, stringutil.StringUpper(smt.DatasourceS.ConnectCharset), stringutil.StringUpper(smt.DatasourceT.ConnectCharset)) + + migrateSqlRules, err := model.GetISqlMigrateRuleRW().FindSqlMigrateRule(smt.Ctx, &rule.SqlMigrateRule{ + TaskName: smt.Task.TaskName, + }) if err != nil { return err } - logger.Info("sql migrate task init migrate task", - zap.String("task_name", smt.Task.TaskName), - zap.String("task_mode", smt.Task.TaskMode), - zap.String("task_flow", smt.Task.TaskFlow)) - err = smt.InitSqlMigrateTask(databaseS) + var sqlMigrateTasks []*task.SqlMigrateTask + for _, sr := range migrateSqlRules { + columnRouteRule := make(map[string]string) + err = stringutil.UnmarshalJSON([]byte(sr.ColumnRouteRule), &columnRouteRule) + if err != nil { + return err + } + dataRule := &SqlMigrateRule{ + Ctx: smt.Ctx, + TaskName: smt.Task.TaskName, + TaskMode: smt.Task.TaskMode, + TaskFlow: smt.Task.TaskFlow, + SchemaNameT: sr.SchemaNameT, + TableNameT: sr.TableNameT, + SqlHintT: sr.SqlHintT, + GlobalSqlHintT: smt.TaskParams.SqlHintT, + DatabaseS: smt.DatabaseS, + DBCharsetS: stringutil.StringUpper(smt.DBCharsetS), + SqlQueryS: sr.SqlQueryS, + ColumnRouteRule: columnRouteRule, + CaseFieldRuleS: smt.Task.CaseFieldRuleS, + CaseFieldRuleT: smt.Task.CaseFieldRuleT, + } + attrs, err := database.ISqlMigrateAttributesRule(dataRule) + if err != nil { + return err + } + sqlMigrateTasks = append(sqlMigrateTasks, &task.SqlMigrateTask{ + TaskName: smt.Task.TaskName, + SchemaNameT: attrs.SchemaNameT, + TableNameT: attrs.TableNameT, + SnapshotPointS: globalScn, + ColumnDetailO: attrs.ColumnDetailO, + ColumnDetailS: attrs.ColumnDetailS, + ColumnDetailT: attrs.ColumnDetailT, + SqlHintT: attrs.SqlHintT, + ConsistentReadS: strconv.FormatBool(smt.TaskParams.EnableConsistentRead), + TaskStatus: constant.TaskDatabaseStatusWaiting, + SqlQueryS: attrs.SqlQueryS, + }) + } + + migrateSqlRuleGroupResults, err := model.GetISqlMigrateRuleRW().FindSqlMigrateRuleGroupBySchemaTable(smt.Ctx) if err != nil { return err } - var migrateTasks []*task.SqlMigrateTask + err = model.Transaction(smt.Ctx, func(txnCtx context.Context) error { + err = model.GetISqlMigrateTaskRW().CreateInBatchSqlMigrateTask(txnCtx, sqlMigrateTasks, int(smt.TaskParams.WriteThread), int(smt.TaskParams.BatchSize)) + if err != nil { + return err + } + for _, r := range migrateSqlRuleGroupResults { + _, err = model.GetISqlMigrateSummaryRW().CreateSqlMigrateSummary(txnCtx, &task.SqlMigrateSummary{ + TaskName: smt.Task.TaskName, + SqlTotals: r.RowTotals, + InitFlag: constant.TaskInitStatusFinished, + MigrateFlag: constant.TaskMigrateStatusNotFinished, + }) + if err != nil { + return err + } + } + 
return nil + }) + if err != nil { + return err + } + + return nil +} + +func (smt *SqlMigrateTask) Run() error { + logger.Info("sql migrate task run table", + zap.String("task_name", smt.Task.TaskName), zap.String("task_mode", smt.Task.TaskMode), zap.String("task_flow", smt.Task.TaskFlow)) + err := smt.Process() + if err != nil { + return err + } + return nil +} + +func (smt *SqlMigrateTask) Resume() error { + logger.Info("sql migrate task resume table", + zap.String("task_name", smt.Task.TaskName), zap.String("task_mode", smt.Task.TaskMode), zap.String("task_flow", smt.Task.TaskFlow)) + return nil +} + +func (smt *SqlMigrateTask) Process() error { + startTime := time.Now() + s, err := model.GetISqlMigrateSummaryRW().GetSqlMigrateSummary(smt.Ctx, &task.SqlMigrateSummary{TaskName: smt.Task.TaskName}) + if err != nil { + return err + } + + if strings.EqualFold(s.MigrateFlag, constant.TaskMigrateStatusFinished) { + logger.Warn("sql migrate task migrate skip", + zap.String("task_name", smt.Task.TaskName), + zap.String("task_mode", smt.Task.TaskMode), + zap.String("task_flow", smt.Task.TaskFlow), + zap.String("init_flag", s.InitFlag), + zap.String("migrate_flag", s.MigrateFlag), + zap.String("action", "migrate skip")) + return nil + } + var ( + migrateTasks []*task.SqlMigrateTask + ) err = model.Transaction(smt.Ctx, func(txnCtx context.Context) error { // get migrate task tables migrateTasks, err = model.GetISqlMigrateTaskRW().FindSqlMigrateTaskByTaskStatus(txnCtx, @@ -148,7 +289,7 @@ func (smt *SqlMigrateTask) Start() error { TaskName: dt.TaskName, LogDetail: fmt.Sprintf("%v [%v] sql migrate task [%v] taskflow [%v] schema_name_t [%v] table_name_t [%v] start", stringutil.CurrentTimeFormatString(), - stringutil.StringLower(constant.TaskModeSqlMigrate), + stringutil.StringLower(smt.Task.TaskMode), dt.TaskName, smt.Task.TaskFlow, dt.SchemaNameT, @@ -163,24 +304,24 @@ func (smt *SqlMigrateTask) Start() error { return errW } - sqlStr := processor.GenMYSQLCompatibleDatabasePrepareStmt(dt.SchemaNameT, dt.TableNameT, smt.TaskParams.SqlHintT, dt.ColumnDetailT, int(smt.TaskParams.BatchSize), true) + sqlStr := GenMYSQLCompatibleDatabasePrepareStmt(dt.SchemaNameT, dt.TableNameT, smt.TaskParams.SqlHintT, dt.ColumnDetailT, int(smt.TaskParams.BatchSize), true) - stmt, err := databaseT.PrepareContext(smt.Ctx, sqlStr) + stmt, err := smt.DatabaseT.PrepareContext(smt.Ctx, sqlStr) if err != nil { return err } defer stmt.Close() - err = database.IDataMigrateProcess(&processor.SqlMigrateRow{ + err = database.IDataMigrateProcess(&SqlMigrateRow{ Ctx: smt.Ctx, TaskMode: smt.Task.TaskMode, TaskFlow: smt.Task.TaskFlow, Smt: dt, - DatabaseS: databaseS, - DatabaseT: databaseT, + DatabaseS: smt.DatabaseS, + DatabaseT: smt.DatabaseT, DatabaseTStmt: stmt, - DBCharsetS: constant.MigrateOracleCharsetStringConvertMapping[stringutil.StringUpper(smt.DatasourceS.ConnectCharset)], - DBCharsetT: stringutil.StringUpper(smt.DatasourceT.ConnectCharset), + DBCharsetS: constant.MigrateOracleCharsetStringConvertMapping[stringutil.StringUpper(smt.DBCharsetS)], + DBCharsetT: stringutil.StringUpper(smt.DBCharsetT), SqlThreadT: int(smt.TaskParams.SqlThreadT), BatchSize: int(smt.TaskParams.BatchSize), CallTimeout: int(smt.TaskParams.CallTimeout), @@ -206,7 +347,7 @@ func (smt *SqlMigrateTask) Start() error { TaskName: dt.TaskName, LogDetail: fmt.Sprintf("%v [%v] sql migrate task [%v] taskflow [%v] schema_name_t [%v] table_name_t [%v] success", stringutil.CurrentTimeFormatString(), - stringutil.StringLower(constant.TaskModeSqlMigrate), + 
stringutil.StringLower(smt.Task.TaskMode), dt.TaskName, smt.Task.TaskFlow, dt.SchemaNameT, @@ -253,7 +394,7 @@ func (smt *SqlMigrateTask) Start() error { TaskName: esmt.TaskName, LogDetail: fmt.Sprintf("%v [%v] sql migrate task [%v] taskflow [%v] schema_name_t [%v] table_name_t [%v] failed, please see [sql_migrate_task] detail", stringutil.CurrentTimeFormatString(), - stringutil.StringLower(constant.TaskModeSqlMigrate), + stringutil.StringLower(smt.Task.TaskMode), esmt.TaskName, smt.Task.TaskFlow, esmt.SchemaNameT, @@ -270,41 +411,62 @@ func (smt *SqlMigrateTask) Start() error { } } - schemaEndTime := time.Now() - _, err = model.GetISqlMigrateSummaryRW().UpdateSqlMigrateSummary(smt.Ctx, &task.SqlMigrateSummary{ - TaskName: smt.Task.TaskName, - }, map[string]interface{}{ - "Duration": fmt.Sprintf("%f", schemaEndTime.Sub(schemaStartTime).Seconds()), - }) - if err != nil { - return err - } - logger.Info("sql migrate task", - zap.String("task_name", smt.Task.TaskName), - zap.String("task_mode", smt.Task.TaskMode), - zap.String("task_flow", smt.Task.TaskFlow), - zap.String("cost", schemaEndTime.Sub(schemaStartTime).String())) - return nil -} - -func (smt *SqlMigrateTask) InitSqlMigrateTask(databaseS database.IDatabase) error { - var ( - initFlags *task.Task - err error - ) + var successRecs int64 err = model.Transaction(smt.Ctx, func(txnCtx context.Context) error { - initFlags, err = model.GetITaskRW().GetTask(txnCtx, &task.Task{TaskName: smt.Task.TaskName}) + tableStatusRecs, err := model.GetISqlMigrateTaskRW().FindSqlMigrateTaskGroupByTaskStatus(txnCtx, smt.Task.TaskName) if err != nil { return err } - if strings.EqualFold(initFlags.TaskInit, constant.TaskInitStatusNotFinished) { - err = model.GetISqlMigrateTaskRW().DeleteSqlMigrateTaskName(txnCtx, []string{smt.Task.TaskName}) - if err != nil { - return err - } - err = model.GetISqlMigrateSummaryRW().DeleteSqlMigrateSummaryName(txnCtx, []string{smt.Task.TaskName}) - if err != nil { - return err + for _, rec := range tableStatusRecs { + switch rec.TaskStatus { + case constant.TaskDatabaseStatusSuccess: + _, err = model.GetISqlMigrateSummaryRW().UpdateSqlMigrateSummary(txnCtx, &task.SqlMigrateSummary{ + TaskName: rec.TaskName, + }, map[string]interface{}{ + "SqlSuccess": rec.StatusCounts, + }) + if err != nil { + return err + } + successRecs = successRecs + rec.StatusCounts + case constant.TaskDatabaseStatusFailed: + _, err = model.GetISqlMigrateSummaryRW().UpdateSqlMigrateSummary(txnCtx, &task.SqlMigrateSummary{ + TaskName: rec.TaskName, + }, map[string]interface{}{ + "SqlFails": rec.StatusCounts, + }) + if err != nil { + return err + } + case constant.TaskDatabaseStatusWaiting: + _, err = model.GetISqlMigrateSummaryRW().UpdateSqlMigrateSummary(txnCtx, &task.SqlMigrateSummary{ + TaskName: rec.TaskName, + }, map[string]interface{}{ + "SqlWaits": rec.StatusCounts, + }) + if err != nil { + return err + } + case constant.TaskDatabaseStatusRunning: + _, err = model.GetISqlMigrateSummaryRW().UpdateSqlMigrateSummary(txnCtx, &task.SqlMigrateSummary{ + TaskName: rec.TaskName, + }, map[string]interface{}{ + "SqlRuns": rec.StatusCounts, + }) + if err != nil { + return err + } + case constant.TaskDatabaseStatusStopped: + _, err = model.GetISqlMigrateSummaryRW().UpdateSqlMigrateSummary(txnCtx, &task.SqlMigrateSummary{ + TaskName: rec.TaskName, + }, map[string]interface{}{ + "SqlStops": rec.StatusCounts, + }) + if err != nil { + return err + } + default: + return fmt.Errorf("the task [%v] task_mode [%s] task_flow [%v] task_status [%v] panic, please contact 
auhtor or reselect", smt.Task.TaskName, smt.Task.TaskMode, smt.Task.TaskFlow, rec.TaskStatus) } } return nil @@ -313,112 +475,30 @@ func (smt *SqlMigrateTask) InitSqlMigrateTask(databaseS database.IDatabase) erro return err } - // repeatedDoneInfos used for store the sql_migrate_task information has be finished, avoid repeated initialization - migrateDoneInfos, err := model.GetISqlMigrateTaskRW().FindSqlMigrateTaskByTaskStatus(smt.Ctx, &task.SqlMigrateTask{TaskName: smt.Task.TaskName, TaskStatus: constant.TaskDatabaseStatusSuccess}) - if err != nil { - return err - } - - repeatedDoneInfos := make(map[string]struct{}) - for _, m := range migrateDoneInfos { - repeatedDoneInfos[stringutil.StringBuilder(m.TaskName, m.SchemaNameT, m.TableNameT, m.SqlQueryS)] = struct{}{} - } - - var globalScn string - - globalScnS, err := databaseS.GetDatabaseConsistentPos() - if err != nil { - return err - } - - globalScn = strconv.FormatUint(globalScnS, 10) - - logger.Info("sql migrate task init sql", - zap.String("task_name", smt.Task.TaskName), - zap.String("task_mode", smt.Task.TaskMode), - zap.String("task_flow", smt.Task.TaskFlow)) - - migrateSqlRules, err := model.GetISqlMigrateRuleRW().FindSqlMigrateRule(smt.Ctx, &rule.SqlMigrateRule{ - TaskName: smt.Task.TaskName, - }) + s, err = model.GetISqlMigrateSummaryRW().GetSqlMigrateSummary(smt.Ctx, &task.SqlMigrateSummary{TaskName: smt.Task.TaskName}) if err != nil { return err } - - var sqlMigrateTasks []*task.SqlMigrateTask - for _, s := range migrateSqlRules { - if _, ok := repeatedDoneInfos[stringutil.StringBuilder(s.TaskName, s.SchemaNameT, s.TableNameT, s.SqlQueryS)]; ok { - continue - } - - columnRouteRule := make(map[string]string) - err = stringutil.UnmarshalJSON([]byte(s.ColumnRouteRule), &columnRouteRule) - if err != nil { - return err - } - dataRule := &processor.SqlMigrateRule{ - Ctx: smt.Ctx, - TaskName: smt.Task.TaskName, - TaskMode: smt.Task.TaskMode, - TaskFlow: smt.Task.TaskFlow, - SchemaNameT: s.SchemaNameT, - TableNameT: s.TableNameT, - SqlHintT: s.SqlHintT, - GlobalSqlHintT: smt.TaskParams.SqlHintT, - DatabaseS: databaseS, - DBCharsetS: smt.DatasourceS.ConnectCharset, - SqlQueryS: s.SqlQueryS, - ColumnRouteRule: columnRouteRule, - CaseFieldRuleS: smt.Task.CaseFieldRuleS, - CaseFieldRuleT: smt.Task.CaseFieldRuleT, - } - attrs, err := database.ISqlMigrateAttributesRule(dataRule) - if err != nil { - return err - } - sqlMigrateTasks = append(sqlMigrateTasks, &task.SqlMigrateTask{ - TaskName: smt.Task.TaskName, - SchemaNameT: attrs.SchemaNameT, - TableNameT: attrs.TableNameT, - SnapshotPointS: globalScn, - ColumnDetailO: attrs.ColumnDetailO, - ColumnDetailS: attrs.ColumnDetailS, - ColumnDetailT: attrs.ColumnDetailT, - SqlHintT: attrs.SqlHintT, - ConsistentReadS: strconv.FormatBool(smt.TaskParams.EnableConsistentRead), - TaskStatus: constant.TaskDatabaseStatusWaiting, - SqlQueryS: attrs.SqlQueryS, + if int64(s.SqlSuccess) == successRecs { + _, err = model.GetISqlMigrateSummaryRW().UpdateSqlMigrateSummary(smt.Ctx, &task.SqlMigrateSummary{ + TaskName: smt.Task.TaskName, + }, map[string]interface{}{ + "MigrateFlag": constant.TaskMigrateStatusFinished, + "Duration": fmt.Sprintf("%f", time.Now().Sub(startTime).Seconds()), }) - } - - migrateSqlRuleGroupResults, err := model.GetISqlMigrateRuleRW().FindSqlMigrateRuleGroupBySchemaTable(smt.Ctx) - if err != nil { - return err - } - - err = model.Transaction(smt.Ctx, func(txnCtx context.Context) error { - _, err = model.GetITaskRW().UpdateTask(txnCtx, &task.Task{TaskName: smt.Task.TaskName}, 
map[string]interface{}{"TaskInit": constant.TaskInitStatusFinished}) if err != nil { return err } - err = model.GetISqlMigrateTaskRW().CreateInBatchSqlMigrateTask(txnCtx, sqlMigrateTasks, int(smt.TaskParams.WriteThread), int(smt.TaskParams.BatchSize)) + } else { + _, err = model.GetISqlMigrateSummaryRW().UpdateSqlMigrateSummary(smt.Ctx, &task.SqlMigrateSummary{ + TaskName: smt.Task.TaskName, + }, map[string]interface{}{ + "MigrateFlag": constant.TaskMigrateStatusNotFinished, + "Duration": fmt.Sprintf("%f", time.Now().Sub(startTime).Seconds()), + }) if err != nil { return err } - for _, r := range migrateSqlRuleGroupResults { - _, err = model.GetISqlMigrateSummaryRW().CreateSqlMigrateSummary(txnCtx, &task.SqlMigrateSummary{ - TaskName: smt.Task.TaskName, - SqlTotals: r.RowTotals, - }) - if err != nil { - return err - } - } - return nil - }) - if err != nil { - return err } - return nil } diff --git a/database/processor/stmt_migrate_row.go b/database/processor/stmt_migrate_row.go index be73d18..9e62478 100644 --- a/database/processor/stmt_migrate_row.go +++ b/database/processor/stmt_migrate_row.go @@ -106,7 +106,7 @@ func (r *StmtMigrateRow) MigrateRead() error { return fmt.Errorf("the task_flow [%s] task_mode [%s] isn't support, please contact author or reselect", r.TaskFlow, r.TaskMode) } - logger.Info("stmt migrate task chunk rows extractor starting", + logger.Info("data migrate task chunk rows extractor starting", zap.String("task_name", r.Dmt.TaskName), zap.String("task_mode", r.TaskMode), zap.String("task_flow", r.TaskFlow), @@ -134,7 +134,7 @@ func (r *StmtMigrateRow) MigrateRead() error { } endTime := time.Now() - logger.Info("stmt migrate task chunk rows extractor finished", + logger.Info("data migrate task chunk rows extractor finished", zap.String("task_name", r.Dmt.TaskName), zap.String("task_mode", r.TaskMode), zap.String("task_flow", r.TaskFlow), @@ -168,7 +168,7 @@ func (r *StmtMigrateRow) MigrateApply() error { return err } chunkDetailS := stringutil.BytesToString(decChunkDetailS) - logger.Info("stmt migrate task chunk rows applier starting", + logger.Info("data migrate task chunk rows applier starting", zap.String("task_name", r.Dmt.TaskName), zap.String("task_mode", r.TaskMode), zap.String("task_flow", r.TaskFlow), @@ -214,7 +214,7 @@ func (r *StmtMigrateRow) MigrateApply() error { return err } - logger.Info("stmt migrate task chunk rows applier finished", + logger.Info("data migrate task chunk rows applier finished", zap.String("task_name", r.Dmt.TaskName), zap.String("task_mode", r.TaskMode), zap.String("task_flow", r.TaskFlow), diff --git a/database/oracle/taskflow/struct_compare.go b/database/processor/struct_compare_task.go similarity index 71% rename from database/oracle/taskflow/struct_compare.go rename to database/processor/struct_compare_task.go index c74540d..30dc9ea 100644 --- a/database/oracle/taskflow/struct_compare.go +++ b/database/processor/struct_compare_task.go @@ -13,106 +13,289 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -package taskflow +package processor import ( "context" "fmt" - "strings" - "time" - - "github.com/wentaojin/dbms/database/processor" - "github.com/golang/snappy" - "github.com/wentaojin/dbms/database" "github.com/wentaojin/dbms/errconcurrent" "github.com/wentaojin/dbms/logger" "github.com/wentaojin/dbms/model" - "github.com/wentaojin/dbms/model/datasource" + "github.com/wentaojin/dbms/model/buildin" "github.com/wentaojin/dbms/model/rule" "github.com/wentaojin/dbms/model/task" "github.com/wentaojin/dbms/proto/pb" "github.com/wentaojin/dbms/utils/constant" "github.com/wentaojin/dbms/utils/stringutil" "go.uber.org/zap" + "strings" + "time" ) type StructCompareTask struct { - Ctx context.Context - Task *task.Task - DatasourceS *datasource.Datasource - DatasourceT *datasource.Datasource - TaskParams *pb.StructCompareParam + Ctx context.Context + Task *task.Task + DBTypeS string + DBTypeT string + DatabaseS database.IDatabase + DatabaseT database.IDatabase + DBCharsetS string + DBCharsetT string + SchemaNameS string + SchemaNameT string + StartTime time.Time + TaskParams *pb.StructCompareParam + BuildInDatatypeRulesS []*buildin.BuildinDatatypeRule + BuildInDefaultValueRulesS []*buildin.BuildinDefaultvalRule + BuildInDatatypeRulesT []*buildin.BuildinDatatypeRule + BuildInDefaultValueRulesT []*buildin.BuildinDefaultvalRule } -func (dmt *StructCompareTask) Start() error { - schemaTaskTime := time.Now() - logger.Info("struct compare task get schema route", +func (dmt *StructCompareTask) Init() error { + logger.Info("struct compare task init table", zap.String("task_name", dmt.Task.TaskName), zap.String("task_mode", dmt.Task.TaskMode), zap.String("task_flow", dmt.Task.TaskFlow)) - schemaRoute, err := model.GetIMigrateSchemaRouteRW().GetSchemaRouteRule(dmt.Ctx, &rule.SchemaRouteRule{TaskName: dmt.Task.TaskName}) + + if !dmt.TaskParams.EnableCheckpoint { + err := model.GetIStructCompareSummaryRW().DeleteStructCompareSummaryName(dmt.Ctx, []string{dmt.Task.TaskName}) + if err != nil { + return err + } + err = model.GetIStructCompareTaskRW().DeleteStructCompareTaskName(dmt.Ctx, []string{dmt.Task.TaskName}) + if err != nil { + return err + } + } + logger.Warn("struct compare task checkpoint skip", + zap.String("task_name", dmt.Task.TaskName), + zap.String("task_mode", dmt.Task.TaskMode), + zap.String("task_flow", dmt.Task.TaskFlow), + zap.Bool("enable_checkpoint", dmt.TaskParams.EnableCheckpoint)) + s, err := model.GetIStructCompareSummaryRW().GetStructCompareSummary(dmt.Ctx, &task.StructCompareSummary{TaskName: dmt.Task.TaskName, SchemaNameS: dmt.SchemaNameS}) if err != nil { return err } - dbTypeSli := stringutil.StringSplit(dmt.Task.TaskFlow, constant.StringSeparatorAite) - buildInDatatypeRulesS, err := model.GetIBuildInDatatypeRuleRW().QueryBuildInDatatypeRule(dmt.Ctx, dbTypeSli[0], dbTypeSli[1]) + if strings.EqualFold(s.InitFlag, constant.TaskInitStatusNotFinished) { + err = model.Transaction(dmt.Ctx, func(txnCtx context.Context) error { + err = model.GetIStructCompareSummaryRW().DeleteStructCompareSummaryName(txnCtx, []string{dmt.Task.TaskName}) + if err != nil { + return err + } + err = model.GetIStructCompareTaskRW().DeleteStructCompareTaskName(txnCtx, []string{dmt.Task.TaskName}) + if err != nil { + return err + } + return nil + }) + if err != nil { + return err + } + } + + // filter database table + schemaTaskTables, err := model.GetIMigrateTaskTableRW().FindMigrateTaskTable(dmt.Ctx, &rule.MigrateTaskTable{ + TaskName: dmt.Task.TaskName, + SchemaNameS: dmt.SchemaNameS, + }) if err != nil { 
return err } - buildInDefaultValueRulesS, err := model.GetBuildInDefaultValueRuleRW().QueryBuildInDefaultValueRule(dmt.Ctx, dbTypeSli[0], dbTypeSli[1]) + var ( + includeTables []string + excludeTables []string + databaseTaskTables []string // task tables + ) + databaseTableTypeMap := make(map[string]string) + taskTablesMap := make(map[string]struct{}) + + for _, t := range schemaTaskTables { + if strings.EqualFold(t.IsExclude, constant.MigrateTaskTableIsExclude) { + excludeTables = append(excludeTables, t.TableNameS) + } + if strings.EqualFold(t.IsExclude, constant.MigrateTaskTableIsNotExclude) { + includeTables = append(includeTables, t.TableNameS) + } + } + + tableObjs, err := dmt.DatabaseS.FilterDatabaseTable(dmt.SchemaNameS, includeTables, excludeTables) if err != nil { return err } - buildInDatatypeRulesT, err := model.GetIBuildInDatatypeRuleRW().QueryBuildInDatatypeRule(dmt.Ctx, dbTypeSli[1], dbTypeSli[0]) + + // rule case field + for _, t := range tableObjs.TaskTables { + var tabName string + // the according target case field rule convert + if strings.EqualFold(dmt.Task.CaseFieldRuleS, constant.ParamValueStructMigrateCaseFieldRuleLower) { + tabName = stringutil.StringLower(t) + } + if strings.EqualFold(dmt.Task.CaseFieldRuleS, constant.ParamValueStructMigrateCaseFieldRuleUpper) { + tabName = stringutil.StringUpper(t) + } + if strings.EqualFold(dmt.Task.CaseFieldRuleS, constant.ParamValueStructMigrateCaseFieldRuleOrigin) { + tabName = t + } + databaseTaskTables = append(databaseTaskTables, tabName) + taskTablesMap[tabName] = struct{}{} + } + + databaseTableTypeMap, err = dmt.DatabaseS.GetDatabaseTableType(dmt.SchemaNameS) if err != nil { return err } - buildInDefaultValueRulesT, err := model.GetBuildInDefaultValueRuleRW().QueryBuildInDefaultValueRule(dmt.Ctx, dbTypeSli[1], dbTypeSli[0]) + + allTablesT, err := dmt.DatabaseT.GetDatabaseTable(dmt.SchemaNameT) if err != nil { return err } + // get table route rule + tableRouteRule := make(map[string]string) + tableRouteRuleT := make(map[string]string) - logger.Info("struct compare task init database connection", - zap.String("task_name", dmt.Task.TaskName), zap.String("task_mode", dmt.Task.TaskMode), zap.String("task_flow", dmt.Task.TaskFlow)) + tableRoutes, err := model.GetIMigrateTableRouteRW().FindTableRouteRule(dmt.Ctx, &rule.TableRouteRule{ + TaskName: dmt.Task.TaskName, + SchemaNameS: dmt.SchemaNameS, + }) + for _, tr := range tableRoutes { + tableRouteRule[tr.TableNameS] = tr.TableNameT + tableRouteRuleT[tr.TableNameT] = tr.TableNameS + } - sourceDatasource, err := model.GetIDatasourceRW().GetDatasource(dmt.Ctx, dmt.Task.DatasourceNameS) - if err != nil { - return err + tableRouteRuleTNew := make(map[string]string) + for _, t := range allTablesT { + if v, ok := tableRouteRuleT[t]; ok { + tableRouteRuleTNew[v] = t + } else { + tableRouteRuleTNew[t] = t + } + } + + var panicTables []string + for _, t := range databaseTaskTables { + if _, ok := tableRouteRuleTNew[t]; !ok { + panicTables = append(panicTables, t) + } + } + if len(panicTables) > 0 { + return fmt.Errorf("the task [%v] task_flow [%v] task_mode [%v] source database tables aren't existed in the target database, please create the tables [%v]", dmt.Task.TaskName, dmt.Task.TaskFlow, dmt.Task.TaskMode, stringutil.StringJoin(panicTables, constant.StringSeparatorComma)) } - databaseS, err := database.NewDatabase(dmt.Ctx, sourceDatasource, schemaRoute.SchemaNameS, int64(dmt.TaskParams.CallTimeout)) + + // clear the struct compare task table + migrateTasks, err := 
model.GetIStructCompareTaskRW().BatchFindStructCompareTask(dmt.Ctx, &task.StructCompareTask{TaskName: dmt.Task.TaskName}) if err != nil { return err } - defer databaseS.Close() - databaseT, err := database.NewDatabase(dmt.Ctx, dmt.DatasourceT, "", int64(dmt.TaskParams.CallTimeout)) + + // repeatInitTableMap used for store the struct_migrate_task table name has be finished, avoid repeated initialization + repeatInitTableMap := make(map[string]struct{}) + if len(migrateTasks) > 0 { + for _, smt := range migrateTasks { + if _, ok := taskTablesMap[smt.TableNameS]; !ok { + err = model.GetIStructCompareTaskRW().DeleteStructCompareTask(dmt.Ctx, smt.ID) + if err != nil { + return err + } + } else { + repeatInitTableMap[smt.TableNameS] = struct{}{} + } + } + } + + err = model.GetIStructCompareSummaryRW().DeleteStructCompareSummary(dmt.Ctx, &task.StructCompareSummary{TaskName: dmt.Task.TaskName}) if err != nil { return err } - defer databaseT.Close() - logger.Info("struct compare task inspect migrate task", - zap.String("task_name", dmt.Task.TaskName), zap.String("task_mode", dmt.Task.TaskMode), zap.String("task_flow", dmt.Task.TaskFlow)) - _, err = processor.InspectOracleMigrateTask(dmt.Task.TaskName, dmt.Task.TaskFlow, dmt.Task.TaskMode, databaseS, stringutil.StringUpper(dmt.DatasourceS.ConnectCharset), stringutil.StringUpper(dmt.DatasourceT.ConnectCharset)) + // database tables + // init database table + // get table column route rule + for _, sourceTable := range databaseTaskTables { + // if the table is existed, then skip init + if _, ok := repeatInitTableMap[sourceTable]; ok { + continue + } + var ( + targetTable string + ) + if val, ok := tableRouteRule[sourceTable]; ok { + targetTable = val + } else { + // the according target case field rule convert + if strings.EqualFold(dmt.Task.CaseFieldRuleT, constant.ParamValueStructMigrateCaseFieldRuleLower) { + targetTable = stringutil.StringLower(sourceTable) + } + if strings.EqualFold(dmt.Task.CaseFieldRuleT, constant.ParamValueStructMigrateCaseFieldRuleUpper) { + targetTable = stringutil.StringUpper(sourceTable) + } + if strings.EqualFold(dmt.Task.CaseFieldRuleT, constant.ParamValueStructMigrateCaseFieldRuleOrigin) { + targetTable = sourceTable + } + } + + _, err = model.GetIStructCompareTaskRW().CreateStructCompareTask(dmt.Ctx, &task.StructCompareTask{ + TaskName: dmt.Task.TaskName, + SchemaNameS: dmt.SchemaNameS, + TableNameS: sourceTable, + TableTypeS: databaseTableTypeMap[sourceTable], + SchemaNameT: dmt.SchemaNameT, + TableNameT: targetTable, + TaskStatus: constant.TaskDatabaseStatusWaiting, + }) + if err != nil { + return err + } + } + + _, err = model.GetIStructCompareSummaryRW().CreateStructCompareSummary(dmt.Ctx, + &task.StructCompareSummary{ + TaskName: dmt.Task.TaskName, + SchemaNameS: dmt.SchemaNameS, + TableTotals: uint64(len(databaseTaskTables)), + InitFlag: constant.TaskInitStatusFinished, + CompareFlag: constant.TaskCompareStatusNotFinished, + }) if err != nil { return err } + return nil +} - logger.Info("struct compare task init task", +func (dmt *StructCompareTask) Run() error { + logger.Info("struct compare task run table", zap.String("task_name", dmt.Task.TaskName), zap.String("task_mode", dmt.Task.TaskMode), zap.String("task_flow", dmt.Task.TaskFlow)) - err = dmt.initStructCompareTask(databaseS, databaseT, schemaRoute) + s, err := model.GetIStructCompareSummaryRW().GetStructCompareSummary(dmt.Ctx, &task.StructCompareSummary{TaskName: dmt.Task.TaskName, SchemaNameS: dmt.SchemaNameS}) if err != nil { return err } - 
logger.Info("struct compare task get tables", - zap.String("task_name", dmt.Task.TaskName), zap.String("task_mode", dmt.Task.TaskMode), zap.String("task_flow", dmt.Task.TaskFlow)) - startTableTime := time.Now() + if strings.EqualFold(s.CompareFlag, constant.TaskCompareStatusFinished) { + logger.Warn("struct compare task process skip", + zap.String("task_name", dmt.Task.TaskName), + zap.String("task_mode", dmt.Task.TaskMode), + zap.String("task_flow", dmt.Task.TaskFlow), + zap.String("init_flag", s.InitFlag), + zap.String("compare_flag", s.CompareFlag), + zap.String("action", "compare skip")) + _, err = model.GetIStructCompareSummaryRW().UpdateStructCompareSummary(dmt.Ctx, + &task.StructCompareSummary{ + TaskName: dmt.Task.TaskName, + SchemaNameS: dmt.SchemaNameS}, + map[string]interface{}{ + "CompareFlag": constant.TaskCompareStatusFinished, + "Duration": fmt.Sprintf("%f", time.Now().Sub(dmt.StartTime).Seconds()), + }) + if err != nil { + return err + } + return nil + } logger.Info("struct compare task process table", zap.String("task_name", dmt.Task.TaskName), zap.String("task_mode", dmt.Task.TaskMode), zap.String("task_flow", dmt.Task.TaskFlow), - zap.String("schema_name_s", schemaRoute.SchemaNameS)) + zap.String("schema_name_s", dmt.SchemaNameS)) var migrateTasks []*task.StructCompareTask err = model.Transaction(dmt.Ctx, func(txnCtx context.Context) error { @@ -120,7 +303,7 @@ func (dmt *StructCompareTask) Start() error { migrateTasks, err = model.GetIStructCompareTaskRW().FindStructCompareTask(txnCtx, &task.StructCompareTask{ TaskName: dmt.Task.TaskName, - SchemaNameS: schemaRoute.SchemaNameS, + SchemaNameS: dmt.SchemaNameS, TaskStatus: constant.TaskDatabaseStatusWaiting, }) if err != nil { @@ -129,7 +312,7 @@ func (dmt *StructCompareTask) Start() error { migrateFailedTasks, err := model.GetIStructCompareTaskRW().FindStructCompareTask(txnCtx, &task.StructCompareTask{ TaskName: dmt.Task.TaskName, - SchemaNameS: schemaRoute.SchemaNameS, + SchemaNameS: dmt.SchemaNameS, TaskStatus: constant.TaskDatabaseStatusFailed}) if err != nil { return err @@ -137,7 +320,7 @@ func (dmt *StructCompareTask) Start() error { migrateRunningTasks, err := model.GetIStructCompareTaskRW().FindStructCompareTask(txnCtx, &task.StructCompareTask{ TaskName: dmt.Task.TaskName, - SchemaNameS: schemaRoute.SchemaNameS, + SchemaNameS: dmt.SchemaNameS, TaskStatus: constant.TaskDatabaseStatusRunning}) if err != nil { return err @@ -145,7 +328,7 @@ func (dmt *StructCompareTask) Start() error { migrateStopTasks, err := model.GetIStructCompareTaskRW().FindStructCompareTask(txnCtx, &task.StructCompareTask{ TaskName: dmt.Task.TaskName, - SchemaNameS: schemaRoute.SchemaNameS, + SchemaNameS: dmt.SchemaNameS, TaskStatus: constant.TaskDatabaseStatusStopped}) if err != nil { return err @@ -163,7 +346,7 @@ func (dmt *StructCompareTask) Start() error { zap.String("task_name", dmt.Task.TaskName), zap.String("task_mode", dmt.Task.TaskMode), zap.String("task_flow", dmt.Task.TaskFlow), - zap.String("schema_name_s", schemaRoute.SchemaNameS)) + zap.String("schema_name_s", dmt.SchemaNameS)) g := errconcurrent.NewGroup() g.SetLimit(int(dmt.TaskParams.CompareThread)) @@ -186,9 +369,9 @@ func (dmt *StructCompareTask) Start() error { TableNameS: dt.TableNameS, LogDetail: fmt.Sprintf("%v [%v] struct compare task [%v] taskflow [%v] source table [%v.%v] compare start", stringutil.CurrentTimeFormatString(), - stringutil.StringLower(constant.TaskModeDataCompare), + stringutil.StringLower(dmt.Task.TaskMode), dt.TaskName, - dmt.Task.TaskMode, + 
dmt.Task.TaskFlow, dt.SchemaNameS, dt.TableNameS), }) @@ -203,42 +386,42 @@ func (dmt *StructCompareTask) Start() error { switch { case strings.EqualFold(dmt.Task.TaskFlow, constant.TaskFlowOracleToTiDB) || strings.EqualFold(dmt.Task.TaskFlow, constant.TaskFlowOracleToMySQL): - oracleProcessor, err := database.IStructCompareProcessor(&processor.OracleProcessor{ + oracleProcessor, err := database.IStructCompareProcessor(&OracleProcessor{ Ctx: dmt.Ctx, TaskName: dmt.Task.TaskName, TaskFlow: dmt.Task.TaskFlow, SchemaName: dt.SchemaNameS, TableName: dt.TableNameS, - DBCharset: stringutil.StringUpper(dmt.DatasourceS.ConnectCharset), - Database: databaseS, - BuildinDatatypeRules: buildInDatatypeRulesS, - BuildinDefaultValueRules: buildInDefaultValueRulesS, + DBCharset: dmt.DBCharsetS, + Database: dmt.DatabaseS, + BuildinDatatypeRules: dmt.BuildInDatatypeRulesS, + BuildinDefaultValueRules: dmt.BuildInDefaultValueRulesS, ColumnRouteRules: make(map[string]string), IsBaseline: true, }) if err != nil { - return fmt.Errorf("the struct compare processor database [%s] failed: %v", dmt.DatasourceS.DbType, err) + return fmt.Errorf("the struct compare processor database [%s] failed: %v", dmt.DBTypeS, err) } // oracle baseline, mysql not configure task and not configure rules - mysqlProcessor, err := database.IStructCompareProcessor(&processor.MySQLProcessor{ + mysqlProcessor, err := database.IStructCompareProcessor(&MySQLProcessor{ Ctx: dmt.Ctx, TaskName: dmt.Task.TaskName, TaskFlow: dmt.Task.TaskFlow, SchemaName: dt.SchemaNameT, TableName: dt.TableNameT, - DBCharset: stringutil.StringUpper(dmt.DatasourceT.ConnectCharset), - Database: databaseT, - BuildinDatatypeRules: buildInDatatypeRulesT, - BuildinDefaultValueRules: buildInDefaultValueRulesT, + DBCharset: dmt.DBCharsetT, + Database: dmt.DatabaseT, + BuildinDatatypeRules: dmt.BuildInDatatypeRulesT, + BuildinDefaultValueRules: dmt.BuildInDefaultValueRulesT, ColumnRouteRules: make(map[string]string), IsBaseline: false, }) if err != nil { - return fmt.Errorf("the struct compare processor database [%s] failed: %v", dmt.DatasourceT.DbType, err) + return fmt.Errorf("the struct compare processor database [%s] failed: %v", dmt.DBTypeT, err) } - compareDetail, err := database.IStructCompareTable(&processor.Table{ + compareDetail, err := database.IStructCompareTable(&Table{ TaskName: dmt.Task.TaskName, TaskFlow: dmt.Task.TaskFlow, Source: oracleProcessor, @@ -281,11 +464,11 @@ func (dmt *StructCompareTask) Start() error { return nil } - originStructS, err := databaseS.GetDatabaseTableOriginStruct(dt.SchemaNameS, dt.TableNameS, "TABLE") + originStructS, err := dmt.DatabaseS.GetDatabaseTableOriginStruct(dt.SchemaNameS, dt.TableNameS, "TABLE") if err != nil { return fmt.Errorf("the struct compare table get source origin struct failed: %v", err) } - originStructT, err := databaseT.GetDatabaseTableOriginStruct(dt.SchemaNameT, dt.TableNameT, "") + originStructT, err := dmt.DatabaseT.GetDatabaseTableOriginStruct(dt.SchemaNameT, dt.TableNameT, "") if err != nil { return fmt.Errorf("the struct compare table get target origin struct failed: %v", err) } @@ -324,9 +507,9 @@ func (dmt *StructCompareTask) Start() error { TableNameS: dt.TableNameS, LogDetail: fmt.Sprintf("%v [%v] struct compare task [%v] taskflow [%v] source table [%v.%v] compare equal, please see [struct_compare_task] detail", stringutil.CurrentTimeFormatString(), - stringutil.StringLower(constant.TaskModeStructCompare), + stringutil.StringLower(dmt.Task.TaskMode), dt.TaskName, - dmt.Task.TaskMode, + 
dmt.Task.TaskFlow, dt.SchemaNameS, dt.TableNameS), }) @@ -371,9 +554,9 @@ func (dmt *StructCompareTask) Start() error { TableNameS: smt.TableNameS, LogDetail: fmt.Sprintf("%v [%v] struct compare task [%v] taskflow [%v] source table [%v.%v] failed, please see [struct_compare_task] detail", stringutil.CurrentTimeFormatString(), - stringutil.StringLower(constant.TaskModeStructCompare), + stringutil.StringLower(dmt.Task.TaskMode), smt.TaskName, - dmt.Task.TaskMode, + dmt.Task.TaskFlow, smt.SchemaNameS, smt.TableNameS), }) @@ -398,7 +581,7 @@ func (dmt *StructCompareTask) Start() error { case constant.TaskDatabaseStatusEqual: _, err = model.GetIStructCompareSummaryRW().UpdateStructCompareSummary(txnCtx, &task.StructCompareSummary{ TaskName: rec.TaskName, - SchemaNameS: schemaRoute.SchemaNameS, + SchemaNameS: dmt.SchemaNameS, }, map[string]interface{}{ "TableEquals": rec.StatusCounts, }) @@ -408,7 +591,7 @@ case constant.TaskDatabaseStatusNotEqual: _, err = model.GetIStructCompareSummaryRW().UpdateStructCompareSummary(txnCtx, &task.StructCompareSummary{ TaskName: rec.TaskName, - SchemaNameS: schemaRoute.SchemaNameS, + SchemaNameS: dmt.SchemaNameS, }, map[string]interface{}{ "TableNotEquals": rec.StatusCounts, }) @@ -418,7 +601,7 @@ case constant.TaskDatabaseStatusFailed: _, err = model.GetIStructCompareSummaryRW().UpdateStructCompareSummary(txnCtx, &task.StructCompareSummary{ TaskName: rec.TaskName, - SchemaNameS: schemaRoute.SchemaNameS, + SchemaNameS: dmt.SchemaNameS, }, map[string]interface{}{ "TableFails": rec.StatusCounts, }) @@ -428,7 +611,7 @@ case constant.TaskDatabaseStatusWaiting: _, err = model.GetIStructCompareSummaryRW().UpdateStructCompareSummary(txnCtx, &task.StructCompareSummary{ TaskName: rec.TaskName, - SchemaNameS: schemaRoute.SchemaNameS, + SchemaNameS: dmt.SchemaNameS, }, map[string]interface{}{ "TableWaits": rec.StatusCounts, }) @@ -438,7 +621,7 @@ case constant.TaskDatabaseStatusRunning: _, err = model.GetIStructCompareSummaryRW().UpdateStructCompareSummary(txnCtx, &task.StructCompareSummary{ TaskName: rec.TaskName, - SchemaNameS: schemaRoute.SchemaNameS, + SchemaNameS: dmt.SchemaNameS, }, map[string]interface{}{ "TableRuns": rec.StatusCounts, }) @@ -448,7 +631,7 @@ case constant.TaskDatabaseStatusStopped: _, err = model.GetIStructCompareSummaryRW().UpdateStructCompareSummary(txnCtx, &task.StructCompareSummary{ TaskName: rec.TaskName, - SchemaNameS: schemaRoute.SchemaNameS, + SchemaNameS: dmt.SchemaNameS, }, map[string]interface{}{ "TableStops": rec.StatusCounts, }) @@ -456,16 +639,18 @@ return err } default: - return fmt.Errorf("the task [%v] task_mode [%s] task_flow [%v] schema_name_s [%v] task_status [%v] panic, please contact auhtor or reselect", dmt.Task.TaskName, dmt.Task.TaskMode, dmt.Task.TaskFlow, schemaRoute.SchemaNameS, rec.TaskStatus) + return fmt.Errorf("the task [%v] task_mode [%s] task_flow [%v] schema_name_s [%v] task_status [%v] panic, please contact author or reselect", dmt.Task.TaskName, dmt.Task.TaskMode, dmt.Task.TaskFlow, dmt.SchemaNameS, rec.TaskStatus) } } - _, err = model.GetIStructCompareSummaryRW().UpdateStructCompareSummary(txnCtx, &task.StructCompareSummary{ - TaskName: dmt.Task.TaskName, - SchemaNameS: schemaRoute.SchemaNameS, - }, map[string]interface{}{ - "Duration": 
fmt.Sprintf("%f", time.Now().Sub(startTableTime).Seconds()), - }) + _, err = model.GetIStructMigrateSummaryRW().UpdateStructMigrateSummary(txnCtx, + &task.StructMigrateSummary{ + TaskName: dmt.Task.TaskName, + SchemaNameS: dmt.SchemaNameS}, + map[string]interface{}{ + "CompareFlag": constant.TaskCompareStatusFinished, + "Duration": fmt.Sprintf("%f", time.Now().Sub(dmt.StartTime).Seconds()), + }) if err != nil { return err } @@ -475,216 +660,11 @@ func (dmt *StructCompareTask) Start() error { return err } - logger.Info("struct compare task", - zap.String("task_name", dmt.Task.TaskName), zap.String("task_mode", dmt.Task.TaskMode), zap.String("task_flow", dmt.Task.TaskFlow), - zap.String("cost", time.Now().Sub(schemaTaskTime).String())) return nil } -func (dmt *StructCompareTask) initStructCompareTask(databaseS, databaseT database.IDatabase, schemaRoute *rule.SchemaRouteRule) error { - // delete checkpoint - initFlags, err := model.GetITaskRW().GetTask(dmt.Ctx, &task.Task{TaskName: dmt.Task.TaskName}) - if err != nil { - return err - } - if !dmt.TaskParams.EnableCheckpoint || strings.EqualFold(initFlags.TaskInit, constant.TaskInitStatusNotFinished) { - err := model.GetIStructCompareTaskRW().DeleteStructCompareTaskName(dmt.Ctx, []string{schemaRoute.TaskName}) - if err != nil { - return err - } - err = model.GetIStructCompareSummaryRW().DeleteStructCompareSummaryName(dmt.Ctx, []string{schemaRoute.TaskName}) - if err != nil { - return err - } - } else if dmt.TaskParams.EnableCheckpoint && strings.EqualFold(initFlags.TaskInit, constant.TaskInitStatusFinished) { - logger.Warn("stmt migrate task init skip", - zap.String("task_name", dmt.Task.TaskName), - zap.String("task_mode", dmt.Task.TaskMode), - zap.String("task_flow", dmt.Task.TaskFlow), - zap.String("task_init", constant.TaskInitStatusFinished)) - return nil - } - - // filter database table - schemaTaskTables, err := model.GetIMigrateTaskTableRW().FindMigrateTaskTable(dmt.Ctx, &rule.MigrateTaskTable{ - TaskName: schemaRoute.TaskName, - SchemaNameS: schemaRoute.SchemaNameS, - }) - if err != nil { - return err - } - var ( - includeTables []string - excludeTables []string - databaseTaskTables []string // task tables - ) - databaseTableTypeMap := make(map[string]string) - - for _, t := range schemaTaskTables { - if strings.EqualFold(t.IsExclude, constant.MigrateTaskTableIsExclude) { - excludeTables = append(excludeTables, t.TableNameS) - } - if strings.EqualFold(t.IsExclude, constant.MigrateTaskTableIsNotExclude) { - includeTables = append(includeTables, t.TableNameS) - } - } - - tableObjs, err := databaseS.FilterDatabaseTable(schemaRoute.SchemaNameS, includeTables, excludeTables) - if err != nil { - return err - } - - // rule case field - for _, t := range tableObjs.TaskTables { - var tabName string - // the according target case field rule convert - if strings.EqualFold(dmt.Task.CaseFieldRuleS, constant.ParamValueStructMigrateCaseFieldRuleLower) { - tabName = stringutil.StringLower(t) - } - if strings.EqualFold(dmt.Task.CaseFieldRuleS, constant.ParamValueStructMigrateCaseFieldRuleUpper) { - tabName = stringutil.StringUpper(t) - } - if strings.EqualFold(dmt.Task.CaseFieldRuleS, constant.ParamValueStructMigrateCaseFieldRuleOrigin) { - tabName = t - } - databaseTaskTables = append(databaseTaskTables, tabName) - } - - databaseTableTypeMap, err = databaseS.GetDatabaseTableType(schemaRoute.SchemaNameS) - if err != nil { - return err - } - - allTablesT, err := databaseT.GetDatabaseTable(schemaRoute.SchemaNameT) - if err != nil { - return err - } - // 
get table route rule - tableRouteRule := make(map[string]string) - tableRouteRuleT := make(map[string]string) - - tableRoutes, err := model.GetIMigrateTableRouteRW().FindTableRouteRule(dmt.Ctx, &rule.TableRouteRule{ - TaskName: schemaRoute.TaskName, - SchemaNameS: schemaRoute.SchemaNameS, - }) - for _, tr := range tableRoutes { - tableRouteRule[tr.TableNameS] = tr.TableNameT - tableRouteRuleT[tr.TableNameT] = tr.TableNameS - } - - tableRouteRuleTNew := make(map[string]string) - for _, t := range allTablesT { - if v, ok := tableRouteRuleT[t]; ok { - tableRouteRuleTNew[v] = t - } else { - tableRouteRuleTNew[t] = t - } - } - - var panicTables []string - for _, t := range databaseTaskTables { - if _, ok := tableRouteRuleTNew[t]; !ok { - panicTables = append(panicTables, t) - } - } - if len(panicTables) > 0 { - return fmt.Errorf("the task [%v] task_flow [%v] task_mode [%v] source database tables aren't existed in the target database, please create the tables [%v]", dmt.Task.TaskName, dmt.Task.TaskFlow, dmt.Task.TaskMode, stringutil.StringJoin(panicTables, constant.StringSeparatorComma)) - } - - // clear the struct compare task table - migrateTasks, err := model.GetIStructCompareTaskRW().BatchFindStructCompareTask(dmt.Ctx, &task.StructCompareTask{TaskName: dmt.Task.TaskName}) - if err != nil { - return err - } - - // repeatInitTableMap used for store the struct_migrate_task table name has be finished, avoid repeated initialization - repeatInitTableMap := make(map[string]struct{}) - if len(migrateTasks) > 0 { - taskTablesMap := make(map[string]struct{}) - for _, t := range databaseTaskTables { - taskTablesMap[t] = struct{}{} - } - for _, smt := range migrateTasks { - if _, ok := taskTablesMap[smt.TableNameS]; !ok { - err = model.GetIStructCompareTaskRW().DeleteStructCompareTask(dmt.Ctx, smt.ID) - if err != nil { - return err - } - } else { - repeatInitTableMap[smt.TableNameS] = struct{}{} - } - } - } - - err = model.GetIStructCompareSummaryRW().DeleteStructCompareSummary(dmt.Ctx, &task.StructCompareSummary{TaskName: dmt.Task.TaskName}) - if err != nil { - return err - } - - // database tables - // init database table - // get table column route rule - for _, sourceTable := range databaseTaskTables { - initStructInfos, err := model.GetIStructCompareTaskRW().GetStructCompareTaskTable(dmt.Ctx, &task.StructCompareTask{ - TaskName: dmt.Task.TaskName, - SchemaNameS: schemaRoute.SchemaNameS, - TableNameS: sourceTable, - }) - if err != nil { - return err - } - if len(initStructInfos) > 1 { - return fmt.Errorf("the struct compare task table is over one, it should be only one") - } - // if the table is existed and task_status success, then skip init - if _, ok := repeatInitTableMap[sourceTable]; ok && strings.EqualFold(initStructInfos[0].TaskStatus, constant.TaskDatabaseStatusSuccess) { - continue - } - var ( - targetTable string - ) - if val, ok := tableRouteRule[sourceTable]; ok { - targetTable = val - } else { - // the according target case field rule convert - if strings.EqualFold(dmt.Task.CaseFieldRuleT, constant.ParamValueStructMigrateCaseFieldRuleLower) { - targetTable = stringutil.StringLower(sourceTable) - } - if strings.EqualFold(dmt.Task.CaseFieldRuleT, constant.ParamValueStructMigrateCaseFieldRuleUpper) { - targetTable = stringutil.StringUpper(sourceTable) - } - if strings.EqualFold(dmt.Task.CaseFieldRuleT, constant.ParamValueStructMigrateCaseFieldRuleOrigin) { - targetTable = sourceTable - } - } - - _, err = model.GetIStructCompareTaskRW().CreateStructCompareTask(dmt.Ctx, 
&task.StructCompareTask{ - TaskName: dmt.Task.TaskName, - SchemaNameS: schemaRoute.SchemaNameS, - TableNameS: sourceTable, - TableTypeS: databaseTableTypeMap[sourceTable], - SchemaNameT: schemaRoute.SchemaNameT, - TableNameT: targetTable, - TaskStatus: constant.TaskDatabaseStatusWaiting, - }) - if err != nil { - return err - } - } - - _, err = model.GetIStructCompareSummaryRW().CreateStructCompareSummary(dmt.Ctx, - &task.StructCompareSummary{ - TaskName: dmt.Task.TaskName, - SchemaNameS: schemaRoute.SchemaNameS, - TableTotals: uint64(len(databaseTaskTables)), - }) - if err != nil { - return err - } - _, err = model.GetITaskRW().UpdateTask(dmt.Ctx, &task.Task{TaskName: dmt.Task.TaskName}, map[string]interface{}{"TaskInit": constant.TaskInitStatusFinished}) - if err != nil { - return err - } - +func (dmt *StructCompareTask) Resume() error { + logger.Info("struct compare task resume table", + zap.String("task_name", dmt.Task.TaskName), zap.String("task_mode", dmt.Task.TaskMode), zap.String("task_flow", dmt.Task.TaskFlow)) return nil } diff --git a/database/processor/struct_migrate_task.go b/database/processor/struct_migrate_task.go new file mode 100644 index 0000000..6972506 --- /dev/null +++ b/database/processor/struct_migrate_task.go @@ -0,0 +1,675 @@ +/* +Copyright © 2020 Marvin + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+package processor
+
+import (
+	"context"
+	"fmt"
+	"github.com/wentaojin/dbms/errconcurrent"
+	"github.com/wentaojin/dbms/model/rule"
+	"strings"
+	"time"
+
+	"github.com/wentaojin/dbms/model/buildin"
+
+	"github.com/wentaojin/dbms/logger"
+	"go.uber.org/zap"
+
+	"github.com/wentaojin/dbms/database"
+
+	"github.com/wentaojin/dbms/model"
+	"github.com/wentaojin/dbms/model/task"
+	"github.com/wentaojin/dbms/proto/pb"
+	"github.com/wentaojin/dbms/utils/constant"
+	"github.com/wentaojin/dbms/utils/stringutil"
+)
+
+type StructMigrateTask struct {
+	Ctx                      context.Context
+	Task                     *task.Task
+	SchemaNameS              string
+	SchemaNameT              string
+	DatabaseS                database.IDatabase
+	DatabaseT                database.IDatabase
+	DBTypeS                  string
+	DBVersionS               string
+	DBCharsetS               string
+	DBCharsetT               string
+	StartTime                time.Time
+	BuildInDatatypeRules     []*buildin.BuildinDatatypeRule
+	BuildInDefaultValueRules []*buildin.BuildinDefaultvalRule
+	TaskParams               *pb.StructMigrateParam
+}
+
+func (st *StructMigrateTask) Init() error {
+	logger.Info("struct migrate task init table",
+		zap.String("task_name", st.Task.TaskName), zap.String("task_mode", st.Task.TaskMode), zap.String("task_flow", st.Task.TaskFlow))
+
+	if !st.TaskParams.EnableCheckpoint {
+		// checkpoint disabled: log the skip and purge any previous task records
+		logger.Warn("struct migrate task checkpoint skip",
+			zap.String("task_name", st.Task.TaskName),
+			zap.String("task_mode", st.Task.TaskMode),
+			zap.String("task_flow", st.Task.TaskFlow),
+			zap.Bool("enable_checkpoint", st.TaskParams.EnableCheckpoint))
+		err := model.GetIStructMigrateSummaryRW().DeleteStructMigrateSummaryName(st.Ctx, []string{st.Task.TaskName})
+		if err != nil {
+			return err
+		}
+		err = model.GetIStructMigrateTaskRW().DeleteStructMigrateTaskName(st.Ctx, []string{st.Task.TaskName})
+		if err != nil {
+			return err
+		}
+	}
+	s, err := model.GetIStructMigrateSummaryRW().GetStructMigrateSummary(st.Ctx, &task.StructMigrateSummary{TaskName: st.Task.TaskName, SchemaNameS: st.SchemaNameS})
+	if err != nil {
+		return err
+	}
+
+	if strings.EqualFold(s.InitFlag, constant.TaskInitStatusNotFinished) {
+		err = model.Transaction(st.Ctx, func(txnCtx context.Context) error {
+			err = model.GetIStructMigrateSummaryRW().DeleteStructMigrateSummaryName(txnCtx, []string{st.Task.TaskName})
+			if err != nil {
+				return err
+			}
+			err = model.GetIStructMigrateTaskRW().DeleteStructMigrateTaskName(txnCtx, []string{st.Task.TaskName})
+			if err != nil {
+				return err
+			}
+			return nil
+		})
+		if err != nil {
+			return err
+		}
+	}
+
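+	// Checkpoint handling, condensed (an illustrative summary of the two purge
+	// paths above, not additional behavior):
+	//
+	//	enable-checkpoint = false -> delete struct_migrate_summary and
+	//	                             struct_migrate_task rows, re-init everything
+	//	init_flag = NotFinished   -> the same purge, wrapped in one transaction so
+	//	                             a half-built task list never survives a crash
+	//	otherwise                 -> keep rows; already-initialized tables are
+	//	                             skipped below via repeatInitTableMap
+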
+	// filter database table
+	schemaTaskTables, err := model.GetIMigrateTaskTableRW().FindMigrateTaskTable(st.Ctx, &rule.MigrateTaskTable{
+		TaskName:    st.Task.TaskName,
+		SchemaNameS: st.SchemaNameS,
+	})
+	if err != nil {
+		return err
+	}
+	var (
+		includeTables      []string
+		excludeTables      []string
+		databaseTaskTables []string // task tables
+	)
+	databaseTableTypeMap := make(map[string]string)
+	taskTablesMap := make(map[string]struct{})
+
+	for _, t := range schemaTaskTables {
+		if strings.EqualFold(t.IsExclude, constant.MigrateTaskTableIsExclude) {
+			excludeTables = append(excludeTables, t.TableNameS)
+		}
+		if strings.EqualFold(t.IsExclude, constant.MigrateTaskTableIsNotExclude) {
+			includeTables = append(includeTables, t.TableNameS)
+		}
+	}
+
+	tableObjs, err := st.DatabaseS.FilterDatabaseTable(st.SchemaNameS, includeTables, excludeTables)
+	if err != nil {
+		return err
+	}
+
+	// rule case field
+	for _, t := range tableObjs.TaskTables {
+		var tabName string
+		// convert according to the source case field rule
+		if strings.EqualFold(st.Task.CaseFieldRuleS, constant.ParamValueStructMigrateCaseFieldRuleLower) {
+			tabName = stringutil.StringLower(t)
+		}
+		if strings.EqualFold(st.Task.CaseFieldRuleS, constant.ParamValueStructMigrateCaseFieldRuleUpper) {
+			tabName = stringutil.StringUpper(t)
+		}
+		if strings.EqualFold(st.Task.CaseFieldRuleS, constant.ParamValueStructMigrateCaseFieldRuleOrigin) {
+			tabName = t
+		}
+		databaseTaskTables = append(databaseTaskTables, tabName)
+		taskTablesMap[tabName] = struct{}{}
+	}
+
+	databaseTableTypeMap, err = st.DatabaseS.GetDatabaseTableType(st.SchemaNameS)
+	if err != nil {
+		return err
+	}
+
+	// get table route rule
+	tableRouteRule := make(map[string]string)
+
+	tableRoutes, err := model.GetIMigrateTableRouteRW().FindTableRouteRule(st.Ctx, &rule.TableRouteRule{
+		TaskName:    st.Task.TaskName,
+		SchemaNameS: st.SchemaNameS,
+	})
+	if err != nil {
+		return err
+	}
+	for _, tr := range tableRoutes {
+		tableRouteRule[tr.TableNameS] = tr.TableNameT
+	}
+
+	// clear the struct migrate task table
+	migrateTasks, err := model.GetIStructMigrateTaskRW().BatchFindStructMigrateTask(st.Ctx, &task.StructMigrateTask{TaskName: st.Task.TaskName})
+	if err != nil {
+		return err
+	}
+
+	// repeatInitTableMap stores the struct_migrate_task table names that are already initialized, to avoid repeated initialization
+	repeatInitTableMap := make(map[string]struct{})
+	if len(migrateTasks) > 0 {
+		for _, smt := range migrateTasks {
+			if _, ok := taskTablesMap[smt.TableNameS]; !ok {
+				err = model.GetIStructMigrateTaskRW().DeleteStructMigrateTask(st.Ctx, smt.ID)
+				if err != nil {
+					return err
+				}
+			} else {
+				repeatInitTableMap[smt.TableNameS] = struct{}{}
+			}
+		}
+	}
+
+	// database tables
+	// init database table
+	// get table column route rule
+	for _, sourceTable := range databaseTaskTables {
+		// if the table already exists, skip init
+		if _, ok := repeatInitTableMap[sourceTable]; ok {
+			continue
+		}
+		var (
+			targetTable string
+		)
+		if val, ok := tableRouteRule[sourceTable]; ok {
+			targetTable = val
+		} else {
+			// convert according to the target case field rule
+			if strings.EqualFold(st.Task.CaseFieldRuleT, constant.ParamValueStructMigrateCaseFieldRuleLower) {
+				targetTable = stringutil.StringLower(sourceTable)
+			}
+			if strings.EqualFold(st.Task.CaseFieldRuleT, constant.ParamValueStructMigrateCaseFieldRuleUpper) {
+				targetTable = stringutil.StringUpper(sourceTable)
+			}
+			if strings.EqualFold(st.Task.CaseFieldRuleT, constant.ParamValueStructMigrateCaseFieldRuleOrigin) {
+				targetTable = sourceTable
+			}
+		}
+
+		_, err = model.GetIStructMigrateTaskRW().CreateStructMigrateTask(st.Ctx, &task.StructMigrateTask{
+			TaskName:    st.Task.TaskName,
+			SchemaNameS: st.SchemaNameS,
+			TableNameS:  sourceTable,
+			TableTypeS:  databaseTableTypeMap[sourceTable],
+			SchemaNameT: st.SchemaNameT,
+			TableNameT:  targetTable,
+			TaskStatus:  constant.TaskDatabaseStatusWaiting,
+			Category:    constant.DatabaseStructMigrateSqlTableCategory,
+		})
+		if err != nil {
+			return err
+		}
+	}
+
+	_, err = model.GetIStructMigrateSummaryRW().CreateStructMigrateSummary(st.Ctx,
+		&task.StructMigrateSummary{
+			TaskName:    st.Task.TaskName,
+			SchemaNameS: st.SchemaNameS,
+			TableTotals: uint64(len(databaseTaskTables) + 1), // include schema create sql
+			InitFlag:    constant.TaskInitStatusFinished,
+			MigrateFlag: constant.TaskMigrateStatusNotFinished,
+		})
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func (st *StructMigrateTask) Run() error {
+	logger.Info("struct migrate task run table",
+		zap.String("task_name", st.Task.TaskName), zap.String("task_mode", st.Task.TaskMode), zap.String("task_flow", st.Task.TaskFlow))
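+	// Run drains every unfinished table through a bounded worker pool. A
+	// condensed sketch of the errconcurrent pattern used below (illustrative
+	// only; the real loop also records failures in struct_migrate_task and the
+	// task log):
+	//
+	//	g := errconcurrent.NewGroup()
+	//	g.SetLimit(int(st.TaskParams.MigrateThread))
+	//	for _, job := range migrateTasks {
+	//		g.Go(job, time.Now(), func(job interface{}) error {
+	//			return st.structMigrateStart(job.(*task.StructMigrateTask))
+	//		})
+	//	}
+	//	for _, r := range g.Wait() {
+	//		if r.Err != nil {
+	//			// mark the table failed, keep going with the others
+	//		}
+	//	}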
+	s, err := model.GetIStructMigrateSummaryRW().GetStructMigrateSummary(st.Ctx, &task.StructMigrateSummary{TaskName: st.Task.TaskName, SchemaNameS: st.SchemaNameS})
+	if err != nil {
+		return err
+	}
+
+	if strings.EqualFold(s.MigrateFlag, constant.TaskMigrateStatusFinished) {
+		logger.Warn("struct migrate task migrate skip",
+			zap.String("task_name", st.Task.TaskName),
+			zap.String("task_mode", st.Task.TaskMode),
+			zap.String("task_flow", st.Task.TaskFlow),
+			zap.String("init_flag", s.InitFlag),
+			zap.String("migrate_flag", s.MigrateFlag),
+			zap.String("action", "migrate skip"))
+		_, err = model.GetIStructMigrateSummaryRW().UpdateStructMigrateSummary(st.Ctx,
+			&task.StructMigrateSummary{
+				TaskName:    st.Task.TaskName,
+				SchemaNameS: st.SchemaNameS},
+			map[string]interface{}{
+				"MigrateFlag": constant.TaskMigrateStatusFinished,
+				"Duration":    fmt.Sprintf("%f", time.Now().Sub(st.StartTime).Seconds()),
+			})
+		if err != nil {
+			return err
+		}
+		return nil
+	}
+
+	logger.Info("struct migrate task get migrate tasks",
+		zap.String("task_name", st.Task.TaskName),
+		zap.String("task_mode", st.Task.TaskMode),
+		zap.String("task_flow", st.Task.TaskFlow))
+	var (
+		migrateTasks []*task.StructMigrateTask
+	)
+
+	err = model.Transaction(st.Ctx, func(txnCtx context.Context) error {
+		// get migrate task tables
+		migrateTasks, err = model.GetIStructMigrateTaskRW().QueryStructMigrateTask(txnCtx,
+			&task.StructMigrateTask{
+				TaskName:   st.Task.TaskName,
+				TaskStatus: constant.TaskDatabaseStatusWaiting,
+				Category:   constant.DatabaseStructMigrateSqlTableCategory})
+		if err != nil {
+			return err
+		}
+		migrateFailedTasks, err := model.GetIStructMigrateTaskRW().QueryStructMigrateTask(txnCtx,
+			&task.StructMigrateTask{
+				TaskName:   st.Task.TaskName,
+				TaskStatus: constant.TaskDatabaseStatusFailed,
+				Category:   constant.DatabaseStructMigrateSqlTableCategory})
+		if err != nil {
+			return err
+		}
+		migrateRunningTasks, err := model.GetIStructMigrateTaskRW().QueryStructMigrateTask(txnCtx,
+			&task.StructMigrateTask{
+				TaskName:   st.Task.TaskName,
+				TaskStatus: constant.TaskDatabaseStatusRunning,
+				Category:   constant.DatabaseStructMigrateSqlTableCategory})
+		if err != nil {
+			return err
+		}
+		migrateStopTasks, err := model.GetIStructMigrateTaskRW().QueryStructMigrateTask(txnCtx,
+			&task.StructMigrateTask{
+				TaskName:   st.Task.TaskName,
+				TaskStatus: constant.TaskDatabaseStatusStopped,
+				Category:   constant.DatabaseStructMigrateSqlTableCategory})
+		if err != nil {
+			return err
+		}
+		migrateTasks = append(migrateTasks, migrateFailedTasks...)
+		migrateTasks = append(migrateTasks, migrateRunningTasks...)
+		migrateTasks = append(migrateTasks, migrateStopTasks...)
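+		// The four queries above re-enqueue everything that has not reached a
+		// terminal success state: waiting tables plus failed/running/stopped
+		// leftovers from an interrupted run, so a restart resumes rather than
+		// skips them. The same union, condensed (illustrative only, identical
+		// signatures to the calls above):
+		//
+		//	for _, status := range []string{
+		//		constant.TaskDatabaseStatusWaiting, constant.TaskDatabaseStatusFailed,
+		//		constant.TaskDatabaseStatusRunning, constant.TaskDatabaseStatusStopped,
+		//	} {
+		//		ts, err := model.GetIStructMigrateTaskRW().QueryStructMigrateTask(txnCtx,
+		//			&task.StructMigrateTask{TaskName: st.Task.TaskName, TaskStatus: status,
+		//				Category: constant.DatabaseStructMigrateSqlTableCategory})
+		//		if err != nil {
+		//			return err
+		//		}
+		//		migrateTasks = append(migrateTasks, ts...)
+		//	}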
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	logger.Info("struct migrate task process migrate tables",
+		zap.String("task_name", st.Task.TaskName),
+		zap.String("task_mode", st.Task.TaskMode),
+		zap.String("task_flow", st.Task.TaskFlow))
+	g := errconcurrent.NewGroup()
+	g.SetLimit(int(st.TaskParams.MigrateThread))
+
+	for _, job := range migrateTasks {
+		gTime := time.Now()
+		g.Go(job, gTime, func(job interface{}) error {
+			smt := job.(*task.StructMigrateTask)
+			err = st.structMigrateStart(smt)
+			if err != nil {
+				return err
+			}
+			return nil
+		})
+	}
+	for _, r := range g.Wait() {
+		if r.Err != nil {
+			smt := r.Task.(*task.StructMigrateTask)
+			logger.Warn("struct migrate task",
+				zap.String("task_name", st.Task.TaskName),
+				zap.String("task_mode", st.Task.TaskMode),
+				zap.String("task_flow", st.Task.TaskFlow),
+				zap.String("schema_name_s", smt.SchemaNameS),
+				zap.String("table_name_s", smt.TableNameS),
+				zap.Error(r.Err))
+
+			errW := model.Transaction(st.Ctx, func(txnCtx context.Context) error {
+				_, err = model.GetIStructMigrateTaskRW().UpdateStructMigrateTask(txnCtx,
+					&task.StructMigrateTask{TaskName: smt.TaskName, SchemaNameS: smt.SchemaNameS, TableNameS: smt.TableNameS},
+					map[string]interface{}{
+						"TaskStatus":  constant.TaskDatabaseStatusFailed,
+						"Duration":    fmt.Sprintf("%f", time.Now().Sub(r.Time).Seconds()),
+						"ErrorDetail": r.Err.Error(),
+					})
+				if err != nil {
+					return err
+				}
+				_, err = model.GetITaskLogRW().CreateLog(txnCtx, &task.Log{
+					TaskName:    smt.TaskName,
+					SchemaNameS: smt.SchemaNameS,
+					TableNameS:  smt.TableNameS,
+					LogDetail: fmt.Sprintf("%v [%v] struct migrate task [%v] taskflow [%v] source table [%v.%v] failed, please see [struct_migrate_task] detail",
+						stringutil.CurrentTimeFormatString(),
+						stringutil.StringLower(st.Task.TaskMode),
+						smt.TaskName,
+						st.Task.TaskFlow,
+						smt.SchemaNameS,
+						smt.TableNameS),
+				})
+				if err != nil {
+					return err
+				}
+				return nil
+			})
+			if errW != nil {
+				return errW
+			}
+		}
+	}
+
+	// sequence migrate runs separately and is excluded from the struct_migrate_summary counts
+	err = st.sequenceMigrateStart(st.DatabaseS, st.DatabaseT)
+	if err != nil {
+		return err
+	}
+
+	s, err = model.GetIStructMigrateSummaryRW().GetStructMigrateSummary(st.Ctx, &task.StructMigrateSummary{TaskName: st.Task.TaskName, SchemaNameS: st.SchemaNameS})
+	if err != nil {
+		return err
+	}
+	if s.TableTotals == s.TableSuccess {
+		_, err = model.GetIStructMigrateSummaryRW().UpdateStructMigrateSummary(st.Ctx,
+			&task.StructMigrateSummary{
+				TaskName:    st.Task.TaskName,
+				SchemaNameS: st.SchemaNameS},
+			map[string]interface{}{
+				"MigrateFlag": constant.TaskMigrateStatusFinished,
+				"Duration":    fmt.Sprintf("%f", time.Now().Sub(st.StartTime).Seconds()),
+			})
+		if err != nil {
+			return err
+		}
+	} else {
+		_, err = model.GetIStructMigrateSummaryRW().UpdateStructMigrateSummary(st.Ctx,
+			&task.StructMigrateSummary{
+				TaskName:    st.Task.TaskName,
+				SchemaNameS: st.SchemaNameS},
+			map[string]interface{}{
+				"MigrateFlag": constant.TaskMigrateStatusNotFinished,
+				"Duration":    fmt.Sprintf("%f", time.Now().Sub(st.StartTime).Seconds()),
+			})
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (st *StructMigrateTask) Resume() error {
+	logger.Info("struct migrate task resume table",
+		zap.String("task_name", st.Task.TaskName), zap.String("task_mode", st.Task.TaskMode), zap.String("task_flow", st.Task.TaskFlow))
+	return nil
+}
+
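+// StructMigrateTask implements the Init/Run/Resume contract, so a caller can
+// drive it the same way the taskflow wrappers below drive the other processor
+// tasks. A hypothetical wiring sketch (field values are placeholders, not part
+// of this patch):
+//
+//	err := database.IDatabaseRun(ctx, &StructMigrateTask{
+//		Ctx:       ctx,
+//		Task:      taskRec,
+//		DatabaseS: databaseS,
+//		DatabaseT: databaseT,
+//		// schema names, charsets, build-in rules, TaskParams ...
+//	})
+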
+func (st *StructMigrateTask) structMigrateStart(smt *task.StructMigrateTask) error {
+	// if the schema table task already succeeded, skip it
+	if strings.EqualFold(smt.TaskStatus, constant.TaskDatabaseStatusSuccess) {
+		logger.Warn("struct migrate task process",
+			zap.String("task_name", st.Task.TaskName),
+			zap.String("task_mode", st.Task.TaskMode),
+			zap.String("task_flow", st.Task.TaskFlow),
+			zap.String("schema_name_s", st.SchemaNameS),
+			zap.String("table_name_s", smt.TableNameS),
+			zap.String("task_status", constant.TaskDatabaseStatusSuccess),
+			zap.String("action", "table task already finished, skip migrate"),
+			zap.String("cost", time.Now().Sub(st.StartTime).String()))
+		return nil
+	}
+	// if the table is a MATERIALIZED VIEW, skip
+	// MATERIALIZED VIEW isn't supported for struct migrate
+	if strings.EqualFold(smt.TableTypeS, constant.OracleDatabaseTableTypeMaterializedView) {
+		logger.Warn("struct migrate task process",
+			zap.String("task_name", st.Task.TaskName),
+			zap.String("task_mode", st.Task.TaskMode),
+			zap.String("task_flow", st.Task.TaskFlow),
+			zap.String("schema_name_s", smt.SchemaNameS),
+			zap.String("table_name_s", smt.TableNameS),
+			zap.String("table_type_s", smt.TableTypeS),
+			zap.String("suggest", "if necessary, please manually process the tables in the above list"),
+			zap.String("cost", time.Now().Sub(st.StartTime).String()))
+		return nil
+	}
+
+	err := model.Transaction(st.Ctx, func(txnCtx context.Context) error {
+		_, err := model.GetIStructMigrateTaskRW().UpdateStructMigrateTask(txnCtx, &task.StructMigrateTask{
+			TaskName:    smt.TaskName,
+			SchemaNameS: smt.SchemaNameS,
+			TableNameS:  smt.TableNameS},
+			map[string]interface{}{
+				"TaskStatus": constant.TaskDatabaseStatusRunning,
+			})
+		if err != nil {
+			return err
+		}
+		_, err = model.GetITaskLogRW().CreateLog(txnCtx, &task.Log{
+			TaskName:    smt.TaskName,
+			SchemaNameS: smt.SchemaNameS,
+			TableNameS:  smt.TableNameS,
+			LogDetail: fmt.Sprintf("%v [%v] the struct migrate task [%v] source table [%v.%v] starting",
+				stringutil.CurrentTimeFormatString(),
+				stringutil.StringLower(st.Task.TaskMode),
+				smt.TaskName,
+				smt.SchemaNameS,
+				smt.TableNameS),
+		})
+		if err != nil {
+			return err
+		}
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	sourceTime := time.Now()
+	datasourceS := &Datasource{
+		DBTypeS:     st.DBTypeS,
+		DBVersionS:  st.DBVersionS,
+		DatabaseS:   st.DatabaseS,
+		SchemaNameS: smt.SchemaNameS,
+		TableNameS:  smt.TableNameS,
+		TableTypeS:  smt.TableTypeS,
+	}
+
+	attrs, err := database.IStructMigrateAttributes(datasourceS)
+	if err != nil {
+		return err
+	}
+	logger.Info("struct migrate task process",
+		zap.String("task_name", st.Task.TaskName),
+		zap.String("task_mode", st.Task.TaskMode),
+		zap.String("task_flow", st.Task.TaskFlow),
+		zap.String("schema_name_s", smt.SchemaNameS),
+		zap.String("table_name_s", smt.TableNameS),
+		zap.String("task_stage", "datasource"),
+		zap.String("datasource", datasourceS.String()),
+		zap.String("cost", time.Now().Sub(sourceTime).String()))
+	ruleTime := time.Now()
+	dataRule := &StructMigrateRule{
+		Ctx:                      st.Ctx,
+		TaskName:                 smt.TaskName,
+		TaskFlow:                 st.Task.TaskFlow,
+		SchemaNameS:              smt.SchemaNameS,
+		TableNameS:               smt.TableNameS,
+		TablePrimaryAttrs:        attrs.PrimaryKey,
+		TableColumnsAttrs:        attrs.TableColumns,
+		TableCommentAttrs:        attrs.TableComment,
+		CreateIfNotExist:         st.TaskParams.CreateIfNotExist,
+		CaseFieldRuleT:           st.Task.CaseFieldRuleT,
+		DBVersionS:               st.DBVersionS,
+		DBCharsetS:               st.DBCharsetS,
+		DBCharsetT:               st.DBCharsetT,
+		BuildinDatatypeRules:     st.BuildInDatatypeRules,
+		BuildinDefaultValueRules: st.BuildInDefaultValueRules,
+	}
+
+	rules, err := database.IStructMigrateAttributesRule(dataRule)
+	if err != nil {
+		return err
+	}
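+	// Per-table pipeline recap (condensed and illustrative; the real calls with
+	// staging logs and error handling are inlined above and below):
+	//
+	//	attrs, _ := database.IStructMigrateAttributes(datasourceS)         // read source attributes
+	//	rules, _ := database.IStructMigrateAttributesRule(dataRule)        // map through datatype/default-value rules
+	//	tableStruct, _ := database.IStructMigrateTableStructure(dataTable) // render the target table structure
+	//	w := NewStructMigrateDatabase(st.Ctx, smt.TaskName, st.Task.TaskFlow, st.DatabaseT, st.StartTime, tableStruct)
+	//	if st.TaskParams.EnableDirectCreate {
+	//		return w.SyncStructDatabase() // apply the DDL directly to the target
+	//	}
+	//	return w.WriteStructDatabase() // persist the DDL for later replay
+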
logger.Info("struct migrate task process", + zap.String("task_name", st.Task.TaskName), + zap.String("task_mode", st.Task.TaskMode), + zap.String("task_flow", st.Task.TaskFlow), + zap.String("schema_name_s", smt.SchemaNameS), + zap.String("table_name_s", smt.TableNameS), + zap.String("task_stage", "rule"), + zap.String("rule", dataRule.String()), + zap.String("cost", time.Now().Sub(ruleTime).String())) + + tableTime := time.Now() + dataTable := &StructMigrateTable{ + TaskName: smt.TaskName, + TaskFlow: st.Task.TaskFlow, + DatasourceS: datasourceS, + DBCharsetT: st.DBCharsetT, + TableAttributes: attrs, + TableAttributesRule: rules, + } + + tableStruct, err := database.IStructMigrateTableStructure(dataTable) + if err != nil { + return err + } + + logger.Info("struct migrate task process", + zap.String("task_name", st.Task.TaskName), + zap.String("task_mode", st.Task.TaskMode), + zap.String("task_flow", st.Task.TaskFlow), + zap.String("schema_name_s", smt.SchemaNameS), + zap.String("table_name_s", smt.TableNameS), + zap.String("task_stage", "struct"), + zap.String("struct", dataTable.String()), + zap.String("cost", time.Now().Sub(tableTime).String())) + + writerTime := time.Now() + var w database.IStructMigrateDatabaseWriter + w = NewStructMigrateDatabase(st.Ctx, smt.TaskName, st.Task.TaskFlow, st.DatabaseT, st.StartTime, tableStruct) + + if st.TaskParams.EnableDirectCreate { + err = w.SyncStructDatabase() + if err != nil { + return err + } + logger.Info("struct migrate task process", + zap.String("task_name", st.Task.TaskName), + zap.String("task_mode", st.Task.TaskMode), + zap.String("task_flow", st.Task.TaskFlow), + zap.String("schema_name_s", smt.SchemaNameS), + zap.String("table_name_s", smt.TableNameS), + zap.String("task_stage", "struct sync database"), + zap.String("cost", time.Now().Sub(writerTime).String())) + + return nil + } + + err = w.WriteStructDatabase() + if err != nil { + return err + } + logger.Info("struct migrate task process", + zap.String("task_name", st.Task.TaskName), + zap.String("task_mode", st.Task.TaskMode), + zap.String("task_flow", st.Task.TaskFlow), + zap.String("schema_name_s", smt.SchemaNameS), + zap.String("table_name_s", smt.TableNameS), + zap.String("task_stage", "struct write database"), + zap.String("cost", time.Now().Sub(writerTime).String())) + return nil +} + +func (st *StructMigrateTask) sequenceMigrateStart(databaseS, databaseT database.IDatabase) error { + startTime := time.Now() + logger.Info("sequence migrate task process", + zap.String("task_name", st.Task.TaskName), + zap.String("task_mode", st.Task.TaskMode), + zap.String("task_flow", st.Task.TaskFlow), + zap.String("schema_name_s", st.SchemaNameS), + zap.String("start_time", startTime.String())) + sequences, err := databaseS.GetDatabaseSequence(st.SchemaNameS) + if err != nil { + return err + } + + var seqCreates []string + for _, seq := range sequences { + lastNumber, err := stringutil.StrconvIntBitSize(seq["LAST_NUMBER"], 64) + if err != nil { + return err + } + cacheSize, err := stringutil.StrconvIntBitSize(seq["CACHE_SIZE"], 64) + if err != nil { + return err + } + // disable cache + if cacheSize == 0 { + lastNumber = lastNumber + 5000 + } else { + lastNumber = lastNumber + (cacheSize * 2) + } + + switch st.Task.TaskFlow { + case constant.TaskFlowOracleToMySQL, constant.TaskFlowOracleToTiDB: + if st.TaskParams.CreateIfNotExist { + seqCreates = append(seqCreates, fmt.Sprintf(`CREATE SEQUENCE IF NOT EXISTS %s.%s START %v INCREMENT %v MINVALUE %v MAXVALUE %v CACHE %v CYCLE %v;`, 
st.SchemaNameT, seq["SEQUENCE_NAME"], lastNumber, seq["INCREMENT_BY"], seq["MIN_VALUE"], seq["MAX_VALUE"], seq["CACHE_SIZE"], seq["CYCLE_FLAG"])) + } else { + seqCreates = append(seqCreates, fmt.Sprintf(`CREATE SEQUENCE %s.%s START %v INCREMENT %v MINVALUE %v MAXVALUE %v CACHE %v CYCLE %v;`, st.SchemaNameT, seq["SEQUENCE_NAME"], lastNumber, seq["INCREMENT_BY"], seq["MIN_VALUE"], seq["MAX_VALUE"], seq["CACHE_SIZE"], seq["CYCLE_FLAG"])) + } + default: + return fmt.Errorf("the task [%v] task_flow [%v] isn't support, please contact author or reselect", st.Task.TaskName, st.Task.TaskFlow) + } + } + + writerTime := time.Now() + var w database.ISequenceMigrateDatabaseWriter + w = NewSequenceMigrateDatabase(st.Ctx, st.Task.TaskName, st.Task.TaskFlow, st.SchemaNameS, st.SchemaNameT, databaseT, startTime, seqCreates) + + if st.TaskParams.EnableDirectCreate { + err = w.SyncSequenceDatabase() + if err != nil { + return err + } + logger.Info("sequence migrate task process", + zap.String("task_name", st.Task.TaskName), + zap.String("task_mode", st.Task.TaskMode), + zap.String("task_flow", st.Task.TaskFlow), + zap.String("schema_name_s", st.SchemaNameS), + zap.String("task_stage", "struct sequence database"), + zap.String("cost", time.Now().Sub(writerTime).String())) + return nil + } + + err = w.WriteSequenceDatabase() + if err != nil { + return err + } + logger.Info("sequence migrate task process", + zap.String("task_name", st.Task.TaskName), + zap.String("task_mode", st.Task.TaskMode), + zap.String("task_flow", st.Task.TaskFlow), + zap.String("schema_name_s", st.SchemaNameS), + zap.String("task_stage", "struct sequence database"), + zap.String("cost", time.Now().Sub(writerTime).String())) + return nil +} diff --git a/database/taskflow/csv_migrate.go b/database/taskflow/csv_migrate.go new file mode 100644 index 0000000..2242962 --- /dev/null +++ b/database/taskflow/csv_migrate.go @@ -0,0 +1,122 @@ +/* +Copyright © 2020 Marvin + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package taskflow + +import ( + "context" + "fmt" + "github.com/wentaojin/dbms/database/processor" + "time" + + "github.com/wentaojin/dbms/database" + "github.com/wentaojin/dbms/logger" + "github.com/wentaojin/dbms/model" + "github.com/wentaojin/dbms/model/datasource" + "github.com/wentaojin/dbms/model/rule" + "github.com/wentaojin/dbms/model/task" + "github.com/wentaojin/dbms/proto/pb" + "github.com/wentaojin/dbms/utils/constant" + "github.com/wentaojin/dbms/utils/stringutil" + "go.uber.org/zap" +) + +type CsvMigrateTask struct { + Ctx context.Context + Task *task.Task + DatasourceS *datasource.Datasource + DatasourceT *datasource.Datasource + TaskParams *pb.CsvMigrateParam +} + +func (cmt *CsvMigrateTask) Start() error { + schemaTaskTime := time.Now() + logger.Info("data migrate task get schema route", + zap.String("task_name", cmt.Task.TaskName), zap.String("task_mode", cmt.Task.TaskMode), zap.String("task_flow", cmt.Task.TaskFlow)) + schemaRoute, err := model.GetIMigrateSchemaRouteRW().GetSchemaRouteRule(cmt.Ctx, &rule.SchemaRouteRule{TaskName: cmt.Task.TaskName}) + if err != nil { + return err + } + + logger.Info("data migrate task init database connection", + zap.String("task_name", cmt.Task.TaskName), zap.String("task_mode", cmt.Task.TaskMode), zap.String("task_flow", cmt.Task.TaskFlow)) + + var ( + databaseS, databaseT database.IDatabase + ) + switch cmt.Task.TaskFlow { + case constant.TaskFlowOracleToMySQL, constant.TaskFlowOracleToTiDB: + databaseS, err = database.NewDatabase(cmt.Ctx, cmt.DatasourceS, schemaRoute.SchemaNameS, int64(cmt.TaskParams.CallTimeout)) + if err != nil { + return err + } + defer databaseS.Close() + databaseT, err = database.NewDatabase(cmt.Ctx, cmt.DatasourceT, "", int64(cmt.TaskParams.CallTimeout)) + if err != nil { + return err + } + defer databaseT.Close() + + logger.Info("data migrate task inspect migrate task", + zap.String("task_name", cmt.Task.TaskName), zap.String("task_mode", cmt.Task.TaskMode), zap.String("task_flow", cmt.Task.TaskFlow)) + _, err = processor.InspectOracleMigrateTask(cmt.Task.TaskName, cmt.Task.TaskFlow, cmt.Task.TaskMode, databaseS, stringutil.StringUpper(cmt.DatasourceS.ConnectCharset), stringutil.StringUpper(cmt.DatasourceT.ConnectCharset)) + if err != nil { + return err + } + default: + return fmt.Errorf("the task_name [%s] task_mode [%s] task_flow [%s] schema_name_s [%s] isn't support, please contact author or reselect", cmt.Task.TaskName, cmt.Task.TaskMode, cmt.Task.TaskFlow, schemaRoute.SchemaNameS) + } + + dbVersionS, err := databaseS.GetDatabaseVersion() + if err != nil { + return err + } + dbRoles, err := databaseS.GetDatabaseRole() + if err != nil { + return err + } + + err = database.IDatabaseRun(cmt.Ctx, &processor.DataMigrateTask{ + Ctx: cmt.Ctx, + Task: cmt.Task, + DBRoleS: dbRoles, + DBVersionS: dbVersionS, + DBCharsetS: stringutil.StringUpper(cmt.DatasourceS.ConnectCharset), + DBCharsetT: stringutil.StringUpper(cmt.DatasourceT.ConnectCharset), + DatabaseS: databaseS, + DatabaseT: databaseT, + SchemaNameS: schemaRoute.SchemaNameS, + TableThread: cmt.TaskParams.TableThread, + GlobalSqlHintS: cmt.TaskParams.SqlHintS, + EnableCheckpoint: cmt.TaskParams.EnableCheckpoint, + EnableConsistentRead: cmt.TaskParams.EnableConsistentRead, + ChunkSize: cmt.TaskParams.ChunkSize, + BatchSize: cmt.TaskParams.BatchSize, + WriteThread: cmt.TaskParams.WriteThread, + CallTimeout: cmt.TaskParams.CallTimeout, + SqlThreadS: cmt.TaskParams.SqlThreadS, + CsvParams: cmt.TaskParams, + WaiterC: make(chan *processor.WaitingRecs, 
constant.DefaultMigrateTaskQueueSize), + ResumeC: make(chan *processor.WaitingRecs, constant.DefaultMigrateTaskQueueSize), + }) + if err != nil { + return err + } + + logger.Info("data migrate task", + zap.String("task_name", cmt.Task.TaskName), zap.String("task_mode", cmt.Task.TaskMode), zap.String("task_flow", cmt.Task.TaskFlow), + zap.String("cost", time.Now().Sub(schemaTaskTime).String())) + return nil +} diff --git a/database/taskflow/data_compare.go b/database/taskflow/data_compare.go index 6bb866d..60e1135 100644 --- a/database/taskflow/data_compare.go +++ b/database/taskflow/data_compare.go @@ -18,16 +18,10 @@ package taskflow import ( "context" "fmt" - "github.com/google/uuid" "github.com/wentaojin/dbms/database/processor" - "strconv" - "strings" "time" - "github.com/golang/snappy" - "github.com/wentaojin/dbms/database" - "github.com/wentaojin/dbms/errconcurrent" "github.com/wentaojin/dbms/logger" "github.com/wentaojin/dbms/model" "github.com/wentaojin/dbms/model/datasource" @@ -37,7 +31,6 @@ import ( "github.com/wentaojin/dbms/utils/constant" "github.com/wentaojin/dbms/utils/stringutil" "go.uber.org/zap" - "golang.org/x/sync/errgroup" ) type DataCompareTask struct { @@ -60,895 +53,51 @@ func (dmt *DataCompareTask) Start() error { logger.Info("data compare task init database connection", zap.String("task_name", dmt.Task.TaskName), zap.String("task_mode", dmt.Task.TaskMode), zap.String("task_flow", dmt.Task.TaskFlow)) - sourceDatasource, err := model.GetIDatasourceRW().GetDatasource(dmt.Ctx, dmt.Task.DatasourceNameS) - if err != nil { - return err - } - databaseS, err := database.NewDatabase(dmt.Ctx, sourceDatasource, schemaRoute.SchemaNameS, int64(dmt.TaskParams.CallTimeout)) - if err != nil { - return err - } - defer databaseS.Close() - databaseT, err := database.NewDatabase(dmt.Ctx, dmt.DatasourceT, "", int64(dmt.TaskParams.CallTimeout)) - if err != nil { - return err - } - defer databaseT.Close() - + var ( + databaseS, databaseT database.IDatabase + ) switch dmt.Task.TaskFlow { case constant.TaskFlowOracleToMySQL, constant.TaskFlowOracleToTiDB: - logger.Info("data compare task inspect migrate task", - zap.String("task_name", dmt.Task.TaskName), zap.String("task_mode", dmt.Task.TaskMode), zap.String("task_flow", dmt.Task.TaskFlow)) - _, err = processor.InspectOracleMigrateTask(dmt.Task.TaskName, dmt.Task.TaskFlow, dmt.Task.TaskMode, databaseS, stringutil.StringUpper(dmt.DatasourceS.ConnectCharset), stringutil.StringUpper(dmt.DatasourceT.ConnectCharset)) + databaseS, err = database.NewDatabase(dmt.Ctx, dmt.DatasourceS, schemaRoute.SchemaNameS, int64(dmt.TaskParams.CallTimeout)) if err != nil { return err } - } - - logger.Info("data compare task init task", - zap.String("task_name", dmt.Task.TaskName), zap.String("task_mode", dmt.Task.TaskMode), zap.String("task_flow", dmt.Task.TaskFlow)) - - err = dmt.InitDataCompareTask(databaseS, databaseT, schemaRoute) - if err != nil { - return err - } - - logger.Info("data compare task get tables", - zap.String("task_name", dmt.Task.TaskName), zap.String("task_mode", dmt.Task.TaskMode), zap.String("task_flow", dmt.Task.TaskFlow)) - - summaries, err := model.GetIDataCompareSummaryRW().FindDataCompareSummary(dmt.Ctx, &task.DataCompareSummary{ - TaskName: dmt.Task.TaskName, - SchemaNameS: schemaRoute.SchemaNameS, - }) - if err != nil { - return err - } - - for _, s := range summaries { - startTableTime := time.Now() - logger.Info("data compare task process table", - zap.String("task_name", dmt.Task.TaskName), - zap.String("task_mode", 
dmt.Task.TaskMode), - zap.String("task_flow", dmt.Task.TaskFlow), - zap.String("schema_name_s", s.SchemaNameS), - zap.String("table_name_s", s.TableNameS)) - - var migrateTasks []*task.DataCompareTask - err = model.Transaction(dmt.Ctx, func(txnCtx context.Context) error { - // get migrate task tables - migrateTasks, err = model.GetIDataCompareTaskRW().FindDataCompareTask(txnCtx, - &task.DataCompareTask{ - TaskName: s.TaskName, - SchemaNameS: s.SchemaNameS, - TableNameS: s.TableNameS, - TaskStatus: constant.TaskDatabaseStatusWaiting, - }) - if err != nil { - return err - } - migrateFailedTasks, err := model.GetIDataCompareTaskRW().FindDataCompareTask(txnCtx, - &task.DataCompareTask{ - TaskName: s.TaskName, - SchemaNameS: s.SchemaNameS, - TableNameS: s.TableNameS, - TaskStatus: constant.TaskDatabaseStatusFailed}) - if err != nil { - return err - } - migrateRunningTasks, err := model.GetIDataCompareTaskRW().FindDataCompareTask(txnCtx, - &task.DataCompareTask{ - TaskName: s.TaskName, - SchemaNameS: s.SchemaNameS, - TableNameS: s.TableNameS, - TaskStatus: constant.TaskDatabaseStatusRunning}) - if err != nil { - return err - } - migrateStopTasks, err := model.GetIDataCompareTaskRW().FindDataCompareTask(txnCtx, - &task.DataCompareTask{ - TaskName: s.TaskName, - SchemaNameS: s.SchemaNameS, - TableNameS: s.TableNameS, - TaskStatus: constant.TaskDatabaseStatusStopped}) - if err != nil { - return err - } - migrateTasks = append(migrateTasks, migrateFailedTasks...) - migrateTasks = append(migrateTasks, migrateRunningTasks...) - migrateTasks = append(migrateTasks, migrateStopTasks...) - return nil - }) + defer databaseS.Close() + databaseT, err = database.NewDatabase(dmt.Ctx, dmt.DatasourceT, "", int64(dmt.TaskParams.CallTimeout)) if err != nil { return err } + defer databaseT.Close() - logger.Info("data compare task process chunks", - zap.String("task_name", dmt.Task.TaskName), - zap.String("task_mode", dmt.Task.TaskMode), - zap.String("task_flow", dmt.Task.TaskFlow), - zap.String("schema_name_s", s.SchemaNameS), - zap.String("table_name_s", s.TableNameS)) - - g := errconcurrent.NewGroup() - g.SetLimit(int(dmt.TaskParams.SqlThread)) - for _, j := range migrateTasks { - gTime := time.Now() - g.Go(j, gTime, func(j interface{}) error { - dt := j.(*task.DataCompareTask) - errW := model.Transaction(dmt.Ctx, func(txnCtx context.Context) error { - _, err = model.GetIDataCompareTaskRW().UpdateDataCompareTask(txnCtx, - &task.DataCompareTask{TaskName: dt.TaskName, SchemaNameS: dt.SchemaNameS, TableNameS: dt.TableNameS, ChunkID: dt.ChunkID}, - map[string]interface{}{ - "TaskStatus": constant.TaskDatabaseStatusRunning, - }) - if err != nil { - return err - } - // clear data compare chunk result - err = model.GetIDataCompareResultRW().DeleteDataCompareResult(txnCtx, &task.DataCompareResult{ - TaskName: dt.TaskName, - SchemaNameS: dt.SchemaNameS, - TableNameS: dt.TableNameS, - ChunkID: dt.ChunkID, - }) - if err != nil { - return err - } - _, err = model.GetITaskLogRW().CreateLog(txnCtx, &task.Log{ - TaskName: dt.TaskName, - SchemaNameS: dt.SchemaNameS, - TableNameS: dt.TableNameS, - LogDetail: fmt.Sprintf("%v [%v] data compare task [%v] taskflow [%v] source table [%v.%v] chunk [%s] start", - stringutil.CurrentTimeFormatString(), - stringutil.StringLower(constant.TaskModeDataCompare), - dt.TaskName, - dmt.Task.TaskMode, - dt.SchemaNameS, - dt.TableNameS, - dt.ChunkDetailS), - }) - if err != nil { - return err - } - return nil - }) - if errW != nil { - return errW - } - - var dbCharsetS, dbCharsetT string - switch { 
- case strings.EqualFold(dmt.Task.TaskFlow, constant.TaskFlowOracleToTiDB) || strings.EqualFold(dmt.Task.TaskFlow, constant.TaskFlowOracleToMySQL): - dbCharsetS = constant.MigrateOracleCharsetStringConvertMapping[stringutil.StringUpper(dmt.DatasourceS.ConnectCharset)] - dbCharsetT = constant.MigrateMySQLCompatibleCharsetStringConvertMapping[stringutil.StringUpper(dmt.DatasourceT.ConnectCharset)] - case strings.EqualFold(dmt.Task.TaskFlow, constant.TaskFlowTiDBToOracle) || strings.EqualFold(dmt.Task.TaskFlow, constant.TaskFlowMySQLToOracle): - dbCharsetS = constant.MigrateMySQLCompatibleCharsetStringConvertMapping[stringutil.StringUpper(dmt.DatasourceS.ConnectCharset)] - dbCharsetT = constant.MigrateOracleCharsetStringConvertMapping[stringutil.StringUpper(dmt.DatasourceT.ConnectCharset)] - default: - return fmt.Errorf("the task [%s] schema [%s] taskflow [%s] column rule isn't support, please contact author", dmt.Task.TaskName, dt.SchemaNameS, dmt.Task.TaskFlow) - } - - err = database.IDataCompareProcess(&processor.DataCompareRow{ - Ctx: dmt.Ctx, - TaskMode: dmt.Task.TaskMode, - TaskFlow: dmt.Task.TaskFlow, - StartTime: gTime, - Dmt: dt, - DatabaseS: databaseS, - DatabaseT: databaseT, - BatchSize: int(dmt.TaskParams.BatchSize), - WriteThread: int(dmt.TaskParams.WriteThread), - CallTimeout: int(dmt.TaskParams.CallTimeout), - DBCharsetS: dbCharsetS, - DBCharsetT: dbCharsetT, - RepairStmtFlow: dmt.TaskParams.RepairStmtFlow, - }) - if err != nil { - return err - } - return nil - }) - } - - for _, r := range g.Wait() { - if r.Err != nil { - smt := r.Task.(*task.DataCompareTask) - logger.Warn("data compare task process tables", - zap.String("task_name", dmt.Task.TaskName), zap.String("task_mode", dmt.Task.TaskMode), zap.String("task_flow", dmt.Task.TaskFlow), - zap.String("schema_name_s", smt.SchemaNameS), - zap.String("table_name_s", smt.TableNameS), - zap.Error(r.Err)) - - errW := model.Transaction(dmt.Ctx, func(txnCtx context.Context) error { - _, err = model.GetIDataCompareTaskRW().UpdateDataCompareTask(txnCtx, - &task.DataCompareTask{TaskName: smt.TaskName, SchemaNameS: smt.SchemaNameS, TableNameS: smt.TableNameS, ChunkID: smt.ChunkID}, - map[string]interface{}{ - "TaskStatus": constant.TaskDatabaseStatusFailed, - "Duration": fmt.Sprintf("%f", time.Now().Sub(r.Time).Seconds()), - "ErrorDetail": r.Err.Error(), - }) - if err != nil { - return err - } - _, err = model.GetITaskLogRW().CreateLog(txnCtx, &task.Log{ - TaskName: smt.TaskName, - SchemaNameS: smt.SchemaNameS, - TableNameS: smt.TableNameS, - LogDetail: fmt.Sprintf("%v [%v] data compare task [%v] taskflow [%v] source table [%v.%v] failed, please see [data_compare_task] detail", - stringutil.CurrentTimeFormatString(), - stringutil.StringLower(constant.TaskModeDataCompare), - smt.TaskName, - dmt.Task.TaskMode, - smt.SchemaNameS, - smt.TableNameS), - }) - if err != nil { - return err - } - return nil - }) - if errW != nil { - return errW - } - } - } - - endTableTime := time.Now() - err = model.Transaction(dmt.Ctx, func(txnCtx context.Context) error { - tableStatusRecs, err := model.GetIDataCompareTaskRW().FindDataCompareTaskBySchemaTableChunkStatus(txnCtx, &task.DataCompareTask{ - TaskName: s.TaskName, - SchemaNameS: s.SchemaNameS, - TableNameS: s.TableNameS, - }) - if err != nil { - return err - } - for _, rec := range tableStatusRecs { - switch rec.TaskStatus { - case constant.TaskDatabaseStatusEqual: - _, err = model.GetIDataCompareSummaryRW().UpdateDataCompareSummary(txnCtx, &task.DataCompareSummary{ - TaskName: rec.TaskName, - 
SchemaNameS: rec.SchemaNameS, - TableNameS: rec.TableNameS, - }, map[string]interface{}{ - "ChunkEquals": rec.StatusTotals, - }) - if err != nil { - return err - } - case constant.TaskDatabaseStatusNotEqual: - _, err = model.GetIDataCompareSummaryRW().UpdateDataCompareSummary(txnCtx, &task.DataCompareSummary{ - TaskName: rec.TaskName, - SchemaNameS: rec.SchemaNameS, - TableNameS: rec.TableNameS, - }, map[string]interface{}{ - "ChunkNotEquals": rec.StatusTotals, - }) - if err != nil { - return err - } - case constant.TaskDatabaseStatusFailed: - _, err = model.GetIDataCompareSummaryRW().UpdateDataCompareSummary(txnCtx, &task.DataCompareSummary{ - TaskName: rec.TaskName, - SchemaNameS: rec.SchemaNameS, - TableNameS: rec.TableNameS, - }, map[string]interface{}{ - "ChunkFails": rec.StatusTotals, - }) - if err != nil { - return err - } - case constant.TaskDatabaseStatusWaiting: - _, err = model.GetIDataCompareSummaryRW().UpdateDataCompareSummary(txnCtx, &task.DataCompareSummary{ - TaskName: rec.TaskName, - SchemaNameS: rec.SchemaNameS, - TableNameS: rec.TableNameS, - }, map[string]interface{}{ - "ChunkWaits": rec.StatusTotals, - }) - if err != nil { - return err - } - case constant.TaskDatabaseStatusRunning: - _, err = model.GetIDataCompareSummaryRW().UpdateDataCompareSummary(txnCtx, &task.DataCompareSummary{ - TaskName: rec.TaskName, - SchemaNameS: rec.SchemaNameS, - TableNameS: rec.TableNameS, - }, map[string]interface{}{ - "ChunkRuns": rec.StatusTotals, - }) - if err != nil { - return err - } - case constant.TaskDatabaseStatusStopped: - _, err = model.GetIDataCompareSummaryRW().UpdateDataCompareSummary(txnCtx, &task.DataCompareSummary{ - TaskName: rec.TaskName, - SchemaNameS: rec.SchemaNameS, - TableNameS: rec.TableNameS, - }, map[string]interface{}{ - "ChunkStops": rec.StatusTotals, - }) - if err != nil { - return err - } - default: - return fmt.Errorf("the task [%v] task_mode [%s] task_flow [%v] schema_name_s [%v] table_name_s [%v] task_status [%v] panic, please contact auhtor or reselect", s.TaskName, dmt.Task.TaskMode, dmt.Task.TaskFlow, rec.SchemaNameS, rec.TableNameS, rec.TaskStatus) - } - } - - _, err = model.GetIDataCompareSummaryRW().UpdateDataCompareSummary(txnCtx, &task.DataCompareSummary{ - TaskName: s.TaskName, - SchemaNameS: s.SchemaNameS, - TableNameS: s.TableNameS, - }, map[string]interface{}{ - "Duration": fmt.Sprintf("%f", time.Now().Sub(startTableTime).Seconds()), - }) - if err != nil { - return err - } - return nil - }) + logger.Info("data compare task inspect migrate task", + zap.String("task_name", dmt.Task.TaskName), zap.String("task_mode", dmt.Task.TaskMode), zap.String("task_flow", dmt.Task.TaskFlow)) + _, err = processor.InspectOracleMigrateTask(dmt.Task.TaskName, dmt.Task.TaskFlow, dmt.Task.TaskMode, databaseS, stringutil.StringUpper(dmt.DatasourceS.ConnectCharset), stringutil.StringUpper(dmt.DatasourceT.ConnectCharset)) if err != nil { return err } - - logger.Info("data compare task process table", - zap.String("task_name", dmt.Task.TaskName), - zap.String("task_mode", dmt.Task.TaskMode), - zap.String("task_flow", dmt.Task.TaskFlow), - zap.String("schema_name_s", s.SchemaNameS), - zap.String("table_name_s", s.TableNameS), - zap.String("cost", endTableTime.Sub(startTableTime).String())) + default: + return fmt.Errorf("the task_name [%s] task_mode [%s] task_flow [%s] schema_name_s [%s] isn't support, please contact author or reselect", dmt.Task.TaskName, dmt.Task.TaskMode, dmt.Task.TaskFlow, schemaRoute.SchemaNameS) } - logger.Info("data compare task", - 
zap.String("task_name", dmt.Task.TaskName), zap.String("task_mode", dmt.Task.TaskMode), zap.String("task_flow", dmt.Task.TaskFlow), - zap.String("cost", time.Now().Sub(schemaTaskTime).String())) - return nil -} -func (dmt *DataCompareTask) InitDataCompareTask(databaseS, databaseT database.IDatabase, schemaRoute *rule.SchemaRouteRule) error { - // delete checkpoint - initFlags, err := model.GetITaskRW().GetTask(dmt.Ctx, &task.Task{TaskName: dmt.Task.TaskName}) - if err != nil { - return err - } - if !dmt.TaskParams.EnableCheckpoint || strings.EqualFold(initFlags.TaskInit, constant.TaskInitStatusNotFinished) { - err = model.GetIDataCompareTaskRW().DeleteDataCompareTaskName(dmt.Ctx, []string{schemaRoute.TaskName}) - if err != nil { - return err - } - err = model.GetIDataCompareSummaryRW().DeleteDataCompareSummaryName(dmt.Ctx, []string{schemaRoute.TaskName}) - if err != nil { - return err - } - err = model.GetIDataCompareResultRW().DeleteDataCompareResultName(dmt.Ctx, []string{schemaRoute.TaskName}) - if err != nil { - return err - } - } else if dmt.TaskParams.EnableCheckpoint && strings.EqualFold(initFlags.TaskInit, constant.TaskInitStatusFinished) { - logger.Warn("data compare task init skip", - zap.String("task_name", dmt.Task.TaskName), - zap.String("task_mode", dmt.Task.TaskMode), - zap.String("task_flow", dmt.Task.TaskFlow), - zap.String("task_init", constant.TaskInitStatusFinished)) - return nil - } - // filter database table - schemaTaskTables, err := model.GetIMigrateTaskTableRW().FindMigrateTaskTable(dmt.Ctx, &rule.MigrateTaskTable{ - TaskName: schemaRoute.TaskName, + err = database.IDatabaseRun(dmt.Ctx, &processor.DataCompareTask{ + Ctx: dmt.Ctx, + Task: dmt.Task, + DatabaseS: databaseS, + DatabaseT: databaseT, SchemaNameS: schemaRoute.SchemaNameS, + DBCharsetS: stringutil.StringUpper(dmt.DatasourceS.ConnectCharset), + DBCharsetT: stringutil.StringUpper(dmt.DatasourceT.ConnectCharset), + TaskParams: dmt.TaskParams, + WaiterC: make(chan *processor.WaitingRecs, constant.DefaultMigrateTaskQueueSize), + ResumeC: make(chan *processor.WaitingRecs, constant.DefaultMigrateTaskQueueSize), }) if err != nil { return err } - var ( - includeTables []string - excludeTables []string - databaseTaskTables []string // task tables - globalScnS string - globalScnT string - ) - databaseTableTypeMap := make(map[string]string) - - for _, t := range schemaTaskTables { - if strings.EqualFold(t.IsExclude, constant.MigrateTaskTableIsExclude) { - excludeTables = append(excludeTables, t.TableNameS) - } - if strings.EqualFold(t.IsExclude, constant.MigrateTaskTableIsNotExclude) { - includeTables = append(includeTables, t.TableNameS) - } - } - - tableObjs, err := databaseS.FilterDatabaseTable(schemaRoute.SchemaNameS, includeTables, excludeTables) - if err != nil { - return err - } - - // rule case field - for _, t := range tableObjs.TaskTables { - var tabName string - // the according target case field rule convert - if strings.EqualFold(dmt.Task.CaseFieldRuleS, constant.ParamValueStructMigrateCaseFieldRuleLower) { - tabName = stringutil.StringLower(t) - } - if strings.EqualFold(dmt.Task.CaseFieldRuleS, constant.ParamValueStructMigrateCaseFieldRuleUpper) { - tabName = stringutil.StringUpper(t) - } - if strings.EqualFold(dmt.Task.CaseFieldRuleS, constant.ParamValueStructMigrateCaseFieldRuleOrigin) { - tabName = t - } - databaseTaskTables = append(databaseTaskTables, tabName) - } - - // clear the data compare task table - // repeatInitTableMap used for store the struct_migrate_task table name has be finished, avoid 
repeated initialization - migrateGroupTasks, err := model.GetIDataCompareTaskRW().FindDataCompareTaskGroupByTaskSchemaTable(dmt.Ctx, dmt.Task.TaskName) - if err != nil { - return err - } - repeatInitTableMap := make(map[string]struct{}) - - if len(migrateGroupTasks) > 0 { - taskTablesMap := make(map[string]struct{}) - for _, t := range databaseTaskTables { - taskTablesMap[t] = struct{}{} - } - for _, smt := range migrateGroupTasks { - if smt.SchemaNameS == schemaRoute.SchemaNameS { - if _, ok := taskTablesMap[smt.TableNameS]; !ok { - err = model.Transaction(dmt.Ctx, func(txnCtx context.Context) error { - err = model.GetIDataCompareSummaryRW().DeleteDataCompareSummary(txnCtx, &task.DataCompareSummary{ - TaskName: smt.TaskName, - SchemaNameS: smt.SchemaNameS, - TableNameS: smt.TableNameS, - }) - if err != nil { - return err - } - err = model.GetIDataCompareTaskRW().DeleteDataCompareTask(txnCtx, &task.DataCompareTask{ - TaskName: smt.TaskName, - SchemaNameS: smt.SchemaNameS, - TableNameS: smt.TableNameS, - }) - if err != nil { - return err - } - return nil - }) - if err != nil { - return err - } - - continue - } - var summary *task.DataCompareSummary - - summary, err = model.GetIDataCompareSummaryRW().GetDataCompareSummary(dmt.Ctx, &task.DataCompareSummary{ - TaskName: smt.TaskName, - SchemaNameS: smt.SchemaNameS, - TableNameS: smt.TableNameS, - }) - if err != nil { - return err - } - - if int64(summary.ChunkTotals) != smt.ChunkTotals { - err = model.Transaction(dmt.Ctx, func(txnCtx context.Context) error { - err = model.GetIDataCompareSummaryRW().DeleteDataCompareSummary(txnCtx, &task.DataCompareSummary{ - TaskName: smt.TaskName, - SchemaNameS: smt.SchemaNameS, - TableNameS: smt.TableNameS, - }) - if err != nil { - return err - } - err = model.GetIDataCompareTaskRW().DeleteDataCompareTask(txnCtx, &task.DataCompareTask{ - TaskName: smt.TaskName, - SchemaNameS: smt.SchemaNameS, - TableNameS: smt.TableNameS, - }) - if err != nil { - return err - } - return nil - }) - if err != nil { - return err - } - - continue - } - - repeatInitTableMap[smt.TableNameS] = struct{}{} - } - } - } - - databaseTableTypeMap, err = databaseS.GetDatabaseTableType(schemaRoute.SchemaNameS) - if err != nil { - return err - } - - switch dmt.Task.TaskFlow { - case constant.TaskFlowOracleToTiDB: - globalScn, err := databaseS.GetDatabaseConsistentPos() - if err != nil { - return err - } - - if dmt.TaskParams.EnableConsistentRead { - globalScnS = strconv.FormatUint(globalScn, 10) - } - if !dmt.TaskParams.EnableConsistentRead && !strings.EqualFold(dmt.TaskParams.ConsistentReadPointS, "") { - globalScnS = dmt.TaskParams.ConsistentReadPointS - } - if !strings.EqualFold(dmt.TaskParams.ConsistentReadPointT, "") { - globalScnT = dmt.TaskParams.ConsistentReadPointT - } - case constant.TaskFlowOracleToMySQL: - globalScn, err := databaseS.GetDatabaseConsistentPos() - if err != nil { - return err - } - - if dmt.TaskParams.EnableConsistentRead { - globalScnS = strconv.FormatUint(globalScn, 10) - } - if !dmt.TaskParams.EnableConsistentRead && !strings.EqualFold(dmt.TaskParams.ConsistentReadPointS, "") { - globalScnS = dmt.TaskParams.ConsistentReadPointS - } - // ignore params dmt.TaskParams.ConsistentReadPointT, mysql database is not support - case constant.TaskFlowTiDBToOracle: - if !strings.EqualFold(dmt.TaskParams.ConsistentReadPointS, "") { - globalScnS = dmt.TaskParams.ConsistentReadPointS - } - - if !strings.EqualFold(dmt.TaskParams.ConsistentReadPointT, "") { - globalScnT = dmt.TaskParams.ConsistentReadPointT - } - case 
constant.TaskFlowMySQLToOracle: - // ignore params dmt.TaskParams.ConsistentReadPointS, mysql database is not support - - if !strings.EqualFold(dmt.TaskParams.ConsistentReadPointT, "") { - globalScnT = dmt.TaskParams.ConsistentReadPointT - } - } - - // database tables - // init database table - dbTypeSli := stringutil.StringSplit(dmt.Task.TaskFlow, constant.StringSeparatorAite) - dbTypeS := dbTypeSli[0] - dbTypeT := dbTypeSli[1] - - logger.Info("data compare task init", - zap.String("task_name", dmt.Task.TaskName), zap.String("task_mode", dmt.Task.TaskMode), zap.String("task_flow", dmt.Task.TaskFlow)) - - g, gCtx := errgroup.WithContext(dmt.Ctx) - g.SetLimit(int(dmt.TaskParams.TableThread)) - - for _, taskJob := range databaseTaskTables { - sourceTable := taskJob - g.Go(func() error { - select { - case <-gCtx.Done(): - return gCtx.Err() - default: - startTime := time.Now() - if _, ok := repeatInitTableMap[sourceTable]; ok { - // skip - return nil - } - tableRows, err := databaseS.GetDatabaseTableRows(schemaRoute.SchemaNameS, sourceTable) - if err != nil { - return err - } - tableSize, err := databaseS.GetDatabaseTableSize(schemaRoute.SchemaNameS, sourceTable) - if err != nil { - return err - } - - dataRule := &processor.DataCompareRule{ - Ctx: gCtx, - TaskMode: dmt.Task.TaskMode, - TaskName: dmt.Task.TaskName, - TaskFlow: dmt.Task.TaskFlow, - DatabaseS: databaseS, - DatabaseT: databaseT, - SchemaNameS: schemaRoute.SchemaNameS, - TableNameS: sourceTable, - TableTypeS: databaseTableTypeMap, - OnlyDatabaseCompareRow: dmt.TaskParams.OnlyCompareRow, - DisableDatabaseCompareMd5: dmt.TaskParams.DisableMd5Checksum, - DBCharsetS: dmt.DatasourceS.ConnectCharset, - DBCharsetT: dmt.DatasourceT.ConnectCharset, - CaseFieldRuleS: dmt.Task.CaseFieldRuleS, - CaseFieldRuleT: dmt.Task.CaseFieldRuleT, - GlobalSqlHintS: dmt.TaskParams.SqlHintS, - GlobalSqlHintT: dmt.TaskParams.SqlHintT, - GlobalIgnoreConditionFields: dmt.TaskParams.IgnoreConditionFields, - } - - attsRule, err := database.IDataCompareAttributesRule(dataRule) - if err != nil { - return err - } - - logger.Info("data compare task init table start", - zap.String("task_name", dmt.Task.TaskName), - zap.String("task_mode", dmt.Task.TaskMode), - zap.String("task_flow", dmt.Task.TaskFlow), - zap.String("schema_name_s", attsRule.SchemaNameS), - zap.String("table_name_s", attsRule.TableNameS)) - - upstreamCons, err := databaseS.GetDatabaseTableHighestSelectivityIndex(attsRule.SchemaNameS, attsRule.TableNameS, attsRule.CompareConditionFieldS, attsRule.IgnoreConditionFields) - if err != nil { - return err - } - - logger.Debug("data compare task init table chunk", - zap.String("task_name", dmt.Task.TaskName), - zap.String("task_mode", dmt.Task.TaskMode), - zap.String("task_flow", dmt.Task.TaskFlow), - zap.String("schema_name_s", attsRule.SchemaNameS), - zap.String("table_name_s", attsRule.TableNameS), - zap.Any("upstream bucket", upstreamCons)) - - // upstream bucket ranges - upstreamConsNew, upstreamBuckets, err := processor.ProcessUpstreamDatabaseTableColumnStatisticsBucket( - dbTypeS, - stringutil.StringUpper(dmt.DatasourceS.ConnectCharset), - dmt.Task.CaseFieldRuleS, - databaseS, - attsRule.SchemaNameS, - attsRule.TableNameS, - upstreamCons, - int64(dmt.TaskParams.ChunkSize), - dmt.TaskParams.EnableCollationSetting) - if err != nil { - return err - } - - if len(upstreamBuckets) == 0 { - var encChunkS, encChunkT []byte - if !strings.EqualFold(attsRule.CompareConditionRangeS, "") { - encChunkS = snappy.Encode(nil, []byte(attsRule.CompareConditionRangeS)) - 
} else { - encChunkS = snappy.Encode(nil, []byte("1 = 1")) - } - if !strings.EqualFold(attsRule.CompareConditionRangeT, "") { - encChunkT = snappy.Encode(nil, []byte(attsRule.CompareConditionRangeT)) - } else { - encChunkT = snappy.Encode(nil, []byte("1 = 1")) - } - - logger.Warn("data compare task init table chunk", - zap.String("task_name", dmt.Task.TaskName), - zap.String("task_mode", dmt.Task.TaskMode), - zap.String("task_flow", dmt.Task.TaskFlow), - zap.String("schema_name_s", attsRule.SchemaNameS), - zap.String("table_name_s", attsRule.TableNameS), - zap.Any("upstream bucket new", upstreamConsNew), - zap.Any("upstream bucket range", string(encChunkS))) - - encryptChunkS, err := stringutil.Encrypt(stringutil.BytesToString(encChunkS), []byte(constant.DefaultDataEncryptDecryptKey)) - if err != nil { - return err - } - encryptChunkT, err := stringutil.Encrypt(stringutil.BytesToString(encChunkT), []byte(constant.DefaultDataEncryptDecryptKey)) - if err != nil { - return err - } - err = model.Transaction(gCtx, func(txnCtx context.Context) error { - _, err = model.GetIDataCompareTaskRW().CreateDataCompareTask(txnCtx, &task.DataCompareTask{ - TaskName: dmt.Task.TaskName, - SchemaNameS: attsRule.SchemaNameS, - TableNameS: attsRule.TableNameS, - SchemaNameT: attsRule.SchemaNameT, - TableNameT: attsRule.TableNameT, - TableTypeS: attsRule.TableTypeS, - SnapshotPointS: globalScnS, - SnapshotPointT: globalScnT, - CompareMethod: attsRule.CompareMethod, - ColumnDetailSO: attsRule.ColumnDetailSO, - ColumnDetailS: attsRule.ColumnDetailS, - ColumnDetailTO: attsRule.ColumnDetailTO, - ColumnDetailT: attsRule.ColumnDetailT, - SqlHintS: attsRule.SqlHintS, - SqlHintT: attsRule.SqlHintT, - ChunkID: uuid.New().String(), - ChunkDetailS: encryptChunkS, - ChunkDetailArgS: "", - ChunkDetailT: encryptChunkT, - ChunkDetailArgT: "", - ConsistentReadS: strconv.FormatBool(dmt.TaskParams.EnableConsistentRead), - TaskStatus: constant.TaskDatabaseStatusWaiting, - }) - if err != nil { - return err - } - _, err = model.GetIDataCompareSummaryRW().CreateDataCompareSummary(txnCtx, &task.DataCompareSummary{ - TaskName: dmt.Task.TaskName, - SchemaNameS: attsRule.SchemaNameS, - TableNameS: attsRule.TableNameS, - SchemaNameT: attsRule.SchemaNameT, - TableNameT: attsRule.TableNameT, - SnapshotPointS: globalScnS, - SnapshotPointT: globalScnT, - TableRowsS: tableRows, - TableSizeS: tableSize, - ChunkTotals: 1, - }) - if err != nil { - return err - } - return nil - }) - if err != nil { - return err - } - - return nil - } - - logger.Debug("data compare task init table chunk", - zap.String("task_name", dmt.Task.TaskName), - zap.String("task_mode", dmt.Task.TaskMode), - zap.String("task_flow", dmt.Task.TaskFlow), - zap.String("schema_name_s", attsRule.SchemaNameS), - zap.String("table_name_s", attsRule.TableNameS), - zap.Any("upstream bucket new", upstreamConsNew), - zap.Any("upstream bucket range", upstreamBuckets)) - - columnDatatypeSliT, err := processor.GetDownstreamDatabaseTableColumnDatatype(attsRule.SchemaNameT, attsRule.TableNameT, databaseT, upstreamConsNew.IndexColumn, attsRule.ColumnNameRouteRule) - if err != nil { - return err - } - - logger.Debug("data compare task init table chunk", - zap.String("task_name", dmt.Task.TaskName), - zap.String("task_mode", dmt.Task.TaskMode), - zap.String("task_flow", dmt.Task.TaskFlow), - zap.String("schema_name_s", attsRule.SchemaNameS), - zap.String("table_name_s", attsRule.TableNameS), - zap.Any("upstream bucket new", upstreamConsNew), - zap.Any("downstream route rule", 
attsRule.ColumnNameRouteRule), - zap.Any("downstream column datatype", columnDatatypeSliT)) - - downstreamConsRule, err := processor.ReverseUpstreamHighestBucketDownstreamRule(dmt.Task.TaskFlow, dbTypeT, stringutil.StringUpper(dmt.DatasourceS.ConnectCharset), columnDatatypeSliT, upstreamConsNew, attsRule.ColumnNameRouteRule) - if err != nil { - return err - } - - logger.Debug("data compare task init table chunk", - zap.String("task_name", dmt.Task.TaskName), - zap.String("task_mode", dmt.Task.TaskMode), - zap.String("task_flow", dmt.Task.TaskFlow), - zap.String("schema_name_s", attsRule.SchemaNameS), - zap.String("table_name_s", attsRule.TableNameS), - zap.Any("downstream column rule", downstreamConsRule)) - - // downstream bucket ranges - downstreamBuckets, err := processor.ProcessDownstreamDatabaseTableColumnStatisticsBucket(dbTypeT, stringutil.StringUpper(dmt.DatasourceT.ConnectCharset), upstreamBuckets, downstreamConsRule) - if err != nil { - return err - } - - logger.Debug("data compare task init table chunk", - zap.String("task_name", dmt.Task.TaskName), - zap.String("task_mode", dmt.Task.TaskMode), - zap.String("task_flow", dmt.Task.TaskFlow), - zap.String("schema_name_s", attsRule.SchemaNameS), - zap.String("table_name_s", attsRule.TableNameS), - zap.Any("downstream bucket range", downstreamBuckets)) - - var metas []*task.DataCompareTask - for i, r := range upstreamBuckets { - toStringS, toStringArgsS := r.ToString() - toStringT, toStringArgsT := downstreamBuckets[i].ToString() - - if !strings.EqualFold(attsRule.CompareConditionRangeS, "") { - toStringS = fmt.Sprintf("%s AND (%s)", toStringS, attsRule.CompareConditionRangeS) - } - if !strings.EqualFold(attsRule.CompareConditionRangeT, "") { - toStringT = fmt.Sprintf("%s AND (%s)", toStringT, attsRule.CompareConditionRangeT) - } - - encChunkS := snappy.Encode(nil, []byte(toStringS)) - encChunkT := snappy.Encode(nil, []byte(toStringT)) - - encryptChunkS, err := stringutil.Encrypt(stringutil.BytesToString(encChunkS), []byte(constant.DefaultDataEncryptDecryptKey)) - if err != nil { - return err - } - encryptChunkT, err := stringutil.Encrypt(stringutil.BytesToString(encChunkT), []byte(constant.DefaultDataEncryptDecryptKey)) - if err != nil { - return err - } - - var argsS, argsT string - if toStringArgsS != nil { - argsS, err = stringutil.MarshalJSON(toStringArgsS) - if err != nil { - return err - } - } - if toStringArgsT != nil { - argsT, err = stringutil.MarshalJSON(toStringArgsT) - if err != nil { - return err - } - } - - metas = append(metas, &task.DataCompareTask{ - TaskName: dmt.Task.TaskName, - SchemaNameS: attsRule.SchemaNameS, - TableNameS: attsRule.TableNameS, - SchemaNameT: attsRule.SchemaNameT, - TableNameT: attsRule.TableNameT, - TableTypeS: attsRule.TableTypeS, - SnapshotPointS: globalScnS, - SnapshotPointT: globalScnT, - CompareMethod: attsRule.CompareMethod, - ColumnDetailSO: attsRule.ColumnDetailSO, - ColumnDetailS: attsRule.ColumnDetailS, - ColumnDetailTO: attsRule.ColumnDetailTO, - ColumnDetailT: attsRule.ColumnDetailT, - SqlHintS: attsRule.SqlHintS, - SqlHintT: attsRule.SqlHintT, - ChunkID: uuid.New().String(), - ChunkDetailS: encryptChunkS, - ChunkDetailArgS: argsS, - ChunkDetailT: encryptChunkT, - ChunkDetailArgT: argsT, - ConsistentReadS: strconv.FormatBool(dmt.TaskParams.EnableConsistentRead), - TaskStatus: constant.TaskDatabaseStatusWaiting, - }) - } - - err = model.GetIDataCompareTaskRW().CreateInBatchDataCompareTask(gCtx, metas, int(dmt.TaskParams.WriteThread), int(dmt.TaskParams.BatchSize)) - if err != nil { 
- return err - } - _, err = model.GetIDataCompareSummaryRW().CreateDataCompareSummary(gCtx, &task.DataCompareSummary{ - TaskName: dmt.Task.TaskName, - SchemaNameS: attsRule.SchemaNameS, - TableNameS: attsRule.TableNameS, - SchemaNameT: attsRule.SchemaNameT, - TableNameT: attsRule.TableNameT, - SnapshotPointS: globalScnS, - SnapshotPointT: globalScnT, - TableRowsS: tableRows, - TableSizeS: tableSize, - ChunkTotals: uint64(len(upstreamBuckets)), - }) - if err != nil { - return err - } - - logger.Info("data compare task init table finished", - zap.String("task_name", dmt.Task.TaskName), - zap.String("task_mode", dmt.Task.TaskMode), - zap.String("task_flow", dmt.Task.TaskFlow), - zap.String("schema_name_s", attsRule.SchemaNameS), - zap.String("table_name_s", attsRule.TableNameS), - zap.String("cost", time.Now().Sub(startTime).String())) - return nil - } - }) - } - - if err = g.Wait(); err != nil { - logger.Error("data compare task init", - zap.String("task_name", dmt.Task.TaskName), - zap.String("task_mode", dmt.Task.TaskMode), - zap.String("task_flow", dmt.Task.TaskFlow), - zap.String("schema_name_s", schemaRoute.SchemaNameS), - zap.Error(err)) - return err - } - _, err = model.GetITaskRW().UpdateTask(dmt.Ctx, &task.Task{TaskName: dmt.Task.TaskName}, map[string]interface{}{"TaskInit": constant.TaskInitStatusFinished}) - if err != nil { - return err - } + logger.Info("data compare task", + zap.String("task_name", dmt.Task.TaskName), + zap.String("task_mode", dmt.Task.TaskMode), + zap.String("task_flow", dmt.Task.TaskFlow), + zap.String("cost", time.Now().Sub(schemaTaskTime).String())) return nil }
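The per-table chunking and summary bookkeeping deleted above moves into processor types that satisfy the new IDatabaseRunner contract and are driven through database.IDatabaseRun, which starts Init, Run, and Resume concurrently under one errgroup and returns the first error. A minimal sketch of that contract, with a toy runner standing in for the real processor types (illustrative only, not part of this patch):

package main

import (
	"context"
	"fmt"

	"github.com/wentaojin/dbms/database"
)

// toyRunner mimics the shape of the processor types in this patch: Init
// prepares chunk metadata and summaries, Run works through fresh chunks,
// and Resume re-drives chunks left unfinished by an earlier, checkpointed run.
type toyRunner struct{}

func (r *toyRunner) Init() error   { fmt.Println("init: split tables into chunks"); return nil }
func (r *toyRunner) Run() error    { fmt.Println("run: process waiting chunks"); return nil }
func (r *toyRunner) Resume() error { fmt.Println("resume: re-drive unfinished chunks"); return nil }

func main() {
	if err := database.IDatabaseRun(context.Background(), &toyRunner{}); err != nil {
		panic(err)
	}
}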
diff --git a/database/taskflow/data_scan.go b/database/taskflow/data_scan.go new file mode 100644 index 0000000..3c98c3f --- /dev/null +++ b/database/taskflow/data_scan.go @@ -0,0 +1,106 @@ +/* +Copyright © 2020 Marvin + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package taskflow + +import ( + "context" + "fmt" + "github.com/wentaojin/dbms/database" + "github.com/wentaojin/dbms/database/processor" + "github.com/wentaojin/dbms/logger" + "github.com/wentaojin/dbms/model" + "github.com/wentaojin/dbms/model/datasource" + "github.com/wentaojin/dbms/model/rule" + "github.com/wentaojin/dbms/model/task" + "github.com/wentaojin/dbms/proto/pb" + "github.com/wentaojin/dbms/utils/constant" + "github.com/wentaojin/dbms/utils/stringutil" + "go.uber.org/zap" + "time" +) + +type DataScanTask struct { + Ctx context.Context + Task *task.Task + DatasourceS *datasource.Datasource + DatasourceT *datasource.Datasource + TaskParams *pb.DataScanParam +} + +func (dst *DataScanTask) Start() error { + schemaTaskTime := time.Now() + logger.Info("data scan task get schema route", + zap.String("task_name", dst.Task.TaskName), zap.String("task_mode", dst.Task.TaskMode), zap.String("task_flow", dst.Task.TaskFlow)) + schemaNameRoute, err := model.GetIMigrateSchemaRouteRW().GetSchemaRouteRule(dst.Ctx, &rule.SchemaRouteRule{TaskName: dst.Task.TaskName}) + if err != nil { + return err + } + schemaNameS := schemaNameRoute.SchemaNameS + + logger.Info("data scan task init database connection", + zap.String("task_name", dst.Task.TaskName), zap.String("task_mode", dst.Task.TaskMode), zap.String("task_flow", dst.Task.TaskFlow)) + var ( + databaseS database.IDatabase + ) + switch dst.Task.TaskFlow { + case constant.TaskFlowOracleToMySQL, constant.TaskFlowOracleToTiDB: + databaseS, err = database.NewDatabase(dst.Ctx, dst.DatasourceS, schemaNameS, int64(dst.TaskParams.CallTimeout)) + if err != nil { + return err + } + defer databaseS.Close() + + logger.Info("data scan task inspect migrate task", + zap.String("task_name", dst.Task.TaskName), zap.String("task_mode", dst.Task.TaskMode), zap.String("task_flow", dst.Task.TaskFlow)) + + _, err = processor.InspectOracleMigrateTask(dst.Task.TaskName, dst.Task.TaskFlow, dst.Task.TaskMode, databaseS, stringutil.StringUpper(dst.DatasourceS.ConnectCharset), stringutil.StringUpper(dst.DatasourceT.ConnectCharset)) + if err != nil { + return err + } + default: + return fmt.Errorf("the task_name [%s] task_mode [%s] task_flow [%s] schema_name_s [%s] isn't supported, please contact author or reselect", dst.Task.TaskName, dst.Task.TaskMode, dst.Task.TaskFlow, schemaNameS) + } + + dbVersionS, err := databaseS.GetDatabaseVersion() + if err != nil { + return err + } + dbRoles, err := databaseS.GetDatabaseRole() + if err != nil { + return err + } + err = database.IDatabaseRun(dst.Ctx, &processor.DataScanTask{ + Ctx: dst.Ctx, + Task: dst.Task, + DatabaseS: databaseS, + SchemaNameS: schemaNameS, + DBVersionS: dbVersionS, + DBRoleS: dbRoles, + DBCharsetS: stringutil.StringUpper(dst.DatasourceS.ConnectCharset), + TaskParams: dst.TaskParams, + WaiterC: make(chan *processor.WaitingRecs, constant.DefaultMigrateTaskQueueSize), + ResumeC: make(chan *processor.WaitingRecs, constant.DefaultMigrateTaskQueueSize), + }) + if err != nil { + return err + } + logger.Info("data scan task", + zap.String("task_name", dst.Task.TaskName), + zap.String("task_mode", dst.Task.TaskMode), + zap.String("task_flow", dst.Task.TaskFlow), + zap.String("cost", time.Now().Sub(schemaTaskTime).String())) + return nil +}
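How a caller drives this new entry point, as a hedged sketch; the worker/scheduler wiring that builds these records is outside this diff, and runDataScan is a hypothetical helper:

package demo

import (
	"context"
	"log"

	"github.com/wentaojin/dbms/database/taskflow"
	"github.com/wentaojin/dbms/model/datasource"
	"github.com/wentaojin/dbms/model/task"
	"github.com/wentaojin/dbms/proto/pb"
)

// runDataScan wires up the taskflow entry point exactly as its fields are
// declared above; only Start() is exposed, everything else happens inside.
func runDataScan(ctx context.Context, t *task.Task, dsS, dsT *datasource.Datasource, params *pb.DataScanParam) error {
	dst := &taskflow.DataScanTask{
		Ctx:         ctx,
		Task:        t,
		DatasourceS: dsS,
		DatasourceT: dsT,
		TaskParams:  params,
	}
	if err := dst.Start(); err != nil {
		log.Printf("data scan task [%s] failed: %v", t.TaskName, err)
		return err
	}
	return nil
}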
"License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package taskflow + +import ( + "context" + "fmt" + "github.com/wentaojin/dbms/database/processor" + "github.com/wentaojin/dbms/utils/constant" + "time" + + "github.com/wentaojin/dbms/database" + "github.com/wentaojin/dbms/logger" + "github.com/wentaojin/dbms/model/datasource" + "github.com/wentaojin/dbms/model/task" + "github.com/wentaojin/dbms/proto/pb" + "github.com/wentaojin/dbms/utils/stringutil" + "go.uber.org/zap" +) + +type SqlMigrateTask struct { + Ctx context.Context + Task *task.Task + DatasourceS *datasource.Datasource + DatasourceT *datasource.Datasource + TaskParams *pb.SqlMigrateParam +} + +func (smt *SqlMigrateTask) Start() error { + schemaStartTime := time.Now() + logger.Info("sql migrate task init database connection", + zap.String("task_name", smt.Task.TaskName), + zap.String("task_mode", smt.Task.TaskMode), + zap.String("task_flow", smt.Task.TaskFlow)) + + var ( + databaseS, databaseT database.IDatabase + err error + ) + switch smt.Task.TaskFlow { + case constant.TaskFlowOracleToMySQL, constant.TaskFlowOracleToTiDB: + databaseS, err = database.NewDatabase(smt.Ctx, smt.DatasourceS, "", int64(smt.TaskParams.CallTimeout)) + if err != nil { + return err + } + defer databaseS.Close() + + databaseT, err = database.NewDatabase(smt.Ctx, smt.DatasourceT, "", int64(smt.TaskParams.CallTimeout)) + if err != nil { + return err + } + defer databaseT.Close() + + logger.Info("sql migrate task inspect migrate task", + zap.String("task_name", smt.Task.TaskName), + zap.String("task_mode", smt.Task.TaskMode), + zap.String("task_flow", smt.Task.TaskFlow)) + _, err = processor.InspectOracleMigrateTask(smt.Task.TaskName, smt.Task.TaskFlow, smt.Task.TaskMode, databaseS, stringutil.StringUpper(smt.DatasourceS.ConnectCharset), stringutil.StringUpper(smt.DatasourceT.ConnectCharset)) + if err != nil { + return err + } + + logger.Info("sql migrate task init migrate task", + zap.String("task_name", smt.Task.TaskName), + zap.String("task_mode", smt.Task.TaskMode), + zap.String("task_flow", smt.Task.TaskFlow)) + + default: + return fmt.Errorf("the task_name [%s] task_mode [%s] task_flow [%s] isn't support, please contact author or reselect", smt.Task.TaskName, smt.Task.TaskMode, smt.Task.TaskFlow) + } + + dbVersionS, err := databaseS.GetDatabaseVersion() + if err != nil { + return err + } + dbRoles, err := databaseS.GetDatabaseRole() + if err != nil { + return err + } + + err = database.IDatabaseRun(smt.Ctx, &processor.SqlMigrateTask{ + Ctx: smt.Ctx, + Task: smt.Task, + DatabaseS: databaseS, + DatabaseT: databaseT, + DBRoleS: dbRoles, + DBCharsetS: stringutil.StringUpper(smt.DatasourceS.ConnectCharset), + DBCharsetT: stringutil.StringUpper(smt.DatasourceT.ConnectCharset), + DBVersionS: dbVersionS, + TaskParams: smt.TaskParams, + }) + if err != nil { + return err + } + schemaEndTime := time.Now() + logger.Info("sql migrate task", + zap.String("task_name", smt.Task.TaskName), + zap.String("task_mode", smt.Task.TaskMode), + zap.String("task_flow", smt.Task.TaskFlow), + zap.String("cost", 
diff --git a/database/taskflow/stmt_migrate.go b/database/taskflow/stmt_migrate.go new file mode 100644 index 0000000..eca6ebd --- /dev/null +++ b/database/taskflow/stmt_migrate.go @@ -0,0 +1,128 @@ +/* +Copyright © 2020 Marvin + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package taskflow + +import ( + "context" + "fmt" + "github.com/wentaojin/dbms/database/processor" + "time" + + "github.com/wentaojin/dbms/database" + "github.com/wentaojin/dbms/logger" + "github.com/wentaojin/dbms/model" + "github.com/wentaojin/dbms/model/datasource" + "github.com/wentaojin/dbms/model/rule" + "github.com/wentaojin/dbms/model/task" + "github.com/wentaojin/dbms/proto/pb" + "github.com/wentaojin/dbms/utils/constant" + "github.com/wentaojin/dbms/utils/stringutil" + "go.uber.org/zap" +) + +type StmtMigrateTask struct { + Ctx context.Context + Task *task.Task + DatasourceS *datasource.Datasource + DatasourceT *datasource.Datasource + TaskParams *pb.StatementMigrateParam + + WaiterC chan *processor.WaitingRecs + ResumeC chan *processor.WaitingRecs +} + +func (stm *StmtMigrateTask) Start() error { + schemaTaskTime := time.Now() + logger.Info("stmt migrate task get schema route", + zap.String("task_name", stm.Task.TaskName), zap.String("task_mode", stm.Task.TaskMode), zap.String("task_flow", stm.Task.TaskFlow)) + schemaRoute, err := model.GetIMigrateSchemaRouteRW().GetSchemaRouteRule(stm.Ctx, &rule.SchemaRouteRule{TaskName: stm.Task.TaskName}) + if err != nil { + return err + } + + logger.Info("stmt migrate task init database connection", + zap.String("task_name", stm.Task.TaskName), zap.String("task_mode", stm.Task.TaskMode), zap.String("task_flow", stm.Task.TaskFlow)) + + var ( + databaseS, databaseT database.IDatabase + ) + switch stm.Task.TaskFlow { + case constant.TaskFlowOracleToMySQL, constant.TaskFlowOracleToTiDB: + databaseS, err = database.NewDatabase(stm.Ctx, stm.DatasourceS, schemaRoute.SchemaNameS, int64(stm.TaskParams.CallTimeout)) + if err != nil { + return err + } + defer databaseS.Close() + databaseT, err = database.NewDatabase(stm.Ctx, stm.DatasourceT, "", int64(stm.TaskParams.CallTimeout)) + if err != nil { + return err + } + defer databaseT.Close() + + logger.Info("stmt migrate task inspect migrate task", + zap.String("task_name", stm.Task.TaskName), zap.String("task_mode", stm.Task.TaskMode), zap.String("task_flow", stm.Task.TaskFlow)) + _, err = processor.InspectOracleMigrateTask(stm.Task.TaskName, stm.Task.TaskFlow, stm.Task.TaskMode, databaseS, + stringutil.StringUpper(stm.DatasourceS.ConnectCharset), + stringutil.StringUpper(stm.DatasourceT.ConnectCharset)) + if err != nil { + return err + } + default: + return fmt.Errorf("the task_name [%s] task_mode [%s] task_flow [%s] schema_name_s [%s] isn't supported, please contact author or reselect", stm.Task.TaskName, stm.Task.TaskMode, stm.Task.TaskFlow, schemaRoute.SchemaNameS) + } + + dbVersionS, err := databaseS.GetDatabaseVersion() + if err != nil { + return err + } + dbRoles, err := 
databaseS.GetDatabaseRole() + if err != nil { + return err + } + + err = database.IDatabaseRun(stm.Ctx, &processor.DataMigrateTask{ + Ctx: stm.Ctx, + Task: stm.Task, + DBRoleS: dbRoles, + DBVersionS: dbVersionS, + DBCharsetS: stringutil.StringUpper(stm.DatasourceS.ConnectCharset), + DBCharsetT: stringutil.StringUpper(stm.DatasourceT.ConnectCharset), + DatabaseS: databaseS, + DatabaseT: databaseT, + SchemaNameS: schemaRoute.SchemaNameS, + TableThread: stm.TaskParams.TableThread, + GlobalSqlHintS: stm.TaskParams.SqlHintS, + GlobalSqlHintT: stm.TaskParams.SqlHintT, + EnableCheckpoint: stm.TaskParams.EnableCheckpoint, + EnableConsistentRead: stm.TaskParams.EnableConsistentRead, + ChunkSize: stm.TaskParams.ChunkSize, + BatchSize: stm.TaskParams.BatchSize, + WriteThread: stm.TaskParams.WriteThread, + CallTimeout: stm.TaskParams.CallTimeout, + SqlThreadS: stm.TaskParams.SqlThreadS, + StmtParams: stm.TaskParams, + WaiterC: make(chan *processor.WaitingRecs, constant.DefaultMigrateTaskQueueSize), + ResumeC: make(chan *processor.WaitingRecs, constant.DefaultMigrateTaskQueueSize), + }) + if err != nil { + return err + } + + logger.Info("stmt migrate task", + zap.String("task_name", stm.Task.TaskName), zap.String("task_mode", stm.Task.TaskMode), zap.String("task_flow", stm.Task.TaskFlow), + zap.String("cost", time.Now().Sub(schemaTaskTime).String())) + return nil +}
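WaiterC and ResumeC are buffered hand-off queues between the runner phases, sized by constant.DefaultMigrateTaskQueueSize. processor.WaitingRecs is not defined in this diff, so the sketch below uses a hypothetical stand-in for the presumed pattern: Run enqueues tables whose chunks are unfinished, and Resume drains and re-drives them:

package main

import "fmt"

// waitingRecs is a hypothetical stand-in for processor.WaitingRecs.
type waitingRecs struct{ SchemaName, TableName string }

// drain consumes queued records until the producer closes the channel,
// re-driving each one and stopping on the first failure.
func drain(resumeC <-chan waitingRecs, redrive func(waitingRecs) error) error {
	for rec := range resumeC {
		if err := redrive(rec); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	c := make(chan waitingRecs, 2)
	c <- waitingRecs{SchemaName: "SCOTT", TableName: "T1"}
	close(c)
	if err := drain(c, func(r waitingRecs) error {
		fmt.Println("resume", r.SchemaName, r.TableName)
		return nil
	}); err != nil {
		panic(err)
	}
}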
diff --git a/database/taskflow/struct_compare.go b/database/taskflow/struct_compare.go new file mode 100644 index 0000000..26ac2f2 --- /dev/null +++ b/database/taskflow/struct_compare.go @@ -0,0 +1,125 @@ +/* +Copyright © 2020 Marvin + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package taskflow + +import ( + "context" + "fmt" + "time" + + "github.com/wentaojin/dbms/database/processor" + + "github.com/wentaojin/dbms/database" + "github.com/wentaojin/dbms/logger" + "github.com/wentaojin/dbms/model" + "github.com/wentaojin/dbms/model/datasource" + "github.com/wentaojin/dbms/model/rule" + "github.com/wentaojin/dbms/model/task" + "github.com/wentaojin/dbms/proto/pb" + "github.com/wentaojin/dbms/utils/constant" + "github.com/wentaojin/dbms/utils/stringutil" + "go.uber.org/zap" +) + +type StructCompareTask struct { + Ctx context.Context + Task *task.Task + DatasourceS *datasource.Datasource + DatasourceT *datasource.Datasource + TaskParams *pb.StructCompareParam +} + +func (dmt *StructCompareTask) Start() error { + startTime := time.Now() + logger.Info("struct compare task get schema route", + zap.String("task_name", dmt.Task.TaskName), zap.String("task_mode", dmt.Task.TaskMode), zap.String("task_flow", dmt.Task.TaskFlow)) + schemaRoute, err := model.GetIMigrateSchemaRouteRW().GetSchemaRouteRule(dmt.Ctx, &rule.SchemaRouteRule{TaskName: dmt.Task.TaskName}) + if err != nil { + return err + } + + dbTypeSli := stringutil.StringSplit(dmt.Task.TaskFlow, constant.StringSeparatorAite) + buildInDatatypeRulesS, err := model.GetIBuildInDatatypeRuleRW().QueryBuildInDatatypeRule(dmt.Ctx, dbTypeSli[0], dbTypeSli[1]) + if err != nil { + return err + } + buildInDefaultValueRulesS, err := model.GetBuildInDefaultValueRuleRW().QueryBuildInDefaultValueRule(dmt.Ctx, dbTypeSli[0], dbTypeSli[1]) + if err != nil { + return err + } + buildInDatatypeRulesT, err := model.GetIBuildInDatatypeRuleRW().QueryBuildInDatatypeRule(dmt.Ctx, dbTypeSli[1], dbTypeSli[0]) + if err != nil { + return err + } + buildInDefaultValueRulesT, err := model.GetBuildInDefaultValueRuleRW().QueryBuildInDefaultValueRule(dmt.Ctx, dbTypeSli[1], dbTypeSli[0]) + if err != nil { + return err + } + + logger.Info("struct compare task init database connection", + zap.String("task_name", dmt.Task.TaskName), zap.String("task_mode", dmt.Task.TaskMode), zap.String("task_flow", dmt.Task.TaskFlow)) + var ( + databaseS, databaseT database.IDatabase + ) + switch dmt.Task.TaskFlow { + case constant.TaskFlowOracleToTiDB, constant.TaskFlowOracleToMySQL: + databaseS, err = database.NewDatabase(dmt.Ctx, dmt.DatasourceS, schemaRoute.SchemaNameS, int64(dmt.TaskParams.CallTimeout)) + if err != nil { + return err + } + defer databaseS.Close() + databaseT, err = database.NewDatabase(dmt.Ctx, dmt.DatasourceT, "", int64(dmt.TaskParams.CallTimeout)) + if err != nil { + return err + } + defer databaseT.Close() + + logger.Info("struct compare task inspect migrate task", + zap.String("task_name", dmt.Task.TaskName), zap.String("task_mode", dmt.Task.TaskMode), zap.String("task_flow", dmt.Task.TaskFlow)) + _, err = processor.InspectOracleMigrateTask(dmt.Task.TaskName, dmt.Task.TaskFlow, dmt.Task.TaskMode, databaseS, stringutil.StringUpper(dmt.DatasourceS.ConnectCharset), stringutil.StringUpper(dmt.DatasourceT.ConnectCharset)) + if err != nil { + return err + } + default: + return fmt.Errorf("the task_name [%s] task_mode [%s] task_flow [%s] schema [%s] isn't supported, please contact author or reselect", dmt.Task.TaskName, dmt.Task.TaskMode, dmt.Task.TaskFlow, schemaRoute.SchemaNameS) + } + + err = database.IDatabaseRun(dmt.Ctx, &processor.StructCompareTask{ + Ctx: dmt.Ctx, + Task: dmt.Task, + DBTypeS: dbTypeSli[0], + DBTypeT: dbTypeSli[1], + DatabaseS: databaseS, + DatabaseT: databaseT, + DBCharsetS: 
stringutil.StringUpper(dmt.DatasourceS.ConnectCharset), + DBCharsetT: stringutil.StringUpper(dmt.DatasourceT.ConnectCharset), + SchemaNameS: schemaRoute.SchemaNameS, + SchemaNameT: schemaRoute.SchemaNameT, + StartTime: startTime, + TaskParams: dmt.TaskParams, + BuildInDatatypeRulesS: buildInDatatypeRulesS, + BuildInDefaultValueRulesS: buildInDefaultValueRulesS, + BuildInDatatypeRulesT: buildInDatatypeRulesT, + BuildInDefaultValueRulesT: buildInDefaultValueRulesT, + }) + if err != nil { + return err + } + logger.Info("struct compare task finished", + zap.String("task_name", dmt.Task.TaskName), zap.String("task_mode", dmt.Task.TaskMode), zap.String("task_flow", dmt.Task.TaskFlow), + zap.String("cost", time.Now().Sub(startTime).String())) + return nil +} diff --git a/database/taskflow/struct_migrate.go b/database/taskflow/struct_migrate.go index e2b3c00..edd642a 100644 --- a/database/taskflow/struct_migrate.go +++ b/database/taskflow/struct_migrate.go @@ -20,14 +20,8 @@ import ( "fmt" "github.com/wentaojin/dbms/database/processor" "github.com/wentaojin/dbms/model/datasource" - "github.com/wentaojin/dbms/model/rule" - "strings" "time" - "github.com/wentaojin/dbms/errconcurrent" - - "github.com/wentaojin/dbms/model/buildin" - "github.com/wentaojin/dbms/logger" "go.uber.org/zap" @@ -51,7 +45,7 @@ type StructMigrateTask struct { } func (st *StructMigrateTask) Start() error { - schemaStartTime := time.Now() + startTime := time.Now() var ( databaseS, databaseT database.IDatabase err error @@ -90,13 +84,6 @@ func (st *StructMigrateTask) Start() error { return err } - logger.Info("struct migrate task init task information", - zap.String("task_name", st.Task.TaskName), zap.String("task_mode", st.Task.TaskMode), zap.String("task_flow", st.Task.TaskFlow)) - err = st.initStructMigrateTask(databaseS) - if err != nil { - return err - } - // process schema var createSchema string schemaCreateTime := time.Now() @@ -190,603 +177,35 @@ func (st *StructMigrateTask) Start() error { } } - logger.Info("struct migrate task get migrate tasks", - zap.String("task_name", st.Task.TaskName), - zap.String("task_mode", st.Task.TaskMode), - zap.String("task_flow", st.Task.TaskFlow)) - var migrateTasks []*task.StructMigrateTask - - err = model.Transaction(st.Ctx, func(txnCtx context.Context) error { - // get migrate task tables - migrateTasks, err = model.GetIStructMigrateTaskRW().QueryStructMigrateTask(txnCtx, - &task.StructMigrateTask{ - TaskName: st.Task.TaskName, - TaskStatus: constant.TaskDatabaseStatusWaiting, - Category: constant.DatabaseStructMigrateSqlTableCategory}) - if err != nil { - return err - } - migrateFailedTasks, err := model.GetIStructMigrateTaskRW().QueryStructMigrateTask(txnCtx, - &task.StructMigrateTask{ - TaskName: st.Task.TaskName, - TaskStatus: constant.TaskDatabaseStatusFailed, - Category: constant.DatabaseStructMigrateSqlTableCategory}) - if err != nil { - return err - } - migrateRunningTasks, err := model.GetIStructMigrateTaskRW().QueryStructMigrateTask(txnCtx, - &task.StructMigrateTask{ - TaskName: st.Task.TaskName, - TaskStatus: constant.TaskDatabaseStatusRunning, - Category: constant.DatabaseStructMigrateSqlTableCategory}) - if err != nil { - return err - } - migrateStopTasks, err := model.GetIStructMigrateTaskRW().QueryStructMigrateTask(txnCtx, - &task.StructMigrateTask{ - TaskName: st.Task.TaskName, - TaskStatus: constant.TaskDatabaseStatusStopped, - Category: constant.DatabaseStructMigrateSqlTableCategory}) - if err != nil { - return err - } - migrateTasks = append(migrateTasks, 
migrateFailedTasks...) - migrateTasks = append(migrateTasks, migrateRunningTasks...) - migrateTasks = append(migrateTasks, migrateStopTasks...) - return nil - }) - if err != nil { - return err - } - - logger.Info("struct migrate task process migrate tables", + logger.Info("struct migrate task process table", zap.String("task_name", st.Task.TaskName), zap.String("task_mode", st.Task.TaskMode), zap.String("task_flow", st.Task.TaskFlow)) - g := errconcurrent.NewGroup() - g.SetLimit(int(st.TaskParams.MigrateThread)) - - for _, job := range migrateTasks { - gTime := time.Now() - g.Go(job, gTime, func(job interface{}) error { - smt := job.(*task.StructMigrateTask) - err = st.structMigrateStart( - dbTypeS, - dbVersionS, - databaseS, - databaseT, - stringutil.StringUpper(st.DatasourceS.ConnectCharset), - stringutil.StringUpper(st.DatasourceT.ConnectCharset), - gTime, - smt, - buildInDatatypeRules, - buildInDefaultValueRules) - if err != nil { - return err - } - return nil - }) - } - for _, r := range g.Wait() { - if r.Err != nil { - smt := r.Task.(*task.StructMigrateTask) - logger.Warn("struct migrate task", - zap.String("task_name", st.Task.TaskName), - zap.String("task_mode", st.Task.TaskMode), - zap.String("task_flow", st.Task.TaskFlow), - zap.String("schema_name_s", smt.SchemaNameS), - zap.String("table_name_s", smt.TableNameS), - zap.Error(r.Err)) - - errW := model.Transaction(st.Ctx, func(txnCtx context.Context) error { - _, err = model.GetIStructMigrateTaskRW().UpdateStructMigrateTask(txnCtx, - &task.StructMigrateTask{TaskName: smt.TaskName, SchemaNameS: smt.SchemaNameS, TableNameS: smt.TableNameS}, - map[string]interface{}{ - "TaskStatus": constant.TaskDatabaseStatusFailed, - "Duration": fmt.Sprintf("%f", time.Now().Sub(r.Time).Seconds()), - "ErrorDetail": r.Err.Error(), - }) - if err != nil { - return err - } - _, err = model.GetITaskLogRW().CreateLog(txnCtx, &task.Log{ - TaskName: smt.TaskName, - SchemaNameS: smt.SchemaNameS, - TableNameS: smt.TableNameS, - LogDetail: fmt.Sprintf("%v [%v] struct migrate task [%v] taskflow [%v] source table [%v.%v] failed, please see [struct_migrate_task] detail", - stringutil.CurrentTimeFormatString(), - stringutil.StringLower(st.Task.TaskMode), - smt.TaskName, - st.Task.TaskMode, - smt.SchemaNameS, - smt.TableNameS), - }) - if err != nil { - return err - } - return nil - }) - if errW != nil { - return errW - } - } - } - - // sequence migrate exclude struct_migrate_summary compute counts - err = st.sequenceMigrateStart(databaseS, databaseT) - if err != nil { - return err - } - - err = databaseS.Close() - if err != nil { - return err - } - err = databaseT.Close() - if err != nil { - return err - } - schemaEndTime := time.Now() - _, err = model.GetIStructMigrateSummaryRW().UpdateStructMigrateSummary(st.Ctx, - &task.StructMigrateSummary{ - TaskName: st.Task.TaskName, - SchemaNameS: st.SchemaNameS}, - map[string]interface{}{ - "Duration": fmt.Sprintf("%f", schemaEndTime.Sub(schemaStartTime).Seconds()), - }) - if err != nil { - return err - } - logger.Info("struct migrate task", - zap.String("task_name", st.Task.TaskName), - zap.String("task_mode", st.Task.TaskMode), - zap.String("task_flow", st.Task.TaskFlow), - zap.String("cost", schemaEndTime.Sub(schemaStartTime).String())) - return nil -} - -func (st *StructMigrateTask) structMigrateStart( - dbTypeS string, - dbVersionS string, - databaseS, - databaseT database.IDatabase, - dbCharsetS, - dbCharsetT string, - startTime time.Time, - smt *task.StructMigrateTask, - buildInDatatypeRules 
[]*buildin.BuildinDatatypeRule, - buildInDefaultValueRules []*buildin.BuildinDefaultvalRule) error { - // if the schema table success, skip - if strings.EqualFold(smt.TaskStatus, constant.TaskDatabaseStatusSuccess) { - logger.Warn("struct migrate task process", - zap.String("task_name", st.Task.TaskName), - zap.String("task_mode", st.Task.TaskMode), - zap.String("task_flow", st.Task.TaskFlow), - zap.String("schema_name_s", smt.SchemaNameS), - zap.String("table_name_s", smt.TableNameS), - zap.String("task_status", constant.TaskDatabaseStatusSuccess), - zap.String("table task had done", "skip migrate"), - zap.String("cost", time.Now().Sub(startTime).String())) - return nil - } - // if the table is MATERIALIZED VIEW, SKIP - // MATERIALIZED VIEW isn't support struct migrate - if strings.EqualFold(smt.TableTypeS, constant.OracleDatabaseTableTypeMaterializedView) { - logger.Warn("struct migrate task process", - zap.String("task_name", st.Task.TaskName), - zap.String("task_mode", st.Task.TaskMode), - zap.String("task_flow", st.Task.TaskFlow), - zap.String("schema_name_s", smt.SchemaNameS), - zap.String("table_name_s", smt.TableNameS), - zap.String("table_type_s", smt.TableTypeS), - zap.String("suggest", "if necessary, please manually process the tables in the above list")) - zap.String("cost", time.Now().Sub(startTime).String()) - return nil - } - - err := model.Transaction(st.Ctx, func(txnCtx context.Context) error { - _, err := model.GetIStructMigrateTaskRW().UpdateStructMigrateTask(txnCtx, &task.StructMigrateTask{ - TaskName: smt.TaskName, - SchemaNameS: smt.SchemaNameS, - TableNameS: smt.TableNameS}, - map[string]interface{}{ - "TaskStatus": constant.TaskDatabaseStatusRunning, - }) - if err != nil { - return err - } - _, err = model.GetITaskLogRW().CreateLog(txnCtx, &task.Log{ - TaskName: smt.TaskName, - SchemaNameS: smt.SchemaNameS, - TableNameS: smt.TableNameS, - LogDetail: fmt.Sprintf("%v [%v] the worker task [%v] source table [%v.%v] starting", - stringutil.CurrentTimeFormatString(), - stringutil.StringLower(constant.TaskModeStructMigrate), - smt.TaskName, - smt.SchemaNameS, - smt.TableNameS), - }) - if err != nil { - return err - } - return nil - }) - if err != nil { - return err - } - - sourceTime := time.Now() - datasourceS := &processor.Datasource{ - DBTypeS: dbTypeS, - DBVersionS: dbVersionS, - DatabaseS: databaseS, - SchemaNameS: smt.SchemaNameS, - TableNameS: smt.TableNameS, - TableTypeS: smt.TableTypeS, - } - - attrs, err := database.IStructMigrateAttributes(datasourceS) - if err != nil { - return err - } - logger.Info("struct migrate task process", - zap.String("task_name", st.Task.TaskName), - zap.String("task_mode", st.Task.TaskMode), - zap.String("task_flow", st.Task.TaskFlow), - zap.String("schema_name_s", smt.SchemaNameS), - zap.String("table_name_s", smt.TableNameS), - zap.String("task_stage", "datasource"), - zap.String("datasource", datasourceS.String()), - zap.String("cost", time.Now().Sub(sourceTime).String())) - ruleTime := time.Now() - dataRule := &processor.StructMigrateRule{ + err = database.IDatabaseRun(st.Ctx, &processor.StructMigrateTask{ Ctx: st.Ctx, - TaskName: smt.TaskName, - TaskFlow: st.Task.TaskFlow, - SchemaNameS: smt.SchemaNameS, - TableNameS: smt.TableNameS, - TablePrimaryAttrs: attrs.PrimaryKey, - TableColumnsAttrs: attrs.TableColumns, - TableCommentAttrs: attrs.TableComment, - CreateIfNotExist: st.TaskParams.CreateIfNotExist, - CaseFieldRuleT: st.Task.CaseFieldRuleT, + Task: st.Task, + SchemaNameS: st.SchemaNameS, + SchemaNameT: st.SchemaNameT, + 
DatabaseS: databaseS, + DatabaseT: databaseT, + DBTypeS: dbTypeS, DBVersionS: dbVersionS, - DBCharsetS: dbCharsetS, - DBCharsetT: dbCharsetT, - BuildinDatatypeRules: buildInDatatypeRules, - BuildinDefaultValueRules: buildInDefaultValueRules, - } - - rules, err := database.IStructMigrateAttributesRule(dataRule) - if err != nil { - return err - } - logger.Info("struct migrate task process", - zap.String("task_name", st.Task.TaskName), - zap.String("task_mode", st.Task.TaskMode), - zap.String("task_flow", st.Task.TaskFlow), - zap.String("schema_name_s", smt.SchemaNameS), - zap.String("table_name_s", smt.TableNameS), - zap.String("task_stage", "rule"), - zap.String("rule", dataRule.String()), - zap.String("cost", time.Now().Sub(ruleTime).String())) - - tableTime := time.Now() - dataTable := &processor.StructMigrateTable{ - TaskName: smt.TaskName, - TaskFlow: st.Task.TaskFlow, - DatasourceS: datasourceS, - DBCharsetT: dbCharsetT, - TableAttributes: attrs, - TableAttributesRule: rules, - } - - tableStruct, err := database.IStructMigrateTableStructure(dataTable) - if err != nil { - return err - } - - logger.Info("struct migrate task process", - zap.String("task_name", st.Task.TaskName), - zap.String("task_mode", st.Task.TaskMode), - zap.String("task_flow", st.Task.TaskFlow), - zap.String("schema_name_s", smt.SchemaNameS), - zap.String("table_name_s", smt.TableNameS), - zap.String("task_stage", "struct"), - zap.String("struct", dataTable.String()), - zap.String("cost", time.Now().Sub(tableTime).String())) - - writerTime := time.Now() - var w database.IStructMigrateDatabaseWriter - w = processor.NewStructMigrateDatabase(st.Ctx, smt.TaskName, st.Task.TaskFlow, databaseT, startTime, tableStruct) - - if st.TaskParams.EnableDirectCreate { - err = w.SyncStructDatabase() - if err != nil { - return err - } - logger.Info("struct migrate task process", - zap.String("task_name", st.Task.TaskName), - zap.String("task_mode", st.Task.TaskMode), - zap.String("task_flow", st.Task.TaskFlow), - zap.String("schema_name_s", smt.SchemaNameS), - zap.String("table_name_s", smt.TableNameS), - zap.String("task_stage", "struct sync database"), - zap.String("cost", time.Now().Sub(writerTime).String())) - - return nil - } - - err = w.WriteStructDatabase() - if err != nil { - return err - } - logger.Info("struct migrate task process", - zap.String("task_name", st.Task.TaskName), - zap.String("task_mode", st.Task.TaskMode), - zap.String("task_flow", st.Task.TaskFlow), - zap.String("schema_name_s", smt.SchemaNameS), - zap.String("table_name_s", smt.TableNameS), - zap.String("task_stage", "struct write database"), - zap.String("cost", time.Now().Sub(writerTime).String())) - return nil -} - -func (st *StructMigrateTask) sequenceMigrateStart(databaseS, databaseT database.IDatabase) error { - startTime := time.Now() - logger.Info("sequence migrate task process", - zap.String("task_name", st.Task.TaskName), - zap.String("task_mode", st.Task.TaskMode), - zap.String("task_flow", st.Task.TaskFlow), - zap.String("schema_name_s", st.SchemaNameS), - zap.String("start_time", startTime.String())) - sequences, err := databaseS.GetDatabaseSequence(st.SchemaNameS) + DBCharsetS: stringutil.StringUpper(st.DatasourceS.ConnectCharset), + DBCharsetT: stringutil.StringUpper(st.DatasourceT.ConnectCharset), + StartTime: startTime, + BuildInDatatypeRules: buildInDatatypeRules, + BuildInDefaultValueRules: buildInDefaultValueRules, + TaskParams: st.TaskParams, + }) if err != nil { return err } - var seqCreates []string - for _, seq := range sequences { - 
lastNumber, err := stringutil.StrconvIntBitSize(seq["LAST_NUMBER"], 64) - if err != nil { - return err - } - cacheSize, err := stringutil.StrconvIntBitSize(seq["CACHE_SIZE"], 64) - if err != nil { - return err - } - // disable cache - if cacheSize == 0 { - lastNumber = lastNumber + 5000 - } else { - lastNumber = lastNumber + (cacheSize * 2) - } - - switch st.Task.TaskFlow { - case constant.TaskFlowOracleToMySQL, constant.TaskFlowOracleToTiDB: - if st.TaskParams.CreateIfNotExist { - seqCreates = append(seqCreates, fmt.Sprintf(`CREATE SEQUENCE IF NOT EXISTS %s.%s START %v INCREMENT %v MINVALUE %v MAXVALUE %v CACHE %v CYCLE %v;`, st.SchemaNameT, seq["SEQUENCE_NAME"], lastNumber, seq["INCREMENT_BY"], seq["MIN_VALUE"], seq["MAX_VALUE"], seq["CACHE_SIZE"], seq["CYCLE_FLAG"])) - } else { - seqCreates = append(seqCreates, fmt.Sprintf(`CREATE SEQUENCE %s.%s START %v INCREMENT %v MINVALUE %v MAXVALUE %v CACHE %v CYCLE %v;`, st.SchemaNameT, seq["SEQUENCE_NAME"], lastNumber, seq["INCREMENT_BY"], seq["MIN_VALUE"], seq["MAX_VALUE"], seq["CACHE_SIZE"], seq["CYCLE_FLAG"])) - } - default: - return fmt.Errorf("the task [%v] task_flow [%v] isn't support, please contact author or reselect", st.Task.TaskName, st.Task.TaskFlow) - } - } - - writerTime := time.Now() - var w database.ISequenceMigrateDatabaseWriter - w = processor.NewSequenceMigrateDatabase(st.Ctx, st.Task.TaskName, st.Task.TaskFlow, st.SchemaNameS, st.SchemaNameT, databaseT, startTime, seqCreates) - - if st.TaskParams.EnableDirectCreate { - err = w.SyncSequenceDatabase() - if err != nil { - return err - } - logger.Info("sequence migrate task process", - zap.String("task_name", st.Task.TaskName), - zap.String("task_mode", st.Task.TaskMode), - zap.String("task_flow", st.Task.TaskFlow), - zap.String("schema_name_s", st.SchemaNameS), - zap.String("task_stage", "struct sequence database"), - zap.String("cost", time.Now().Sub(writerTime).String())) - return nil - } - - err = w.WriteSequenceDatabase() - if err != nil { - return err - } - logger.Info("sequence migrate task process", + endTime := time.Now() + logger.Info("struct migrate task", zap.String("task_name", st.Task.TaskName), zap.String("task_mode", st.Task.TaskMode), zap.String("task_flow", st.Task.TaskFlow), - zap.String("schema_name_s", st.SchemaNameS), - zap.String("task_stage", "struct sequence database"), - zap.String("cost", time.Now().Sub(writerTime).String())) - return nil -} - -func (st *StructMigrateTask) initStructMigrateTask(databaseS database.IDatabase) error { - // delete checkpoint - initFlags, err := model.GetITaskRW().GetTask(st.Ctx, &task.Task{TaskName: st.Task.TaskName}) - if err != nil { - return err - } - if !st.TaskParams.EnableCheckpoint || strings.EqualFold(initFlags.TaskInit, constant.TaskInitStatusNotFinished) { - err := model.GetIStructMigrateSummaryRW().DeleteStructMigrateSummaryName(st.Ctx, []string{st.Task.TaskName}) - if err != nil { - return err - } - err = model.GetIStructMigrateTaskRW().DeleteStructMigrateTaskName(st.Ctx, []string{st.Task.TaskName}) - if err != nil { - return err - } - } else if st.TaskParams.EnableCheckpoint && strings.EqualFold(initFlags.TaskInit, constant.TaskInitStatusFinished) { - logger.Warn("struct migrate task init skip", - zap.String("task_name", st.Task.TaskName), - zap.String("task_mode", st.Task.TaskMode), - zap.String("task_flow", st.Task.TaskFlow), - zap.String("task_init", constant.TaskInitStatusFinished)) - return nil - } - - schemaRoute, err := model.GetIMigrateSchemaRouteRW().GetSchemaRouteRule(st.Ctx, - 
&rule.SchemaRouteRule{TaskName: st.Task.TaskName}) - if err != nil { - return err - } - - // filter database table - schemaTaskTables, err := model.GetIMigrateTaskTableRW().FindMigrateTaskTable(st.Ctx, &rule.MigrateTaskTable{ - TaskName: schemaRoute.TaskName, - SchemaNameS: schemaRoute.SchemaNameS, - }) - if err != nil { - return err - } - var ( - includeTables []string - excludeTables []string - databaseTaskTables []string // task tables - ) - databaseTableTypeMap := make(map[string]string) - - for _, t := range schemaTaskTables { - if strings.EqualFold(t.IsExclude, constant.MigrateTaskTableIsExclude) { - excludeTables = append(excludeTables, t.TableNameS) - } - if strings.EqualFold(t.IsExclude, constant.MigrateTaskTableIsNotExclude) { - includeTables = append(includeTables, t.TableNameS) - } - } - - tableObjs, err := databaseS.FilterDatabaseTable(schemaRoute.SchemaNameS, includeTables, excludeTables) - if err != nil { - return err - } - - // rule case field - for _, t := range tableObjs.TaskTables { - var tabName string - // the according target case field rule convert - if strings.EqualFold(st.Task.CaseFieldRuleS, constant.ParamValueStructMigrateCaseFieldRuleLower) { - tabName = stringutil.StringLower(t) - } - if strings.EqualFold(st.Task.CaseFieldRuleS, constant.ParamValueStructMigrateCaseFieldRuleUpper) { - tabName = stringutil.StringUpper(t) - } - if strings.EqualFold(st.Task.CaseFieldRuleS, constant.ParamValueStructMigrateCaseFieldRuleOrigin) { - tabName = t - } - databaseTaskTables = append(databaseTaskTables, tabName) - } - - databaseTableTypeMap, err = databaseS.GetDatabaseTableType(schemaRoute.SchemaNameS) - if err != nil { - return err - } - - // get table route rule - tableRouteRule := make(map[string]string) - - tableRoutes, err := model.GetIMigrateTableRouteRW().FindTableRouteRule(st.Ctx, &rule.TableRouteRule{ - TaskName: schemaRoute.TaskName, - SchemaNameS: schemaRoute.SchemaNameS, - }) - for _, tr := range tableRoutes { - tableRouteRule[tr.TableNameS] = tr.TableNameT - } - - // clear the struct migrate task table - migrateTasks, err := model.GetIStructMigrateTaskRW().BatchFindStructMigrateTask(st.Ctx, &task.StructMigrateTask{TaskName: st.Task.TaskName}) - if err != nil { - return err - } - - // repeatInitTableMap used for store the struct_migrate_task table name has be finished, avoid repeated initialization - repeatInitTableMap := make(map[string]struct{}) - if len(migrateTasks) > 0 { - taskTablesMap := make(map[string]struct{}) - for _, t := range databaseTaskTables { - taskTablesMap[t] = struct{}{} - } - for _, smt := range migrateTasks { - if _, ok := taskTablesMap[smt.TableNameS]; !ok { - err = model.GetIStructMigrateTaskRW().DeleteStructMigrateTask(st.Ctx, smt.ID) - if err != nil { - return err - } - } else { - repeatInitTableMap[smt.TableNameS] = struct{}{} - } - } - } - - // database tables - // init database table - // get table column route rule - for _, sourceTable := range databaseTaskTables { - initStructInfos, err := model.GetIStructMigrateTaskRW().GetStructMigrateTaskTable(st.Ctx, &task.StructMigrateTask{ - TaskName: st.Task.TaskName, - SchemaNameS: schemaRoute.SchemaNameS, - TableNameS: sourceTable, - }) - if err != nil { - return err - } - if len(initStructInfos) > 1 { - return fmt.Errorf("the struct migrate task table is over one, it should be only one") - } - // if the table is existed, then skip init - if _, ok := repeatInitTableMap[sourceTable]; ok { - continue - } - var ( - targetTable string - ) - if val, ok := tableRouteRule[sourceTable]; ok { - 
targetTable = val - } else { - // the according target case field rule convert - if strings.EqualFold(st.Task.CaseFieldRuleT, constant.ParamValueStructMigrateCaseFieldRuleLower) { - targetTable = stringutil.StringLower(sourceTable) - } - if strings.EqualFold(st.Task.CaseFieldRuleT, constant.ParamValueStructMigrateCaseFieldRuleUpper) { - targetTable = stringutil.StringUpper(sourceTable) - } - if strings.EqualFold(st.Task.CaseFieldRuleT, constant.ParamValueStructMigrateCaseFieldRuleOrigin) { - targetTable = sourceTable - } - } - - _, err = model.GetIStructMigrateTaskRW().CreateStructMigrateTask(st.Ctx, &task.StructMigrateTask{ - TaskName: st.Task.TaskName, - SchemaNameS: schemaRoute.SchemaNameS, - TableNameS: sourceTable, - TableTypeS: databaseTableTypeMap[sourceTable], - SchemaNameT: schemaRoute.SchemaNameT, - TableNameT: targetTable, - TaskStatus: constant.TaskDatabaseStatusWaiting, - Category: constant.DatabaseStructMigrateSqlTableCategory, - }) - if err != nil { - return err - } - } - - _, err = model.GetIStructMigrateSummaryRW().CreateStructMigrateSummary(st.Ctx, - &task.StructMigrateSummary{ - TaskName: st.Task.TaskName, - SchemaNameS: schemaRoute.SchemaNameS, - TableTotals: uint64(len(databaseTaskTables) + 1), // include schema create sql - }) - if err != nil { - return err - } - _, err = model.GetITaskRW().UpdateTask(st.Ctx, &task.Task{TaskName: st.Task.TaskName}, map[string]interface{}{"TaskInit": constant.TaskInitStatusFinished}) - if err != nil { - return err - } + zap.String("cost", endTime.Sub(startTime).String())) return nil }
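With the Task.TaskInit column dropped (see model/task/task_entity.go below), checkpointing now hangs off the per-summary InitFlag plus MigrateFlag/CompareFlag/ScanFlag columns, and the Get*Summary readers stop surfacing gorm.ErrRecordNotFound in favor of a nil record. A minimal sketch of the gate this enables, assuming the flags hold "Y"/"N" (only the "N" default is visible in this diff):

package demo

import (
	"context"

	"github.com/wentaojin/dbms/model"
	"github.com/wentaojin/dbms/model/task"
)

// skipInit reports whether table-chunk initialization can be skipped on a
// checkpointed re-run; illustrative only, the real processor logic is not
// part of this hunk.
func skipInit(ctx context.Context, taskName, schemaNameS string) (bool, error) {
	s, err := model.GetIStructMigrateSummaryRW().GetStructMigrateSummary(ctx,
		&task.StructMigrateSummary{TaskName: taskName, SchemaNameS: schemaNameS})
	if err != nil {
		return false, err // a real database failure
	}
	// a missing row now comes back as s == nil, not gorm.ErrRecordNotFound
	if s == nil {
		return false, nil
	}
	return s.InitFlag == "Y", nil
}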
diff --git a/example/csv_migrate_task.toml b/example/csv_migrate_task.toml index a1b23b1..66ee2be 100644 --- a/example/csv_migrate_task.toml +++ b/example/csv_migrate_task.toml @@ -1,6 +1,6 @@ task-name = "cmt_33to45" -datasource-name-s = "oracle33" -datasource-name-t = "tidb145" +datasource-name-s = "o11024" +datasource-name-t = "test08" comment = "test datasource" [case-field-rule] case-field-rule-s = "0" # 1. value 0: schema, table, and column names are used exactly as written in this configuration file # 2. value 1: schema, table, and column names are converted to lowercase # 3. value 2: schema, table, and column names are converted to uppercase -case-field-rule-t = "0" +case-field-rule-t = "2" [schema-route-rule] -schema-name-s = "MARVIN" +schema-name-s = "SCOTT" schema-name-t = "STEVEN" -include-table-s = ["MARVIN00","MARVIN01","MARVIN_COLUMN_T"] +include-table-s = ["T1"] exclude-table-s = [] [[schema-route-rule.table-route-rules]] table-name-s = "MARVIN00" table-name-t = "STEVEN00" column-route-rules = {"NAME" = "T_NAME00"} -[[schema-route-rule.table-route-rules]] -table-name-s = "MARVIN01" -table-name-t = "STEVEN01" -column-route-rules = {"NAME" = "t_name00"} [[data-migrate-rules]] table-name-s = "MARVIN00" @@ -43,9 +39,9 @@ sql-hint-s = "/*+ PARALLEL(8) */" # tables run serially, with concurrency inside each table [csv-migrate-param] # concurrency for initializing table tasks -table-thread = 100 +table-thread = 2 # batch size for data writes -batch-size = 50 +batch-size = 500 # concurrency for writing to the meta database write-thread = 4 # table size amplification factor: when free disk space is smaller than the table size, the export is skipped automatically until the condition is met or the task ends @@ -70,7 +66,7 @@ escape-backslash = true chunk-size = 100000 # output directory for all table data files; requires ample disk space, and when the directory cannot hold a table its export is skipped automatically until a table fits or the task ends # directory layout: /data/${target_dbname}/${table_name} -output-dir = "/users/marvin/gostore/dbms/data" +output-dir = "/users/marvin/gostore/dbms/data/csv" # SQL concurrency within a table, i.e. how many concurrent SQL statements read upstream table data; can be changed dynamically sql-thread-s = 2 # query hint for the chunk-splitting SQL diff --git a/example/sql_migrate_task.toml b/example/sql_migrate_task.toml index ae5118c..58ef8bd 100644 --- a/example/sql_migrate_task.toml +++ b/example/sql_migrate_task.toml @@ -47,6 +47,8 @@ sql-thread-t = 64 sql-hint-t = "" # call-timeout, unit: seconds call-timeout = 36000 +# resume from checkpoint +enable-checkpoint = true # whether to use Oracle (ORA) consistent read enable-consistent-read = false # whether to use safe-mode: false -> insert into, true -> replace into diff --git a/logger/logger.go b/logger/logger.go index 8b02b99..dbc51ef 100644 --- a/logger/logger.go +++ b/logger/logger.go @@ -72,10 +72,10 @@ func GetGormLogger(logLevel string, slowThreshold uint64) zapgorm2.Logger { } else if strings.EqualFold(spaceLogLevel, "error") { gormLogger.LogLevel = logger2.Error } - + // avoid the First method logging "error": "record not found" + gormLogger.IgnoreRecordNotFoundError = true gormLogger.LogMode(gormLogger.LogLevel) gormLogger.SetAsDefault() - return gormLogger } diff --git a/master/openapi.go b/master/openapi.go index 5bc735f..cdda5ff 100644 --- a/master/openapi.go +++ b/master/openapi.go @@ -748,6 +748,7 @@ func (s *Server) upsertSqlMigrateTask(ctx context.Context, req openapi.APIPutSql CallTimeout: *req.SqlMigrateParam.CallTimeout, EnableConsistentRead: *req.SqlMigrateParam.EnableConsistentRead, EnableSafeMode: *req.SqlMigrateParam.EnableSafeMode, + EnableCheckpoint: *req.SqlMigrateParam.EnableCheckpoint, }, }) if err != nil { diff --git a/model/task/task_entity.go b/model/task/task_entity.go index ef1749b..ff24ef9 100644 --- a/model/task/task_entity.go +++ b/model/task/task_entity.go @@ -31,7 +31,6 @@ type Task struct { WorkerAddr string `gorm:"type:varchar(30);comment:worker addr" json:"workerAddr"` CaseFieldRuleS string `gorm:"type:varchar(30);comment:source case field rule" json:"caseFieldRuleS"` CaseFieldRuleT string `gorm:"type:varchar(30);comment:target case field rule" json:"CaseFieldRuleT"` - TaskInit string `gorm:"type:varchar(30);default:N;comment:the task init status" json:"TaskInit"` TaskStatus string `gorm:"type:varchar(30);comment:task status" json:"taskStatus"` StartTime *time.Time `gorm:"default:null;comment:task start running time" json:"startTime"` EndTime *time.Time `gorm:"default:null;comment:task end running time" json:"endTime"` @@ -70,6 +69,8 @@ type StructMigrateSummary struct { TableWaits uint64 `gorm:"type:int;comment:source table table waits" json:"tableWaits"` TableRuns uint64 `gorm:"type:int;comment:source table table runs" json:"tableRuns"` TableStops uint64 `gorm:"type:int;comment:source table table stops" json:"tableStops"` + InitFlag string `gorm:"type:char(1);default:N;comment:the task table init flag" json:"initFlag"` + MigrateFlag string `gorm:"type:char(1);default:N;comment:the task table migrate finished flag" json:"migrateFlag"` Duration float64 `gorm:"comment:run duration, size: seconds" json:"duration"` *common.Entity } @@ -104,6 +105,8 @@ type StructCompareSummary struct { TableWaits uint64 `gorm:"type:int;comment:source table table waits" json:"tableWaits"` TableRuns uint64 `gorm:"type:int;comment:source table table runs" json:"tableRuns"` TableStops uint64 `gorm:"type:int;comment:source table table stops" json:"tableStops"` + InitFlag string `gorm:"type:char(1);default:N;comment:the task table init flag" json:"initFlag"` + CompareFlag string `gorm:"type:char(1);default:N;comment:the task table compare finished flag" json:"compareFlag"` Duration float64 `gorm:"comment:run duration, size: seconds" json:"duration"` *common.Entity } @@ -141,6 +144,8 @@ type DataMigrateSummary struct { ChunkWaits uint64 `gorm:"type:int;comment:source table chunk waits" json:"chunkWaits"` ChunkRuns uint64 `gorm:"type:int;comment:source table chunk runs" json:"chunkRuns"` ChunkStops uint64 `gorm:"type:int;comment:source table chunk stops" json:"chunkStops"` + 
InitFlag string `gorm:"type:char(1);default:N;comment:the task table init flag" json:"initFlag"` + MigrateFlag string `gorm:"type:char(1);default:N;comment:the task table migrate finished flag" json:"migrateFlag"` Refused string `gorm:"type:text;comment:csv migrate table refused" json:"refused"` Duration float64 `gorm:"comment:run duration, size: seconds" json:"duration"` *common.Entity @@ -172,15 +177,17 @@ type DataMigrateTask struct { } type SqlMigrateSummary struct { - ID uint64 `gorm:"primary_key;autoIncrement;comment:id" json:"id"` - TaskName string `gorm:"type:varchar(30);not null;uniqueIndex:uniq_schema_table_complex;comment:task name" json:"taskName"` - SqlTotals uint64 `gorm:"type:int;comment:source table sql totals" json:"sqlTotals"` - SqlSuccess uint64 `gorm:"type:int;comment:source table sql success" json:"sqlSuccess"` - SqlFails uint64 `gorm:"type:int;comment:source table sql fails" json:"sqlFails"` - SqlWaits uint64 `gorm:"type:int;comment:source table sql waits" json:"sqlWaits"` - SqlRuns uint64 `gorm:"type:int;comment:source table sql runs" json:"sqlRuns"` - SqlStops uint64 `gorm:"type:int;comment:source table sql stops" json:"sqlStops"` - Duration float64 `gorm:"comment:run duration, size: seconds" json:"duration"` + ID uint64 `gorm:"primary_key;autoIncrement;comment:id" json:"id"` + TaskName string `gorm:"type:varchar(30);not null;uniqueIndex:uniq_schema_table_complex;comment:task name" json:"taskName"` + SqlTotals uint64 `gorm:"type:int;comment:source table sql totals" json:"sqlTotals"` + SqlSuccess uint64 `gorm:"type:int;comment:source table sql success" json:"sqlSuccess"` + SqlFails uint64 `gorm:"type:int;comment:source table sql fails" json:"sqlFails"` + SqlWaits uint64 `gorm:"type:int;comment:source table sql waits" json:"sqlWaits"` + SqlRuns uint64 `gorm:"type:int;comment:source table sql runs" json:"sqlRuns"` + SqlStops uint64 `gorm:"type:int;comment:source table sql stops" json:"sqlStops"` + Duration float64 `gorm:"comment:run duration, size: seconds" json:"duration"` + InitFlag string `gorm:"type:char(1);default:N;comment:the task sql init flag" json:"initFlag"` + MigrateFlag string `gorm:"type:char(1);default:N;comment:the task sql migrate finished flag" json:"migrateFlag"` *common.Entity } @@ -220,6 +227,8 @@ type DataCompareSummary struct { ChunkWaits uint64 `gorm:"type:int;comment:source table chunk waits" json:"chunkWaits"` ChunkRuns uint64 `gorm:"type:int;comment:source table chunk runs" json:"chunkRuns"` ChunkStops uint64 `gorm:"type:int;comment:source table chunk stops" json:"chunkStops"` + InitFlag string `gorm:"type:char(1);default:N;comment:the task table init flag" json:"initFlag"` + CompareFlag string `gorm:"type:char(1);default:N;comment:the task table compare finished flag" json:"compareFlag"` Duration float64 `gorm:"comment:run duration, size: seconds" json:"duration"` *common.Entity } @@ -281,6 +290,8 @@ type DataScanSummary struct { ChunkRuns uint64 `gorm:"type:int;comment:source table chunk runs" json:"chunkRuns"` ChunkStops uint64 `gorm:"type:int;comment:source table chunk stops" json:"chunkStops"` Duration float64 `gorm:"comment:run duration, size: seconds" json:"duration"` + InitFlag string `gorm:"type:char(1);default:N;comment:the task table init flag" json:"initFlag"` + ScanFlag string `gorm:"type:char(1);default:N;comment:the task table scan finished flag" json:"scanFlag"` *common.Entity } diff --git a/model/task/task_impl.go b/model/task/task_impl.go index d562fa5..5615ba1 100644 --- a/model/task/task_impl.go +++ 
b/model/task/task_impl.go @@ -17,6 +17,7 @@ package task import ( "context" + "errors" "fmt" "github.com/wentaojin/dbms/utils/stringutil" "golang.org/x/sync/errgroup" @@ -259,9 +260,12 @@ func (rw *RWStructMigrateSummary) GetStructMigrateSummary(ctx context.Context, t var dataS *StructMigrateSummary err := rw.DB(ctx).Model(&StructMigrateSummary{}).Where("task_name = ? AND schema_name_s = ?", task.TaskName, task.SchemaNameS).First(&dataS).Error if err != nil { + if errors.Is(err, gorm.ErrRecordNotFound) { + return dataS, nil + } return nil, fmt.Errorf("get table [%s] record failed: %v", rw.TableName(ctx), err) } - return task, nil + return dataS, nil } func (rw *RWStructMigrateSummary) UpdateStructMigrateSummary(ctx context.Context, task *StructMigrateSummary, updates map[string]interface{}) (*StructMigrateSummary, error) { @@ -449,9 +453,12 @@ func (rw *RWStructCompareSummary) GetStructCompareSummary(ctx context.Context, t var dataS *StructCompareSummary err := rw.DB(ctx).Model(&StructCompareSummary{}).Where("task_name = ? AND schema_name_s = ?", task.TaskName, task.SchemaNameS).First(&dataS).Error if err != nil { + if errors.Is(err, gorm.ErrRecordNotFound) { + return dataS, nil + } return nil, fmt.Errorf("get table [%s] record failed: %v", rw.TableName(ctx), err) } - return task, nil + return dataS, nil } func (rw *RWStructCompareSummary) UpdateStructCompareSummary(ctx context.Context, task *StructCompareSummary, updates map[string]interface{}) (*StructCompareSummary, error) { @@ -630,9 +637,12 @@ func (rw *RWDataMigrateSummary) GetDataMigrateSummary(ctx context.Context, task var dataS *DataMigrateSummary err := rw.DB(ctx).Model(&DataMigrateSummary{}).Where("task_name = ? AND schema_name_s = ? AND table_name_s = ?", task.TaskName, task.SchemaNameS, task.TableNameS).First(&dataS).Error if err != nil { + if errors.Is(err, gorm.ErrRecordNotFound) { + return dataS, nil + } return nil, fmt.Errorf("get table [%s] record failed: %v", rw.TableName(ctx), err) } - return task, nil + return dataS, nil } func (rw *RWDataMigrateSummary) UpdateDataMigrateSummary(ctx context.Context, task *DataMigrateSummary, updates map[string]interface{}) (*DataMigrateSummary, error) { @@ -738,6 +748,9 @@ func (rw *RWDataMigrateTask) GetDataMigrateTask(ctx context.Context, task *DataM var dataS *DataMigrateTask err := rw.DB(ctx).Model(&DataMigrateTask{}).Where("task_name = ? AND schema_name_s = ? AND table_name_s = ? ", task.TaskName, task.SchemaNameS, task.TableNameS).First(&dataS).Error if err != nil { + if errors.Is(err, gorm.ErrRecordNotFound) { + return dataS, nil + } return nil, fmt.Errorf("get table [%s] record failed: %v", rw.TableName(ctx), err) } return dataS, nil @@ -761,20 +774,20 @@ func (rw *RWDataMigrateTask) FindDataMigrateTask(ctx context.Context, task *Data return dataS, nil } -func (rw *RWDataMigrateTask) FindDataMigrateTaskBySchemaTableChunkStatus(ctx context.Context, task *DataMigrateTask) ([]*DataGroupTaskStatusResult, error) { - var dataS []*DataGroupTaskStatusResult - err := rw.DB(ctx).Model(&DataMigrateTask{}).Select("task_name,schema_name_s,table_name_s,task_status,count(1) as status_totals").Where("task_name = ? AND schema_name_s = ? 
AND table_name_s = ?", task.TaskName, task.SchemaNameS, task.TableNameS).Group("task_name,schema_name_s,table_name_s,task_status").Order("status_totals desc").Find(&dataS).Error +func (rw *RWDataMigrateTask) FindDataMigrateTaskTableStatus(ctx context.Context, taskName, schemaName, tableName string, taskStatus []string) ([]*DataMigrateTask, error) { + var dataS []*DataMigrateTask + err := rw.DB(ctx).Model(&DataMigrateTask{}).Where("task_name = ? AND schema_name_s = ? AND table_name_s = ? AND task_status IN (?)", taskName, schemaName, tableName, taskStatus).Find(&dataS).Error if err != nil { - return nil, fmt.Errorf("find table [%s] group by the the task_name and schema_name_s and table_name_s ans task_status record failed: %v", rw.TableName(ctx), err) + return nil, fmt.Errorf("find table [%s] record failed: %v", rw.TableName(ctx), err) } return dataS, nil } -func (rw *RWDataMigrateTask) FindDataMigrateTaskGroupByTaskSchemaTable(ctx context.Context, taskName string) ([]*DataGroupChunkResult, error) { - var dataS []*DataGroupChunkResult - err := rw.DB(ctx).Model(&DataMigrateTask{}).Select("task_name,schema_name_s,table_name_s,count(1) as chunk_totals").Where("task_name = ?", taskName).Group("task_name,schema_name_s,table_name_s").Order("chunk_totals desc").Find(&dataS).Error +func (rw *RWDataMigrateTask) FindDataMigrateTaskBySchemaTableChunkStatus(ctx context.Context, task *DataMigrateTask) ([]*DataGroupTaskStatusResult, error) { + var dataS []*DataGroupTaskStatusResult + err := rw.DB(ctx).Model(&DataMigrateTask{}).Select("task_name,schema_name_s,table_name_s,task_status,count(1) as status_totals").Where("task_name = ? AND schema_name_s = ? AND table_name_s = ?", task.TaskName, task.SchemaNameS, task.TableNameS).Group("task_name,schema_name_s,table_name_s,task_status").Order("status_totals desc").Find(&dataS).Error if err != nil { - return nil, fmt.Errorf("find table [%s] group by the the task_name and schema_name_s and table_name_s record failed: %v", rw.TableName(ctx), err) + return nil, fmt.Errorf("find table [%s] group by the the task_name and schema_name_s and table_name_s ans task_status record failed: %v", rw.TableName(ctx), err) } return dataS, nil } @@ -852,9 +865,12 @@ func (rw *RWSqlMigrateSummary) GetSqlMigrateSummary(ctx context.Context, task *S var dataS *SqlMigrateSummary err := rw.DB(ctx).Model(&SqlMigrateSummary{}).Where("task_name = ?", task.TaskName).First(&dataS).Error if err != nil { + if errors.Is(err, gorm.ErrRecordNotFound) { + return dataS, nil + } return nil, fmt.Errorf("get table [%s] record failed: %v", rw.TableName(ctx), err) } - return task, nil + return dataS, nil } func (rw *RWSqlMigrateSummary) UpdateSqlMigrateSummary(ctx context.Context, task *SqlMigrateSummary, updates map[string]interface{}) (*SqlMigrateSummary, error) { @@ -1020,9 +1036,12 @@ func (rw *RWDataCompareSummary) GetDataCompareSummary(ctx context.Context, task var dataS *DataCompareSummary err := rw.DB(ctx).Model(&DataCompareSummary{}).Where("task_name = ? AND schema_name_s = ? 
AND table_name_s = ?", task.TaskName, task.SchemaNameS, task.TableNameS).First(&dataS).Error if err != nil { + if errors.Is(err, gorm.ErrRecordNotFound) { + return dataS, nil + } return nil, fmt.Errorf("get table [%s] record failed: %v", rw.TableName(ctx), err) } - return task, nil + return dataS, nil } func (rw *RWDataCompareSummary) UpdateDataCompareSummary(ctx context.Context, task *DataCompareSummary, updates map[string]interface{}) (*DataCompareSummary, error) { @@ -1291,9 +1310,12 @@ func (rw *RWDataScanSummary) GetDataScanSummary(ctx context.Context, task *DataS var dataS *DataScanSummary err := rw.DB(ctx).Model(&DataScanSummary{}).Where("task_name = ? AND schema_name_s = ? AND table_name_s = ?", task.TaskName, task.SchemaNameS, task.TableNameS).First(&dataS).Error if err != nil { + if errors.Is(err, gorm.ErrRecordNotFound) { + return dataS, nil + } return nil, fmt.Errorf("get table [%s] record failed: %v", rw.TableName(ctx), err) } - return task, nil + return dataS, nil } func (rw *RWDataScanSummary) FindDataScanSummary(ctx context.Context, task *DataScanSummary) ([]*DataScanSummary, error) { diff --git a/model/task/task_iter.go b/model/task/task_iter.go index d837b17..e4fe1a4 100644 --- a/model/task/task_iter.go +++ b/model/task/task_iter.go @@ -106,9 +106,9 @@ type IDataMigrateTask interface { BatchUpdateDataMigrateTask(ctx context.Context, task *DataMigrateTask, updates map[string]interface{}) (*DataMigrateTask, error) GetDataMigrateTask(ctx context.Context, task *DataMigrateTask) (*DataMigrateTask, error) FindDataMigrateTask(ctx context.Context, task *DataMigrateTask) ([]*DataMigrateTask, error) + FindDataMigrateTaskTableStatus(ctx context.Context, taskName, schemaName, tableName string, taskStatus []string) ([]*DataMigrateTask, error) QueryDataMigrateTask(ctx context.Context, task *DataMigrateTask) ([]*DataMigrateTask, error) FindDataMigrateTaskBySchemaTableChunkStatus(ctx context.Context, task *DataMigrateTask) ([]*DataGroupTaskStatusResult, error) - FindDataMigrateTaskGroupByTaskSchemaTable(ctx context.Context, taskName string) ([]*DataGroupChunkResult, error) FindDataMigrateTaskGroupByTaskSchemaTableStatus(ctx context.Context, taskName string) ([]*DataGroupTaskStatusResult, error) FindDataMigrateTaskGroupByTaskStatus(ctx context.Context, taskName string) ([]*DataGroupStatusResult, error) ListDataMigrateTask(ctx context.Context, page uint64, pageSize uint64) ([]*DataMigrateTask, error) diff --git a/openapi/apiserver.types.go b/openapi/apiserver.types.go index ed3e422..615ace6 100644 --- a/openapi/apiserver.types.go +++ b/openapi/apiserver.types.go @@ -243,6 +243,7 @@ type SchemaStructRule struct { type SqlMigrateParam struct { BatchSize *uint64 `json:"batchSize,omitempty"` CallTimeout *uint64 `json:"callTimeout,omitempty"` + EnableCheckpoint *bool `json:"enableCheckpoint,omitempty"` EnableConsistentRead *bool `json:"enableConsistentRead,omitempty"` EnableSafeMode *bool `json:"enableSafeMode,omitempty"` SqlHintT *string `json:"sqlHintT,omitempty"` @@ -486,51 +487,51 @@ type APIPutStructMigrateJSONRequestBody = StructMigrateTask // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+xdW3PbutX9Kxx+36Mq2U562qOnJnYyJzMnqWqqfTmTB4jcFnFMEjQAWnY9+u8dgJRI", - "8ApQl5ATvSXS3rjsvbAWboLfbJeEMYkg4syev9nM9SFE8p8fGAPGvuI1RRwWiKJQfBpTEgPlGKSNi4Jg", - "iUMgCRf/fSA0RNye2wmO+C/v7YnNX2Ow5zaOOKyB2tuJ7SIGnzEE3n0SgCO8MhvGKY7WwiRtxDcU1n6/", + "H4sIAAAAAAAC/+xdW3PbutX9Kxx+36Mq2U562qOnJnYyJzMnqWqqfTmTB4jcFnFMEgwAWnY9+u8dgJRI", + 
"8ApQF5MTvSXS3rjsvbAWboJfbZeEMYkg4syev9rM9SFE8p8fGAPGvuI1RRwWiKJQfBpTEgPlGKSNi4Jg", + "iUMgCRf/fSA0RNye2wmO+C/v7YnNX2Kw5zaOOKyB2tuJ7SIGnzEE3n0SgCO8MhvGKY7WwiRtxDcU1n6/", "3ZdKVn+Cy4WH0tYlYo/VpqLa7vw/hQd7bv/fLA/DLIvBrCYAov0kDCFSu5s1bWJHSRCgVQD2nNMEJtWu", - "eYgjRhLqwr571VI6vJaaXhyxR2GvZV4X1dtiquqSX86kRqMUp2X/ppEgCSOH08TlDa2TFiZRTj2Wr3GT", - "R2d28xKWffEBDygJ+H9QkPRuRbGMvu0oDUEttK0CA1DXJpU9t9PNCnHXd/B/QZ9szPnJT6JHkzrE4Lz1", + "eYgjRhLqwr571VI6vJaaXhyxR2GvZV4X1dtiquqSX86kRqMUp2X/ppEgCSOH08TlDa2TFiZRTj2WL3GT", + "R2d28xKWffEBDygJ+H9QkPRuRbGMvu0oDUEttK0CA1DXJpU9tdPNCnHXd/B/QZ9szPnJT6JHkzrE4Lz1", "EWXAdUemBwEOMQeqa4/Z478ZWsNn5HKi6wWRSMmtD+5jTHBKWZnRipAAUFSwIhHDjEPE7wF5DZbMRTF8", - "RO4jCxDz6418QF7ar+p3AnUSlZodIAmPE36HdTvMIEYU6QeIPQW/4Yg7+uZLnwLyHF1oyDGR+mi7AA1x", - "ZNCJDcXcqI72kVcvnm5ZCdp0U5WNgyXTrbJCa+0l82yEZp+JJskeYQ4h6yrrTnXcYbimuYhS9HpugU+b", - "eU+SrHkdvXFK5seYIogI3ZIwRnRUnO0qdLcgOxaoUfSK4bLW0MNMwOKr91dJuCwJW+lWk5SDAHFMIgc4", - "F/UcSOB4HREqLD0sSpVjVB0MVVUvwZtEwWuW73uyqa+GQowwdXjIPwepzUm4+LddLkyo+4TMfRQiLoyn", - "pmmt/FJNYgN0Vct7FK3B0Tetx/kRMJQW4UAALu/jf1qoqPNYMwocnnh6NfzcpXiKvVqGuXwWwXyRzzrs", - "FCcYFezs5ELoGxdm69d60jUcFIartYm98YGC5IWDOuu4KBrTROHYa6himo61wHBQGAcgwOGMQd4EBOrB", - "3hqcVl7uG4m2Ng6TzJUB1EW+uXHB25zD9xm7EHgdWFaI1YDZJ4xr9gNHmJsNxhgxtiHU06wgJlSbItNo", - "6WYgIBvRcuaTQLvxCQMaHRr0FC218+ND9hpIFIHLs+087X1j6SQHGjusYocjnrCjnC3ojrxVs3F3jaul", - "/EirJoMBYQrvgzphNDiAPuO2+HZvrANjmEQHgeXgAfQNNm1jyFO+05aJzKWyYqtrwT08JcD4HQTQuNsf", - "7z7et0BnYqtb9YIw3ljxGvSZeA0mE8p9p3ol7h5YTCJWy3ue0YmF7hkCpZo70XLj5ynBFDx7/kfanqyq", - "XTHfa3rkVDW8tAJ6cYPEg6Wc35mioWu2gqMTFm5+hpZ7GO1W7KOnP61bKn7dvdk2pq77DPZyotoHDbUR", - "fwp+/AGpwYFhlC7KHuBrxk6NC+Llyc7g9h7Ls65081RpXJ5YGt1QUIc78tKdWBQslAr6E1jeCXM+Mk/n", - "vxKgr8bbVssjjKFhLvDPtniucknr4rlkrpRgpj2loaGhpAcv08W6CkRiRni/5OjXOPRZ+dSncSck8R+1", - "w+nwkLfyy90ZLyP8NFx28EYgayKI1nJqnY7DV2JS3X6xog/3ZMeURsNCh386+9C2PshXEPqDoXL/U3s9", - "1qe2ykpHS7MEy/aobKk66upjv6qKfj1XgcU0/+RTqiPQUN24b+egike5HK22VByOR2PH/wkBBcThy8M3", - "wj+9YMYPv2l1hym4/FaWW28XZpp+8PSgEJGflBQ/cE5ZD07c+13otwFQF/o9Bv2aTQErHuVy9Om3NLs/", - "mH5Lo6Z636LfjzxkiX23Qfc7OOzI++2NAWg5Wzj3fprxrasj7HeV+eyyST+0nz3Vsza8xBSYLvSEZzZ3", - "OccvA0vKdcFUYxnV6G3lueMDSeMUceRKcYUQ4UDQWYw5oPAfbIPWa6BTTEQlMlm2k35mfVh8sZaAQnti", - "J1Q4+ZzHbD6bFZxk65lLcSzYzJ7bHywmr8dJb+4jbiUMmIUsbxUyCzELRRa8pCacWB6EJGLyzqn1AIgn", - "FJiFI4v7YP0zhkiU8m56ZbEYXPyAXflDBXtiB9iF7IQ6a/WHGLk+WDfTq0p7N5vNFMmvp4SuZ5kvm/3+", - "5fbTN+fTX26mV1Ofh4EELeZBMQh3H7869sR+BsrSDl5Pr6ZX2WCIUIztuf1OfjSxY8R9CcyZV7iw5cnb", - "B+m/ipFKbyVYO1Mru5yDSWSJxAkEpH1NRx0m0RdP9HPxJfXcXwqb2DQ7sJeV31y93yU9m1GhOA6y0M3+", - "ZKLyt8IFrLYZw/4mgMRTbftRZw9yhJ+lXUkELzG4HDwrvRAgRwdaM3v+h71PzPftxF6nl69U998x45Uu", - "dWVEODXn4+os/Y7RGiw5TbHIQ96DQPSHJa4LjFl5wwablDipScoi4X0GyiJRsyKv5Hwk3uvR+rwvfqte", - "TREsvf1BQMgDJXcaKskfau63k5Q480thGtSZGvciz6ye0wCj5uqZFkTenw0iWeTSGA8fJFm2JEVkdzwb", - "iFuFhC51nwMN+W1ALSxc/yDdyAI4EuUoAqNNO4yZIlOPkwJDvSM7MBHZRWwkMrIHghASvlvu1nKF+NQS", - "JtZuSVuXf8K4XDWfJvWy6CHxwC4mg0qsbFCe0pnywJLGHCG1t7KDFYsmAejNEJR3mX7KSYKM1RimByom", - "OmYIJUBIEtCYIpwRDiOYJdQFcQTThRqgNM0YesBkkZwFJdVX6AY0bagL2xjmD2Vk7PUmfxdIQ2xc9txD", - "afK3hC4yM2SMFKDQoTFFHOgKzLlQMAJ1qYRvBNJSBkeTrphCY5GcHhmld9kGJCeVaI1BSxQo7IWk8L6N", - "5tamlV2kNZCSwls4Fy0Z+n7FLlEaW5p7KOiqydmAMJItTTWAI9nZVADStrVpBI9sW/O06Cg/VzawnU01", - "YGPZ4MzxoKiK46JIV1KYiyJDPZHFX8Rk4NiQWdJREokAExk5ff7HoiF56EYiIDkoWtVDGxKZdJwQEcqz", - "aEMTjTxOY1GMDAB7uch/TK0hGOwp6LGblf/u+iIaQ4ZHAQodslHEga5wnAsFI5COSvhGIB5lcDTJhyk0", - "FsnpkVF6emNAIlKJ1hhkRIFCLiT5AwQ6SrL7CXsfPSnUdBGUIQOlkKguRangQVtXzoWGMQhLfRTHIC9l", - 
"qDTqSy+gCJU5OU7KT7AMSWfqozYKtVGQUZCbwsMBWoIj7HscoCgvFFz0ZthQKaaqU3EUQOjLzdngMArB", - "qQZxFGpTAUqz3hjDRIrN6VFSfQBnUIJTDds41EZFRklvTBY4MgB9VjfFii56M3iwaK9wFECY6c1lgaPo", - "zfhWNxWgdOiN8eLm9CipvvgzPL0Z4epGRcY2+2MPQMXXZYAsfbDSb62EBsqrBvPZ7M0njG/nbzGhfPu2", - "QgwWiPtbe2I/I4rRKtg9Npt+keIvi4M9QzGePV/Pin8xI//2+teb6fUvf5/evHs3fX9d/OsVuc3Nu7/9", - "Kvr/ffu/AAAA//+ICR7f3X0AAA==", + "RO4jCxDz6418QF7ar+p3AnUSlZodIAmPE36HdTvMIEYU6QeI/Qh+wxF39M2XPgXkObrQkGMi9dF2ARri", + "yKATG4q5UR3tI69ePN2yErTppiobB0umW2WF1tpL5tkIzT4TTZI9whxC1lXWneq4w3BNcxGl6OXcAp82", + "854kWfM6euOUzI8xRRARuiVhjOioONtV6G5BdixQo+gVw2WtoYeZgMVX76+ScFkSttKtJikHAeKYRA5w", + "Luo5kMDxOiJUWHpYlCrHqDoYqqpegjeJgpcs3/dkU18NhRhh6vCQfw5Sm5Nw8W+7XJhQ9wmZ+yhEXBhP", + "TdNa+aWaxAboqpb3KFqDo29aj/MjYCgtwoEAXN7H/7RQUeexZhQ4PPH0avi5S/EUe7UMc/ksgvkin3XY", + "KU4wKtjZyYXQNy7M1i/1pGs4KAxXaxN74wMFyQsHddZxUTSmicKx11DFNB1rgeGgMA5AgMMZg7wJCNSD", + "vTU4rbzcNxJtbRwmmSsDqIt8c+OCtzmH7zN2IfA6sKwQqwGzTxjX7AeOMDcbjDFibEOop1lBTKg2RabR", + "0s1AQDai5cwngXbjEwY0OjToKVpq58eH7DWQKAKXZ9t52vvG0kkONHZYxQ5HPGFHOVvQHXmrZuPuGldL", + "+ZFWTQYDwhTeB3XCaHAAfcJt8e3eWAfGMIkOAsvBA+gbbNrGkKd8py0TmUtlxVbXgnv4kQDjdxBA425/", + "vPt43wKdia1u1QvCeGPFa9Bn4jWYTCj3neqVuHtgMYlYLe95RicWumcIlGruRMuNnx8JpuDZ8z/S9mRV", + "7Yr5XtMjp6rhpRXQsxskHizl/M4UDV2zFRydsHDzM7Tcw2i3Yh89/WndUvHr7s22MXXdZ7CXE9U+aKiN", + "+I/g7Q9Ij36sGKVLtwf4mnFY47J5ebKTur3H8qzr4TyhGlcslkb3GFRSQF66X4uChVJBf5rLO2HOWubp", + "/FcC9MV4c2t5hJE2zG2Asy2xq4zTusQumSslmClUaWho6O3Bi3mx+gKRmBHeQnlDVj71md0JSfyt9kEd", + "HvJWfrk745WFn4bLDt4uZE0E0VpOrdNx+EpMvduvX/Thnuww02hY6PBPZx/aVhH5OkN/MFRuiWqv2vrU", + "VlkPaWmWYNkelS1VR1197FdV0a/nWrGY5p98SnUEGqob9+0cVPEol6PVlorD8Wjs+D80oIA4fHn4Rvin", + "Z8z44fex7jAFl9/KcuvtwkzTD54eFCLyk5LiB84p68GJe78L/TYA6kK/x6BfsylgxaNcjj79lmb3B9Nv", + "adRUb2X0+ymILLHvZul+B4cdeVe+MQAtJxDn3k8zvpt1hP2uMp9dtvKH9uOoetaG55gC04We8MzmLuf4", + "/WBJuS6YaiyjGr2tPJ18IGmcIo5cKa4QIhwIOosxBxT+g23Qeg10iomoRCbLdtLPrA+LL9YSUGhP7IQK", + "J5/zmM1ns4KTbD1zKY4Fm9lz+4PF5CU66c19xK2EAbOQ5a1CZiFmociC59SEE8uDkERM3ky1HgDxhAKz", + "cGRxH6x/xhCJUt5NrywWg4sfsCt/zmBP7AC7kJ1jZ63+ECPXB+tmelVp72azmSL59ZTQ9SzzZbPfv9x+", + "+uZ8+svN9Grq8zCQoMU8KAbh7uNXx57YT0BZ2sHr6dX0KhsMEYqxPbffyY8mdoy4L4E58wrXujx5RyH9", + "VzFS6d0Fa2dqZVd4MIkskTiBgLSv6ajDJPriiX4uvqSe+6tjE5tmx/qy8pur97ukZzMqFMdBFrrZn0xU", + "/lq4ptU2Y9jfF5B4qm0/6uxBjvCztCuJ4DkGl4NnpdcG5OhAa2bP/7D3ifm+ndjr9IqW6v47ZrzSpa6M", + "CKfmfFydpd8xWoMlpykWech7EIj+sMR1gTErb9hgkxInNUlZJLzPQFkkalbkxZ2PxHs5Wp/3xW/VCyyC", + "pbdvBIQ8UHKnoZL8oeZ+O0mJM786pkGdqXEv8szqOQ0wai6oaUHk/dkgkkUujfHwQZJlS1JEdhO0gbhV", + "SOhS9znQkN8Z1MLC9RvpRhbAkShHERht2mHMFJl6nBQY6k3agYnILmIjkZE9EISQ8N1yt5YrxKeWMLF2", + "S9q6/BPG5ar5NKmXRQ+JB3YxGVRiZYPylM6UZ5g05gipvZUdrFg0CUBvhqC83vRTThJkrMYwPVAx0TFD", + "KAFCkoDGFOGMcBjBLKEuiCOYLtQApWnG0AMmi+QsKKm+VTegaUNd2MYwfygjY683+etBGmLjsqceSpO/", + "OHSRmSFjpACFDo0p4kBXYM6FghGoSyV8I5CWMjiadMUUGovk9Mgovd42IDmpRGsMWqJAYS8khVdwNLc2", + "rewirYGUFF7MuWjJ0PcrdonS2NLcQ0FXTc4GhJFsaaoBHMnOpgKQtq1NI3hk25qnRUf5UbOB7WyqARvL", + "BmeOB0VVHBdFupLCXBQZ6oks/iImA8eGzJKOkkgEmMjI6fM/Fg3JQzcSAclB0aoe2pDIpOOEiFAeTxua", + "aORxGotiZADYy0X+Y2oNwWA/gh67Wfnvri+iMWR4FKDQIRtFHOgKx7lQMALpqIRvBOJRBkeTfJhCY5Gc", + "HhmlpzcGJCKVaI1BRhQo5EKSP0CgoyS7n7D30ZNCTRdBGTJQConqUpQKHrR15VxoGIOw1EdxDPJShkqj", + "vvQCilCZk+Ok/ATLkHSmPmqjUBsFGQW5KTwcoCU4wr7HAYryQsFFb4YNlWKqOhVHAYS+3JwNDqMQnGoQ", + "R6E2FaA0640xTKTYnB4l1QdwBiU41bCNQ21UZJT0xmSBIwPQZ3VTrOiiN4MHi/YKRwGEmd5cFjiK3oxv", + "dVMBSofeGC9uTo+S6os/w9ObEa5uVGRssz8JAVR8XQbI0gcr/dZKaKC8ajCfzV59wvh2/hoTyrevK8Rg", + 
"gbi/tSf2E6IYrYLdY7PpFyn+sjjYMxTj2dP1rPh3NfJvr3+9mV7/8vfpzbt30/fXxb9xkdvcvPvbr6L/", + "37f/CwAA//9NbjN0A34AAA==", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/openapi/spec.yaml b/openapi/spec.yaml index 4550827..10657ea 100644 --- a/openapi/spec.yaml +++ b/openapi/spec.yaml @@ -1325,6 +1325,8 @@ components: type: boolean enableSafeMode: type: boolean + enableCheckpoint: + type: boolean StructMigrateParam: type: object properties: diff --git a/proto/dbms_master.proto b/proto/dbms_master.proto index 24f7efc..4bf17b4 100644 --- a/proto/dbms_master.proto +++ b/proto/dbms_master.proto @@ -708,6 +708,7 @@ message SqlMigrateParam { bool enableConsistentRead = 6; bool enableSafeMode = 7; uint64 writeThread = 8; + bool enableCheckpoint = 9; } message DataScanRule { diff --git a/proto/pb/dbms_master.pb.go b/proto/pb/dbms_master.pb.go index fad85cd..2f4547b 100644 --- a/proto/pb/dbms_master.pb.go +++ b/proto/pb/dbms_master.pb.go @@ -5455,6 +5455,7 @@ type SqlMigrateParam struct { EnableConsistentRead bool `protobuf:"varint,6,opt,name=enableConsistentRead,proto3" json:"enableConsistentRead,omitempty"` EnableSafeMode bool `protobuf:"varint,7,opt,name=enableSafeMode,proto3" json:"enableSafeMode,omitempty"` WriteThread uint64 `protobuf:"varint,8,opt,name=writeThread,proto3" json:"writeThread,omitempty"` + EnableCheckpoint bool `protobuf:"varint,9,opt,name=enableCheckpoint,proto3" json:"enableCheckpoint,omitempty"` } func (x *SqlMigrateParam) Reset() { @@ -5545,6 +5546,13 @@ func (x *SqlMigrateParam) GetWriteThread() uint64 { return 0 } +func (x *SqlMigrateParam) GetEnableCheckpoint() bool { + if x != nil { + return x.EnableCheckpoint + } + return false +} + type DataScanRule struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -6572,7 +6580,7 @@ var file_dbms_master_proto_rawDesc = []byte{ 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x22, 0xab, 0x02, 0x0a, 0x0f, 0x53, 0x71, 0x6c, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, + 0x38, 0x01, 0x22, 0xd7, 0x02, 0x0a, 0x0f, 0x53, 0x71, 0x6c, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x12, 0x1c, 0x0a, 0x09, 0x62, 0x61, 0x74, 0x63, 0x68, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x62, 0x61, 0x74, 0x63, 0x68, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x71, 0x6c, 0x54, 0x68, 0x72, 0x65, 0x61, @@ -6591,304 +6599,306 @@ var file_dbms_master_proto_rawDesc = []byte{ 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x61, 0x66, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65, 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65, 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, - 0x22, 0x76, 0x0a, 0x0c, 0x44, 0x61, 0x74, 0x61, 0x53, 0x63, 0x61, 0x6e, 0x52, 0x75, 0x6c, 0x65, - 0x12, 0x1e, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x53, - 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x71, 0x6c, 0x48, 0x69, 0x6e, 0x74, 0x53, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x73, 0x71, 0x6c, 0x48, 0x69, 0x6e, 0x74, 0x53, 0x12, 0x2a, 0x0a, 0x10, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x61, 0x6d, 0x70, 0x6c, 
0x65, 0x72, 0x61, 0x74, 0x65, 0x53, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x61, 0x6d, - 0x70, 0x6c, 0x65, 0x72, 0x61, 0x74, 0x65, 0x53, 0x22, 0xf9, 0x02, 0x0a, 0x0d, 0x44, 0x61, 0x74, - 0x61, 0x53, 0x63, 0x61, 0x6e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x12, 0x20, 0x0a, 0x0b, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, - 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, 0x12, 0x1c, 0x0a, 0x09, - 0x62, 0x61, 0x74, 0x63, 0x68, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, - 0x09, 0x62, 0x61, 0x74, 0x63, 0x68, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x68, - 0x75, 0x6e, 0x6b, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x63, - 0x68, 0x75, 0x6e, 0x6b, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x71, 0x6c, 0x54, - 0x68, 0x72, 0x65, 0x61, 0x64, 0x53, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x73, 0x71, - 0x6c, 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, 0x53, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x71, 0x6c, 0x48, - 0x69, 0x6e, 0x74, 0x53, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x71, 0x6c, 0x48, - 0x69, 0x6e, 0x74, 0x53, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x61, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, - 0x6f, 0x75, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x63, 0x61, 0x6c, 0x6c, 0x54, - 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x2a, 0x0a, 0x10, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x10, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, - 0x6e, 0x74, 0x12, 0x32, 0x0a, 0x14, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x73, - 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x61, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x14, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, - 0x6e, 0x74, 0x52, 0x65, 0x61, 0x64, 0x12, 0x2a, 0x0a, 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, - 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x61, 0x74, 0x65, 0x53, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, - 0x52, 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x61, 0x74, - 0x65, 0x53, 0x12, 0x20, 0x0a, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65, 0x54, 0x68, 0x72, 0x65, 0x61, - 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65, 0x54, 0x68, - 0x72, 0x65, 0x61, 0x64, 0x32, 0x87, 0x21, 0x0a, 0x06, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x12, - 0x71, 0x0a, 0x0e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, - 0x65, 0x12, 0x1c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, - 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x1d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x44, 0x61, - 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x22, - 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1c, 0x3a, 0x01, 0x2a, 0x1a, 0x17, 0x2f, 0x61, 0x70, 0x69, 0x73, - 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, - 0x73, 0x65, 0x12, 0x71, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, - 0x62, 0x61, 0x73, 0x65, 0x12, 0x1c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 
0x75, 0x65, - 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x22, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1c, 0x3a, 0x01, 0x2a, 0x2a, 0x17, 0x2f, 0x61, - 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x64, 0x61, 0x74, - 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x6b, 0x0a, 0x0c, 0x53, 0x68, 0x6f, 0x77, 0x44, 0x61, 0x74, - 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x68, - 0x6f, 0x77, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x1b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x44, 0x61, - 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x22, - 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1c, 0x3a, 0x01, 0x2a, 0x22, 0x17, 0x2f, 0x61, 0x70, 0x69, 0x73, - 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, - 0x73, 0x65, 0x12, 0x79, 0x0a, 0x10, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x44, 0x61, 0x74, 0x61, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, - 0x70, 0x73, 0x65, 0x72, 0x74, 0x44, 0x61, 0x74, 0x61, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, - 0x70, 0x73, 0x65, 0x72, 0x74, 0x44, 0x61, 0x74, 0x61, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x24, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x3a, - 0x01, 0x2a, 0x1a, 0x19, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, - 0x61, 0x31, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x79, 0x0a, - 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x12, 0x1e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x44, 0x61, 0x74, 0x61, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x1f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x44, 0x61, 0x74, 0x61, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x24, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x3a, 0x01, 0x2a, 0x2a, 0x19, 0x2f, - 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x64, 0x61, - 0x74, 0x61, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x73, 0x0a, 0x0e, 0x53, 0x68, 0x6f, 0x77, - 0x44, 0x61, 0x74, 0x61, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1c, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x44, 0x61, 0x74, 0x61, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x44, 0x61, 0x74, 0x61, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x24, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x3a, - 0x01, 0x2a, 0x22, 0x19, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, - 0x61, 0x31, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x95, 0x01, - 0x0a, 0x17, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x41, 0x73, 0x73, 0x65, 0x73, 0x73, 0x4d, 0x69, - 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x25, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 
0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x41, 0x73, 0x73, 0x65, 0x73, 0x73, 0x4d, 0x69, - 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x26, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x41, - 0x73, 0x73, 0x65, 0x73, 0x73, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, + 0x12, 0x2a, 0x0a, 0x10, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x22, 0x76, 0x0a, 0x0c, + 0x44, 0x61, 0x74, 0x61, 0x53, 0x63, 0x61, 0x6e, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x1e, 0x0a, 0x0a, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x53, 0x12, 0x1a, 0x0a, 0x08, + 0x73, 0x71, 0x6c, 0x48, 0x69, 0x6e, 0x74, 0x53, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x73, 0x71, 0x6c, 0x48, 0x69, 0x6e, 0x74, 0x53, 0x12, 0x2a, 0x0a, 0x10, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x61, 0x74, 0x65, 0x53, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x53, 0x22, 0xf9, 0x02, 0x0a, 0x0d, 0x44, 0x61, 0x74, 0x61, 0x53, 0x63, 0x61, + 0x6e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x12, 0x20, 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x54, + 0x68, 0x72, 0x65, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x62, 0x61, 0x74, 0x63, + 0x68, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x62, 0x61, 0x74, + 0x63, 0x68, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x53, + 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x63, 0x68, 0x75, 0x6e, 0x6b, + 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x71, 0x6c, 0x54, 0x68, 0x72, 0x65, 0x61, + 0x64, 0x53, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x73, 0x71, 0x6c, 0x54, 0x68, 0x72, + 0x65, 0x61, 0x64, 0x53, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x71, 0x6c, 0x48, 0x69, 0x6e, 0x74, 0x53, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x71, 0x6c, 0x48, 0x69, 0x6e, 0x74, 0x53, + 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x61, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x63, 0x61, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x12, 0x2a, 0x0a, 0x10, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x32, + 0x0a, 0x14, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x74, 0x52, 0x65, 0x61, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x65, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x65, + 0x61, 0x64, 0x12, 0x2a, 0x0a, 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x61, 0x6d, 0x70, 0x6c, + 0x65, 0x72, 0x61, 0x74, 0x65, 0x53, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x61, 0x74, 0x65, 0x53, 0x12, 0x20, + 0x0a, 0x0b, 0x77, 0x72, 0x69, 0x74, 
0x65, 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65, 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, + 0x32, 0x87, 0x21, 0x0a, 0x06, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x12, 0x71, 0x0a, 0x0e, 0x55, + 0x70, 0x73, 0x65, 0x72, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x1c, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x44, 0x61, 0x74, 0x61, + 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, + 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x22, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x1c, 0x3a, 0x01, 0x2a, 0x1a, 0x17, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x71, + 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, + 0x12, 0x1c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x44, + 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x44, 0x61, 0x74, + 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x22, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x1c, 0x3a, 0x01, 0x2a, 0x2a, 0x17, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, + 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, + 0x65, 0x12, 0x6b, 0x0a, 0x0c, 0x53, 0x68, 0x6f, 0x77, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, + 0x65, 0x12, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x44, 0x61, + 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, + 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x22, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x1c, 0x3a, 0x01, 0x2a, 0x22, 0x17, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x79, + 0x0a, 0x10, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x44, 0x61, 0x74, 0x61, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x12, 0x1e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, + 0x74, 0x44, 0x61, 0x74, 0x61, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, + 0x74, 0x44, 0x61, 0x74, 0x61, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x24, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x3a, 0x01, 0x2a, 0x1a, 0x19, + 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x64, + 0x61, 0x74, 0x61, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x79, 0x0a, 0x10, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 
0x6f, 0x6e, 0x73, 0x65, 0x22, 0x24, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x3a, 0x01, 0x2a, 0x2a, 0x19, 0x2f, 0x61, 0x70, 0x69, 0x73, + 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x12, 0x73, 0x0a, 0x0e, 0x53, 0x68, 0x6f, 0x77, 0x44, 0x61, 0x74, 0x61, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, + 0x68, 0x6f, 0x77, 0x44, 0x61, 0x74, 0x61, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x68, 0x6f, + 0x77, 0x44, 0x61, 0x74, 0x61, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x24, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x3a, 0x01, 0x2a, 0x22, 0x19, + 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x64, + 0x61, 0x74, 0x61, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x95, 0x01, 0x0a, 0x17, 0x55, 0x70, + 0x73, 0x65, 0x72, 0x74, 0x41, 0x73, 0x73, 0x65, 0x73, 0x73, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, + 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x25, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, + 0x73, 0x65, 0x72, 0x74, 0x41, 0x73, 0x73, 0x65, 0x73, 0x73, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, + 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x41, 0x73, 0x73, 0x65, 0x73, + 0x73, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x25, 0x3a, 0x01, 0x2a, 0x1a, + 0x20, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, + 0x61, 0x73, 0x73, 0x65, 0x73, 0x73, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, + 0x6b, 0x12, 0x95, 0x01, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x73, 0x73, 0x65, + 0x73, 0x73, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x25, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x73, 0x73, 0x65, + 0x73, 0x73, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x41, 0x73, 0x73, 0x65, 0x73, 0x73, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, + 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x25, 0x3a, 0x01, 0x2a, 0x2a, 0x20, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x61, 0x73, 0x73, 0x65, 0x73, 0x73, 0x4d, 0x69, + 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x8f, 0x01, 0x0a, 0x15, 0x53, 0x68, + 0x6f, 0x77, 0x41, 0x73, 0x73, 0x65, 0x73, 0x73, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, + 0x61, 0x73, 0x6b, 0x12, 0x23, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x68, 0x6f, 0x77, + 0x41, 0x73, 0x73, 0x65, 0x73, 0x73, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, + 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x41, 0x73, 0x73, 0x65, 0x73, 0x73, 0x4d, 0x69, 0x67, 0x72, 0x61, + 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x25, 0x3a, 0x01, 0x2a, 0x22, 0x20, 0x2f, 0x61, 0x70, 
0x69, 0x73, + 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x61, 0x73, 0x73, 0x65, 0x73, 0x73, + 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x95, 0x01, 0x0a, 0x17, + 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x4d, 0x69, 0x67, 0x72, + 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x25, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x4d, 0x69, 0x67, 0x72, + 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, + 0x75, 0x63, 0x74, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x25, 0x3a, 0x01, + 0x2a, 0x1a, 0x20, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x31, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, + 0x61, 0x73, 0x6b, 0x12, 0x95, 0x01, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x74, + 0x72, 0x75, 0x63, 0x74, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, + 0x25, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x74, + 0x72, 0x75, 0x63, 0x74, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x4d, 0x69, 0x67, 0x72, 0x61, + 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x25, 0x3a, 0x01, 0x2a, 0x2a, 0x20, 0x2f, 0x61, 0x70, 0x69, 0x73, + 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, + 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x8f, 0x01, 0x0a, 0x15, + 0x53, 0x68, 0x6f, 0x77, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, + 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x23, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x68, + 0x6f, 0x77, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, + 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x4d, 0x69, 0x67, + 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x2b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x25, 0x3a, 0x01, 0x2a, 0x22, 0x20, 0x2f, 0x61, 0x70, + 0x69, 0x73, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x73, 0x74, 0x72, 0x75, + 0x63, 0x74, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x95, 0x01, + 0x0a, 0x17, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x43, 0x6f, + 0x6d, 0x70, 0x61, 0x72, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x25, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x43, 0x6f, + 0x6d, 0x70, 0x61, 0x72, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x26, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x53, + 0x74, 0x72, 0x75, 0x63, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x25, 0x3a, 0x01, 0x2a, 0x1a, 0x20, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x31, 0x2f, 0x61, 0x73, 0x73, 0x65, 0x73, 0x73, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, + 0x68, 0x61, 0x31, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x95, 0x01, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x41, 0x73, 0x73, 0x65, 0x73, 0x73, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, + 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x25, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x41, 0x73, 0x73, 0x65, 0x73, 0x73, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, + 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x73, 0x73, 0x65, 0x73, 0x73, 0x4d, 0x69, 0x67, - 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x43, 0x6f, 0x6d, + 0x70, 0x61, 0x72, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x25, 0x3a, 0x01, 0x2a, 0x2a, 0x20, 0x2f, 0x61, 0x70, - 0x69, 0x73, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x61, 0x73, 0x73, 0x65, - 0x73, 0x73, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x8f, 0x01, - 0x0a, 0x15, 0x53, 0x68, 0x6f, 0x77, 0x41, 0x73, 0x73, 0x65, 0x73, 0x73, 0x4d, 0x69, 0x67, 0x72, - 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x23, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x53, 0x68, 0x6f, 0x77, 0x41, 0x73, 0x73, 0x65, 0x73, 0x73, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, + 0x69, 0x73, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x73, 0x74, 0x72, 0x75, + 0x63, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x8f, 0x01, + 0x0a, 0x15, 0x53, 0x68, 0x6f, 0x77, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x43, 0x6f, 0x6d, 0x70, + 0x61, 0x72, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x23, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x53, 0x68, 0x6f, 0x77, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x41, 0x73, 0x73, 0x65, 0x73, 0x73, 0x4d, - 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x43, + 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x25, 0x3a, 0x01, 0x2a, 0x22, 0x20, 0x2f, - 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x61, 0x73, - 0x73, 0x65, 0x73, 0x73, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, - 0x95, 0x01, 0x0a, 0x17, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, - 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x25, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x73, 0x65, 
0x72, 0x74, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, - 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, - 0x74, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, - 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b, 0x82, 0xd3, 0xe4, 0x93, - 0x02, 0x25, 0x3a, 0x01, 0x2a, 0x1a, 0x20, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x4d, 0x69, 0x67, 0x72, - 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x95, 0x01, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, - 0x61, 0x73, 0x6b, 0x12, 0x25, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, - 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x4d, - 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x2b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x25, 0x3a, 0x01, 0x2a, 0x2a, 0x20, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x73, 0x74, - 0x72, 0x75, 0x63, 0x74, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, - 0x8f, 0x01, 0x0a, 0x15, 0x53, 0x68, 0x6f, 0x77, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x4d, 0x69, + 0x72, 0x75, 0x63, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, + 0x8d, 0x01, 0x0a, 0x15, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x53, 0x74, 0x6d, 0x74, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x23, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x4d, 0x69, 0x67, 0x72, + 0x6f, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x53, 0x74, 0x6d, 0x74, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x53, 0x74, 0x72, 0x75, 0x63, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x53, 0x74, 0x6d, 0x74, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x25, 0x3a, 0x01, 0x2a, 0x22, - 0x20, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, - 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, - 0x6b, 0x12, 0x95, 0x01, 0x0a, 0x17, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x75, - 0x63, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x25, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x75, - 0x63, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x73, - 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, - 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 
0x2b, 0x82, 0xd3, - 0xe4, 0x93, 0x02, 0x25, 0x3a, 0x01, 0x2a, 0x1a, 0x20, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x43, 0x6f, - 0x6d, 0x70, 0x61, 0x72, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x95, 0x01, 0x0a, 0x17, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, - 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x25, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, - 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, - 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x25, 0x3a, 0x01, 0x2a, 0x2a, - 0x20, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, - 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x54, 0x61, 0x73, - 0x6b, 0x12, 0x8f, 0x01, 0x0a, 0x15, 0x53, 0x68, 0x6f, 0x77, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, - 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x23, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x43, 0x6f, - 0x6d, 0x70, 0x61, 0x72, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x24, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x53, 0x74, 0x72, - 0x75, 0x63, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x25, 0x3a, 0x01, - 0x2a, 0x22, 0x20, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, - 0x31, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x54, - 0x61, 0x73, 0x6b, 0x12, 0x8d, 0x01, 0x0a, 0x15, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x53, 0x74, - 0x6d, 0x74, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x23, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x53, 0x74, 0x6d, 0x74, - 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, - 0x74, 0x53, 0x74, 0x6d, 0x74, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, - 0x3a, 0x01, 0x2a, 0x1a, 0x1e, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x31, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, - 0x61, 0x73, 0x6b, 0x12, 0x8d, 0x01, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x74, - 0x6d, 0x74, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x23, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x74, 0x6d, 0x74, - 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x53, 0x74, 0x6d, 0x74, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, - 0x52, 
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, - 0x3a, 0x01, 0x2a, 0x2a, 0x1e, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x31, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, - 0x61, 0x73, 0x6b, 0x12, 0x87, 0x01, 0x0a, 0x13, 0x53, 0x68, 0x6f, 0x77, 0x53, 0x74, 0x6d, 0x74, - 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x21, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x53, 0x74, 0x6d, 0x74, 0x4d, 0x69, 0x67, 0x72, - 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x53, 0x74, 0x6d, 0x74, 0x4d, - 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x3a, 0x01, 0x2a, 0x22, 0x1e, 0x2f, - 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x64, 0x61, - 0x74, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x89, 0x01, - 0x0a, 0x14, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x53, 0x71, 0x6c, 0x4d, 0x69, 0x67, 0x72, 0x61, - 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x22, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x3a, 0x01, 0x2a, 0x1a, + 0x1e, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, + 0x64, 0x61, 0x74, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, + 0x8d, 0x01, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x74, 0x6d, 0x74, 0x4d, 0x69, + 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x23, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x74, 0x6d, 0x74, 0x4d, 0x69, 0x67, 0x72, + 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x74, 0x6d, + 0x74, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x3a, 0x01, 0x2a, 0x2a, + 0x1e, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, + 0x64, 0x61, 0x74, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, + 0x87, 0x01, 0x0a, 0x13, 0x53, 0x68, 0x6f, 0x77, 0x53, 0x74, 0x6d, 0x74, 0x4d, 0x69, 0x67, 0x72, + 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x21, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x53, 0x68, 0x6f, 0x77, 0x53, 0x74, 0x6d, 0x74, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, + 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x53, 0x74, 0x6d, 0x74, 0x4d, 0x69, 0x67, 0x72, 0x61, + 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x29, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x3a, 0x01, 0x2a, 0x22, 0x1e, 0x2f, 0x61, 0x70, 0x69, 0x73, + 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x69, + 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x89, 0x01, 0x0a, 0x14, 0x55, 0x70, + 0x73, 0x65, 0x72, 0x74, 0x53, 0x71, 0x6c, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, + 0x73, 0x6b, 0x12, 0x22, 0x2e, 
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, + 0x74, 0x53, 0x71, 0x6c, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x53, 0x71, 0x6c, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, - 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x53, 0x71, 0x6c, 0x4d, 0x69, 0x67, 0x72, - 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x28, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x3a, 0x01, 0x2a, 0x1a, 0x1d, 0x2f, 0x61, 0x70, 0x69, - 0x73, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x73, 0x71, 0x6c, 0x4d, 0x69, - 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x89, 0x01, 0x0a, 0x14, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x53, 0x71, 0x6c, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, - 0x73, 0x6b, 0x12, 0x22, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x53, 0x71, 0x6c, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x71, 0x6c, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x28, 0x82, 0xd3, 0xe4, - 0x93, 0x02, 0x22, 0x3a, 0x01, 0x2a, 0x2a, 0x1d, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31, + 0x93, 0x02, 0x22, 0x3a, 0x01, 0x2a, 0x1a, 0x1d, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x73, 0x71, 0x6c, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, - 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x83, 0x01, 0x0a, 0x12, 0x53, 0x68, 0x6f, 0x77, 0x53, 0x71, - 0x6c, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x20, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x53, 0x71, 0x6c, 0x4d, 0x69, 0x67, 0x72, - 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x53, 0x71, 0x6c, 0x4d, 0x69, - 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x28, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x3a, 0x01, 0x2a, 0x22, 0x1d, 0x2f, 0x61, - 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x73, 0x71, 0x6c, - 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x89, 0x01, 0x0a, 0x14, - 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x43, 0x73, 0x76, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, - 0x54, 0x61, 0x73, 0x6b, 0x12, 0x22, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x73, - 0x65, 0x72, 0x74, 0x43, 0x73, 0x76, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, - 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x43, 0x73, 0x76, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, + 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x89, 0x01, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x53, 0x71, 0x6c, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x22, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x71, 0x6c, + 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 
0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x53, 0x71, 0x6c, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x28, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x3a, + 0x01, 0x2a, 0x2a, 0x1d, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x31, 0x2f, 0x73, 0x71, 0x6c, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, + 0x6b, 0x12, 0x83, 0x01, 0x0a, 0x12, 0x53, 0x68, 0x6f, 0x77, 0x53, 0x71, 0x6c, 0x4d, 0x69, 0x67, + 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x20, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x53, 0x71, 0x6c, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, + 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x53, 0x71, 0x6c, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x28, 0x82, - 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x3a, 0x01, 0x2a, 0x1a, 0x1d, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, - 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x63, 0x73, 0x76, 0x4d, 0x69, 0x67, 0x72, - 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x89, 0x01, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x43, 0x73, 0x76, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, - 0x12, 0x22, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, + 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x3a, 0x01, 0x2a, 0x22, 0x1d, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, + 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x73, 0x71, 0x6c, 0x4d, 0x69, 0x67, 0x72, + 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x89, 0x01, 0x0a, 0x14, 0x55, 0x70, 0x73, 0x65, + 0x72, 0x74, 0x43, 0x73, 0x76, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, + 0x12, 0x22, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x43, 0x73, 0x76, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x43, 0x73, 0x76, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x73, + 0x65, 0x72, 0x74, 0x43, 0x73, 0x76, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x28, 0x82, 0xd3, 0xe4, 0x93, 0x02, - 0x22, 0x3a, 0x01, 0x2a, 0x2a, 0x1d, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31, 0x61, 0x6c, + 0x22, 0x3a, 0x01, 0x2a, 0x1a, 0x1d, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x63, 0x73, 0x76, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, - 0x61, 0x73, 0x6b, 0x12, 0x83, 0x01, 0x0a, 0x12, 0x53, 0x68, 0x6f, 0x77, 0x43, 0x73, 0x76, 0x4d, - 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x20, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x43, 0x73, 0x76, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, - 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x43, 0x73, 0x76, 0x4d, 0x69, 0x67, 0x72, - 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 
0x22, - 0x28, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x3a, 0x01, 0x2a, 0x22, 0x1d, 0x2f, 0x61, 0x70, 0x69, - 0x73, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x63, 0x73, 0x76, 0x4d, 0x69, - 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x8d, 0x01, 0x0a, 0x15, 0x55, 0x70, - 0x73, 0x65, 0x72, 0x74, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x54, - 0x61, 0x73, 0x6b, 0x12, 0x23, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x73, 0x65, - 0x72, 0x74, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x54, 0x61, 0x73, - 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6d, 0x70, 0x61, - 0x72, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x29, - 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x3a, 0x01, 0x2a, 0x1a, 0x1e, 0x2f, 0x61, 0x70, 0x69, 0x73, - 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x43, 0x6f, - 0x6d, 0x70, 0x61, 0x72, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x8d, 0x01, 0x0a, 0x15, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x54, - 0x61, 0x73, 0x6b, 0x12, 0x23, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x54, 0x61, 0x73, - 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6d, 0x70, 0x61, - 0x72, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x29, - 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x3a, 0x01, 0x2a, 0x2a, 0x1e, 0x2f, 0x61, 0x70, 0x69, 0x73, - 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x43, 0x6f, - 0x6d, 0x70, 0x61, 0x72, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x87, 0x01, 0x0a, 0x13, 0x53, 0x68, - 0x6f, 0x77, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x54, 0x61, 0x73, - 0x6b, 0x12, 0x21, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x44, 0x61, + 0x61, 0x73, 0x6b, 0x12, 0x89, 0x01, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x73, + 0x76, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x22, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x73, 0x76, 0x4d, 0x69, + 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x23, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, + 0x73, 0x76, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x28, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x3a, 0x01, 0x2a, + 0x2a, 0x1d, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, + 0x2f, 0x63, 0x73, 0x76, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, + 0x83, 0x01, 0x0a, 0x12, 0x53, 0x68, 0x6f, 0x77, 0x43, 0x73, 0x76, 0x4d, 0x69, 0x67, 0x72, 0x61, + 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x20, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, + 0x68, 0x6f, 0x77, 0x43, 0x73, 0x76, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, + 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x53, 0x68, 
0x6f, 0x77, 0x43, 0x73, 0x76, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x54, + 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x28, 0x82, 0xd3, 0xe4, + 0x93, 0x02, 0x22, 0x3a, 0x01, 0x2a, 0x22, 0x1d, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x63, 0x73, 0x76, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, + 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x8d, 0x01, 0x0a, 0x15, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, + 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, + 0x23, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x68, 0x6f, - 0x77, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x54, 0x61, 0x73, 0x6b, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, - 0x3a, 0x01, 0x2a, 0x22, 0x1e, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x31, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x54, - 0x61, 0x73, 0x6b, 0x12, 0x81, 0x01, 0x0a, 0x12, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x44, 0x61, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x73, + 0x65, 0x72, 0x74, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x54, 0x61, + 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x23, 0x3a, 0x01, 0x2a, 0x1a, 0x1e, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, + 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x8d, 0x01, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, + 0x23, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x44, 0x61, + 0x74, 0x61, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x54, 0x61, + 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x23, 0x3a, 0x01, 0x2a, 0x2a, 0x1e, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, + 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x87, 0x01, 0x0a, 0x13, 0x53, 0x68, 0x6f, 0x77, 0x44, 0x61, + 0x74, 0x61, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x21, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, + 0x6d, 0x70, 0x61, 0x72, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x22, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x44, 0x61, 0x74, + 0x61, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x3a, 0x01, 0x2a, 0x22, + 0x1e, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, + 0x64, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6d, 
0x70, 0x61, 0x72, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, + 0x81, 0x01, 0x0a, 0x12, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x44, 0x61, 0x74, 0x61, 0x53, 0x63, + 0x61, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x20, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, + 0x70, 0x73, 0x65, 0x72, 0x74, 0x44, 0x61, 0x74, 0x61, 0x53, 0x63, 0x61, 0x6e, 0x54, 0x61, 0x73, + 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x44, 0x61, 0x74, 0x61, 0x53, 0x63, 0x61, 0x6e, 0x54, + 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x82, 0xd3, 0xe4, + 0x93, 0x02, 0x20, 0x3a, 0x01, 0x2a, 0x1a, 0x1b, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x53, 0x63, 0x61, 0x6e, 0x54, + 0x61, 0x73, 0x6b, 0x12, 0x81, 0x01, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x63, 0x61, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x20, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x44, 0x61, 0x74, 0x61, 0x53, 0x63, 0x61, + 0x74, 0x6f, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x63, 0x61, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x44, 0x61, 0x74, 0x61, 0x53, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x63, 0x61, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x26, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x3a, 0x01, 0x2a, 0x1a, 0x1b, 0x2f, 0x61, 0x70, 0x69, + 0x26, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x3a, 0x01, 0x2a, 0x2a, 0x1b, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x53, - 0x63, 0x61, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x81, 0x01, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x63, 0x61, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x20, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x44, 0x61, 0x74, - 0x61, 0x53, 0x63, 0x61, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x21, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x44, - 0x61, 0x74, 0x61, 0x53, 0x63, 0x61, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x26, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x3a, 0x01, 0x2a, 0x2a, 0x1b, - 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x64, - 0x61, 0x74, 0x61, 0x53, 0x63, 0x61, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x7b, 0x0a, 0x10, 0x53, - 0x68, 0x6f, 0x77, 0x44, 0x61, 0x74, 0x61, 0x53, 0x63, 0x61, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x12, - 0x1e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x44, 0x61, 0x74, 0x61, - 0x53, 0x63, 0x61, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x1f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x44, 0x61, 0x74, 0x61, - 0x53, 0x63, 0x61, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x26, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x3a, 0x01, 0x2a, 0x22, 0x1b, 0x2f, 0x61, 0x70, - 0x69, 0x73, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x64, 0x61, 0x74, 0x61, - 0x53, 0x63, 0x61, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x64, 0x0a, 0x0b, 
0x4f, 0x70, 0x65, 0x72, - 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x19, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, - 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1e, - 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x3a, 0x01, 0x2a, 0x1a, 0x13, 0x2f, 0x61, 0x70, 0x69, 0x73, - 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x42, 0x0a, - 0x5a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, + 0x63, 0x61, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x7b, 0x0a, 0x10, 0x53, 0x68, 0x6f, 0x77, 0x44, + 0x61, 0x74, 0x61, 0x53, 0x63, 0x61, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x1e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x44, 0x61, 0x74, 0x61, 0x53, 0x63, 0x61, 0x6e, + 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x68, 0x6f, 0x77, 0x44, 0x61, 0x74, 0x61, 0x53, 0x63, 0x61, 0x6e, + 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x20, 0x3a, 0x01, 0x2a, 0x22, 0x1b, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x53, 0x63, 0x61, 0x6e, + 0x54, 0x61, 0x73, 0x6b, 0x12, 0x64, 0x0a, 0x0b, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x65, 0x54, + 0x61, 0x73, 0x6b, 0x12, 0x19, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4f, 0x70, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x65, 0x54, 0x61, + 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1e, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x18, 0x3a, 0x01, 0x2a, 0x1a, 0x13, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x42, 0x0a, 0x5a, 0x08, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/service/csv_migrate.go b/service/csv_migrate.go index 649dc3d..c409eda 100644 --- a/service/csv_migrate.go +++ b/service/csv_migrate.go @@ -20,7 +20,7 @@ import ( "encoding/json" "fmt" "github.com/fatih/color" - "github.com/wentaojin/dbms/database/oracle/taskflow" + "github.com/wentaojin/dbms/database/taskflow" "github.com/wentaojin/dbms/proto/pb" "github.com/wentaojin/dbms/utils/etcdutil" clientv3 "go.etcd.io/etcd/client/v3" @@ -375,8 +375,6 @@ func StartCsvMigrateTask(ctx context.Context, taskName, workerAddr string) error } if strings.EqualFold(sourceDatasource.DbType, constant.DatabaseTypeOracle) { - logger.Info("csv migrate task process task", zap.String("task_name", taskInfo.TaskName), zap.String("task_mode", taskInfo.TaskMode), zap.String("task_flow", taskInfo.TaskFlow)) - taskTime := time.Now() dm := &taskflow.CsvMigrateTask{ Ctx: ctx, Task: taskInfo, @@ -388,9 +386,6 @@ func StartCsvMigrateTask(ctx context.Context, taskName, workerAddr string) error if err != nil { return err } - logger.Info("csv migrate task process task", - zap.String("task_name", taskInfo.TaskName), zap.String("task_mode", taskInfo.TaskMode), zap.String("task_flow", taskInfo.TaskFlow), - zap.String("cost", time.Now().Sub(taskTime).String())) } else { 
return fmt.Errorf("current csv migrate task [%s] datasource [%s] source [%s] isn't support, please contact auhtor or reselect", taskName, sourceDatasource.DatasourceName, sourceDatasource.DbType) } diff --git a/service/data_compare.go b/service/data_compare.go index c7a51cd..8c1796f 100644 --- a/service/data_compare.go +++ b/service/data_compare.go @@ -563,7 +563,7 @@ func StopDataCompareTask(ctx context.Context, taskName string) error { return nil } -func GenDataCompareTask(ctx context.Context, serverAddr, taskName, outputDir string) error { +func GenDataCompareTask(ctx context.Context, serverAddr, taskName, outputDir string, force bool) error { etcdClient, err := etcdutil.CreateClient(ctx, []string{stringutil.WithHostPort(serverAddr)}, nil) if err != nil { return err @@ -595,9 +595,11 @@ func GenDataCompareTask(ctx context.Context, serverAddr, taskName, outputDir str return err } - if !strings.EqualFold(taskInfo.TaskStatus, constant.TaskDatabaseStatusSuccess) { - return fmt.Errorf("the [%v] task [%v] is status [%v] in the worker [%v], please waiting success and retry", stringutil.StringLower(taskInfo.TaskMode), - taskInfo.TaskName, stringutil.StringLower(taskInfo.TaskStatus), taskInfo.WorkerAddr) + if !force { + if !strings.EqualFold(taskInfo.TaskStatus, constant.TaskDatabaseStatusSuccess) { + return fmt.Errorf("the [%v] task [%v] is status [%v] in the worker [%v], please waiting success and retry", stringutil.StringLower(taskInfo.TaskMode), + taskInfo.TaskName, stringutil.StringLower(taskInfo.TaskStatus), taskInfo.WorkerAddr) + } } var w database.IFileWriter diff --git a/service/data_scan.go b/service/data_scan.go index f4f6879..2dabe0c 100644 --- a/service/data_scan.go +++ b/service/data_scan.go @@ -21,8 +21,8 @@ import ( "fmt" "github.com/fatih/color" "github.com/wentaojin/dbms/database" - "github.com/wentaojin/dbms/database/oracle/taskflow" "github.com/wentaojin/dbms/database/processor" + "github.com/wentaojin/dbms/database/taskflow" "github.com/wentaojin/dbms/logger" "github.com/wentaojin/dbms/model" "github.com/wentaojin/dbms/model/common" diff --git a/service/sql_migrate.go b/service/sql_migrate.go index 9215752..7fb57cd 100644 --- a/service/sql_migrate.go +++ b/service/sql_migrate.go @@ -20,6 +20,7 @@ import ( "encoding/json" "fmt" "github.com/fatih/color" + "github.com/wentaojin/dbms/database/taskflow" "github.com/wentaojin/dbms/proto/pb" "github.com/wentaojin/dbms/utils/etcdutil" clientv3 "go.etcd.io/etcd/client/v3" @@ -27,7 +28,6 @@ import ( "strings" "time" - "github.com/wentaojin/dbms/database/oracle/taskflow" "github.com/wentaojin/dbms/logger" "go.uber.org/zap" @@ -234,6 +234,10 @@ func ShowSqlMigrateTask(ctx context.Context, req *pb.ShowSqlMigrateTaskRequest) if err != nil { return err } + enableCheckpoint, err := strconv.ParseBool(paramMap[constant.ParamNameSqlMigrateEnableCheckpoint]) + if err != nil { + return err + } param = &pb.SqlMigrateParam{ BatchSize: batchSize, @@ -244,6 +248,7 @@ func ShowSqlMigrateTask(ctx context.Context, req *pb.ShowSqlMigrateTaskRequest) CallTimeout: callTimeout, EnableConsistentRead: enableConsistentRead, EnableSafeMode: enableSafeMode, + EnableCheckpoint: enableCheckpoint, } sqlRouteRules, err := ShowSqlMigrateRule(txnCtx, taskInfo.TaskName) @@ -606,6 +611,13 @@ func getSqlMigrateTasKParams(ctx context.Context, taskName string) (*pb.SqlMigra } taskParam.EnableSafeMode = enableSafeMode } + if strings.EqualFold(p.ParamName, constant.ParamNameSqlMigrateEnableCheckpoint) { + enableCheckpoint, err := strconv.ParseBool(p.ParamValue) + if err != 
+				return taskParam, err
+			}
+			taskParam.EnableCheckpoint = enableCheckpoint
+		}
 	}
 	return taskParam, nil
 }
diff --git a/service/stmt_migrate.go b/service/stmt_migrate.go
index 81d01fc..29d6d03 100644
--- a/service/stmt_migrate.go
+++ b/service/stmt_migrate.go
@@ -20,13 +20,13 @@ import (
 	"encoding/json"
 	"fmt"
 	"github.com/fatih/color"
+	"github.com/wentaojin/dbms/database/taskflow"
 	"github.com/wentaojin/dbms/utils/etcdutil"
 	clientv3 "go.etcd.io/etcd/client/v3"
 	"strconv"
 	"strings"
 	"time"
 
-	"github.com/wentaojin/dbms/database/oracle/taskflow"
 	"github.com/wentaojin/dbms/logger"
 	"github.com/wentaojin/dbms/model"
 	"github.com/wentaojin/dbms/model/common"
diff --git a/service/struct_compare.go b/service/struct_compare.go
index 02db804..edcf66a 100644
--- a/service/struct_compare.go
+++ b/service/struct_compare.go
@@ -21,6 +21,7 @@ import (
 	"fmt"
 	"github.com/fatih/color"
 	"github.com/wentaojin/dbms/database/processor"
+	"github.com/wentaojin/dbms/database/taskflow"
 	"strconv"
 	"strings"
 	"time"
@@ -36,8 +37,6 @@ import (
 	"github.com/wentaojin/dbms/database"
 	"github.com/wentaojin/dbms/logger"
 
-	"github.com/wentaojin/dbms/database/oracle/taskflow"
-
 	"github.com/wentaojin/dbms/model/datasource"
 	"github.com/wentaojin/dbms/model/migrate"
diff --git a/service/task.go b/service/task.go
index 93f4737..79f6bd7 100644
--- a/service/task.go
+++ b/service/task.go
@@ -381,7 +381,6 @@ type List struct {
 	DatasourceNameS string `json:"datasourceNameS"`
 	DatasourceNameT string `json:"datasourceNameT"`
 	TaskStatus      string `json:"taskStatus"`
-	TaskInit        string `json:"taskInit"`
 	WorkerAddr      string `json:"workerAddr"`
 }
@@ -429,7 +428,6 @@ func ListTask(ctx context.Context, taskName, serverAddr string) ([]List, error)
 			DatasourceNameS: t.DatasourceNameS,
 			DatasourceNameT: t.DatasourceNameT,
 			TaskStatus:      t.TaskStatus,
-			TaskInit:        t.TaskInit,
 			WorkerAddr:      t.WorkerAddr,
 		})
 	}
@@ -452,7 +450,6 @@ func ListTask(ctx context.Context, taskName, serverAddr string) ([]List, error)
 		DatasourceNameS: t.DatasourceNameS,
 		DatasourceNameT: t.DatasourceNameT,
 		TaskStatus:      t.TaskStatus,
-		TaskInit:        t.TaskInit,
 		WorkerAddr:      t.WorkerAddr,
 	})
 	return lists, nil
diff --git a/test/c.go b/test/c.go
new file mode 100644
index 0000000..0a9a92c
--- /dev/null
+++ b/test/c.go
@@ -0,0 +1,125 @@
+/*
+Copyright © 2020 Marvin
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package main
+
+import (
+	"context"
+	"fmt"
+	"golang.org/x/sync/errgroup"
+	"time"
+)
+
+// Task represents a unit of work.
+type Task struct {
+	ID int
+}
+
+// Run executes the task and prints its ID.
+func (t *Task) Run() {
+	fmt.Printf("consume: %v\n", t.ID)
+}
+
+// TaskNum defines the number of tasks to produce.
+const TaskNum int = 300000
+
+// taskCh is the buffered channel for tasks.
+var taskCh = make(chan Task, 10)
+
+// producer generates tasks and sends them to the task channel.
+func producer(ctx context.Context, prodConc int, wo chan<- Task) error {
+	// Create an errgroup with a concurrency limit.
+	g, _ := errgroup.WithContext(ctx)
+	g.SetLimit(prodConc)
+	// Start the producers.
+	for i := 0; i < TaskNum; i++ {
+		idx := i
+		g.Go(func() error {
+			select {
+			case <-ctx.Done():
+				fmt.Println("Producer exiting due to context cancellation.")
+				return ctx.Err()
+			default:
+				ts := Task{ID: idx}
+				wo <- ts
+				return nil
+			}
+		})
+	}
+	if err := g.Wait(); err != nil {
+		return err
+	}
+	return nil
+}
+
+// consumer receives tasks from the task channel and processes them.
+func consumer(ctx context.Context, consConc int, ro <-chan Task) error {
+	// Create an errgroup with a concurrency limit for the consumers.
+	cg, _ := errgroup.WithContext(ctx)
+	cg.SetLimit(consConc)
+
+	// Start the consumers.
+	for r := range ro {
+		c := r
+		cg.Go(func() error {
+			select {
+			case <-ctx.Done():
+				fmt.Println("Consumer exiting due to context cancellation.")
+				return ctx.Err()
+			default:
+				if c.ID != 0 {
+					c.Run()
+				}
+				return nil
+			}
+		})
+	}
+	return cg.Wait()
+}
+
+func main() {
+	// Create a context with a cancel function.
+	ctx, cancel := context.WithCancel(context.Background())
+
+	go func() {
+		time.Sleep(100 * time.Millisecond)
+		// Cancel the context after some time to simulate shutdown.
+		cancel()
+	}()
+	// Run the producer and the consumer under a shared errgroup.
+	g, gCtx := errgroup.WithContext(ctx)
+	g.Go(func() error {
+		defer close(taskCh)
+		err := producer(gCtx, 3, taskCh)
+		if err != nil {
+			return err
+		}
+		return nil
+	})
+
+	g.Go(func() error {
+		err := consumer(gCtx, 3, taskCh)
+		if err != nil {
+			return err
+		}
+		return nil
+	})
+
+	if err := g.Wait(); err != nil {
+		panic(err)
+	}
+
+	fmt.Println("Main function exited.")
+}
diff --git a/utils/constant/migrate.go b/utils/constant/migrate.go
index a11d9ec..b60ff89 100644
--- a/utils/constant/migrate.go
+++ b/utils/constant/migrate.go
@@ -508,6 +508,7 @@ const (
 	ParamNameSqlMigrateCallTimeout          = "callTimeout"
 	ParamNameSqlMigrateEnableConsistentRead = "enableConsistentRead"
 	ParamNameSqlMigrateEnableSafeMode       = "enableSafeMode"
+	ParamNameSqlMigrateEnableCheckpoint     = "enableCheckpoint"
 
 // ParamValueSqlMigrateCaseFieldRuleOrigin case-field-name params value
 // - 0 represent keeping origin
diff --git a/utils/constant/task.go b/utils/constant/task.go
index 5aba6d8..6ffee5f 100644
--- a/utils/constant/task.go
+++ b/utils/constant/task.go
@@ -68,8 +68,14 @@ const (
 )
 
 const (
-	TaskInitStatusFinished    = "Y"
-	TaskInitStatusNotFinished = "N"
+	TaskInitStatusFinished       = "Y"
+	TaskInitStatusNotFinished    = "N"
+	TaskMigrateStatusFinished    = "Y"
+	TaskMigrateStatusNotFinished = "N"
+	TaskScanStatusFinished       = "Y"
+	TaskScanStatusNotFinished    = "N"
+	TaskCompareStatusFinished    = "Y"
+	TaskCompareStatusNotFinished = "N"
 )
 
 // ServiceDatabaseSqlQueryCallTimeout represent package service database request sql query timeout, uint: seconds
diff --git a/utils/stringutil/interger.go b/utils/stringutil/interger.go
new file mode 100644
index 0000000..ec16bda
--- /dev/null
+++ b/utils/stringutil/interger.go
@@ -0,0 +1,26 @@
+/*
+Copyright © 2020 Marvin
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package stringutil
+
+import "math"
+
+// LogDigitWidth calculates the number of decimal digits of a given non-negative integer
+func LogDigitWidth(number int) int {
+	if number == 0 {
+		return 1
+	}
+	return int(math.Log10(float64(number))) + 1
+}
diff --git a/utils/structure/bucket.go b/utils/structure/bucket.go
index fef349a..460d013 100644
--- a/utils/structure/bucket.go
+++ b/utils/structure/bucket.go
@@ -17,14 +17,16 @@ package structure
 
 import (
 	"fmt"
+	"github.com/wentaojin/dbms/logger"
 	"github.com/wentaojin/dbms/utils/constant"
 	"github.com/wentaojin/dbms/utils/stringutil"
+	"go.uber.org/zap"
 	"sort"
 	"strings"
 )
 
-// HighestBucket store the highest selectivity constraint or index bucket
-type HighestBucket struct {
+// Selectivity stores the highest-selectivity constraint or index bucket
+type Selectivity struct {
 	IndexName      string
 	IndexColumn    []string
 	ColumnDatatype []string
@@ -33,7 +35,144 @@ type HighestBucket struct {
 	Buckets []Bucket
 }
 
-func (h *HighestBucket) String() string {
+func (h *Selectivity) TransSelectivity(dbTypeS, dbCharsetS string, caseFieldRuleS string, enableCollationSetting bool) error {
+	// column name charset transform
+	var newColumns []string
+	for _, col := range h.IndexColumn {
+		var columnName string
+		switch stringutil.StringUpper(dbTypeS) {
+		case constant.DatabaseTypeOracle:
+			convertUtf8Raws, err := stringutil.CharsetConvert([]byte(col), constant.MigrateOracleCharsetStringConvertMapping[stringutil.StringUpper(dbCharsetS)], constant.CharsetUTF8MB4)
+			if err != nil {
+				return fmt.Errorf("the database type [%s] highest bucket column charset convert failed: %v", dbTypeS, err)
+			}
+			columnName = stringutil.BytesToString(convertUtf8Raws)
+
+			if strings.EqualFold(caseFieldRuleS, constant.ParamValueDataCompareCaseFieldRuleLower) {
+				columnName = strings.ToLower(stringutil.BytesToString(convertUtf8Raws))
+			}
+			if strings.EqualFold(caseFieldRuleS, constant.ParamValueDataCompareCaseFieldRuleUpper) {
+				columnName = strings.ToUpper(stringutil.BytesToString(convertUtf8Raws))
+			}
+
+		case constant.DatabaseTypeMySQL, constant.DatabaseTypeTiDB:
+			convertUtf8Raws, err := stringutil.CharsetConvert([]byte(col), constant.MigrateMySQLCompatibleCharsetStringConvertMapping[stringutil.StringUpper(dbCharsetS)], constant.CharsetUTF8MB4)
+			if err != nil {
+				return fmt.Errorf("the database type [%s] highest bucket column charset convert failed: %v", dbTypeS, err)
+			}
+			columnName = stringutil.BytesToString(convertUtf8Raws)
+
+			if strings.EqualFold(caseFieldRuleS, constant.ParamValueDataCompareCaseFieldRuleLower) {
+				columnName = strings.ToLower(stringutil.BytesToString(convertUtf8Raws))
+			}
+			if strings.EqualFold(caseFieldRuleS, constant.ParamValueDataCompareCaseFieldRuleUpper) {
+				columnName = strings.ToUpper(stringutil.BytesToString(convertUtf8Raws))
+			}
+		default:
+			return fmt.Errorf("the database type [%s] is not supported, please contact the author or reselect", dbTypeS)
+		}
+
+		newColumns = append(newColumns, columnName)
+	}
+
+	h.IndexColumn = newColumns
+
+	// range by index so the bound rewrites mutate the slice elements rather than loop copies
+	for i := range h.Buckets {
+		b := &h.Buckets[i]
+		switch stringutil.StringUpper(dbTypeS) {
+		case constant.DatabaseTypeOracle:
+			convertUtf8Raws, err := stringutil.CharsetConvert([]byte(b.LowerBound), constant.MigrateOracleCharsetStringConvertMapping[stringutil.StringUpper(dbCharsetS)], constant.CharsetUTF8MB4)
+			if err != nil {
+				return fmt.Errorf("the database type [%s] highest bucket charset convert failed: %v", dbTypeS, err)
+			}
+			b.LowerBound = stringutil.BytesToString(convertUtf8Raws)
+
+			convertUtf8Raws, err = stringutil.CharsetConvert([]byte(b.UpperBound), constant.MigrateOracleCharsetStringConvertMapping[stringutil.StringUpper(dbCharsetS)], constant.CharsetUTF8MB4)
+			if err != nil {
+				return fmt.Errorf("the database type [%s] highest bucket charset convert failed: %v", dbTypeS, err)
+			}
+			b.UpperBound = stringutil.BytesToString(convertUtf8Raws)
+		case constant.DatabaseTypeMySQL, constant.DatabaseTypeTiDB:
+			convertUtf8Raws, err := stringutil.CharsetConvert([]byte(b.LowerBound), constant.MigrateMySQLCompatibleCharsetStringConvertMapping[stringutil.StringUpper(dbCharsetS)], constant.CharsetUTF8MB4)
+			if err != nil {
+				return fmt.Errorf("the database type [%s] highest bucket charset convert failed: %v", dbTypeS, err)
+			}
+			b.LowerBound = stringutil.BytesToString(convertUtf8Raws)
+
+			convertUtf8Raws, err = stringutil.CharsetConvert([]byte(b.UpperBound), constant.MigrateMySQLCompatibleCharsetStringConvertMapping[stringutil.StringUpper(dbCharsetS)], constant.CharsetUTF8MB4)
+			if err != nil {
+				return fmt.Errorf("the database type [%s] highest bucket charset convert failed: %v", dbTypeS, err)
+			}
+			b.UpperBound = stringutil.BytesToString(convertUtf8Raws)
+		default:
+			return fmt.Errorf("the database type [%s] is not supported, please contact the author or reselect", dbTypeS)
+		}
+	}
+
+	// collation enable setting
+	for i := range h.ColumnCollation {
+		if !enableCollationSetting {
+			// ignore collation setting, fill ""
+			h.ColumnCollation[i] = constant.DataCompareDisabledCollationSettingFillEmptyString
+		}
+	}
+	return nil
+}
+
+func (h *Selectivity) TransSelectivityRule(taskFlow, dbTypeT, dbCharsetS string, columnDatatypeT []string, columnRouteRule map[string]string) (*Rule, error) {
+	var columnCollationDownStreams []string
+
+	switch stringutil.StringUpper(dbTypeT) {
+	case constant.DatabaseTypeOracle:
+		for _, c := range h.ColumnCollation {
+			if !strings.EqualFold(c, constant.DataCompareDisabledCollationSettingFillEmptyString) {
+				collationTStr := constant.MigrateTableStructureDatabaseCollationMap[taskFlow][stringutil.StringUpper(c)][constant.MigrateTableStructureDatabaseCharsetMap[taskFlow][dbCharsetS]]
+				collationTSli := stringutil.StringSplit(collationTStr, constant.StringSeparatorSlash)
+				// get the first collation
+				columnCollationDownStreams = append(columnCollationDownStreams, collationTSli[0])
+			} else {
+				columnCollationDownStreams = append(columnCollationDownStreams, c)
+			}
+		}
+	case constant.DatabaseTypeMySQL, constant.DatabaseTypeTiDB:
+		for _, c := range h.ColumnCollation {
+			if !strings.EqualFold(c, constant.DataCompareDisabledCollationSettingFillEmptyString) {
+				collationTStr := constant.MigrateTableStructureDatabaseCollationMap[taskFlow][stringutil.StringUpper(c)][constant.MigrateTableStructureDatabaseCharsetMap[taskFlow][dbCharsetS]]
+				collationTSli := stringutil.StringSplit(collationTStr, constant.StringSeparatorSlash)
+				// get the first collation
+				columnCollationDownStreams = append(columnCollationDownStreams, collationTSli[0])
+			} else {
+				columnCollationDownStreams = append(columnCollationDownStreams, c)
+			}
+		}
+	default:
+		return nil, fmt.Errorf("unsupported downstream database type: %s", dbTypeT)
+	}
+
+	columnDatatypeM := make(map[string]string)
+	columnCollationM := make(map[string]string)
+	columnDatePrecisionM := make(map[string]string)
+
+	for i, c := range h.IndexColumn {
+		columnDatatypeM[c] = columnDatatypeT[i]
+		columnCollationM[c] = columnCollationDownStreams[i]
+		columnDatePrecisionM[c] = h.DatetimePrecision[i]
+	}
+
+	rule := &Rule{
+		IndexColumnRule:       columnRouteRule,
+		ColumnDatatypeRule:    columnDatatypeM,
+		ColumnCollationRule:   columnCollationM,
+		DatetimePrecisionRule: columnDatePrecisionM,
+	}
+	logger.Info("data compare task init table chunk",
+		zap.Any("upstream selectivity", h),
+		zap.Any("downstream selectivity rule", rule))
+	return rule, nil
+}
+
+func (h *Selectivity) String() string {
 	jsonStr, _ := stringutil.MarshalJSON(h)
 	return jsonStr
 }
@@ -143,13 +282,13 @@ func SortDistinctCountHistogram(histogramMap map[string]Histogram, consColumns m
 	return hists
 }
 
-func FindMatchDistinctCountBucket(sortHists SortHistograms, bucketMap map[string][]Bucket, consColumns map[string]string) (*HighestBucket, error) {
+func FindMatchDistinctCountBucket(sortHists SortHistograms, bucketMap map[string][]Bucket, consColumns map[string]string) (*Selectivity, error) {
 	var (
-		sortBuckets []*HighestBucket
+		sortBuckets []*Selectivity
 	)
 	for _, hist := range sortHists {
 		if val, ok := bucketMap[hist.Key]; ok {
-			sortBuckets = append(sortBuckets, &HighestBucket{
+			sortBuckets = append(sortBuckets, &Selectivity{
 				IndexName:         hist.Key,
 				IndexColumn:       stringutil.StringSplit(consColumns[hist.Key], constant.StringSeparatorComplexSymbol),
 				ColumnDatatype:    nil,
@@ -164,5 +303,5 @@ func FindMatchDistinctCountBucket(sortHists SortHistograms, bucketMap map[string
 	if len(sortBuckets) >= 1 {
 		return sortBuckets[0], nil
 	}
-	return &HighestBucket{}, fmt.Errorf("the database table index name is empty, please contact author or analyze table statistics and historgam rerunning")
+	return &Selectivity{}, fmt.Errorf("the database table index name is empty, please contact the author, or analyze the table statistics and histograms and rerun")
 }
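
Note on the TransSelectivity bucket loop above: in Go, `for _, b := range h.Buckets` iterates over value copies, so assignments to b.LowerBound and b.UpperBound would be silently dropped; the loop is therefore written to index into the slice and take a pointer to each element. A minimal standalone sketch of the difference, using a simplified Bucket type rather than the real structure package:

package main

import "fmt"

// Bucket is a simplified stand-in for structure.Bucket.
type Bucket struct {
	LowerBound string
	UpperBound string
}

func main() {
	buckets := []Bucket{{LowerBound: "a"}, {LowerBound: "b"}}

	// Ranging by value: b is a copy, so the assignment never reaches the slice.
	for _, b := range buckets {
		b.LowerBound = "converted-" + b.LowerBound
	}
	fmt.Println(buckets[0].LowerBound) // still "a"

	// Ranging by index: the pointer aliases the slice element, so the write sticks.
	for i := range buckets {
		b := &buckets[i]
		b.LowerBound = "converted-" + b.LowerBound
	}
	fmt.Println(buckets[0].LowerBound) // "converted-a"
}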
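
And a quick illustration of the new stringutil.LogDigitWidth helper: given a non-negative integer, it returns its decimal digit count, which is handy for zero-padding identifiers to a fixed width. The sketch below is hedged: the chunk-naming use is an assumed application, not something this patch itself does, and logDigitWidth is a local copy of the helper so the snippet stays self-contained.

package main

import (
	"fmt"
	"math"
)

// logDigitWidth mirrors utils/stringutil.LogDigitWidth: digit count of a non-negative integer.
func logDigitWidth(number int) int {
	if number == 0 {
		return 1
	}
	return int(math.Log10(float64(number))) + 1
}

func main() {
	total := 300000
	width := logDigitWidth(total) // 6

	// Hypothetical use: fixed-width, zero-padded chunk identifiers.
	for _, id := range []int{0, 7, 42, total} {
		fmt.Printf("chunk-%0*d\n", width, id)
	}
}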