diff --git a/_utils/terror_gen/errors_release.txt b/_utils/terror_gen/errors_release.txt index 4462d72c7a..8167a0bb79 100644 --- a/_utils/terror_gen/errors_release.txt +++ b/_utils/terror_gen/errors_release.txt @@ -247,6 +247,7 @@ ErrLoadUnitNoTableFile,[code=34013:class=load-unit:scope=internal:level=high], " ErrLoadUnitDumpDirNotFound,[code=34014:class=load-unit:scope=internal:level=high], "Message: %s does not exist or it's not a dir" ErrLoadUnitDuplicateTableFile,[code=34015:class=load-unit:scope=internal:level=high], "Message: invalid table schema file, duplicated item - %s" ErrLoadUnitGenBAList,[code=34016:class=load-unit:scope=internal:level=high], "Message: generate block allow list, Workaround: Please check the `block-allow-list` config in task configuration file." +ErrLoadUnitNoTableFileForView,[code=34017:class=load-unit:scope=internal:level=high], "Message: invalid view sql file, cannot find table - %s" ErrSyncerUnitPanic,[code=36001:class=sync-unit:scope=internal:level=high], "Message: panic error: %v" ErrSyncUnitInvalidTableName,[code=36002:class=sync-unit:scope=internal:level=high], "Message: extract table name for DML error: %s" ErrSyncUnitTableNameQuery,[code=36003:class=sync-unit:scope=internal:level=high], "Message: table name parse error: %s" diff --git a/dumpling/dumpling.go b/dumpling/dumpling.go index 3c70ee17f0..10eaee1bea 100644 --- a/dumpling/dumpling.go +++ b/dumpling/dumpling.go @@ -218,6 +218,7 @@ func (m *Dumpling) constructArgs() (*export.Config, error) { dumpConfig.TableFilter = tableFilter dumpConfig.CompleteInsert = true // always keep column name in `INSERT INTO` statements. dumpConfig.Logger = m.logger.Logger + dumpConfig.NoViews = false if cfg.Threads > 0 { dumpConfig.Threads = cfg.Threads diff --git a/errors.toml b/errors.toml index c320c84869..904f39ab78 100644 --- a/errors.toml +++ b/errors.toml @@ -1492,6 +1492,12 @@ description = "" workaround = "Please check the `block-allow-list` config in task configuration file." 
tags = ["internal", "high"] +[error.DM-load-unit-34017] +message = "invalid view sql file, cannot find table - %s" +description = "" +workaround = "" +tags = ["internal", "high"] + [error.DM-sync-unit-36001] message = "panic error: %v" description = "" diff --git a/go.mod b/go.mod index 8bae49bb7b..e983811e7e 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d github.com/pingcap/br v5.0.0-rc.0.20201223100334-c344d1edf20c+incompatible // indirect github.com/pingcap/check v0.0.0-20200212061837-5e12011dc712 - github.com/pingcap/dumpling v0.0.0-20201224084933-34903c7475cf + github.com/pingcap/dumpling v0.0.0-20201230072552-5f1acb878f3e github.com/pingcap/errors v0.11.5-0.20201126102027-b0a155152ca3 github.com/pingcap/failpoint v0.0.0-20200702092429-9f69995143ce github.com/pingcap/log v0.0.0-20201112100606-8f1e84a3abc8 diff --git a/go.sum b/go.sum index aa225ac733..fdecc42421 100644 --- a/go.sum +++ b/go.sum @@ -811,8 +811,8 @@ github.com/pingcap/check v0.0.0-20200212061837-5e12011dc712 h1:R8gStypOBmpnHEx1q github.com/pingcap/check v0.0.0-20200212061837-5e12011dc712/go.mod h1:PYMCGwN0JHjoqGr3HrZoD+b8Tgx8bKnArhSq8YVzUMc= github.com/pingcap/dm v1.1.0-alpha.0.20200521025928-83063141c5fd/go.mod h1:I5AAhwb0JPfLZINukML5VU9rB6mCcVA/Jq5OFFtMuEk= github.com/pingcap/dumpling v0.0.0-20200423082233-887d037b5b5c/go.mod h1:VJTcnA0MLL9tzDceTDoRh3k5UnOq9Hk6wh/ATo+B8I8= -github.com/pingcap/dumpling v0.0.0-20201224084933-34903c7475cf h1:D1ujZCR0h3BS4ppd+nR870k26NGDpVCYeKrNLOHPlNU= -github.com/pingcap/dumpling v0.0.0-20201224084933-34903c7475cf/go.mod h1:qHuvF07zoRcpovYVKqbGortzOyct/e9SdWa3wGop9sE= +github.com/pingcap/dumpling v0.0.0-20201230072552-5f1acb878f3e h1:2s0tKThi9KzkQhPt002wr4GxebjYveO376LD2G+ALtE= +github.com/pingcap/dumpling v0.0.0-20201230072552-5f1acb878f3e/go.mod h1:qHuvF07zoRcpovYVKqbGortzOyct/e9SdWa3wGop9sE= github.com/pingcap/errcode v0.0.0-20180921232412-a1a7271709d9/go.mod h1:4b2X8xSqxIroj/IZ9MX/VGZhAwc11wB9wRIzHvz6SeM= github.com/pingcap/errcode v0.3.0/go.mod h1:4b2X8xSqxIroj/IZ9MX/VGZhAwc11wB9wRIzHvz6SeM= github.com/pingcap/errors v0.11.0 h1:DCJQB8jrHbQ1VVlMFIrbj2ApScNNotVmkSNplu2yUt4= diff --git a/loader/loader.go b/loader/loader.go index 2ee7ed402b..4c1475ab9e 100644 --- a/loader/loader.go +++ b/loader/loader.go @@ -28,6 +28,9 @@ import ( "sync/atomic" "time" + "github.com/pingcap/parser" + tmysql "github.com/pingcap/parser/mysql" + "github.com/pingcap/dm/dm/config" "github.com/pingcap/dm/dm/pb" "github.com/pingcap/dm/dm/unit" @@ -36,6 +39,7 @@ import ( "github.com/pingcap/dm/pkg/dumpling" fr "github.com/pingcap/dm/pkg/func-rollback" "github.com/pingcap/dm/pkg/log" + parserpkg "github.com/pingcap/dm/pkg/parser" "github.com/pingcap/dm/pkg/terror" "github.com/pingcap/dm/pkg/utils" @@ -384,7 +388,9 @@ type Loader struct { // db -> tables // table -> data files - db2Tables map[string]Tables2DataFiles + db2Tables map[string]Tables2DataFiles + // db -> views + db2Views map[string]map[string]struct{} tableInfos map[string]*tableInfo // for every worker goroutine, not for every data file @@ -421,6 +427,7 @@ func NewLoader(cfg *config.SubTaskConfig) *Loader { loader := &Loader{ cfg: cfg, db2Tables: make(map[string]Tables2DataFiles), + db2Views: make(map[string]map[string]struct{}), tableInfos: make(map[string]*tableInfo), workerWg: new(sync.WaitGroup), logger: log.With(zap.String("task", cfg.Name), zap.String("unit", "load")), @@ -946,6 +953,44 @@ func (l *Loader) prepareTableFiles(files map[string]struct{}) error { return 
nil
}

+func (l *Loader) prepareViewFiles(files map[string]struct{}) error {
+	for file := range files {
+		if !strings.HasSuffix(file, "-schema-view.sql") {
+			continue
+		}
+
+		idx := strings.LastIndex(file, "-schema-view.sql")
+		name := file[:idx]
+		fields := strings.Split(name, ".")
+		if len(fields) != 2 {
+			l.logger.Warn("invalid view file", zap.String("file", file))
+			continue
+		}
+
+		db, table := fields[0], fields[1]
+		if l.skipSchemaAndTable(&filter.Table{Schema: db, Name: table}) {
+			l.logger.Warn("ignore view file", zap.String("view file", file))
+			continue
+		}
+		// dumpling also generates a table schema file for every view, so the corresponding table should already be recorded in db2Tables
+		tables := l.db2Tables[db]
+		if _, ok := tables[table]; !ok {
+			return terror.ErrLoadUnitNoTableFileForView.Generate(file)
+		}
+
+		views, ok := l.db2Views[db]
+		if !ok {
+			l.db2Views[db] = map[string]struct{}{}
+			views = l.db2Views[db]
+		}
+		views[table] = struct{}{}
+
+		l.totalFileCount.Add(1) // for view
+	}
+
+	return nil
+}
+
 func (l *Loader) prepareDataFiles(files map[string]struct{}) error {
 	var dataFilesNumber float64
 
@@ -1046,6 +1091,11 @@ func (l *Loader) prepare() error {
 		return err
 	}
 
+	// Sql file for create view
+	if err := l.prepareViewFiles(files); err != nil {
+		return err
+	}
+
 	// Sql file for restore data
 	return l.prepareDataFiles(files)
 }
@@ -1084,6 +1134,104 @@ func (l *Loader) restoreTable(ctx context.Context, conn *DBConn, sqlFile, schema
 	return nil
 }
 
+// restoreView drops the dummy table and creates the view
+func (l *Loader) restoreView(ctx context.Context, conn *DBConn, sqlFile, schema, view string) error {
+	// dumpling generates a view file like the following:
+	// /*!40101 SET NAMES binary*/;
+	// DROP TABLE IF EXISTS `v2`;
+	// DROP VIEW IF EXISTS `v2`;
+	// SET @PREV_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT;
+	// SET @PREV_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS;
+	// SET @PREV_COLLATION_CONNECTION=@@COLLATION_CONNECTION;
+	// SET character_set_client = utf8;
+	// SET character_set_results = utf8;
+	// SET collation_connection = utf8_general_ci;
+	// CREATE ALGORITHM=UNDEFINED DEFINER="root"@"localhost" SQL SECURITY DEFINER VIEW "all_mode"."v2" AS select "all_mode"."t2"."id" AS "id" from "all_mode"."t2";
+	// SET character_set_client = @PREV_CHARACTER_SET_CLIENT;
+	// SET character_set_results = @PREV_CHARACTER_SET_RESULTS;
+	// SET collation_connection = @PREV_COLLATION_CONNECTION;
+	f, err := os.Open(sqlFile)
+	if err != nil {
+		return terror.ErrLoadUnitReadSchemaFile.Delegate(err)
+	}
+	defer f.Close()
+
+	tctx := tcontext.NewContext(ctx, l.logger)
+
+	var sqls []string
+	dstSchema, dstView := fetchMatchedLiteral(tctx, l.tableRouter, schema, view)
+	sqls = append(sqls, fmt.Sprintf("USE `%s`;", unescapePercent(dstSchema, l.logger)))
+	sqlMode, err := tmysql.GetSQLMode(l.cfg.SQLMode)
+	if err != nil {
+		// should not happen
+		return terror.ErrGetSQLModeFromStr.Generate(l.cfg.SQLMode)
+	}
+
+	data := make([]byte, 0, 1024*1024)
+	br := bufio.NewReader(f)
+	for {
+		line, err := br.ReadString('\n')
+		if err == io.EOF {
+			break
+		}
+
+		realLine := strings.TrimSpace(line[:len(line)-1])
+		if len(realLine) == 0 {
+			continue
+		}
+
+		data = append(data, []byte(realLine)...)
+		if data[len(data)-1] == ';' {
+			query := string(data)
+			data = data[0:0]
+			if strings.HasPrefix(query, "/*") && strings.HasSuffix(query, "*/;") {
+				continue
+			}
+
+			// handle route-rules below.
we could skip SET and only check DROP/CREATE + if strings.HasPrefix(query, "DROP") { + query = renameShardingTable(query, view, dstView, false) + } else if strings.HasPrefix(query, "CREATE") { + // create view statement could be complicated because it has a select + p := parser.New() + p.SetSQLMode(sqlMode) + stmt, err := p.ParseOneStmt(query, "", "") + if err != nil { + return terror.ErrLoadUnitParseStatement.Generate(query) + } + + tableNames, err := parserpkg.FetchDDLTableNames(schema, stmt) + if err != nil { + return terror.WithScope(err, terror.ScopeInternal) + } + targetTableNames := make([]*filter.Table, 0, len(tableNames)) + for i := range tableNames { + dstSchema, dstTable := fetchMatchedLiteral(tctx, l.tableRouter, tableNames[i].Schema, tableNames[i].Name) + tableName := &filter.Table{ + Schema: dstSchema, + Name: dstTable, + } + targetTableNames = append(targetTableNames, tableName) + } + query, err = parserpkg.RenameDDLTable(stmt, targetTableNames) + if err != nil { + return terror.WithScope(err, terror.ScopeInternal) + } + } + + l.logger.Debug("view create statement", zap.String("sql", query)) + + sqls = append(sqls, query) + } + } + err = conn.executeSQL(tctx, sqls) + if err != nil { + return terror.WithScope(err, terror.ScopeDownstream) + } + + return nil +} + // restoreStruture creates schema or table func (l *Loader) restoreStructure(ctx context.Context, conn *DBConn, sqlFile string, schema string, table string) error { f, err := os.Open(sqlFile) @@ -1194,18 +1342,9 @@ func (l *Loader) restoreData(ctx context.Context) error { } dispatchMap := make(map[string]*fileJob) - - // restore db in sort - dbs := make([]string, 0, len(l.db2Tables)) - for db := range l.db2Tables { - dbs = append(dbs, db) - } - tctx := tcontext.NewContext(ctx, l.logger) - for _, db := range dbs { - tables := l.db2Tables[db] - + for db, tables := range l.db2Tables { // create db dbFile := fmt.Sprintf("%s/%s-schema-create.sql", l.cfg.Dir, db) l.logger.Info("start to create schema", zap.String("schema file", dbFile)) @@ -1274,7 +1413,19 @@ func (l *Loader) restoreData(ctx context.Context) error { } } } - l.logger.Info("finish to create tables", zap.Duration("cost time", time.Since(begin))) + + for db, views := range l.db2Views { + for view := range views { + viewFile := fmt.Sprintf("%s/%s.%s-schema-view.sql", l.cfg.Dir, db, view) + l.logger.Info("start to create view", zap.String("view file", viewFile)) + err := l.restoreView(ctx, dbConn, viewFile, db, view) + if err != nil { + return err + } + l.logger.Info("finish to create view", zap.String("view file", viewFile)) + } + } + l.logger.Info("finish to create tables and views", zap.Duration("cost time", time.Since(begin))) // a simple and naive approach to dispatch files randomly based on the feature of golang map(range by random) for _, j := range dispatchMap { diff --git a/pkg/parser/common.go b/pkg/parser/common.go index 9128fa7d0c..da70441fd4 100644 --- a/pkg/parser/common.go +++ b/pkg/parser/common.go @@ -51,14 +51,27 @@ type tableNameExtractor struct { } func (tne *tableNameExtractor) Enter(in ast.Node) (ast.Node, bool) { - if t, ok := in.(*ast.TableName); ok { - tb := &filter.Table{Schema: t.Schema.L, Name: t.Name.L} + switch n := in.(type) { + case *ast.TableName: + tb := &filter.Table{Schema: n.Schema.L, Name: n.Name.L} if tb.Schema == "" { tb.Schema = tne.curDB } tne.names = append(tne.names, tb) return in, true + case *ast.ColumnName: + tb := &filter.Table{Schema: n.Schema.L, Name: n.Table.L} + // this column has specified a table, such as + // 
CREATE VIEW `v1` AS SELECT `t1`.`c1` AS `c1` FROM `t1` + if tb.Name != "" { + if tb.Schema == "" { + tb.Schema = tne.curDB + } + tne.names = append(tne.names, tb) + } + return in, true } + return in, false } @@ -107,16 +120,29 @@ func (v *tableRenameVisitor) Enter(in ast.Node) (ast.Node, bool) { if v.hasErr { return in, true } - if t, ok := in.(*ast.TableName); ok { + switch n := in.(type) { + case *ast.TableName: if v.i >= len(v.targetNames) { v.hasErr = true return in, true } - t.Schema = model.NewCIStr(v.targetNames[v.i].Schema) - t.Name = model.NewCIStr(v.targetNames[v.i].Name) + n.Schema = model.NewCIStr(v.targetNames[v.i].Schema) + n.Name = model.NewCIStr(v.targetNames[v.i].Name) v.i++ return in, true + case *ast.ColumnName: + if n.Table.L != "" { + if v.i >= len(v.targetNames) { + v.hasErr = true + return in, true + } + n.Schema = model.NewCIStr(v.targetNames[v.i].Schema) + n.Table = model.NewCIStr(v.targetNames[v.i].Name) + v.i++ + } + return in, true } + return in, false } @@ -167,6 +193,31 @@ func RenameDDLTable(stmt ast.StmtNode, targetTableNames []*filter.Table) (string return bf.String(), nil } +type dbNameAppender struct { + curDB model.CIStr +} + +func (v *dbNameAppender) Enter(in ast.Node) (ast.Node, bool) { + switch n := in.(type) { + case *ast.TableName: + if n.Schema.O == "" { + n.Schema = v.curDB + } + return in, true + case *ast.ColumnName: + if n.Table.O != "" && n.Schema.O == "" { + n.Schema = v.curDB + } + return in, true + } + + return in, false +} + +func (v *dbNameAppender) Leave(in ast.Node) (ast.Node, bool) { + return in, true +} + // SplitDDL splits multiple operations in one DDL statement into multiple DDL statements // returned DDL is formatted like StringSingleQuotes, KeyWordUppercase and NameBackQuotes // if fail to restore, it would not restore the value of `stmt` (it changes it's values if `stmt` is one of DropTableStmt, RenameTableStmt, AlterTableStmt) @@ -309,6 +360,9 @@ func SplitDDL(stmt ast.StmtNode, schema string) (sqls []string, err error) { v.Table = table return sqls, nil + case *ast.CreateViewStmt: + visitor := &dbNameAppender{curDB: schemaName} + v.Accept(visitor) default: return nil, terror.ErrUnknownTypeDDL.Generate(stmt) } diff --git a/pkg/parser/common_test.go b/pkg/parser/common_test.go index de85c2a753..958f584e8d 100644 --- a/pkg/parser/common_test.go +++ b/pkg/parser/common_test.go @@ -67,6 +67,9 @@ var sqls = []string{ "alter table `t1` partition by list (a) (partition x default)", "alter table `t1` partition by system_time (partition x history, partition y current)", "alter database `test` charset utf8mb4", + "create view `v1` as select * from `t1`", + "create view `v1` as select `t1`.`c1` AS `c1` from `t1`", + "drop view `v1`", "alter table `t1` add column (c1 int, c2 int)", } @@ -172,6 +175,9 @@ func (t *testParserSuite) TestResolveDDL(c *C) { {"ALTER TABLE `test`.`t1` PARTITION BY LIST (`a`) (PARTITION `x` DEFAULT)"}, {"ALTER TABLE `test`.`t1` PARTITION BY SYSTEM_TIME (PARTITION `x` HISTORY,PARTITION `y` CURRENT)"}, {"ALTER DATABASE `test` CHARACTER SET = utf8mb4"}, + {"CREATE ALGORITHM = UNDEFINED DEFINER = CURRENT_USER SQL SECURITY DEFINER VIEW `test`.`v1` AS SELECT * FROM `test`.`t1`"}, + {"CREATE ALGORITHM = UNDEFINED DEFINER = CURRENT_USER SQL SECURITY DEFINER VIEW `test`.`v1` AS SELECT `test`.`t1`.`c1` AS `c1` FROM `test`.`t1`"}, + {"DROP VIEW IF EXISTS `test`.`v1`"}, {"ALTER TABLE `test`.`t1` ADD COLUMN `c1` INT", "ALTER TABLE `test`.`t1` ADD COLUMN `c2` INT"}, } @@ -217,6 +223,9 @@ func (t *testParserSuite) 
TestResolveDDL(c *C) { {{genTableName("test", "t1")}}, {{genTableName("test", "t1")}}, {{genTableName("test", "")}}, + {{genTableName("test", "v1"), genTableName("test", "t1")}}, + {{genTableName("test", "v1"), genTableName("test", "t1"), genTableName("test", "t1")}}, + {{genTableName("test", "v1")}}, {{genTableName("test", "t1")}, {genTableName("test", "t1")}}, } @@ -262,6 +271,9 @@ func (t *testParserSuite) TestResolveDDL(c *C) { {{genTableName("xtest", "xt1")}}, {{genTableName("xtest", "xt1")}}, {{genTableName("xtest", "")}}, + {{genTableName("xtest", "v1"), genTableName("xtest", "t1")}}, + {{genTableName("xtest", "v1"), genTableName("xtest", "t1"), genTableName("xtest", "t1")}}, + {{genTableName("xtest", "v1")}}, {{genTableName("xtest", "t1")}, {genTableName("xtest", "t1")}}, } @@ -307,6 +319,9 @@ func (t *testParserSuite) TestResolveDDL(c *C) { {"ALTER TABLE `xtest`.`xt1` PARTITION BY LIST (`a`) (PARTITION `x` DEFAULT)"}, {"ALTER TABLE `xtest`.`xt1` PARTITION BY SYSTEM_TIME (PARTITION `x` HISTORY,PARTITION `y` CURRENT)"}, {"ALTER DATABASE `xtest` CHARACTER SET = utf8mb4"}, + {"CREATE ALGORITHM = UNDEFINED DEFINER = CURRENT_USER SQL SECURITY DEFINER VIEW `xtest`.`v1` AS SELECT * FROM `xtest`.`t1`"}, + {"CREATE ALGORITHM = UNDEFINED DEFINER = CURRENT_USER SQL SECURITY DEFINER VIEW `xtest`.`v1` AS SELECT `xtest`.`t1`.`c1` AS `c1` FROM `xtest`.`t1`"}, + {"DROP VIEW IF EXISTS `xtest`.`v1`"}, {"ALTER TABLE `xtest`.`t1` ADD COLUMN `c1` INT", "ALTER TABLE `xtest`.`t1` ADD COLUMN `c2` INT"}, } diff --git a/pkg/terror/error_list.go b/pkg/terror/error_list.go index 27a71f7753..ae0649f1a9 100644 --- a/pkg/terror/error_list.go +++ b/pkg/terror/error_list.go @@ -334,6 +334,7 @@ const ( codeLoadUnitDumpDirNotFound codeLoadUnitDuplicateTableFile codeLoadUnitGenBAList + codeLoadUnitNoTableFileForView ) // Sync unit error code @@ -914,6 +915,7 @@ var ( ErrLoadUnitDumpDirNotFound = New(codeLoadUnitDumpDirNotFound, ClassLoadUnit, ScopeInternal, LevelHigh, "%s does not exist or it's not a dir", "") ErrLoadUnitDuplicateTableFile = New(codeLoadUnitDuplicateTableFile, ClassLoadUnit, ScopeInternal, LevelHigh, "invalid table schema file, duplicated item - %s", "") ErrLoadUnitGenBAList = New(codeLoadUnitGenBAList, ClassLoadUnit, ScopeInternal, LevelHigh, "generate block allow list", "Please check the `block-allow-list` config in task configuration file.") + ErrLoadUnitNoTableFileForView = New(codeLoadUnitNoTableFileForView, ClassLoadUnit, ScopeInternal, LevelHigh, "invalid view sql file, cannot find table - %s", "") // Sync unit error ErrSyncerUnitPanic = New(codeSyncerUnitPanic, ClassSyncUnit, ScopeInternal, LevelHigh, "panic error: %v", "") diff --git a/pkg/utils/util.go b/pkg/utils/util.go index 57ec74944c..41fc96c947 100644 --- a/pkg/utils/util.go +++ b/pkg/utils/util.go @@ -63,8 +63,6 @@ var ( "^ALTER\\s+PROCEDURE", // view - "^CREATE\\s*(OR REPLACE)?\\s+(ALGORITHM\\s?=.+?)?(DEFINER\\s?=.+?)?\\s+(SQL SECURITY DEFINER)?VIEW", - "^DROP\\s+VIEW", "^ALTER\\s+(ALGORITHM\\s?=.+?)?(DEFINER\\s?=.+?)?(SQL SECURITY DEFINER)?VIEW", // function diff --git a/syncer/filter.go b/syncer/filter.go index 55d6683fdc..97c5bb3ac4 100644 --- a/syncer/filter.go +++ b/syncer/filter.go @@ -14,10 +14,13 @@ package syncer import ( + "fmt" + "github.com/pingcap/parser/ast" bf "github.com/pingcap/tidb-tools/pkg/binlog-filter" "github.com/pingcap/tidb-tools/pkg/filter" "github.com/siddontang/go-mysql/replication" + "go.uber.org/zap" "github.com/pingcap/dm/pkg/terror" "github.com/pingcap/dm/pkg/utils" @@ -37,6 +40,9 @@ func (s *Syncer) 
skipQuery(tables []*filter.Table, stmt ast.StmtNode, sql string
 	if len(tables) > 0 {
 		tbs := s.baList.ApplyOn(tables)
 		if len(tbs) != len(tables) {
+			s.tctx.L().Info("not all tables passed block-allow list",
+				zap.String("before", fmt.Sprintf("%v", tables)),
+				zap.String("after", fmt.Sprintf("%v", tbs)))
 			return true, nil
 		}
 	}
diff --git a/syncer/filter_test.go b/syncer/filter_test.go
index 5dce9f42cc..31e714aae2 100644
--- a/syncer/filter_test.go
+++ b/syncer/filter_test.go
@@ -63,10 +63,10 @@ BEGIN
 END`, true},
 
 	// view
-	{"CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`%` SQL SECURITY DEFINER VIEW `v` AS SELECT qty, price, qty*price AS value FROM t", true},
-	{"CREATE OR REPLACE ALGORITHM=UNDEFINED DEFINER=`root`@`%` SQL SECURITY DEFINER VIEW `v` AS SELECT qty, price, qty*price AS value FROM t", true},
+	{"CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`%` SQL SECURITY DEFINER VIEW `v` AS SELECT qty, price, qty*price AS value FROM t", false},
+	{"CREATE OR REPLACE ALGORITHM=UNDEFINED DEFINER=`root`@`%` SQL SECURITY DEFINER VIEW `v` AS SELECT qty, price, qty*price AS value FROM t", false},
 	{"ALTER ALGORITHM=UNDEFINED DEFINER=`root`@`%` SQL SECURITY DEFINER VIEW `v` AS SELECT qty, price, qty*price AS value FROM t", true},
-	{"DROP VIEW v", true},
+	{"DROP VIEW v", false},
 
 	{"CREATE TABLE `VIEW`(id int)", false},
 	{"ALTER TABLE `VIEW`(id int)", false},
diff --git a/syncer/syncer.go b/syncer/syncer.go
index 8a330fc3b7..8157ef7eb6 100644
--- a/syncer/syncer.go
+++ b/syncer/syncer.go
@@ -1798,7 +1798,7 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e
 
 	// pre-filter of sharding
 	if s.cfg.ShardMode == config.ShardPessimistic {
-		switch stmt.(type) {
+		switch n := stmt.(type) {
 		case *ast.DropDatabaseStmt:
 			err = s.dropSchemaInSharding(ec.tctx, tableNames[0][0].Schema)
 			if err != nil {
@@ -1806,6 +1806,10 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e
 			}
 			continue
 		case *ast.DropTableStmt:
+			if n.IsView {
+				// `break` to avoid the `continue` below, so this DROP VIEW SQL can still be added to `needHandleDDLs`
+				break
+			}
 			sourceID, _ := GenTableID(tableNames[0][0].Schema, tableNames[0][0].Name)
 			err = s.sgk.LeaveGroup(tableNames[1][0].Schema, tableNames[1][0].Name, []string{sourceID})
 			if err != nil {
@@ -1868,7 +1872,18 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e
 		return err
 	}
 
-	if s.cfg.ShardMode == "" {
+	// VIEW statements don't need sharding synchronization, so handle them as in no-shard mode
+	skipShardHandle := false
+	switch n := parseResult.stmt.(type) {
+	case *ast.CreateViewStmt:
+		skipShardHandle = true
+	case *ast.DropTableStmt:
+		if n.IsView {
+			skipShardHandle = true
+		}
+	}
+
+	if s.cfg.ShardMode == "" || skipShardHandle {
 		ec.tctx.L().Info("start to handle ddls in normal mode", zap.String("event", "query"), zap.Strings("ddls", needHandleDDLs), zap.ByteString("raw statement", ev.Query), log.WrapStringerField("location", ec.currentLocation))
 
 		// interrupted after flush old checkpoint and before track DDL.
@@ -1942,8 +1957,11 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e
 
 	source, _ = GenTableID(ddlInfo.tableNames[0][0].Schema, ddlInfo.tableNames[0][0].Name)
 
-	var annotate string
-	switch ddlInfo.stmt.(type) {
+	var (
+		annotate string
+		trySync  bool // the `switch` below may let some statements skip sharding sync
+	)
+	switch n := ddlInfo.stmt.(type) {
 	case *ast.CreateDatabaseStmt:
 		// for CREATE DATABASE, we do nothing.
when CREATE TABLE under this DATABASE, sharding groups will be added case *ast.CreateTableStmt: @@ -1953,7 +1971,18 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e return err } annotate = "add table to shard group" + case *ast.CreateViewStmt: + // for CREATE VIEW, we directly execute it in downstream to avoid sharding sync cost. + case *ast.DropTableStmt: + // for DROP VIEW, we directly execute it in downstream to avoid sharding sync cost. + if !n.IsView { + trySync = true + } default: + trySync = true + } + + if trySync { needShardingHandle, group, synced, active, remain, err = s.sgk.TrySync(ddlInfo.tableNames[1][0].Schema, ddlInfo.tableNames[1][0].Name, source, *startLocation, *ec.currentLocation, needHandleDDLs) if err != nil { return err diff --git a/syncer/syncer_test.go b/syncer/syncer_test.go index 017e96a663..3fb4de634a 100644 --- a/syncer/syncer_test.go +++ b/syncer/syncer_test.go @@ -1567,6 +1567,15 @@ func (s *testSyncerSuite) TestTrackDDL(c *C) { sqlmock.NewRows([]string{"Table", "Create Table"}). AddRow(testTbl, " CREATE TABLE `"+testTbl+"` (\n `c` int(11) DEFAULT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin")) }}, + // test VIEW + {"CREATE VIEW tmp AS SELECT * FROM " + testTbl, func() { + mock.ExpectQuery("SHOW VARIABLES LIKE 'sql_mode'").WillReturnRows( + sqlmock.NewRows([]string{"Variable_name", "Value"}).AddRow("sql_mode", "")) + mock.ExpectQuery("SHOW CREATE TABLE.*").WillReturnRows( + sqlmock.NewRows([]string{"Table", "Create Table"}). + AddRow(testTbl, " CREATE TABLE `"+testTbl+"` (\n `c` int(11) DEFAULT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin")) + }}, + {"DROP VIEW IF EXISTS tmp", func() {}}, } p := parser.New() diff --git a/tests/_utils/test_prepare b/tests/_utils/test_prepare index 579a04d9ec..01098679ea 100644 --- a/tests/_utils/test_prepare +++ b/tests/_utils/test_prepare @@ -8,6 +8,7 @@ function cleanup_data() { run_sql "drop database if exists \`${target_db}\`" $TIDB_PORT $TIDB_PASSWORD done run_sql "drop database if exists dm_meta" $TIDB_PORT $TIDB_PASSWORD + run_sql "drop database if exists sync_diff_inspector" $TIDB_PORT $TIDB_PASSWORD } function cleanup_process() { diff --git a/tests/all_mode/data/db1.increment.sql b/tests/all_mode/data/db1.increment.sql index a172eb5aa9..bc2d51592c 100644 --- a/tests/all_mode/data/db1.increment.sql +++ b/tests/all_mode/data/db1.increment.sql @@ -1,4 +1,5 @@ use all_mode; +create view v1 as select id from t1; insert into t1 (id, name) values (3, 'Eddard Stark'); update t1 set name = 'Arya Stark' where id = 1; update t1 set name = 'Catelyn Stark' where name = 'catelyn'; diff --git a/tests/all_mode/data/db2.prepare.sql b/tests/all_mode/data/db2.prepare.sql index c7688c5a7c..202c588548 100644 --- a/tests/all_mode/data/db2.prepare.sql +++ b/tests/all_mode/data/db2.prepare.sql @@ -3,6 +3,7 @@ create database `all_mode`; use `all_mode`; create table t2 (id int auto_increment, name varchar(20), primary key (`id`)); insert into t2 (name) values ('Arya'), ('Bran'), ('Sansa'); +create view v2 as select id from t2; -- test block-allow-list drop database if exists `ignore_db`; diff --git a/tests/all_mode/run.sh b/tests/all_mode/run.sh index 9b1fab4086..87916befa4 100755 --- a/tests/all_mode/run.sh +++ b/tests/all_mode/run.sh @@ -141,6 +141,8 @@ function run() { # use sync_diff_inspector to check full dump loader check_sync_diff $WORK_DIR $cur/conf/diff_config.toml + run_sql_tidb "show create view all_mode.v2" + check_contains "View: v2" # check default 
session config check_log_contain_with_retry '\\"tidb_txn_mode\\":\\"optimistic\\"' $WORK_DIR/worker1/log/dm-worker.log @@ -197,10 +199,18 @@ function run() { # check_metric $WORKER2_PORT 'dm_syncer_replication_lag{task="test"}' 3 0 2 # test block-allow-list by the way - run_sql "show databases;" $TIDB_PORT $TIDB_PASSWORD + run_sql_tidb "show databases;" check_not_contains "ignore_db" check_contains "all_mode" + run_sql "select count(*) from all_mode.v1;" $TIDB_PORT $TIDB_PASSWORD + check_contains "count(*): 11" + + run_sql_source1 "drop view all_mode.v1;" + sleep 1 + # doesn't exist + run_sql_tidb "select count(*) from all_mode.v1;" $TIDB_PORT $TIDB_PASSWORD && exit 1 || echo "view dropped" + echo "check dump files have been cleaned" ls $WORK_DIR/worker1/dumped_data.$ILLEGAL_CHAR_NAME && exit 1 || echo "worker1 auto removed dump files" ls $WORK_DIR/worker2/dumped_data.$ILLEGAL_CHAR_NAME && exit 1 || echo "worker2 auto removed dump files" diff --git a/tests/full_mode/run.sh b/tests/full_mode/run.sh index 1f5bcd12bc..36f7223c3a 100755 --- a/tests/full_mode/run.sh +++ b/tests/full_mode/run.sh @@ -64,7 +64,7 @@ function escape_schema() { cp $cur/data/db2.prepare.sql $WORK_DIR/db2.prepare.sql cp $cur/conf/dm-task.yaml $WORK_DIR/dm-task.yaml cp $cur/conf/diff_config.toml $WORK_DIR/diff_config.toml - sed -i "s/full_mode/full\/mode/g" $WORK_DIR/db1.prepare.sql $WORK_DIR/db2.prepare.sql $WORK_DIR/dm-task.yaml $WORK_DIR/diff_config.toml + sed -i "s/full_mode/full\/mo-de/g" $WORK_DIR/db1.prepare.sql $WORK_DIR/db2.prepare.sql $WORK_DIR/dm-task.yaml $WORK_DIR/diff_config.toml run_sql_file $WORK_DIR/db1.prepare.sql $MYSQL_HOST1 $MYSQL_PORT1 $MYSQL_PASSWORD1 check_contains 'Query OK, 2 rows affected' @@ -72,9 +72,9 @@ function escape_schema() { check_contains 'Query OK, 3 rows affected' # test load data with `/` in the table name - run_sql_source1 "create table \`full/mode\`.\`tb\/1\` (id int, name varchar(10), primary key(\`id\`));" - run_sql_source1 "insert into \`full/mode\`.\`tb\/1\` values(1,'haha');" - run_sql_source1 "insert into \`full/mode\`.\`tb\/1\` values(2,'hihi');" + run_sql_source1 "create table \`full/mo-de\`.\`tb\/1\` (id int, name varchar(10), primary key(\`id\`));" + run_sql_source1 "insert into \`full/mo-de\`.\`tb\/1\` values(1,'haha');" + run_sql_source1 "insert into \`full/mo-de\`.\`tb\/1\` values(2,'hihi');" run_sql_file $cur/data/db1.prepare.user.sql $MYSQL_HOST1 $MYSQL_PORT1 $MYSQL_PASSWORD1 check_count 'Query OK, 0 rows affected' 7 @@ -103,7 +103,7 @@ function escape_schema() { ls $WORK_DIR/worker1/dumped_data.test && exit 1 || echo "worker1 auto removed dump files" ls $WORK_DIR/worker2/dumped_data.test && exit 1 || echo "worker2 auto removed dump files" - cleanup_data full/mode + cleanup_data full/mo-de cleanup_process $* } diff --git a/tests/shardddl3/conf/double-source-optimistic-view.yaml b/tests/shardddl3/conf/double-source-optimistic-view.yaml new file mode 100644 index 0000000000..2b7ef1aa83 --- /dev/null +++ b/tests/shardddl3/conf/double-source-optimistic-view.yaml @@ -0,0 +1,63 @@ +--- +name: test +task-mode: all +is-sharding: true +shard-mode: "optimistic" +meta-schema: "dm_meta" +timezone: "Asia/Shanghai" + +target-database: + host: "127.0.0.1" + port: 4000 + user: "test" + password: "/Q7B9DizNLLTTfiZHv9WoEAKamfpIUs=" + +mysql-instances: + - source-id: "mysql-replica-01" + block-allow-list: "instance" + route-rules: ["sharding-table-rules","sharding-schema-rules","sharding-view-rules"] + mydumper-config-name: "global" + loader-config-name: "global" + 
syncer-config-name: "global" + - source-id: "mysql-replica-02" + block-allow-list: "instance" + route-rules: ["sharding-table-rules","sharding-schema-rules","sharding-view-rules"] + mydumper-config-name: "global" + loader-config-name: "global" + syncer-config-name: "global" + +block-allow-list: + instance: + do-dbs: ["shardddl1","shardddl2"] + +routes: + sharding-table-rules: + schema-pattern: "shardddl*" + target-schema: "shardddl" + table-pattern: "tb*" + target-table: "tb" + sharding-view-rules: + schema-pattern: "shardddl*" + target-schema: "shardddl" + table-pattern: "v*" + target-table: "v" + sharding-schema-rules: + schema-pattern: "shardddl*" + target-schema: "shardddl" + +mydumpers: + global: + threads: 4 + chunk-filesize: 64 + skip-tz-utc: true + extra-args: "" + +loaders: + global: + pool-size: 16 + dir: "./dumped_data" + +syncers: + global: + worker-count: 16 + batch: 100 diff --git a/tests/shardddl3/conf/double-source-pessimistic-view.yaml b/tests/shardddl3/conf/double-source-pessimistic-view.yaml new file mode 100644 index 0000000000..c5e9b217c3 --- /dev/null +++ b/tests/shardddl3/conf/double-source-pessimistic-view.yaml @@ -0,0 +1,63 @@ +--- +name: test +task-mode: all +is-sharding: true +shard-mode: "pessimistic" +meta-schema: "dm_meta" +timezone: "Asia/Shanghai" + +target-database: + host: "127.0.0.1" + port: 4000 + user: "test" + password: "/Q7B9DizNLLTTfiZHv9WoEAKamfpIUs=" + +mysql-instances: + - source-id: "mysql-replica-01" + block-allow-list: "instance" + route-rules: ["sharding-table-rules","sharding-schema-rules","sharding-view-rules"] + mydumper-config-name: "global" + loader-config-name: "global" + syncer-config-name: "global" + - source-id: "mysql-replica-02" + block-allow-list: "instance" + route-rules: ["sharding-table-rules","sharding-schema-rules","sharding-view-rules"] + mydumper-config-name: "global" + loader-config-name: "global" + syncer-config-name: "global" + +block-allow-list: + instance: + do-dbs: ["shardddl1","shardddl2"] + +routes: + sharding-table-rules: + schema-pattern: "shardddl*" + target-schema: "shardddl" + table-pattern: "tb*" + target-table: "tb" + sharding-view-rules: + schema-pattern: "shardddl*" + target-schema: "shardddl" + table-pattern: "v*" + target-table: "v" + sharding-schema-rules: + schema-pattern: "shardddl*" + target-schema: "shardddl" + +mydumpers: + global: + threads: 4 + chunk-filesize: 64 + skip-tz-utc: true + extra-args: "" + +loaders: + global: + pool-size: 16 + dir: "./dumped_data" + +syncers: + global: + worker-count: 16 + batch: 100 diff --git a/tests/shardddl3/run.sh b/tests/shardddl3/run.sh index 1962f2f89b..28adfad76c 100644 --- a/tests/shardddl3/run.sh +++ b/tests/shardddl3/run.sh @@ -801,9 +801,135 @@ function DM_RestartMaster() { "clean_table" "optimistic" } +function DM_SyncView_CASE() { + # test sharding lock mixed VIEW + # 1/3 shard DDL + run_sql_source1 "alter table ${shardddl1}.${tb1} add column c int;" + + run_sql_source1 "create view ${shardddl1}.v1 as select * from ${shardddl1}.${tb1};" + sleep 1 + run_sql_tidb "show create view ${shardddl}.v" + check_contains "View: v" + if [[ "$1" = "pessimistic" ]]; then + run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \ + "query-status test" \ + 'ALTER TABLE `shardddl`.`tb` ADD COLUMN `c` INT' 1 + run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \ + "show-ddl-locks" \ + "\"result\": true" 1 \ + "no DDL lock exists" 1 + else + run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \ + "show-ddl-locks" \ + "\"ID\": \"test-\`shardddl\`.\`tb\`\"" 1 + 
+	fi
+
+	# 2/3 shard DDL
+	run_sql_source1 "alter table ${shardddl1}.${tb2} add column c int;"
+
+	run_sql_source1 "drop view ${shardddl1}.v1;"
+	if [[ "$1" = "pessimistic" ]]; then
+		run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
+			"query-status test" \
+			'ALTER TABLE `shardddl`.`tb` ADD COLUMN `c` INT' 1
+		run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
+			"show-ddl-locks" \
+			"\"result\": true" 1 \
+			'ALTER TABLE `shardddl`.`tb` ADD COLUMN `c` INT' 1
+		# the unresolved DDL lock blocks the following VIEW statements from syncing
+	else
+		run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
+			"show-ddl-locks" \
+			"\"ID\": \"test-\`shardddl\`.\`tb\`\"" 1
+		sleep 5
+		run_sql_tidb "show create view ${shardddl}.v" && exit 1 || true
+	fi
+
+	# 3/3 shard DDL
+	run_sql_source2 "alter table ${shardddl1}.${tb1} add column c int;"
+	if [[ "$1" = "pessimistic" ]]; then
+		sleep 1
+		run_sql_tidb "show create view ${shardddl}.v" && exit 1 || true
+	fi
+	run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \
+		"show-ddl-locks" \
+		"\"result\": true" 1 \
+		"no DDL lock exists" 1
+
+	# --------------------------------------------------
+	# syncing a view does not need shard DDL synchronization
+	run_sql_source2 "create view ${shardddl1}.v1 as select * from ${shardddl1}.${tb1};"
+	sleep 1
+	run_sql_tidb "show create view ${shardddl}.v"
+	check_contains "View: v"
+	run_dm_ctl $WORK_DIR "127.0.0.1:$MASTER_PORT" \
+		"show-ddl-locks" \
+		"\"result\": true" 1 \
+		"no DDL lock exists" 1
+
+	# test a view referencing a table in another database
+	run_sql_source1 "create view ${shardddl2}.v2 as select * from ${shardddl1}.${tb1};"
+	sleep 1
+	run_sql_tidb "show create view ${shardddl}.v"
+	check_contains "View: v"
+	run_dm_ctl $WORK_DIR "127.0.0.1:$MASTER_PORT" \
+		"show-ddl-locks" \
+		"\"result\": true" 1 \
+		"no DDL lock exists" 1
+	run_dm_ctl $WORK_DIR "127.0.0.1:$MASTER_PORT" \
+		"query-status test" \
+		"\"result\": true" 3
+
+	# test drop view
+	run_sql_source2 "drop view ${shardddl1}.v1;"
+	sleep 1
+	# the view has been dropped, so expect a "not exist" error
+	run_sql_tidb "show create view ${shardddl}.v" && exit 1 || true
+	run_dm_ctl $WORK_DIR "127.0.0.1:$MASTER_PORT" \
+		"show-ddl-locks" \
+		"\"result\": true" 1 \
+		"no DDL lock exists" 1
+	run_dm_ctl $WORK_DIR "127.0.0.1:$MASTER_PORT" \
+		"query-status test" \
+		"\"result\": true" 3
+
+	run_sql_source1 "drop view ${shardddl2}.v2;"
+	sleep 1
+	run_sql_tidb "show create view ${shardddl}.v" && exit 1 || true
+	run_dm_ctl $WORK_DIR "127.0.0.1:$MASTER_PORT" \
+		"show-ddl-locks" \
+		"\"result\": true" 1 \
+		"no DDL lock exists" 1
+	run_dm_ctl $WORK_DIR "127.0.0.1:$MASTER_PORT" \
+		"query-status test" \
+		"\"result\": true" 3
+
+	# test a view that is not part of the sharding group
+	run_sql_source1 "create view ${shardddl2}.notsyncview as select * from ${shardddl1}.${tb1};"
+	sleep 1
+	run_sql_tidb "show create view ${shardddl}.notsyncview"
+	check_contains "View: notsyncview"
+	run_dm_ctl $WORK_DIR "127.0.0.1:$MASTER_PORT" \
+		"query-status test" \
+		"\"result\": true" 3
+
+	run_sql_source1 "drop view ${shardddl2}.notsyncview;"
+	sleep 1
+	run_sql_tidb "show create view ${shardddl}.notsyncview" && exit 1 || true
+	run_dm_ctl $WORK_DIR "127.0.0.1:$MASTER_PORT" \
+		"query-status test" \
+		"\"result\": true" 3
+}
+
+function DM_SyncView() {
+	run_case SyncView "double-source-pessimistic-view" "init_table 111 112 211" "clean_table" "pessimistic"
+	run_case SyncView "double-source-optimistic-view" "init_table 111 112 211" "clean_table" "optimistic"
+}
+
 function run() {
 	init_cluster
 	init_database
+	start=71 end=103 except=(072 074 075 083 084 087 088 089 090
091 092 093) @@ -816,8 +942,8 @@ function run() { done DM_RemoveLock - DM_RestartMaster + DM_SyncView } cleanup_data $shardddl diff --git a/tests/sharding/conf/dm-task.yaml b/tests/sharding/conf/dm-task.yaml index 7e72a97cf3..2a3d81eb6d 100644 --- a/tests/sharding/conf/dm-task.yaml +++ b/tests/sharding/conf/dm-task.yaml @@ -15,7 +15,7 @@ target-database: mysql-instances: - source-id: "mysql-replica-01" block-allow-list: "instance" - route-rules: ["sharding-route-rules-table", "sharding-route-rules-schema"] + route-rules: ["sharding-route-rules-table", "sharding-route-rules-schema", "shardview-route-table", "shardview-route-view", "shardview-route-schema"] column-mapping-rules: ["instance-1"] mydumper-config-name: "global" loader-config-name: "global" @@ -31,10 +31,16 @@ mysql-instances: block-allow-list: instance: - do-dbs: ["~^sharding[\\d]+"] + do-dbs: ["~^sharding[\\d]+", "~^shardview[\\d]+"] do-tables: - db-name: "~^sharding[\\d]+" tbl-name: "~^t[\\d]+" + - db-name: "~^sharding[\\d]+" + tbl-name: "~^v[\\d]+" + - db-name: "~^shardview[\\d]+" + tbl-name: "~^t[\\d]+" + - db-name: "~^shardview[\\d]+" + tbl-name: "~^v[\\d]+" routes: sharding-route-rules-table: @@ -47,6 +53,22 @@ routes: schema-pattern: sharding* target-schema: db_target + shardview-route-table: + schema-pattern: shardview* + table-pattern: t* + target-schema: dbview_target + target-table: t_target + + shardview-route-view: + schema-pattern: shardview* + table-pattern: v* + target-schema: dbview_target + target-table: v_target + + shardview-route-schema: + schema-pattern: shardview* + target-schema: dbview_target + column-mappings: instance-1: schema-pattern: "sharding*" diff --git a/tests/sharding/data/db1.prepare.sql b/tests/sharding/data/db1.prepare.sql index 91f4203f0d..7f79888a4b 100644 --- a/tests/sharding/data/db1.prepare.sql +++ b/tests/sharding/data/db1.prepare.sql @@ -6,3 +6,14 @@ create table t1 (id bigint auto_increment, uid int, name varchar(80), info varch create table t2 (id bigint auto_increment, uid int, name varchar(80), info varchar(100), primary key (`id`), unique key(`uid`)) DEFAULT CHARSET=utf8mb4; insert into t1 (uid, name) values (10001, 'Gabriel García Márquez'), (10002, 'Cien años de soledad'); insert into t2 (uid, name) values (20001, 'José Arcadio Buendía'), (20002, 'Úrsula Iguarán'), (20003, 'José Arcadio'); +create view v1 as select id from t1; + +drop database if exists `shardview1`; +drop database if exists `shardview2`; +create database `shardview1`; +create database `shardview2`; +create table shardview1.t1 (id int primary key); +create table shardview2.t1 (id int primary key); +create view shardview1.v1 as select id from shardview2.t1; +create view shardview2.v1 as select id from shardview1.t1; +-- after sharding route merge, there should be a view reference merged table diff --git a/tests/sharding/run.sh b/tests/sharding/run.sh index f798115e08..62a3dc77a7 100755 --- a/tests/sharding/run.sh +++ b/tests/sharding/run.sh @@ -17,8 +17,8 @@ EOF } function run() { - run_sql "SET @@GLOBAL.SQL_MODE='NO_ZERO_IN_DATE,NO_ZERO_DATE'" $MYSQL_PORT1 $MYSQL_PASSWORD1 - run_sql "SET @@GLOBAL.SQL_MODE='ANSI_QUOTES'" $MYSQL_PORT2 $MYSQL_PASSWORD2 + run_sql_source1 "SET @@GLOBAL.SQL_MODE='NO_ZERO_IN_DATE,NO_ZERO_DATE,ANSI_QUOTES'" + run_sql_source2 "SET @@GLOBAL.SQL_MODE=''" run_sql_file $cur/data/db1.prepare.sql $MYSQL_HOST1 $MYSQL_PORT1 $MYSQL_PASSWORD1 check_contains 'Query OK, 2 rows affected' @@ -59,9 +59,14 @@ function run() { # TODO: check sharding partition id # use sync_diff_inspector to check full 
dump loader echo "check sync diff for full dump and load" - run_sql "SET @@GLOBAL.SQL_MODE=''" $MYSQL_PORT2 $MYSQL_PASSWORD2 + run_sql_source1 "SET @@GLOBAL.SQL_MODE='NO_ZERO_IN_DATE,NO_ZERO_DATE'" check_sync_diff $WORK_DIR $cur/conf/diff_config.toml + run_sql_tidb "show create view db_target.v1" + check_contains "View: v1" + run_sql_tidb "show create view dbview_target.v_target" + check_contains "View: v_target" + run_sql_file $cur/data/db1.increment.sql $MYSQL_HOST1 $MYSQL_PORT1 $MYSQL_PASSWORD1 run_sql_file $cur/data/db2.increment.sql $MYSQL_HOST2 $MYSQL_PORT2 $MYSQL_PASSWORD2