
Commit a9fdc88

VIEW doesn't need sync group

1 parent: 1fa1a7e

7 files changed: +234 -8

go.mod (+1 -1)

@@ -18,7 +18,7 @@ require (
 	github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d
 	github.com/pingcap/br v5.0.0-rc.0.20201223100334-c344d1edf20c+incompatible // indirect
 	github.com/pingcap/check v0.0.0-20200212061837-5e12011dc712
-	github.com/pingcap/dumpling v0.0.0-20201224084933-34903c7475cf
+	github.com/pingcap/dumpling v0.0.0-20201230072552-5f1acb878f3e
 	github.com/pingcap/errors v0.11.5-0.20201126102027-b0a155152ca3
 	github.com/pingcap/failpoint v0.0.0-20200702092429-9f69995143ce
 	github.com/pingcap/log v0.0.0-20201112100606-8f1e84a3abc8

go.sum (+2 -2)

@@ -811,8 +811,8 @@ github.com/pingcap/check v0.0.0-20200212061837-5e12011dc712 h1:R8gStypOBmpnHEx1q
 github.com/pingcap/check v0.0.0-20200212061837-5e12011dc712/go.mod h1:PYMCGwN0JHjoqGr3HrZoD+b8Tgx8bKnArhSq8YVzUMc=
 github.com/pingcap/dm v1.1.0-alpha.0.20200521025928-83063141c5fd/go.mod h1:I5AAhwb0JPfLZINukML5VU9rB6mCcVA/Jq5OFFtMuEk=
 github.com/pingcap/dumpling v0.0.0-20200423082233-887d037b5b5c/go.mod h1:VJTcnA0MLL9tzDceTDoRh3k5UnOq9Hk6wh/ATo+B8I8=
-github.com/pingcap/dumpling v0.0.0-20201224084933-34903c7475cf h1:D1ujZCR0h3BS4ppd+nR870k26NGDpVCYeKrNLOHPlNU=
-github.com/pingcap/dumpling v0.0.0-20201224084933-34903c7475cf/go.mod h1:qHuvF07zoRcpovYVKqbGortzOyct/e9SdWa3wGop9sE=
+github.com/pingcap/dumpling v0.0.0-20201230072552-5f1acb878f3e h1:2s0tKThi9KzkQhPt002wr4GxebjYveO376LD2G+ALtE=
+github.com/pingcap/dumpling v0.0.0-20201230072552-5f1acb878f3e/go.mod h1:qHuvF07zoRcpovYVKqbGortzOyct/e9SdWa3wGop9sE=
 github.com/pingcap/errcode v0.0.0-20180921232412-a1a7271709d9/go.mod h1:4b2X8xSqxIroj/IZ9MX/VGZhAwc11wB9wRIzHvz6SeM=
 github.com/pingcap/errcode v0.3.0/go.mod h1:4b2X8xSqxIroj/IZ9MX/VGZhAwc11wB9wRIzHvz6SeM=
 github.com/pingcap/errors v0.11.0 h1:DCJQB8jrHbQ1VVlMFIrbj2ApScNNotVmkSNplu2yUt4=

loader/loader.go (+1)

@@ -1188,6 +1188,7 @@ func (l *Loader) restoreView(ctx context.Context, conn *DBConn, sqlFile, schema,
 			continue
 		}

+		// handle route-rules below; we can skip SET statements and only need to check DROP/CREATE
 		if strings.HasPrefix(query, "DROP") {
 			query = renameShardingTable(query, view, dstView, false)
 		} else if strings.HasPrefix(query, "CREATE") {
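
For context, restoreView walks the statements of a dumped view file; only DROP/CREATE statements need the route-rule rename, while everything else (the SET statements dumpling emits) passes through untouched. A rough standalone sketch of that dispatch, where rename is a naive, hypothetical stand-in for the loader's renameShardingTable:

package main

import (
	"fmt"
	"strings"
)

// rename is a simplified stand-in for the loader's renameShardingTable,
// which rewrites the routed view name inside the statement (the real
// function also handles quoting and route patterns).
func rename(query, srcView, dstView string) string {
	return strings.ReplaceAll(query, srcView, dstView)
}

// rewriteViewStatement mirrors the dispatch above: DROP/CREATE statements
// are rewritten toward the routed target, SET statements pass through.
func rewriteViewStatement(query, srcView, dstView string) string {
	if strings.HasPrefix(query, "DROP") || strings.HasPrefix(query, "CREATE") {
		return rename(query, srcView, dstView)
	}
	return query
}

func main() {
	fmt.Println(rewriteViewStatement("DROP VIEW IF EXISTS `v1`", "`v1`", "`v`"))
	fmt.Println(rewriteViewStatement("SET character_set_client = utf8", "`v1`", "`v`"))
}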

syncer/syncer.go (+33 -4)

@@ -1798,14 +1798,18 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e

 		// pre-filter of sharding
 		if s.cfg.ShardMode == config.ShardPessimistic {
-			switch stmt.(type) {
+			switch n := stmt.(type) {
 			case *ast.DropDatabaseStmt:
 				err = s.dropSchemaInSharding(ec.tctx, tableNames[0][0].Schema)
 				if err != nil {
 					return err
 				}
 				continue
 			case *ast.DropTableStmt:
+				if n.IsView {
+					// `break` to avoid the `continue` below, so this DROP VIEW statement still gets added to `needHandleDDLs`
+					break
+				}
 				sourceID, _ := GenTableID(tableNames[0][0].Schema, tableNames[0][0].Name)
 				err = s.sgk.LeaveGroup(tableNames[1][0].Schema, tableNames[1][0].Name, []string{sourceID})
 				if err != nil {
@@ -1868,7 +1872,18 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e
 		return err
 	}

-	if s.cfg.ShardMode == "" {
+	// VIEW statements don't need sharding synchronization, so pretend we are in no-shard mode
+	skipShardHandle := false
+	switch n := parseResult.stmt.(type) {
+	case *ast.CreateViewStmt:
+		skipShardHandle = true
+	case *ast.DropTableStmt:
+		if n.IsView {
+			skipShardHandle = true
+		}
+	}
+
+	if s.cfg.ShardMode == "" || skipShardHandle {
 		ec.tctx.L().Info("start to handle ddls in normal mode", zap.String("event", "query"), zap.Strings("ddls", needHandleDDLs), zap.ByteString("raw statement", ev.Query), log.WrapStringerField("location", ec.currentLocation))

 		// interrupted after flush old checkpoint and before track DDL.
@@ -1942,8 +1957,11 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e

 	source, _ = GenTableID(ddlInfo.tableNames[0][0].Schema, ddlInfo.tableNames[0][0].Name)

-	var annotate string
-	switch ddlInfo.stmt.(type) {
+	var (
+		annotate string
+		trySync  bool // the `switch` below may let some statements skip the sharding sync
+	)
+	switch n := ddlInfo.stmt.(type) {
 	case *ast.CreateDatabaseStmt:
 		// for CREATE DATABASE, we do nothing. when CREATE TABLE under this DATABASE, sharding groups will be added
 	case *ast.CreateTableStmt:
@@ -1953,7 +1971,18 @@ func (s *Syncer) handleQueryEvent(ev *replication.QueryEvent, ec eventContext) e
 			return err
 		}
 		annotate = "add table to shard group"
+	case *ast.CreateViewStmt:
+		// for CREATE VIEW, execute it directly in the downstream to avoid the sharding-sync cost
+	case *ast.DropTableStmt:
+		// for DROP VIEW, execute it directly in the downstream to avoid the sharding-sync cost
+		if !n.IsView {
+			trySync = true
+		}
 	default:
+		trySync = true
+	}
+
+	if trySync {
 		needShardingHandle, group, synced, active, remain, err = s.sgk.TrySync(ddlInfo.tableNames[1][0].Schema, ddlInfo.tableNames[1][0].Name, source, *startLocation, *ec.currentLocation, needHandleDDLs)
 		if err != nil {
 			return err
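
The change leans on one parser detail: the pingcap parser has no dedicated DROP VIEW node, so a DROP VIEW statement comes back as an *ast.DropTableStmt with IsView set, which is why every switch above special-cases that type. A minimal standalone sketch of the same detection (it assumes only github.com/pingcap/parser, and is not DM's own code):

package main

import (
	"fmt"

	"github.com/pingcap/parser"
	"github.com/pingcap/parser/ast"
	// the parser needs a value-expression driver linked in
	_ "github.com/pingcap/parser/test_driver"
)

// isViewDDL reports whether stmt is a CREATE VIEW or DROP VIEW statement,
// i.e. a DDL the syncer can run directly without shard-DDL synchronization.
func isViewDDL(stmt ast.StmtNode) bool {
	switch n := stmt.(type) {
	case *ast.CreateViewStmt:
		return true
	case *ast.DropTableStmt:
		// DROP VIEW has no AST node of its own; it is a DropTableStmt with IsView set
		return n.IsView
	}
	return false
}

func main() {
	p := parser.New()
	for _, sql := range []string{
		"CREATE VIEW db1.v1 AS SELECT * FROM db1.tb1",
		"DROP VIEW db1.v1",
		"DROP TABLE db1.tb1",
	} {
		stmt, err := p.ParseOneStmt(sql, "", "")
		if err != nil {
			panic(err)
		}
		fmt.Printf("%-45s view DDL: %v\n", sql, isViewDDL(stmt))
	}
}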

tests/shardddl3/conf/double-source-optimistic-view.yaml (+63, new file; path inferred from the run_case calls in run.sh)

@@ -0,0 +1,63 @@
+---
+name: test
+task-mode: all
+is-sharding: true
+shard-mode: "optimistic"
+meta-schema: "dm_meta"
+timezone: "Asia/Shanghai"
+
+target-database:
+  host: "127.0.0.1"
+  port: 4000
+  user: "test"
+  password: "/Q7B9DizNLLTTfiZHv9WoEAKamfpIUs="
+
+mysql-instances:
+  - source-id: "mysql-replica-01"
+    block-allow-list: "instance"
+    route-rules: ["sharding-table-rules","sharding-schema-rules","sharding-view-rules"]
+    mydumper-config-name: "global"
+    loader-config-name: "global"
+    syncer-config-name: "global"
+  - source-id: "mysql-replica-02"
+    block-allow-list: "instance"
+    route-rules: ["sharding-table-rules","sharding-schema-rules","sharding-view-rules"]
+    mydumper-config-name: "global"
+    loader-config-name: "global"
+    syncer-config-name: "global"
+
+block-allow-list:
+  instance:
+    do-dbs: ["shardddl1","shardddl2"]
+
+routes:
+  sharding-table-rules:
+    schema-pattern: "shardddl*"
+    target-schema: "shardddl"
+    table-pattern: "tb*"
+    target-table: "tb"
+  sharding-view-rules:
+    schema-pattern: "shardddl*"
+    target-schema: "shardddl"
+    table-pattern: "v*"
+    target-table: "v"
+  sharding-schema-rules:
+    schema-pattern: "shardddl*"
+    target-schema: "shardddl"
+
+mydumpers:
+  global:
+    threads: 4
+    chunk-filesize: 64
+    skip-tz-utc: true
+    extra-args: ""
+
+loaders:
+  global:
+    pool-size: 16
+    dir: "./dumped_data"
+
+syncers:
+  global:
+    worker-count: 16
+    batch: 100

tests/shardddl3/conf/double-source-pessimistic-view.yaml (+63, new file; path inferred from the run_case calls in run.sh)

@@ -0,0 +1,63 @@
+---
+name: test
+task-mode: all
+is-sharding: true
+shard-mode: "pessimistic"
+meta-schema: "dm_meta"
+timezone: "Asia/Shanghai"
+
+target-database:
+  host: "127.0.0.1"
+  port: 4000
+  user: "test"
+  password: "/Q7B9DizNLLTTfiZHv9WoEAKamfpIUs="
+
+mysql-instances:
+  - source-id: "mysql-replica-01"
+    block-allow-list: "instance"
+    route-rules: ["sharding-table-rules","sharding-schema-rules","sharding-view-rules"]
+    mydumper-config-name: "global"
+    loader-config-name: "global"
+    syncer-config-name: "global"
+  - source-id: "mysql-replica-02"
+    block-allow-list: "instance"
+    route-rules: ["sharding-table-rules","sharding-schema-rules","sharding-view-rules"]
+    mydumper-config-name: "global"
+    loader-config-name: "global"
+    syncer-config-name: "global"
+
+block-allow-list:
+  instance:
+    do-dbs: ["shardddl1","shardddl2"]
+
+routes:
+  sharding-table-rules:
+    schema-pattern: "shardddl*"
+    target-schema: "shardddl"
+    table-pattern: "tb*"
+    target-table: "tb"
+  sharding-view-rules:
+    schema-pattern: "shardddl*"
+    target-schema: "shardddl"
+    table-pattern: "v*"
+    target-table: "v"
+  sharding-schema-rules:
+    schema-pattern: "shardddl*"
+    target-schema: "shardddl"
+
+mydumpers:
+  global:
+    threads: 4
+    chunk-filesize: 64
+    skip-tz-utc: true
+    extra-args: ""
+
+loaders:
+  global:
+    pool-size: 16
+    dir: "./dumped_data"
+
+syncers:
+  global:
+    worker-count: 16
+    batch: 100
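
The two new task configs differ only in shard-mode; the interesting part is sharding-view-rules, which routes every upstream shardddl*.v* view into the single downstream view shardddl.v, mirroring what sharding-table-rules does for tables. The matching is wildcard-style, roughly as in this sketch (an approximation using path.Match; DM's real table router in tidb-tools is more elaborate):

package main

import (
	"fmt"
	"path"
)

// route approximates the sharding-view-rules entry above: any table named
// v* in a schema named shardddl* maps to shardddl.v downstream.
func route(schema, table string) (string, string) {
	schemaOK, _ := path.Match("shardddl*", schema)
	tableOK, _ := path.Match("v*", table)
	if schemaOK && tableOK {
		return "shardddl", "v"
	}
	return schema, table
}

func main() {
	fmt.Println(route("shardddl1", "v1")) // shardddl v
	fmt.Println(route("shardddl2", "v2")) // shardddl v
	fmt.Println(route("other", "v1"))     // other v1 (no rule matches)
}

A view like shardddl2.notsyncview matches only the schema rule, so it keeps its own name downstream; that is exactly what the "not in any sync group" case in run.sh below exercises.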

tests/shardddl3/run.sh (+71 -1)

@@ -801,6 +801,76 @@ function DM_RestartMaster() {
 		"clean_table" "optimistic"
 }

+function DM_SyncView_CASE() {
+	# syncing a view doesn't need shard-DDL synchronization
+	run_sql_source2 "create view ${shardddl1}.v1 as select * from ${shardddl1}.${tb1};"
+	sleep 1
+	run_sql_tidb "show create view ${shardddl}.v"
+	check_contains "View: v"
+	run_dm_ctl $WORK_DIR "127.0.0.1:$MASTER_PORT" \
+		"show-ddl-locks" \
+		"\"result\": true" 1 \
+		"no DDL lock exists" 1
+
+	# test a view referencing a table in another database
+	run_sql_source1 "create view ${shardddl2}.v2 as select * from ${shardddl1}.${tb1};"
+	sleep 1
+	run_sql_tidb "show create view ${shardddl}.v"
+	check_contains "View: v"
+	run_dm_ctl $WORK_DIR "127.0.0.1:$MASTER_PORT" \
+		"show-ddl-locks" \
+		"\"result\": true" 1 \
+		"no DDL lock exists" 1
+	run_dm_ctl $WORK_DIR "127.0.0.1:$MASTER_PORT" \
+		"query-status test" \
+		"\"result\": true" 3
+
+	# test DROP VIEW
+	run_sql_source2 "drop view ${shardddl1}.v1;"
+	sleep 1
+	# the view was dropped, so SHOW CREATE VIEW should fail with a "not exist" error
+	run_sql_tidb "show create view ${shardddl}.v" && exit 1 || true
+	run_dm_ctl $WORK_DIR "127.0.0.1:$MASTER_PORT" \
+		"show-ddl-locks" \
+		"\"result\": true" 1 \
+		"no DDL lock exists" 1
+	run_dm_ctl $WORK_DIR "127.0.0.1:$MASTER_PORT" \
+		"query-status test" \
+		"\"result\": true" 3
+
+	run_sql_source1 "drop view ${shardddl2}.v2;"
+	sleep 1
+	run_sql_tidb "show create view ${shardddl}.v" && exit 1 || true
+	run_dm_ctl $WORK_DIR "127.0.0.1:$MASTER_PORT" \
+		"show-ddl-locks" \
+		"\"result\": true" 1 \
+		"no DDL lock exists" 1
+	run_dm_ctl $WORK_DIR "127.0.0.1:$MASTER_PORT" \
+		"query-status test" \
+		"\"result\": true" 3
+
+	# test a view that doesn't belong to any sync group
+	run_sql_source1 "create view ${shardddl2}.notsyncview as select * from ${shardddl1}.${tb1};"
+	sleep 1
+	run_sql_tidb "show create view ${shardddl}.notsyncview"
+	check_contains "View: notsyncview"
+	run_dm_ctl $WORK_DIR "127.0.0.1:$MASTER_PORT" \
+		"query-status test" \
+		"\"result\": true" 3
+
+	run_sql_source1 "drop view ${shardddl2}.notsyncview;"
+	sleep 1
+	run_sql_tidb "show create view ${shardddl}.notsyncview" && exit 1 || true
+	run_dm_ctl $WORK_DIR "127.0.0.1:$MASTER_PORT" \
+		"query-status test" \
+		"\"result\": true" 3
+}
+
+function DM_SyncView() {
+	run_case SyncView "double-source-pessimistic-view" "init_table 111 112 211" "clean_table" "pessimistic"
+	run_case SyncView "double-source-optimistic-view" "init_table 111 112 211" "clean_table" "optimistic"
+}
+
 function run() {
 	init_cluster
 	init_database
@@ -816,8 +886,8 @@ function run() {
 	done

 	DM_RemoveLock
-
 	DM_RestartMaster
+	DM_SyncView
 }

 cleanup_data $shardddl